Diffstat (limited to 'nfvbench')
-rw-r--r-- [-rwxr-xr-x]  nfvbench/cfg.default.yaml  713
-rw-r--r--  nfvbench/chain_clients.py  596
-rw-r--r--  nfvbench/chain_managers.py  253
-rw-r--r--  nfvbench/chain_router.py  190
-rw-r--r--  nfvbench/chain_runner.py  224
-rw-r--r--  nfvbench/chain_workers.py  47
-rw-r--r--  nfvbench/chaining.py  1559
-rw-r--r--  nfvbench/cleanup.py  270
-rw-r--r--  nfvbench/compute.py  382
-rw-r--r--  nfvbench/config.py  16
-rw-r--r--  nfvbench/config_plugin.py  42
-rw-r--r--  nfvbench/credentials.py  112
-rw-r--r--  nfvbench/factory.py  50
-rw-r--r--  nfvbench/fluentd.py  4
-rw-r--r--  nfvbench/network.py  91
-rw-r--r--  nfvbench/nfvbench.py  696
-rw-r--r--  nfvbench/nfvbenchd.py  128
-rw-r--r--  nfvbench/nfvbenchvm/nfvbenchvm.conf  5
-rw-r--r--  nfvbench/packet_analyzer.py  64
-rw-r--r--  nfvbench/packet_stats.py  341
-rw-r--r--  nfvbench/service_chain.py  148
-rw-r--r--  nfvbench/specs.py  20
-rw-r--r--  nfvbench/stats_collector.py  51
-rw-r--r--  nfvbench/stats_manager.py  101
-rw-r--r--  nfvbench/summarizer.py  359
-rw-r--r--  nfvbench/tor_client.py  52
-rwxr-xr-x  nfvbench/traffic_client.py  1260
-rw-r--r--  nfvbench/traffic_gen/dummy.py  97
-rw-r--r--  nfvbench/traffic_gen/traffic_base.py  148
-rw-r--r--  nfvbench/traffic_gen/traffic_utils.py  53
-rw-r--r--  nfvbench/traffic_gen/trex.py  482
-rw-r--r--  nfvbench/traffic_gen/trex_gen.py  1208
-rw-r--r--  nfvbench/traffic_server.py  161
-rw-r--r--  nfvbench/utils.py  219
34 files changed, 6834 insertions, 3308 deletions
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index e1c05c3..c76e738 100755..100644
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -18,18 +18,41 @@
# Fields that can be over-ridden at the command line are marked with the corresponding
# option, e.g. "--interval"
-# The OpenStack openrc file to use (must be a valid full pathname). If running
+
+# The OpenStack openrc file to use - must be a valid full pathname. If running
# in a container, this path must be valid in the container.
#
# The only case where this field can be empty is when measuring a system that does not run
# OpenStack or when OpenStack APIs are not accessible or use of the OpenStack APIs is not
# desirable. In that case the EXT service chain must be used.
+#
+# If the openrc user is not admin, some parameters are mandatory and must be filled with valid
+# values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+# WARNING: not used if clouds_detail is set
openrc_file:
+# The OpenStack clouds configuration from clouds.yaml file to use.
+# clouds.yaml file must be in one of the following paths:
+# - ~/.config/openstack
+# - /etc/openstack
+# Note: If running in a container, this path must be valid in the container.
+# The only case where this field can be empty is when measuring a system that does not run
+# OpenStack or when OpenStack APIs are not accessible or use of the OpenStack APIs is not
+# desirable. In that case the EXT service chain must be used.
+#
+# If the user is not admin, some parameters are mandatory and must be filled with valid
+# values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+# If a value is set, this parameter disables the use of the openrc file.
+clouds_detail:
+
# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
vm_forwarder: testpmd
-# By default (empty) NFVBench will try to locate a VM image file
+# By default (empty) NFVbench will try to locate a VM image file
# from the package root directory named "nfvbench-<version>.qcow2" and
# upload that file. The image name will be "nfvbench-<version>"
# This can be overridden by specifying here a pathname of a file
@@ -45,16 +68,16 @@ vm_image_file:
# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
flavor:
- # Number of vCPUs for the flavor
+ # Number of vCPUs for the flavor, must be at least 2!
vcpus: 2
# Memory for the flavor in MB
ram: 4096
# Size of local disk in GB
disk: 0
# metadata are supported and can be added if needed, optional
- # note that if your openstack does not have NUMA optimization
+ # note that if your OpenStack does not have NUMA optimization
# (cpu pinning and huge pages)
# you must comment out extra_specs completely otherwise
# loopback VM creation will fail
@@ -62,43 +85,38 @@ flavor:
"hw:cpu_policy": dedicated
"hw:mem_page_size": large
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the test VM image will get the property that enables
+# multiqueue (hw_vif_multiqueue_enabled='true').
+# The number of queues per interface will be set to the number of vCPUs configured for
+# the VM.
+# By default there is only 1 queue per interface.
+# The max allowed number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)]
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs used - up to a max of 8 queues.
+# Setting a lower value than the vCPU count should also work. For example, if using 4 vCPUs and
+# vif_multiqueue_size is set to 2, OpenStack will create 4 queues per interface but the
+# test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
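+# Example (illustrative, assumed values): a 4-vCPU flavor paired with one queue per vCPU
+# would be configured as:
+# flavor:
+#   vcpus: 4
+# vif_multiqueue_size: 4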
+
+# Increase number of buffers allocated for VPP VM forwarder. May be needed in scenarios with large
+# number of interfaces and worker threads, or a lot of physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
-# If the selected zone contains only 1 compute node and PVVP inter-node flow is selected,
-# application will use intra-node PVVP flow.
-# List of compute nodes can be specified, must be in given availability zone if not empty
-#availability_zone: 'nova'
+# availability_zone: 'nova'
+# If the openrc user is not admin, set a valid value
availability_zone:
+# To force placement on a given hypervisor, set the name here
+# (if multiple names are provided, the first will be used)
+# Leave empty to let OpenStack pick the hypervisor
compute_nodes:
-
-
-# Credentials for SSH connection to TOR switches.
-tor:
- # Leave type empty or switch list empty to skip TOR switches configuration.
- # Preferably use 'no_tor_access' to achieve the same behavior.
- # (skipping TOR config will require the user to pre-stitch the traffic generator interfaces
- # to the service chain under test, needed only if configured in access mode)
- type:
- # Switches are only needed if type is not empty.
- # You can configure 0, 1 or 2 switches
- # no switch: in this case NFVbench will not attempt to ssh to the switch
- # and stitching of traffic must be done externally
- # 1 switch: this assumes that both traffic generator interfaces are wired to the same switch
- # 2 switches: this is the recommended setting wuth redundant switches, in this case each
- # traffic generator interface must be wired to a different switch
- switches:
- - host:
- username:
- password:
- port:
-
-# Skip TOR switch configuration and retrieving of stats
-# Can be overriden by --no-tor-access
-no_tor_access: false
-
-# Skip vswitch configuration and retrieving of stats
-# Can be overriden by --no-vswitch-access
-no_vswitch_access: false
+# If the openrc user is not admin, set a valid value for the hypervisor hostname
+# Example of value: hypervisor_hostname: "server1"
+hypervisor_hostname:
# Type of service chain to run, possible options are PVP, PVVP and EXT
# PVP - port to VM to port
@@ -112,6 +130,9 @@ service_chain: 'PVP'
# Can be overriden by --service-chain-count
service_chain_count: 1
+# Specifies if all chains share the same right/left/middle networks
+service_chain_shared_net: false
+
# Total number of traffic flows for all chains and directions generated by the traffic generator.
# Minimum is '2 * service_chain_count', it is automatically adjusted if too small
# value was configured. Must be even.
@@ -119,17 +140,21 @@ service_chain_count: 1
# Can be overriden by --flow-count
flow_count: 10000
-# Used by PVVP chain to spawn VMs on different compute nodes
-# Can be overriden by --inter-node
-inter_node: false
-
# set to true if service chains should use SRIOV
# This requires SRIOV to be available on compute nodes
sriov: false
-# Skip interfaces config on EXT service chain
-# Can be overriden by --no-int-config
-no_int_config: false
+# Perform port to port loopback (direct or through a switch),
+# e.g. for unitary testing of the switch or of the bench itself.
+# When selected, this mode forces the EXT service chain and no ARP mode.
+# The destination MAC for each port is set to the other (peer) port MAC.
+# VLAN tagging is defined by the 'vlans' & 'vlan_tagging' properties.
+# Can be overridden by --l2-loopback (including the vlan tagging spec).
+l2_loopback: false
+# No assumption is made about the loop implementation.
+# Multiple L2 vlan-tagged service chains are allowed;
+# the vlan ID lists' size must be at least service_chain_count.
+# If vlan tagging is disabled, the service chain count is forced to 1.
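+# Example (illustrative, assuming 2 chains with VLAN tagging enabled):
+# l2_loopback: true
+# service_chain_count: 2
+# vlans: [[1010, 1010], [1020, 1020]]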
# Resources created by NFVbench will not be removed
# Can be overriden by --no-cleanup
@@ -146,69 +171,245 @@ traffic_generator:
default_profile: trex-local
# IP addresses for L3 traffic.
+ # This section describes the addresses to use to fill in the UDP packets sent by the
+ # traffic generator. If your VNFs are L2 forwarders, the fields below do not need to change.
+ # If your VNFs are L3 routers, the fields below must match the static routes in your VNFs
+ # so that UDP packets can be routed back to the peer port of the traffic generator.
+
# All of the IPs are used as base for IP sequence computed based on chain or flow count.
+ # (sim-devices-left)---(tg-gateway-left)---(vnf-left)- ...
+ # -(vnf-right)---(tg-gateway-right)---(sim-devices-right)
#
# `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count
+ # these are used for addressing virtual devices simulated by the traffic generator
+ # and must be in a different subnet than tg_gateway_ip_addrs and gateway_ip_addrs
# `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
- # `tg_gateway_ip_addrs` base IPs for traffic generator ports, quantity depends on chain count
+ ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
+ ip_addrs_step: 0.0.0.1
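+ # Example (illustrative): with the defaults above and ip_addrs_step 0.0.0.1, the simulated
+ # devices on the left side use 10.0.0.0, 10.0.0.1, 10.0.0.2, ... and the devices on the
+ # right side use 20.0.0.0, 20.0.0.1, 20.0.0.2, ... up to the number of flows required.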
+
+ # `ip_src_static`: indicates whether the source IP remains constant or varies during traffic
+ # generation. Use True for a constant source IP and False for varying source IPs.
+ # default value is True
+ ip_src_static: True
+
+ # `tg_gateway_ip_addrs` base IP for traffic generator ports in the left and right networks to the VNFs
+ # chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
# `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
- # `gateway_ip_addrs`: base IPs of router gateways on both networks, quantity depends on chain count
+ tg_gateway_ip_addrs: ['192.168.1.100', '192.168.2.100']
+ tg_gateway_ip_cidrs: ['192.168.1.0/24','192.168.2.0/24']
+ tg_gateway_ip_addrs_step: 0.0.0.1
+ # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
+ # must correspond to the public IP on the left and right networks
+ # for each left-most and right-most VNF of every chain.
+ # must be the same subnet but not same IP as tg_gateway_ip_addrs.
+ # chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used
# `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
+ gateway_ip_addrs: ['192.168.1.1', '192.168.2.1']
+ gateway_ip_addrs_step: 0.0.0.1
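+ # Example (illustrative, assuming service_chain_count: 2 and the default step of 0.0.0.1):
+ # the traffic generator gateway ports use 192.168.1.100, 192.168.1.101 on the left and
+ # 192.168.2.100, 192.168.2.101 on the right, while the VNF gateways use 192.168.1.1,
+ # 192.168.1.2 and 192.168.2.1, 192.168.2.2 (one address per chain on each side).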
+
+ # UDP DEFINED VARIABLES
+ # TRex picks the default UDP port (53), but the range of UDP source and destination ports can also
+ # be defined in the configuration file by using the following attributes:
+ #
# `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
# `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
- # `mac_addrs_left` & `mac_addrs_right`: Lists of MAC addresses corresponding to the number of chains
- # specified for `service_chain_count`.
+ # `udp_src_port` and `udp_dst_port` can be defined by a single port or a range. Example:
+ # udp_src_port: 80
+ # udp_dst_port: ['1024','65000']
+ # `udp_port_step`: the step between two generated ports, default is equal to '1'
+ #
+ # NOTICE:
+ # Following TRex behavior, incrementation and decrementation of source port and destination
+ # port values occur simultaneously.
+ # So, in order to reach the highest possible number of distinct packets, it is recommended
+ # that the source port range size and the destination port range size differ by 1,
+ # i.e.: |range[source_port] - range[destination_port]| = 1
+ # (two range sizes that differ by 1 are coprime, so all port combinations are covered)
+ udp_src_port:
+ udp_dst_port:
+ udp_port_step: '1'
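+ # Example (illustrative): with udp_src_port: ['1000','1003'] (a range of 4 ports) and
+ # udp_dst_port: ['2000','2004'] (a range of 5 ports), the range sizes differ by 1, so the
+ # generated traffic cycles through all 4 x 5 = 20 source/destination port combinations.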
+
+ # VxLAN only: optionally specify what VLAN tag to use for the VxLAN overlay
+ # This is used if the vxlan tunnels are running on a specific VLAN.
+ # Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
+ # for all VxLAN tunneled traffic
+ vtep_vlan:
+ # VxLAN and MPLS only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
+ src_vteps:
+ # VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs
+ dst_vtep:
+ # The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
+ # This parameter is required to resolve the first next-hop MAC address if that next hop is not the final dst_vtep.
+ # This parameter is mandatory for MPLS only.
+ vtep_gateway_ips:
+ # L2 ADDRESSING OF UDP PACKETS
+ # Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
+ # Leave empty for PVP, PVVP, EXT with ARP
+ # Only used when `service_chain` is EXT and `no_arp` is true.
# - If both lists are empty the far end MAC of the traffic generator will be used for left and right
- # - The MAC addresses will only be used when `service_chain` is EXT and `no_arp` is true.
- # - The length of each list must match the number of chains being used.
+ # (this is typically used to loop back on the first hop switch or using a loopback cable)
+ # - The length of each list must match the number of chains being used!
# - The index of each list must correspond to the chain index to ensure proper pairing.
# - Below is an example of using two chains:
# - mac_addrs_left: ['00:00:00:00:01:00', '00:00:00:00:02:00']
# - mac_addrs_right: ['00:00:00:00:01:01', '00:00:00:00:02:01']
- ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
- ip_addrs_step: 0.0.0.1
- tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
- tg_gateway_ip_addrs_step: 0.0.0.1
- gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
- gateway_ip_addrs_step: 0.0.0.1
- udp_src_port:
- udp_dst_port:
+ # UDP packets sent on port 0 will use dest MAC '00:00:00:00:01:00' for chain #0 and
+ # dest MAC '00:00:00:00:02:00' for chain #1
+ # UDP packets sent on port 1 will use dest MAC '00:00:00:00:01:01' for chain #0 and
+ # dest MAC '00:00:00:00:02:01' for chain #1
+ # It is expected that the looping device (L2 forwarder) will rewrite the src and dst MAC
+ # of the looping UDP packet so that it can reach back to the peer port of the traffic
+ # generator.
+ #
mac_addrs_left:
mac_addrs_right:
# Traffic Generator Profiles
# In case you have multiple testbeds or traffic generators,
# you can define one traffic generator profile per testbed/traffic generator.
+ # In most cases you only need to fill in the pci address for the 2 ports used by the
+ # traffic generator and leave all other fields unchanged
#
# Generator profiles are listed in the following format:
# `name`: Traffic generator profile name (use a unique name, no space or special character)
+ # Do not change this field
# `tool`: Traffic generator tool to be used (currently supported is `TRex`).
+ # Do not change this field
# `ip`: IP address of the traffic generator.
- # `cores`: Specify the number of cores for TRex traffic generator. ONLY applies to trex-local.
+ # The default loopback address is used when the traffic generator runs on the same host
+ # as NFVbench.
+ # `cores`: Specify the number of cores for running the TRex traffic generator.
+ # ONLY applies to trex-local.
# `software_mode`: Advise TRex to use software mode which provides the best compatibility. But
# note that TRex will not use any hardware acceleration technology under
# software mode, therefore the performance of TRex will be significantly
# lower. ONLY applies to trex-local.
+ # Recommended to leave the default value (false)
+ # `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limit the amount
+ # of packet memory used. (Passed to dpdk as -m arg)
+ # ONLY applies to trex-local.
+ # `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
+ # ONLY applies to trex-local.
+ # `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
+ # ONLY applies to trex-local.
# `interfaces`: Configuration of traffic generator interfaces.
# `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
- # `interfaces.switch_port`: Leave empty (reserved for advanced use cases)
+ # `interfaces.switch_port`: Leave empty (deprecated)
# `interfaces.pci`: The PCI address of the intel NIC interface associated to this port
+ # This field is required and cannot be empty
+ # Use lspci to list the PCI address of all devices
+ # Example of value: "0000:5e:00.0"
# `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
+ # Empty value (default) to use the speed discovered by the traffic generator.
+ # Recommended to leave this field empty.
+ # Do not use unless you want to override the speed discovered by the
+ # traffic generator. Expected format: 10Gbps
+ #
+ # `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA.
+ # See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
+ # for more details
+ # `platform.master_thread_id`: Hardware thread_id for control thread. (Valid value is mandatory if platform property is set)
+ # `platform.latency_thread_id`: Hardware thread_id for RX thread. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if`: Section defines info for interface pairs (according to the order in “interfaces” list). (Valid value is mandatory if platform property is set)
+ # Each section, starting with “- socket” defines info for different interface pair. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (Valid value is mandatory if platform property is set)
+ # Threads are pinned to cores, so specifying threads actually determines the hardware cores.
+ # Example of values:
+ # platform:
+ # master_thread_id: 0
+ # latency_thread_id: 2
+ # dual_if:
+ # - socket: 0
+ # threads: [1]
#
generator_profile:
- name: trex-local
tool: TRex
ip: 127.0.0.1
- cores: 3
+ cores: 4
software_mode: false
+ limit_memory: 1024
+ zmq_pub_port: 4500
+ zmq_rpc_port: 4501
interfaces:
- port: 0
- switch_port:
pci:
- - port: 1
switch_port:
+ - port: 1
pci:
- intf_speed: 10Gbps
+ switch_port:
+ intf_speed:
+ platform:
+ master_thread_id:
+ latency_thread_id:
+ dual_if:
+ - socket:
+ threads:
+
+# Use 'true' to force restart of local TRex server before next run
+# TRex local server will be restarted even if restart property is false in case of generator config changes between runs
+restart: false
+
+# Simpler override for TRex core count and mbuf multiplier factor
+# if empty defaults to the one specified in generator_profile.cores
+cores:
+
+# Simpler override for the interface speed
+# if empty, the current generator_profile.intf_speed parameter applies
+# if value = 'auto' the auto-detection is forced
+intf_speed:
+
+# 'cores' and 'intf_speed' parameters can be overridden themselves
+# by respective options --cores and --intf-speed on the command-line.
+
+# By default, the real ports line rate is detected and used as
+# the reference for computing the theoretical maximum traffic load (100%).
+# Note that specifying 'intf_speed' allows one to artificially lower this
+# reference while not modifying the actual transmission bit rate.
+
+# The values of the following parameters are ignored on entry;
+# they are defined here only so that they appear in the reported configuration.
+# They will reflect the values active at run-time (after overriding or detection).
+cores_used:
+intf_speed_used:
+intf_speed_detected:
+
+# A cache size value is passed to the TRex field engine (FE) at packet generation.
+# Can be overridden by --cache-size
+# More information for TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the trex platform configuration (see Trex manual 6.2.2. Memory section configuration)
+# Note that the resulting value is finally capped to 10000, whatever the requested size is (by design limitation).
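+# Example (illustrative): with flow_count: 10000 and cache_size: -1, the cache size used will be
+# 10000 (the flow count value), which is also the maximum allowed by the 10000 cap.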
+
+# Specification of the TRex behaviour dealing with the i40e network card driver issue: Trex-528
+# see https://trex-tgn.cisco.com/youtrack/issue/trex-528
+# This issue states that if other ports, in the same card,
+# are in kernel mode, they could impair traffic counting.
+# Can be overridden by --i40e-mixed
+# Values can be:
+# ignore - don't consider the case (default)
+# exit - should the case arise, exit (TRex default behaviour)
+# unbind - unbind kernel bound ports (the former NFVbench behaviour)
+# The 'ignore' option might be OK as soon as the issue has been fixed in the driver.
+# The 'unbind' option should not be used! (other ports may currently be in use by the host)
+i40e_mixed:
+
+# TRex will use 1 x 64B mbuf per pre-built cached packet. Assuming 1 pre-built cached packet per flow, this means that for a very large number of flows the configured mbuf_64 value will need to be set accordingly.
+mbuf_64:
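+# Example (illustrative): with 1,000,000 flows and 1 pre-built cached packet per flow, mbuf_64
+# should be set to at least 1,000,000.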
+
+# mbuffer ratio to use for TRex (see TRex documentation for more details)
+mbuf_factor: 0.2
+
+# A switch to disable hdrh
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
+
+# List of latency percentile values returned using hdrh
+# elements should be int or float between 0.0 and 100.0
+lat_percentiles: [25, 75, 99]
# -----------------------------------------------------------------------------
# These variables are not likely to be changed
@@ -225,21 +426,28 @@ generic_poll_sec: 2
# name of the loop VM
loop_vm_name: 'nfvbench-loop-vm'
-# Default names, subnets and CIDRs for PVP/PVVP networks
+# Default names, subnets and CIDRs for PVP/PVVP networks (OpenStack only)
+#
# If a network with given name already exists it will be reused.
# - PVP only uses left and right
# - PVVP uses left, middle and right
# - for EXT chains, this structure is not relevant - refer to external_networks
# Otherwise a new internal network will be created with that name, subnet and CIDR.
-#
-# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
-# will be assigned by Neutron.
-# Must be unique for each network
+#
+# network_type must be 'vlan' (for VLAN and SRIOV) or 'vxlan' (for VxLAN)
+# all 3 networks must use the same network type in this release
+# segmentation_id can be set to enforce a specific segmentation id (vlan ID or VNI if vxlan)
+# by default (empty) the segmentation id will be assigned by Neutron.
+# If specified, it must be unique for each network
+# For multi-chaining, see notes below
# physical_network can be set to pick a specific physical network - by default (empty) the
# default physical network will be picked
-# In the case of SR-IOV, both physical_network and segmentation ID must be provided
-# For example to setup PVP using 2 different SR-IOV ports, you must put the appropriate physnet
+# SR-IOV: both physical_network and VLAN segmentation ID must be provided
+# VxLAN: the VNI must generally be provided (except special Neutron VxLAN implementations)
+#
+# For example to setup 1xPVP using 2 different SR-IOV ports, you must put the appropriate physnet
# names under left.physical_network and right.physical_network.
+# For multi-chaining and non shared networks, see the notes further below.
# Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1)
# using VLAN ID 2000 and 2001:
# internal_networks:
@@ -249,56 +457,286 @@ loop_vm_name: 'nfvbench-loop-vm'
# right:
# segmentation_id: 2001
# physical_network: phys_sriov1
+#
+# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN, MPLS):
+# - the segmentation_id field if provided must be a list of values (as many as chains)
+# - segmentation_id auto-indexing:
+# the segmentation_id field can also be a single value that represents the base value from which
+# values for each chain is derived using the chain ID as an offset. For example
+# if 2000 is specified, NFVbench will use 2000 for chain 0, 2001 for chain 1 etc...
+# The ranges of all the networks must not overlap.
+# - the physical_network can be a single name (all VFs to be allocated on the same physnet)
+#   or a list of physnet names to use different PFs
+#
+# Example of 2-chain VLAN configuration:
+# internal_networks:
+# left:
+# segmentation_id: [2000, 2001]
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: [2010, 2011]
+# physical_network: phys_sriov1
+# Equivalent to (using auto-indexing):
+# internal_networks:
+# left:
+# segmentation_id: 2000
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: 2010
+# physical_network: phys_sriov1
+#
+# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true)
+#   this parameter doesn't support auto-indexing because this is not a typical scenario;
+#   a list of values in the range 256-1048575 is expected, one value per chain
+#
+# In the below configuration example 'segmentation_id' contains the inner MPLS label for each chain
+# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain
+# Example of 2-chain MPLS configuration:
+# internal_networks:
+# left:
+# network_type: mpls
+# segmentation_id: [2000, 2001]
+# mpls_transport_labels: [10000, 10000]
+# physical_network: phys_sriov0
+# right:
+# network_type: mpls
+# segmentation_id: [2010, 2011]
+# mpls_transport_labels: [11000, 11000]
+# physical_network: phys_sriov1
+
internal_networks:
left:
- name: 'nfvbench-net0'
- subnet: 'nfvbench-subnet0'
+ name: 'nfvbench-lnet'
+ subnet: 'nfvbench-lsubnet'
cidr: '192.168.1.0/24'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
right:
- name: 'nfvbench-net1'
- subnet: 'nfvbench-subnet1'
+ name: 'nfvbench-rnet'
+ subnet: 'nfvbench-rsubnet'
cidr: '192.168.2.0/24'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
middle:
- name: 'nfvbench-net2'
- subnet: 'nfvbench-subnet2'
+ name: 'nfvbench-mnet'
+ subnet: 'nfvbench-msubnet'
cidr: '192.168.3.0/24'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
+
+# IDLE INTERFACES: PVP, PVVP and non shared net only.
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If service_chain_shared_net is false, additional virtual interfaces can be
+# added at VM creation time, these interfaces will not carry any traffic and
+# can be used to test the impact of idle interfaces in the overall performance.
+# All these idle interfaces will use normal ports (not direct).
+# Number of idle interfaces per VM (none by default)
+idle_interfaces_per_vm: 0
+
+# A new network is created for each idle interface.
+# If service_chain_shared_net is true, the options below will be ignored
+# and no idle interfaces will be added.
+idle_networks:
+ # Prefix for all idle networks, the final name will append the chain ID and idle index
+ # e.g. "nfvbench-idle-net.0.4" chain 0 idle index 4
+ name: 'nfvbench-idle-net'
+ # Subnet name to use for all idle subnetworks
+ subnet: 'nfvbench-idle-subnet'
+ # CIDR to use for all idle networks (value should not matter)
+ cidr: '192.169.1.0/24'
+ # Type of network associated to the idle virtual interfaces (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the idle virtual interfaces
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ # Note that NFVbench will use as many consecutive segmentation IDs as needed.
+ # For example, for 4 PVP chains and 8 idle
+ # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
+ # starting from the value provided.
+ segmentation_id:
+ # physnet name to use for all idle interfaces
+ physical_network:
+
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface can be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This will be helpful for debug (forwarder config, capture traffic...)
+# or to emulate VNF with management interface
+use_management_port: false
+
+# If a network with given name already exists it will be reused.
+# Otherwise a new network is created for management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+ name: 'nfvbench-management-net'
+ # Subnet name to use for management subnetwork
+ subnet: 'nfvbench-management-subnet'
+ # CIDR to use for management network
+ cidr: '192.168.0.0/24'
+ gateway: '192.168.0.254'
+ # Type of network associated to the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the management interface
+ physical_network:
+
+# Floating IP for management interface
+# If use_floating_ip is true, floating IP will be set on management interface port
+# One floating IP per loop VM will be used (floating IPs are often limited,
+# use them in a limited context, mainly for debug). If there are 10 PVP chains, this will require 10
+# floating IPs. If there are 10 PVVP chains, it will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with given name already exists it will be reused.
+# Set same name as management_network if you want to use a floating IP from this network
+# Otherwise set name, subnet and CIDR information from your floating IP pool network
+# Floating network used to set floating IP on management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+ name: 'nfvbench-floating-net'
+ # Subnet name to use for floating subnetwork
+ subnet: 'nfvbench-floating-subnet'
+ # CIDR to use for floating network
+ cidr: '192.168.0.0/24'
+ # Type of network associated to the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for all idle interfaces
+ # physnet name to use for the floating network
# In the scenario of PVVP + SRIOV, there is choice of how the traffic will be
# handled in the middle network. The default (false) will use vswitch, while
# SRIOV can be used by toggling below setting.
use_sriov_middle_net: false
-# EXT chain only. Names of edge networks which will be used to send traffic via traffic generator.
+# EXT chain only. Prefix names of edge networks or list of edge network names
+# used to send traffic via traffic generator.
+#
+# If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
+#
+# If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
+# left and right can take either a string prefix or a list of arbitrary network names
+# If a string prefix is passed, an index will be appended to each network name to form the final name.
+# Example:
+# external_networks:
+# left: 'ext-lnet'
+# right: 'ext-rnet'
+# ext-lnet0 ext-rnet0 for chain #0
+# ext-lnet1 ext-rnet1 for chain #1
+# etc...
+# If a list of strings is passed, each string in the list must be the name of the network used for the
+# chain indexed by the entry position in the list.
+# The list must have at least as many entries as there are chains
+# Example:
+# external_networks:
+# left: ['ext-lnet', 'ext-lnet2']
+# right: ['ext-rnet', 'ext-rnet2']
+#
external_networks:
- left: 'nfvbench-net0'
- right: 'nfvbench-net1'
+ left:
+ right:
+
+# PVP with L3 router in the packet path only.
+# Only use when l3_router option is True (see l3_router)
+# Prefix names of edge networks which will be used to send traffic via traffic generator.
+# If a network with given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+ left:
+ name: 'nfvbench-net2'
+ router_name: 'router_left'
+ subnet: 'nfvbench-subnet2'
+ cidr: '192.168.3.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+ right:
+ name: 'nfvbench-net3'
+ router_name: 'router_right'
+ subnet: 'nfvbench-subnet3'
+ cidr: '192.168.4.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+# Use 'true' to enable VxLAN encapsulation of the packets sent by the traffic generator.
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'.
+# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false
+# and vice versa.
+vxlan: false
+# Use 'true' to enable MPLS encapsulation of the packets sent by the traffic generator.
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'.
+# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be set to 'false'
+# and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be set to 'true' because these
+# features are not supported at the moment. When these features are supported in the future, they will require
+# special NIC hardware. Only a 2-label stack is supported at the moment, where one label is the transport label and
+# the other is the VPN label; for more details please refer to 'mpls_transport_labels' and 'segmentation_id' in the networks configuration.
+mpls: false
# Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
-# Leave empty you do not want the traffic generator to insert the VLAN tag. This is
-# needed for example if VLAN tagging is enabled on switch (trunk mode) or if you want to hook directly to a NIC
-# By default is set to true (which is the nominal use case with TOR and trunk mode to Trex)
+# Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
+# needed for example if VLAN tagging is enabled on switch (access mode) or if you want to hook
+# directly to a NIC).
+# By default this is set to true (which is the nominal use case with a TOR and trunk mode to the TRex ports)
+# If VxLAN or MPLS are enabled, this option should be set to false (vlan tagging for encapsulated packets
+# is not supported). Use the vtep_vlan option to enable vlan tagging for the VxLAN overlay network.
vlan_tagging: true
-# Specify only when you want to override VLAN IDs used for tagging with own values (exactly 2).
-# Default behavior of VLAN tagging is to retrieve VLAN IDs from OpenStack networks provided above.
-# In case of VxLAN this setting is ignored and only vtep_vlan from traffic generator profile is used.
-# Example: [1998, 1999]
+# Used only in the case of EXT chain and no OpenStack or not admin access to specify the VLAN IDs to use.
+# This property is ignored when OpenStack is used or when 'vlan_tagging' is disabled.
+# If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using Neutron API.
+# If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
+# If networks are not shared across chains (service_chain_shared_net=false), the list should have
+# 2 lists of vlan IDs
+# Examples:
+# [1998, 1999] left network uses vlan 1998 right network uses vlan 1999
+# [[1,2],[3,4]] chain 0 left vlan 1, right vlan 2 - chain 1 left vlan 3 right vlan 4
+# [1010, 1010] same vlan ID on both sides, for a typical l2-loopback test (*)
+# The vlan lists may be oversized, compared to the actual service chain count
+# (lowest indexes are used) but an exception is raised if they are too short.
vlans: []
-
-# Used only with EXT chain. MAC addresses of traffic generator ports are used as destination
-# if 'no_arp' is set to 'true'. Otherwise ARP requests are sent to find out destination MAC addresses.
+# (*) actually there is no restriction, left/right IDs may differ
+# for some exotic purpose - see also the l2_loopback parameter.
+
+# ARP is used to discover the MAC address of VNFs that run L3 routing.
+# Used only with EXT chain.
+# False (default): ARP requests are sent to find out dest MAC addresses.
+# True: do not send ARP but use provisioned dest macs instead
+# (see mac_addrs_left and mac_addrs_right)
no_arp: false
+# Loop VM (VPP forwarder) can use ARP to discover next hop mac address
+# False (default): do not send ARP but use statically configured device MACs instead (TRex gratuitous ARPs are not interpreted by VPP)
+# True: ARP requests are sent to find out next hop MAC addresses (for instance SDN-GW)
+loop_vm_arp: false
+
# Traffic Profiles
# You can add here more profiles as needed
# `l2frame_size` can be specified in any none zero integer value to represent the size in bytes
@@ -325,9 +763,18 @@ traffic:
# Can be overriden by --no-traffic
no_traffic: false
-# Do not reset tx/rx counters prior to running
-# Can be overriden by --no-reset
-no_reset: false
+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
+# If l3_router is true, and depending on the SUT ARP stale time configuration,
+# Gratuitous ARP (GARP) packets from the TG port to the router are needed to keep traffic up.
+# Default value: 1 packet per second
+# This value needs to be set lower than the SUT ARP stale time to avoid GARP packet drops
+# in case of high load traffic
+periodic_gratuitous_arp: false
+gratuitous_arp_pps: 1
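+# Example (illustrative, assuming a SUT ARP stale time of 30 seconds): the default of
+# 1 GARP packet per second sends about 30 GARP packets per stale period, keeping the
+# router ARP entry fresh even if some GARP packets are dropped under high load.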
# Test configuration
@@ -354,6 +801,9 @@ duration_sec: 60
# Can be overridden by --interval
interval_sec: 10
+# Default pause between iterations of a binary search (NDR/PDR)
+pause_sec: 2
+
# NDR / PDR configuration
measurement:
# Drop rates represent the ratio of dropped packet to the total number of packets sent.
@@ -392,6 +842,26 @@ debug: false
# Defaults to disabled
log_file:
+# One can specify a user ID for changing ownership of output log/json files
+# - empty: depends on whether the file already exists
+#          . yes: the file is replaced, its owner is unchanged
+#          . no:  the file is created with root as owner
+# - 0: this is the root user ID
+# - other: will correspond (or not) to an existing user/group on the host
+# (the current user ID can be obtained with the command 'id -u')
+# Can be overridden by --user-id
+# Consider also that the default value below can be overridden by a USER_ID env variable;
+# if nfvbench is run in a container, this information can be passed at its creation.
+# The overall precedence rule is: 'default_config (this) < env < config < command_line'
+user_id:
+
+# Similarly, the group ID is defined.
+# Can be overridden by --group-id
+# Default may be set through the GROUP_ID env variable
+# Caveat: a user and a group with the same name may have different numerical IDs
+# (the current group ID can be obtained with the command 'id -g')
+group_id:
+
# When enabled, all results and/or logs will be sent to a fluentd servers at the requested IPs and ports
# A list of one or more fluentd servers identified by their IPs and port numbers should be given.
# For each recipient it is possible to enable both sending logs and performance
@@ -426,3 +896,58 @@ factory_class: 'BasicFactory'
# Custom label added for every perf record generated during this run.
# Can be overriden by --user-label
user_label:
+
+# Custom information to be passed to results post-processing;
+# it will be included as is in the json report 'config' branch.
+# Useful for documenting or automating further treatments.
+# The value is any yaml object (=> open usage) - example:
+# |user_info:
+# | status: explore
+# | description:
+# | generator: VM
+# | attachment: direct
+# | target: lab-pf
+# | switch: qfx3500
+# Keys may be merged/overridden using the --user-info command line option
+# (the command-line parameter value is expressed as a json object string)
+user_info:
+
+
+# THESE FIELDS SHOULD BE USED VERY RARELY AND ONLY ON PURPOSE
+
+# Skip vswitch configuration and retrieving of stats
+# Can be overridden by --no-vswitch-access
+# Should be left to the default value (false)
+no_vswitch_access: false
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-streams
+# Should be left to the default value (false)
+no_latency_streams: false
+
+# Skip "end to end" connectivity check on traffic setup
+# Can be overridden by --no-e2e-check
+# Should be left to the default value (false)
+# This flag is usable for traffic generation only
+no_e2e_check: false
+
+# General purpose register (debugging flags)
+# Can be overridden by --debug-mask
+# Designed for development needs
+# The hexadecimal notation (0x...) is accepted.
+debug_mask: 0x00000000
diff --git a/nfvbench/chain_clients.py b/nfvbench/chain_clients.py
deleted file mode 100644
index faf7c2a..0000000
--- a/nfvbench/chain_clients.py
+++ /dev/null
@@ -1,596 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import os
-import re
-import time
-
-from glanceclient.v2 import client as glanceclient
-from neutronclient.neutron import client as neutronclient
-from novaclient.client import Client
-
-import compute
-from log import LOG
-
-class StageClientException(Exception):
- pass
-
-
-class BasicStageClient(object):
- """Client for spawning and accessing the VM setup"""
-
- nfvbenchvm_config_name = 'nfvbenchvm.conf'
-
- def __init__(self, config, cred):
- self.comp = None
- self.image_instance = None
- self.image_name = None
- self.config = config
- self.cred = cred
- self.nets = []
- self.vms = []
- self.created_ports = []
- self.ports = {}
- self.compute_nodes = set([])
- self.comp = None
- self.neutron = None
- self.flavor_type = {'is_reuse': True, 'flavor': None}
- self.host_ips = None
-
- def _ensure_vms_active(self):
- retry_count = (self.config.check_traffic_time_sec +
- self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
- for _ in range(retry_count):
- for i, instance in enumerate(self.vms):
- if instance.status == 'ACTIVE':
- continue
- is_reuse = getattr(instance, 'is_reuse', True)
- instance = self.comp.poll_server(instance)
- if instance.status == 'ERROR':
- raise StageClientException('Instance creation error: %s' %
- instance.fault['message'])
- if instance.status == 'ACTIVE':
- LOG.info('Created instance: %s', instance.name)
- self.vms[i] = instance
- setattr(self.vms[i], 'is_reuse', is_reuse)
-
- if all([(vm.status == 'ACTIVE') for vm in self.vms]):
- return
- time.sleep(self.config.generic_poll_sec)
- raise StageClientException('Timed out waiting for VMs to spawn')
-
- def _setup_openstack_clients(self):
- self.session = self.cred.get_session()
- nova_client = Client(2, session=self.session)
- self.neutron = neutronclient.Client('2.0', session=self.session)
- self.glance_client = glanceclient.Client('2',
- session=self.session)
- self.comp = compute.Compute(nova_client, self.glance_client, self.neutron, self.config)
-
- def _lookup_network(self, network_name):
- networks = self.neutron.list_networks(name=network_name)
- return networks['networks'][0] if networks['networks'] else None
-
- def _create_net(self, name, subnet, cidr, network_type=None,
- segmentation_id=None, physical_network=None):
- network = self._lookup_network(name)
- if network:
- # a network of same name already exists, we need to verify it has the same
- # characteristics
- if segmentation_id:
- if network['provider:segmentation_id'] != segmentation_id:
- raise StageClientException("Mismatch of 'segmentation_id' for reused "
- "network '{net}'. Network has id '{seg_id1}', "
- "configuration requires '{seg_id2}'."
- .format(net=name,
- seg_id1=network['provider:segmentation_id'],
- seg_id2=segmentation_id))
-
- if physical_network:
- if network['provider:physical_network'] != physical_network:
- raise StageClientException("Mismatch of 'physical_network' for reused "
- "network '{net}'. Network has '{phys1}', "
- "configuration requires '{phys2}'."
- .format(net=name,
- phys1=network['provider:physical_network'],
- phys2=physical_network))
-
- LOG.info('Reusing existing network: %s', name)
- network['is_reuse'] = True
- return network
-
- body = {
- 'network': {
- 'name': name,
- 'admin_state_up': True
- }
- }
-
- if network_type:
- body['network']['provider:network_type'] = network_type
- if segmentation_id:
- body['network']['provider:segmentation_id'] = segmentation_id
- if physical_network:
- body['network']['provider:physical_network'] = physical_network
-
- network = self.neutron.create_network(body)['network']
- body = {
- 'subnet': {
- 'name': subnet,
- 'cidr': cidr,
- 'network_id': network['id'],
- 'enable_dhcp': False,
- 'ip_version': 4,
- 'dns_nameservers': []
- }
- }
- subnet = self.neutron.create_subnet(body)['subnet']
- # add subnet id to the network dict since it has just been added
- network['subnets'] = [subnet['id']]
- network['is_reuse'] = False
- LOG.info('Created network: %s.', name)
- return network
-
- def _create_port(self, net, vnic_type='normal'):
- body = {
- "port": {
- 'network_id': net['id'],
- 'binding:vnic_type': vnic_type
- }
- }
- port = self.neutron.create_port(body)
- return port['port']
-
- def __delete_port(self, port):
- retry = 0
- while retry < self.config.generic_retry_count:
- try:
- self.neutron.delete_port(port['id'])
- return
- except Exception:
- retry += 1
- time.sleep(self.config.generic_poll_sec)
- LOG.error('Unable to delete port: %s', port['id'])
-
- def __delete_net(self, network):
- retry = 0
- while retry < self.config.generic_retry_count:
- try:
- self.neutron.delete_network(network['id'])
- return
- except Exception:
- retry += 1
- time.sleep(self.config.generic_poll_sec)
- LOG.error('Unable to delete network: %s', network['name'])
-
- def __get_server_az(self, server):
- availability_zone = getattr(server, 'OS-EXT-AZ:availability_zone', None)
- host = getattr(server, 'OS-EXT-SRV-ATTR:host', None)
- if availability_zone is None:
- return None
- if host is None:
- return None
- return availability_zone + ':' + host
-
- def _lookup_servers(self, name=None, nets=None, az=None, flavor_id=None):
- error_msg = 'VM with the same name, but non-matching {} found. Aborting.'
- networks = set([net['name'] for net in nets]) if nets else None
- server_list = self.comp.get_server_list()
- matching_servers = []
-
- for server in server_list:
- if name and server.name != name:
- continue
-
- if flavor_id and server.flavor['id'] != flavor_id:
- raise StageClientException(error_msg.format('flavors'))
-
- if networks and not set(server.networks.keys()).issuperset(networks):
- raise StageClientException(error_msg.format('networks'))
-
- if server.status != "ACTIVE":
- raise StageClientException(error_msg.format('state'))
-
- # everything matches
- matching_servers.append(server)
-
- return matching_servers
-
- def _create_server(self, name, ports, az, nfvbenchvm_config):
- port_ids = [{'port-id': port['id']} for port in ports]
- nfvbenchvm_config_location = os.path.join('/etc/', self.nfvbenchvm_config_name)
- server = self.comp.create_server(name,
- self.image_instance,
- self.flavor_type['flavor'],
- None,
- port_ids,
- None,
- avail_zone=az,
- user_data=None,
- config_drive=True,
- files={nfvbenchvm_config_location: nfvbenchvm_config})
- if server:
- setattr(server, 'is_reuse', False)
- LOG.info('Creating instance: %s on %s', name, az)
- else:
- raise StageClientException('Unable to create instance: %s.' % (name))
- return server
-
- def _setup_resources(self):
- # To avoid reuploading image in server mode, check whether image_name is set or not
- if self.image_name:
- self.image_instance = self.comp.find_image(self.image_name)
- if self.image_instance:
- LOG.info("Reusing image %s", self.image_name)
- else:
- image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
- if self.config.vm_image_file:
- match = re.search(image_name_search_pattern, self.config.vm_image_file)
- if match:
- self.image_name = match.group(1)
- LOG.info('Using provided VM image file %s', self.config.vm_image_file)
- else:
- raise StageClientException('Provided VM image file name %s must start with '
- '"nfvbenchvm-<version>"' % self.config.vm_image_file)
- else:
- pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- for f in os.listdir(pkg_root):
- if re.search(image_name_search_pattern, f):
- self.config.vm_image_file = pkg_root + '/' + f
- self.image_name = f.replace('.qcow2', '')
- LOG.info('Found built-in VM image file %s', f)
- break
- else:
- raise StageClientException('Cannot find any built-in VM image file.')
- if self.image_name:
- self.image_instance = self.comp.find_image(self.image_name)
- if not self.image_instance:
- LOG.info('Uploading %s', self.image_name)
- res = self.comp.upload_image_via_url(self.image_name,
- self.config.vm_image_file)
-
- if not res:
- raise StageClientException('Error uploading image %s from %s. ABORTING.'
- % (self.image_name,
- self.config.vm_image_file))
- LOG.info('Image %s successfully uploaded.', self.image_name)
- self.image_instance = self.comp.find_image(self.image_name)
-
- self.__setup_flavor()
-
- def __setup_flavor(self):
- if self.flavor_type.get('flavor', False):
- return
-
- self.flavor_type['flavor'] = self.comp.find_flavor(self.config.flavor_type)
- if self.flavor_type['flavor']:
- self.flavor_type['is_reuse'] = True
- else:
- flavor_dict = self.config.flavor
- extra_specs = flavor_dict.pop('extra_specs', None)
-
- self.flavor_type['flavor'] = self.comp.create_flavor(self.config.flavor_type,
- override=True,
- **flavor_dict)
-
- LOG.info("Flavor '%s' was created.", self.config.flavor_type)
-
- if extra_specs:
- self.flavor_type['flavor'].set_keys(extra_specs)
-
- self.flavor_type['is_reuse'] = False
-
- if self.flavor_type['flavor'] is None:
- raise StageClientException('%s: flavor to launch VM not found. ABORTING.'
- % self.config.flavor_type)
-
- def __delete_flavor(self, flavor):
- if self.comp.delete_flavor(flavor=flavor):
- LOG.info("Flavor '%s' deleted", self.config.flavor_type)
- self.flavor_type = {'is_reuse': False, 'flavor': None}
- else:
- LOG.error('Unable to delete flavor: %s', self.config.flavor_type)
-
- def get_config_file(self, chain_index, src_mac, dst_mac, intf_mac1, intf_mac2):
- boot_script_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
- 'nfvbenchvm/', self.nfvbenchvm_config_name)
-
- with open(boot_script_file, 'r') as boot_script:
- content = boot_script.read()
-
- g1cidr = self.config.generator_config.src_device.get_gw_ip(chain_index) + '/8'
- g2cidr = self.config.generator_config.dst_device.get_gw_ip(chain_index) + '/8'
-
- vm_config = {
- 'forwarder': self.config.vm_forwarder,
- 'intf_mac1': intf_mac1,
- 'intf_mac2': intf_mac2,
- 'tg_gateway1_ip': self.config.traffic_generator.tg_gateway_ip_addrs[0],
- 'tg_gateway2_ip': self.config.traffic_generator.tg_gateway_ip_addrs[1],
- 'tg_net1': self.config.traffic_generator.ip_addrs[0],
- 'tg_net2': self.config.traffic_generator.ip_addrs[1],
- 'vnf_gateway1_cidr': g1cidr,
- 'vnf_gateway2_cidr': g2cidr,
- 'tg_mac1': src_mac,
- 'tg_mac2': dst_mac
- }
-
- return content.format(**vm_config)
-
- def set_ports(self):
- """Stores all ports of NFVbench networks."""
- nets = self.get_networks_uuids()
- for port in self.neutron.list_ports()['ports']:
- if port['network_id'] in nets:
- ports = self.ports.setdefault(port['network_id'], [])
- ports.append(port)
-
- def disable_port_security(self):
- """
- Disable security at port level.
- """
- vm_ids = [vm.id for vm in self.vms]
- for net in self.nets:
- for port in self.ports[net['id']]:
- if port['device_id'] in vm_ids:
- try:
- self.neutron.update_port(port['id'], {
- 'port': {
- 'security_groups': [],
- 'port_security_enabled': False,
- }
- })
- LOG.info('Security disabled on port %s', port['id'])
- except Exception:
- LOG.warning('Failed to disable port security on port %s, ignoring...',
- port['id'])
-
-
- def get_loop_vm_hostnames(self):
- return [getattr(vm, 'OS-EXT-SRV-ATTR:hypervisor_hostname') for vm in self.vms]
-
- def get_host_ips(self):
- '''Return the IP adresss(es) of the host compute nodes for this VMclient instance.
- Returns a list of 1 IP adress or 2 IP addresses (PVVP inter-node)
- '''
- if not self.host_ips:
- # get the hypervisor object from the host name
- self.host_ips = [self.comp.get_hypervisor(
- getattr(vm, 'OS-EXT-SRV-ATTR:hypervisor_hostname')).host_ip for vm in self.vms]
- return self.host_ips
-
- def get_loop_vm_compute_nodes(self):
- compute_nodes = []
- for vm in self.vms:
- az = getattr(vm, 'OS-EXT-AZ:availability_zone')
- hostname = getattr(vm, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
- compute_nodes.append(az + ':' + hostname)
- return compute_nodes
-
- def get_reusable_vm(self, name, nets, az):
- servers = self._lookup_servers(name=name, nets=nets, az=az,
- flavor_id=self.flavor_type['flavor'].id)
- if servers:
- server = servers[0]
- LOG.info('Reusing existing server: %s', name)
- setattr(server, 'is_reuse', True)
- return server
- return None
-
- def get_networks_uuids(self):
- """
- Extract UUID of used networks. Order is important.
-
- :return: list of UUIDs of created networks
- """
- return [net['id'] for net in self.nets]
-
- def get_vlans(self):
- """
- Extract vlans of used networks. Order is important.
-
- :return: list of UUIDs of created networks
- """
- vlans = []
- for net in self.nets:
- assert net['provider:network_type'] == 'vlan'
- vlans.append(net['provider:segmentation_id'])
-
- return vlans
-
- def setup(self):
- """
- Creates two networks and spawn a VM which act as a loop VM connected
- with the two networks.
- """
- if self.cred:
- self._setup_openstack_clients()
-
- def dispose(self, only_vm=False):
- """
- Deletes the created two networks and the VM.
- """
- for vm in self.vms:
- if vm:
- if not getattr(vm, 'is_reuse', True):
- self.comp.delete_server(vm)
- else:
- LOG.info('Server %s not removed since it is reused', vm.name)
-
- for port in self.created_ports:
- self.__delete_port(port)
-
- if not only_vm:
- for net in self.nets:
- if 'is_reuse' in net and not net['is_reuse']:
- self.__delete_net(net)
- else:
- LOG.info('Network %s not removed since it is reused', net['name'])
-
- if not self.flavor_type['is_reuse']:
- self.__delete_flavor(self.flavor_type['flavor'])
-
-
-class EXTStageClient(BasicStageClient):
- def setup(self):
- super(EXTStageClient, self).setup()
-
- # Lookup two existing networks
- if self.cred:
- for net_name in [self.config.external_networks.left,
- self.config.external_networks.right]:
- net = self._lookup_network(net_name)
- if net:
- self.nets.append(net)
- else:
- raise StageClientException('Existing network {} cannot be found.'.
- format(net_name))
-
-
-class PVPStageClient(BasicStageClient):
- def get_end_port_macs(self):
- vm_ids = [vm.id for vm in self.vms]
- port_macs = []
- for _index, net in enumerate(self.nets):
- vm_mac_map = {port['device_id']: port['mac_address'] for port in self.ports[net['id']]}
- port_macs.append([vm_mac_map[vm_id] for vm_id in vm_ids])
- return port_macs
-
- def setup(self):
- super(PVPStageClient, self).setup()
- self._setup_resources()
-
- # Create two networks
- nets = self.config.internal_networks
- self.nets.extend([self._create_net(**n) for n in [nets.left, nets.right]])
-
- az_list = self.comp.get_enabled_az_host_list(required_count=1)
- if not az_list:
- raise Exception('Not enough hosts found.')
-
- az = az_list[0]
- self.compute_nodes.add(az)
- for chain_index in xrange(self.config.service_chain_count):
- name = self.config.loop_vm_name + str(chain_index)
- reusable_vm = self.get_reusable_vm(name, self.nets, az)
- if reusable_vm:
- self.vms.append(reusable_vm)
- else:
- vnic_type = 'direct' if self.config.sriov else 'normal'
- ports = [self._create_port(net, vnic_type) for net in self.nets]
- config_file = self.get_config_file(chain_index,
- self.config.generator_config.src_device.mac,
- self.config.generator_config.dst_device.mac,
- ports[0]['mac_address'],
- ports[1]['mac_address'])
- self.created_ports.extend(ports)
- self.vms.append(self._create_server(name, ports, az, config_file))
- self._ensure_vms_active()
- self.set_ports()
-
-
-class PVVPStageClient(BasicStageClient):
- def get_end_port_macs(self):
- port_macs = []
- for index, net in enumerate(self.nets[:2]):
- vm_ids = [vm.id for vm in self.vms[index::2]]
- vm_mac_map = {port['device_id']: port['mac_address'] for port in self.ports[net['id']]}
- port_macs.append([vm_mac_map[vm_id] for vm_id in vm_ids])
- return port_macs
-
- def setup(self):
- super(PVVPStageClient, self).setup()
- self._setup_resources()
-
- # Create two networks
- nets = self.config.internal_networks
- self.nets.extend([self._create_net(**n) for n in [nets.left, nets.right, nets.middle]])
-
- required_count = 2 if self.config.inter_node else 1
- az_list = self.comp.get_enabled_az_host_list(required_count=required_count)
-
- if not az_list:
- raise Exception('Not enough hosts found.')
-
- az1 = az2 = az_list[0]
- if self.config.inter_node:
- if len(az_list) > 1:
- az1 = az_list[0]
- az2 = az_list[1]
- else:
- # fallback to intra-node
- az1 = az2 = az_list[0]
- self.config.inter_node = False
- LOG.info('Using intra-node instead of inter-node.')
-
- self.compute_nodes.add(az1)
- self.compute_nodes.add(az2)
-
- # Create loop VMs
- for chain_index in xrange(self.config.service_chain_count):
- name0 = self.config.loop_vm_name + str(chain_index) + 'a'
- # Attach first VM to net0 and net2
- vm0_nets = self.nets[0::2]
- reusable_vm0 = self.get_reusable_vm(name0, vm0_nets, az1)
-
- name1 = self.config.loop_vm_name + str(chain_index) + 'b'
- # Attach second VM to net1 and net2
- vm1_nets = self.nets[1:]
- reusable_vm1 = self.get_reusable_vm(name1, vm1_nets, az2)
-
- if reusable_vm0 and reusable_vm1:
- self.vms.extend([reusable_vm0, reusable_vm1])
- else:
- edge_vnic_type = 'direct' if self.config.sriov else 'normal'
- middle_vnic_type = 'direct' \
- if self.config.sriov and self.config.use_sriov_middle_net \
- else 'normal'
- vm0_port_net0 = self._create_port(vm0_nets[0], edge_vnic_type)
- vm0_port_net2 = self._create_port(vm0_nets[1], middle_vnic_type)
-
- vm1_port_net2 = self._create_port(vm1_nets[1], middle_vnic_type)
- vm1_port_net1 = self._create_port(vm1_nets[0], edge_vnic_type)
-
- self.created_ports.extend([vm0_port_net0,
- vm0_port_net2,
- vm1_port_net2,
- vm1_port_net1])
-
- # order of ports is important for sections below
- # order of MAC addresses needs to follow order of interfaces
- # TG0 (net0) -> VM0 (net2) -> VM1 (net2) -> TG1 (net1)
- config_file0 = self.get_config_file(chain_index,
- self.config.generator_config.src_device.mac,
- vm1_port_net2['mac_address'],
- vm0_port_net0['mac_address'],
- vm0_port_net2['mac_address'])
- config_file1 = self.get_config_file(chain_index,
- vm0_port_net2['mac_address'],
- self.config.generator_config.dst_device.mac,
- vm1_port_net2['mac_address'],
- vm1_port_net1['mac_address'])
-
- self.vms.append(self._create_server(name0,
- [vm0_port_net0, vm0_port_net2],
- az1,
- config_file0))
- self.vms.append(self._create_server(name1,
- [vm1_port_net2, vm1_port_net1],
- az2,
- config_file1))
-
- self._ensure_vms_active()
- self.set_ports()
diff --git a/nfvbench/chain_managers.py b/nfvbench/chain_managers.py
deleted file mode 100644
index ab340bf..0000000
--- a/nfvbench/chain_managers.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-import time
-
-from log import LOG
-from network import Network
-from packet_analyzer import PacketAnalyzer
-from specs import ChainType
-from stats_collector import IntervalCollector
-
-
-class StageManager(object):
- """A class to stage resources in the systenm under test."""
-
- def __init__(self, config, cred, factory):
- self.config = config
- self.client = None
- # conditions due to EXT chain special cases
- if (config.vlan_tagging and not config.vlans) or not config.no_int_config:
- VM_CLASS = factory.get_stage_class(config.service_chain)
- self.client = VM_CLASS(config, cred)
- self.client.setup()
-
- def get_vlans(self):
- return self.client.get_vlans() if self.client else []
-
- def get_host_ips(self):
- return self.client.get_host_ips()
-
- def get_networks_uuids(self):
- return self.client.get_networks_uuids()
-
- def disable_port_security(self):
- self.client.disable_port_security()
-
- def get_vms(self):
- return self.client.vms
-
- def get_nets(self):
- return self.client.nets
-
- def get_ports(self):
- return self.client.ports
-
- def get_compute_nodes(self):
- return self.client.compute_nodes
-
- def set_vm_macs(self):
- if self.client and self.config.service_chain != ChainType.EXT:
- self.config.generator_config.set_vm_mac_list(self.client.get_end_port_macs())
-
- def close(self):
- if not self.config.no_cleanup and self.client:
- self.client.dispose()
-
-
-class PVPStatsManager(object):
- """A class to generate traffic and extract results for PVP chains."""
-
- def __init__(self, config, clients, specs, factory, vlans, notifier=None):
- self.config = config
- self.clients = clients
- self.specs = specs
- self.notifier = notifier
- self.interval_collector = None
- self.vlans = vlans
- self.factory = factory
- self._setup()
-
- def set_vlan_tag(self, device, vlan):
- self.worker.set_vlan_tag(device, vlan)
-
- def _setup(self):
- WORKER_CLASS = self.factory.get_chain_worker(self.specs.openstack.encaps,
- self.config.service_chain)
- self.worker = WORKER_CLASS(self.config, self.clients, self.specs)
- try:
- self.worker.set_vlans(self.vlans)
- self._config_interfaces()
- except Exception as exc:
- # since the wrorker is up and running, we need to close it
- # in case of exception
- self.close()
- raise exc
-
- def _get_data(self):
- return self.worker.get_data() if self.worker else {}
-
- def _get_network(self, traffic_port, stats, reverse=False):
- """Get the Network object corresponding to a given TG port.
-
- :param traffic_port: must be either 0 or 1
- :param stats: TG stats for given traffic port
- :param reverse: specifies if the interface list for this network
- should go from TG to loopback point (reverse=false) or
- from loopback point to TG (reverse=true)
- """
- # build the interface list in fwd direction (TG To loopback point)
- interfaces = [self.clients['traffic'].get_interface(traffic_port, stats)]
- if self.worker:
- # if available,
- # interfaces for workers must be aligned on the TG port number
- interfaces.extend(self.worker.get_network_interfaces(traffic_port))
- # let Network reverse the interface order if needed
- return Network(interfaces, reverse)
-
- def _config_interfaces(self):
- if self.config.service_chain != ChainType.EXT:
- self.clients['vm'].disable_port_security()
-
- self.worker.config_interfaces()
-
- def _generate_traffic(self):
- if self.config.no_traffic:
- return {}
-
- self.interval_collector = IntervalCollector(time.time())
- self.interval_collector.attach_notifier(self.notifier)
- LOG.info('Starting to generate traffic...')
- stats = {}
- for stats in self.clients['traffic'].run_traffic():
- self.interval_collector.add(stats)
-
- LOG.info('...traffic generating ended.')
- return stats
-
- def get_stats(self):
- return self.interval_collector.get() if self.interval_collector else []
-
- def get_version(self):
- return self.worker.get_version() if self.worker else {}
-
- def run(self):
- """Run analysis in both direction and return the analysis."""
- if self.worker:
- self.worker.run()
-
- stats = self._generate_traffic()
- result = {
- 'raw_data': self._get_data(),
- 'packet_analysis': {},
- 'stats': stats
- }
-
- # fetch latest stats from traffic gen
- stats = self.clients['traffic'].get_stats()
- LOG.info('Requesting packet analysis on the forward direction...')
- result['packet_analysis']['direction-forward'] = \
- self.get_analysis([self._get_network(0, stats),
- self._get_network(1, stats, reverse=True)])
- LOG.info('Packet analysis on the forward direction completed')
-
- LOG.info('Requesting packet analysis on the reverse direction...')
- result['packet_analysis']['direction-reverse'] = \
- self.get_analysis([self._get_network(1, stats),
- self._get_network(0, stats, reverse=True)])
-
- LOG.info('Packet analysis on the reverse direction completed')
- return result
-
- def get_compute_nodes_bios(self):
- return self.worker.get_compute_nodes_bios() if self.worker else {}
-
- @staticmethod
- def get_analysis(nets):
- LOG.info('Starting traffic analysis...')
-
- packet_analyzer = PacketAnalyzer()
- # Traffic types are assumed to always alternate in every chain. Add a no stats interface in
- # between if that is not the case.
- tx = True
- for network in nets:
- for interface in network.get_interfaces():
- packet_analyzer.record(interface, 'tx' if tx else 'rx')
- tx = not tx
-
- LOG.info('...traffic analysis completed')
- return packet_analyzer.get_analysis()
-
- def close(self):
- if self.worker:
- self.worker.close()
-
-
-class PVVPStatsManager(PVPStatsManager):
- """A Class to generate traffic and extract results for PVVP chains."""
-
- def __init__(self, config, clients, specs, factory, vlans, notifier=None):
- PVPStatsManager.__init__(self, config, clients, specs, factory, vlans, notifier)
-
- def run(self):
- """Run analysis in both direction and return the analysis."""
- fwd_v2v_net, rev_v2v_net = self.worker.run()
-
- stats = self._generate_traffic()
- result = {
- 'raw_data': self._get_data(),
- 'packet_analysis': {},
- 'stats': stats
- }
- # fetch latest stats from traffic gen
- stats = self.clients['traffic'].get_stats()
- fwd_nets = [self._get_network(0, stats)]
- if fwd_v2v_net:
- fwd_nets.append(fwd_v2v_net)
- fwd_nets.append(self._get_network(1, stats, reverse=True))
-
- rev_nets = [self._get_network(1, stats)]
- if rev_v2v_net:
- rev_nets.append(rev_v2v_net)
- rev_nets.append(self._get_network(0, stats, reverse=True))
-
- LOG.info('Requesting packet analysis on the forward direction...')
- result['packet_analysis']['direction-forward'] = self.get_analysis(fwd_nets)
- LOG.info('Packet analysis on the forward direction completed')
-
- LOG.info('Requesting packet analysis on the reverse direction...')
- result['packet_analysis']['direction-reverse'] = self.get_analysis(rev_nets)
-
- LOG.info('Packet analysis on the reverse direction completed')
- return result
-
-
-class EXTStatsManager(PVPStatsManager):
- """A Class to generate traffic and extract results for EXT chains."""
-
- def __init__(self, config, clients, specs, factory, vlans, notifier=None):
- PVPStatsManager.__init__(self, config, clients, specs, factory, vlans, notifier)
-
- def _setup(self):
- if self.specs.openstack:
- WORKER_CLASS = self.factory.get_chain_worker(self.specs.openstack.encaps,
- self.config.service_chain)
- self.worker = WORKER_CLASS(self.config, self.clients, self.specs)
- self.worker.set_vlans(self.vlans)
-
- if not self.config.no_int_config:
- self._config_interfaces()
- else:
- self.worker = None
diff --git a/nfvbench/chain_router.py b/nfvbench/chain_router.py
new file mode 100644
index 0000000..99114e0
--- /dev/null
+++ b/nfvbench/chain_router.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# Copyright 2018 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+# This module takes care of chaining routers
+#
+"""NFVBENCH CHAIN DISCOVERY/STAGING.
+
+This module takes care of staging/discovering resources that are participating in a
+L3 benchmarking session: routers, networks, ports, routes.
+If a resource is discovered with the same name, it will be reused.
+Otherwise it will be created.
+
+Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
+Configuration parameters that will influence how these resources are staged/related:
+- openstack or no openstack
+- chain type
+- number of chains
+- number of VNF in each chain (PVP, PVVP)
+- SRIOV and middle port SRIOV for port types
+- whether networks are shared across chains or not
+
+There is no traffic generation involved in this module.
+"""
+import time
+
+from netaddr import IPAddress
+from netaddr import IPNetwork
+
+from .log import LOG
+
+
+class ChainException(Exception):
+ """Exception while operating the chains."""
+
+class ChainRouter(object):
+ """Could be a shared router across all chains or a chain private router."""
+
+ def __init__(self, manager, name, subnets, routes):
+ """Create a router for given chain."""
+ self.manager = manager
+ self.subnets = subnets
+ self.routes = routes
+ self.name = name
+ self.ports = [None, None]
+ self.reuse = False
+ self.router = None
+ try:
+ self._setup()
+ except Exception:
+ LOG.error("Error creating router %s", self.name)
+ self.delete()
+ raise
+
+ def _setup(self):
+ # Lookup if there is a matching router with same name
+ routers = self.manager.neutron_client.list_routers(name=self.name)
+
+ if routers['routers']:
+ router = routers['routers'][0]
+ # a router of same name already exists, we need to verify it has the same
+ # characteristics
+ if self.subnets:
+ for subnet in self.subnets:
+ if not self.get_router_interface(router['id'], subnet.network['subnets'][0]):
+ raise ChainException("Mismatch of 'subnet_id' for reused "
+                                             "router '{router}'. Router has no subnet id '{sub_id}'."
+ .format(router=self.name,
+ sub_id=subnet.network['subnets'][0]))
+ interfaces = self.manager.neutron_client.list_ports(device_id=router['id'])['ports']
+            # This filter keeps only nfvbench networks, in case other specific networks were
+            # created and attached to the test nfvbench router manually or automatically,
+            # e.g. with HA where the neutron router is virtually present on several network nodes
+ interfaces = [x for x in interfaces if x['fixed_ips'][0]['subnet_id'] in
+ [s.network['subnets'][0] for s in self.subnets]]
+ for interface in interfaces:
+ if self.is_ip_in_network(
+ interface['fixed_ips'][0]['ip_address'],
+ self.manager.config.traffic_generator.tg_gateway_ip_cidrs[0]) \
+ or self.is_ip_in_network(
+ interface['fixed_ips'][0]['ip_address'],
+ self.manager.config.traffic_generator.tg_gateway_ip_cidrs[1]):
+ self.ports[0] = interface
+ else:
+ self.ports[1] = interface
+ if self.routes:
+ for route in self.routes:
+ if route not in router['routes']:
+ LOG.info("Mismatch of 'router' for reused router '%s'."
+ "Router has no existing route destination '%s', "
+ "and nexthop '%s'.", self.name,
+ route['destination'],
+ route['nexthop'])
+ LOG.info("New route added to router %s for reused ", self.name)
+ body = {
+ 'router': {
+ 'routes': self.routes
+ }
+ }
+ self.manager.neutron_client.update_router(router['id'], body)
+
+ LOG.info('Reusing existing router: %s', self.name)
+ self.reuse = True
+ self.router = router
+ return
+
+ body = {
+ 'router': {
+ 'name': self.name,
+ 'admin_state_up': True
+ }
+ }
+ router = self.manager.neutron_client.create_router(body)['router']
+ router_id = router['id']
+
+ if self.subnets:
+ for subnet in self.subnets:
+ router_interface = {'subnet_id': subnet.network['subnets'][0]}
+ self.manager.neutron_client.add_interface_router(router_id, router_interface)
+ interfaces = self.manager.neutron_client.list_ports(device_id=router_id)['ports']
+ interfaces = [x for x in interfaces if x['fixed_ips'][0]['subnet_id'] in
+ [s.network['subnets'][0] for s in self.subnets]]
+ for interface in interfaces:
+ itf = interface['fixed_ips'][0]['ip_address']
+ cidr0 = self.manager.config.traffic_generator.tg_gateway_ip_cidrs[0]
+ cidr1 = self.manager.config.traffic_generator.tg_gateway_ip_cidrs[1]
+ if self.is_ip_in_network(itf, cidr0) or self.is_ip_in_network(itf, cidr1):
+ self.ports[0] = interface
+ else:
+ self.ports[1] = interface
+
+ if self.routes:
+ body = {
+ 'router': {
+ 'routes': self.routes
+ }
+ }
+ self.manager.neutron_client.update_router(router_id, body)
+
+ LOG.info('Created router: %s.', self.name)
+ self.router = self.manager.neutron_client.show_router(router_id)
+
+ def get_uuid(self):
+ """
+ Extract UUID of this router.
+
+ :return: UUID of this router
+ """
+ return self.router['id']
+
+ def get_router_interface(self, router_id, subnet_id):
+ interfaces = self.manager.neutron_client.list_ports(device_id=router_id)['ports']
+ matching_interface = None
+ for interface in interfaces:
+ if interface['fixed_ips'][0]['subnet_id'] == subnet_id:
+ matching_interface = interface
+ return matching_interface
+
+ def is_ip_in_network(self, interface_ip, cidr):
+ return IPAddress(interface_ip) in IPNetwork(cidr)
+
+ def delete(self):
+ """Delete this router."""
+ if not self.reuse and self.router:
+ retry = 0
+ while retry < self.manager.config.generic_retry_count:
+ try:
+ self.manager.neutron_client.delete_router(self.router['id'])
+ LOG.info("Deleted router: %s", self.name)
+ return
+ except Exception:
+ retry += 1
+ LOG.info('Error deleting router %s (retry %d/%d)...',
+ self.name,
+ retry,
+ self.manager.config.generic_retry_count)
+ time.sleep(self.manager.config.generic_poll_sec)
+ LOG.error('Unable to delete router: %s', self.name)
diff --git a/nfvbench/chain_runner.py b/nfvbench/chain_runner.py
index 63cc48f..f045528 100644
--- a/nfvbench/chain_runner.py
+++ b/nfvbench/chain_runner.py
@@ -13,71 +13,217 @@
# License for the specific language governing permissions and limitations
# under the License.
#
+"""This module takes care of coordinating a benchmark run between various modules.
-import traceback
+The ChainRunner class is in charge of coordinating:
+- the chain manager which takes care of staging resources
+- traffic generator client which drives the traffic generator
+- the stats manager which collects and aggregates stats
+"""
-from log import LOG
-from service_chain import ServiceChain
-from traffic_client import TrafficClient
+from collections import OrderedDict
+
+from .chaining import ChainManager
+from .log import LOG
+from .specs import ChainType
+from .stats_manager import StatsManager
+from .traffic_client import TrafficClient
class ChainRunner(object):
"""Run selected chain, collect results and analyse them."""
- def __init__(self, config, clients, cred, specs, factory, notifier=None):
+ def __init__(self, config, cred, specs, factory, notifier=None):
+ """Create a new instance of chain runner.
+
+        Create dependent components.
+        A new instance is created every time the nfvbench config may have changed.
+
+ config: the new nfvbench config to use for this run
+ cred: openstack credentials (or None if no openstack)
+ specs: TBD
+ factory:
+ notifier:
+ """
self.config = config
- self.clients = clients
+ self.cred = cred
self.specs = specs
self.factory = factory
+ self.notifier = notifier
self.chain_name = self.config.service_chain
- try:
- TORClass = factory.get_tor_class(self.config.tor.type, self.config.no_tor_access)
- except AttributeError:
- raise Exception("Requested TOR class '{}' was not found.".format(self.config.tor.type))
-
- self.clients['tor'] = TORClass(self.config.tor.switches)
- self.clients['traffic'] = TrafficClient(config, notifier)
- self.chain = ServiceChain(config, clients, cred, specs, factory, notifier)
+ # get an instance of traffic client
+ self.traffic_client = TrafficClient(config, notifier)
+
+ if self.config.no_traffic:
+ LOG.info('Dry run: traffic generation is disabled')
+ else:
+ # Start the traffic generator server
+ self.traffic_client.start_traffic_generator()
+
+ # get an instance of a chain manager
+ self.chain_manager = ChainManager(self)
+
+ # at this point all resources are setup/discovered
+ # we need to program the traffic dest MAC and VLANs
+ gen_config = self.traffic_client.generator_config
+ if config.vlan_tagging:
+ # VLAN is discovered from the networks
+ gen_config.set_vlans(0, self.chain_manager.get_chain_vlans(0))
+ gen_config.set_vlans(1, self.chain_manager.get_chain_vlans(1))
+ else:
+ LOG.info("Ports: untagged")
+
+ # the only case we do not need to set the dest MAC is in the case of
+ # l2-loopback (because the traffic gen will default to use the peer MAC)
+ # or EXT+ARP+VLAN (because dest MAC will be discovered by TRex ARP)
+ # Note that in the case of EXT+ARP+VxLAN, the dest MACs need to be loaded
+ # because ARP only operates on the dest VTEP IP not on the VM dest MAC
+ if not config.l2_loopback and \
+ (config.service_chain != ChainType.EXT or config.no_arp or config.vxlan):
+ gen_config.set_dest_macs(0, self.chain_manager.get_dest_macs(0))
+ gen_config.set_dest_macs(1, self.chain_manager.get_dest_macs(1))
+
+ if config.vxlan:
+ # VXLAN is discovered from the networks
+ vtep_vlan = gen_config.gen_config.vtep_vlan
+ src_vteps = gen_config.gen_config.src_vteps
+ dst_vtep = gen_config.gen_config.dst_vtep
+ gen_config.set_vxlans(0, self.chain_manager.get_chain_vxlans(0))
+ gen_config.set_vxlans(1, self.chain_manager.get_chain_vxlans(1))
+ gen_config.set_vtep_vlan(0, vtep_vlan)
+ gen_config.set_vtep_vlan(1, vtep_vlan)
+            # Configure source and remote VTEPs on TRex interfaces
+ gen_config.set_vxlan_endpoints(0, src_vteps[0], dst_vtep)
+ gen_config.set_vxlan_endpoints(1, src_vteps[1], dst_vtep)
+ self.config['vxlan_gen_config'] = gen_config
+
+ if config.mpls:
+ # MPLS VPN is discovered from the networks
+ src_vteps = gen_config.gen_config.src_vteps
+ vtep_gateway_ips = gen_config.gen_config.vtep_gateway_ips
+ gen_config.set_mpls_inner_labels(0, self.chain_manager.get_chain_mpls_inner_labels(0))
+ gen_config.set_mpls_inner_labels(1, self.chain_manager.get_chain_mpls_inner_labels(1))
+ outer_mpls_labels_left = self.config.internal_networks.left.mpls_transport_labels
+ outer_mpls_labels_right = self.config.internal_networks.right.mpls_transport_labels
+ if outer_mpls_labels_left or outer_mpls_labels_right:
+ gen_config.set_mpls_outer_labels(0, outer_mpls_labels_left)
+ gen_config.set_mpls_outer_labels(1, outer_mpls_labels_right)
+            # Configure source and remote VTEPs on TRex interfaces
+ gen_config.set_mpls_peers(0, src_vteps[0], vtep_gateway_ips[0])
+ gen_config.set_mpls_peers(1, src_vteps[1], vtep_gateway_ips[1])
+ self.config['mpls_gen_config'] = gen_config
+
+ # get an instance of the stats manager
+ self.stats_manager = StatsManager(self)
+ LOG.info('ChainRunner initialized')
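+
+    # Typical lifecycle, sketched as comments only: the nfvbench core builds the
+    # config, credentials, specs and factory objects (placeholders below), then
+    # drives the runner as follows:
+    #
+    #     runner = ChainRunner(config, cred, specs, factory)
+    #     results = runner.run()    # returns {} when config.no_traffic is set
+    #     runner.close()            # deletes staged resources unless no_cleanup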
+
+ def __setup_traffic(self):
+ # possibly skip connectivity check
+ if self.config.no_e2e_check:
+ LOG.info('Skipping end to end connectivity check')
+ return
+ self.traffic_client.setup()
+ if not self.config.no_traffic:
+ # ARP is needed for EXT chain or VxLAN overlay or MPLS unless disabled explicitly
+ if (self.config.service_chain == ChainType.EXT or self.config.mpls or
+ self.config.vxlan or self.config.l3_router or self.config.loop_vm_arp)\
+ and not self.config.no_arp:
+ self.traffic_client.ensure_arp_successful()
+ self.traffic_client.ensure_end_to_end()
+
+ def __get_result_per_frame_size(self, frame_size, bidirectional):
+ traffic_result = {
+ frame_size: {}
+ }
+ result = {}
+ if not self.config.no_traffic:
+ self.traffic_client.set_traffic(frame_size, bidirectional)
- LOG.info('ChainRunner initialized.')
+ if self.config.single_run:
+ result = self.stats_manager.run_fixed_rate()
+ else:
+ results = self.traffic_client.get_ndr_and_pdr()
+
+ for dr in ['pdr', 'ndr']:
+ if dr in results:
+ traffic_result[frame_size][dr] = results[dr]
+ if 'warning' in results[dr]['stats'] and results[dr]['stats']['warning']:
+ traffic_result['warning'] = results[dr]['stats']['warning']
+ traffic_result[frame_size]['iteration_stats'] = results['iteration_stats']
+
+ if self.config.single_run:
+ result['run_config'] = self.traffic_client.get_run_config(result)
+ required = result['run_config']['direction-total']['orig']['rate_pps']
+ if self.config.periodic_gratuitous_arp:
+ actual = result['stats']['total_tx_rate'] + self.config.gratuitous_arp_pps
+ else:
+ actual = result['stats']['total_tx_rate']
+ warning = self.traffic_client.compare_tx_rates(required, actual)
+ if warning is not None:
+ result['run_config']['warning'] = warning
+
+ traffic_result[frame_size].update(result)
+ return traffic_result
+
+ def __get_chain_result(self):
+ result = OrderedDict()
+ for fs in self.config.frame_sizes:
+ result.update(self.__get_result_per_frame_size(fs,
+ self.config.traffic.bidirectional))
+ chain_result = {
+ 'flow_count': self.config.flow_count,
+ 'service_chain_count': self.config.service_chain_count,
+ 'bidirectional': self.config.traffic.bidirectional,
+ 'profile': self.config.traffic.profile,
+ 'compute_nodes': self.stats_manager.get_compute_nodes_bios(),
+ 'result': result
+ }
+ return chain_result
def run(self):
- """
- Run a chain, collect and analyse results.
+ """Run the requested benchmark.
- :return: dictionary
+ return: the results of the benchmark as a dict
"""
- self.clients['traffic'].start_traffic_generator()
- self.clients['traffic'].set_macs()
+ results = {}
+ if self.config.no_traffic:
+ return results
+
+ LOG.info('Starting %dx%s benchmark...', self.config.service_chain_count, self.chain_name)
+ self.stats_manager.create_worker()
+ if self.config.vxlan or self.config.mpls:
+ # Configure vxlan or mpls tunnels
+ self.stats_manager.worker.config_interfaces()
+ self.__setup_traffic()
- return self.chain.run()
+ results[self.chain_name] = {'result': self.__get_chain_result()}
+
+ LOG.info("Service chain '%s' run completed.", self.chain_name)
+ return results
def close(self):
+ """Close this instance of chain runner and delete resources if applicable."""
try:
if not self.config.no_cleanup:
LOG.info('Cleaning up...')
+ if self.chain_manager:
+ self.chain_manager.delete()
else:
LOG.info('Clean up skipped.')
-
- for client in ['traffic', 'tor']:
- try:
- self.clients[client].close()
- except Exception as e:
- traceback.print_exc()
- LOG.error(e)
-
- self.chain.close()
+ try:
+ self.traffic_client.close()
+ except Exception as exc:
+ LOG.exception(exc)
+ if self.stats_manager:
+ self.stats_manager.close()
except Exception:
- traceback.print_exc()
- LOG.error('Cleanup not finished.')
+ LOG.exception('Cleanup not finished')
def get_version(self):
- versions = {
- 'Traffic Generator': self.clients['traffic'].get_version(),
- 'TOR': self.clients['tor'].get_version(),
- }
-
- versions.update(self.chain.get_version())
-
+ """Retrieve the version of dependent components."""
+ versions = {}
+ if self.traffic_client:
+ versions['Traffic_Generator'] = self.traffic_client.get_version()
+ versions.update(self.stats_manager.get_version())
return versions
diff --git a/nfvbench/chain_workers.py b/nfvbench/chain_workers.py
index 2e36fb1..e332d7b 100644
--- a/nfvbench/chain_workers.py
+++ b/nfvbench/chain_workers.py
@@ -17,37 +17,38 @@
class BasicWorker(object):
- def __init__(self, config, clients, specs):
- self.config = config
- self.clients = clients
- self.specs = specs
+ def __init__(self, stats_manager):
+ self.stats_manager = stats_manager
+ self.chain_manager = stats_manager.chain_runner.chain_manager
+ self.config = stats_manager.config
+ self.specs = stats_manager.specs
- def set_vlan_tag(self, device, vlan):
- device.set_vlan_tag(vlan)
+ def get_compute_nodes_bios(self):
+ return {}
- def set_vlans(self, vlans):
- pass
+ def get_version(self):
+ return {}
def config_interfaces(self):
- pass
-
- def get_data(self):
return {}
- def get_network_interfaces(self, index):
- return []
-
- def clear_interfaces(self):
+ def close(self):
pass
- def run(self):
- return None, None
+ def insert_interface_stats(self, pps_list):
+ """Insert interface stats to a list of packet path stats.
- def get_compute_nodes_bios(self):
- return {}
+ pps_list: a list of packet path stats instances indexed by chain index
- def get_version(self):
- return {}
+ Specialized workers can insert their own interface stats inside each existing packet path
+ stats for every chain.
+ """
- def close(self):
- pass
+ def update_interface_stats(self, diff=False):
+ """Update all interface stats.
+
+ diff: if False, simply refresh the interface stats values with latest values
+ if True, diff the interface stats with the latest values
+ Make sure that the interface stats inserted in insert_interface_stats() are updated
+ with proper values
+ """
diff --git a/nfvbench/chaining.py b/nfvbench/chaining.py
new file mode 100644
index 0000000..d6f67f9
--- /dev/null
+++ b/nfvbench/chaining.py
@@ -0,0 +1,1559 @@
+#!/usr/bin/env python
+# Copyright 2018 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+# This module takes care of chaining networks, ports and vms
+#
+"""NFVBENCH CHAIN DISCOVERY/STAGING.
+
+This module takes care of staging/discovering all resources that are participating in a
+benchmarking session: flavors, networks, ports, VNF instances.
+If a resource is discovered with the same name, it will be reused.
+Otherwise it will be created.
+
+ChainManager: manages VM image, flavor, the staging discovery of all chains
+ has 1 or more chains
+Chain: manages one chain, has 2 or more networks and 1 or more instances
+ChainNetwork: manages 1 network in a chain
+ChainVnf: manages 1 VNF instance in a chain, has 2 ports
+ChainVnfPort: manages 1 instance port
+
+ChainManager-->Chain(*)
+Chain-->ChainNetwork(*),ChainVnf(*)
+ChainVnf-->ChainVnfPort(2)
+
+Once created/discovered, instances are checked to be in the active state (ready to pass traffic)
+Configuration parameters that will influence how these resources are staged/related:
+- openstack or no openstack
+- chain type
+- number of chains
+- number of VNF in each chain (PVP, PVVP)
+- SRIOV and middle port SRIOV for port types
+- whether networks are shared across chains or not
+
+There is no traffic generation involved in this module.
+"""
+import os
+import re
+import time
+
+import glanceclient
+from neutronclient.neutron import client as neutronclient
+from novaclient.client import Client
+
+from attrdict import AttrDict
+from .chain_router import ChainRouter
+from . import compute
+from .log import LOG
+from .specs import ChainType
+# Left and right index for network and port lists
+LEFT = 0
+RIGHT = 1
+# L3 traffic edge networks are at the end of networks list
+EDGE_LEFT = -2
+EDGE_RIGHT = -1
+# Name of the VM config file
+NFVBENCH_CFG_FILENAME = 'nfvbenchvm.conf'
+# Full pathname of the VM config file in the VM
+NFVBENCH_CFG_VM_PATHNAME = os.path.join('/etc/', NFVBENCH_CFG_FILENAME)
+# full path of the boot shell script template file on the server where nfvbench runs
+BOOT_SCRIPT_PATHNAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ 'nfvbenchvm',
+ NFVBENCH_CFG_FILENAME)
+
+
+class ChainException(Exception):
+ """Exception while operating the chains."""
+
+class NetworkEncaps(object):
+ """Network encapsulation."""
+
+
+class ChainFlavor(object):
+ """Class to manage the chain flavor."""
+
+ def __init__(self, flavor_name, flavor_dict, comp):
+ """Create a flavor."""
+ self.name = flavor_name
+ self.comp = comp
+ self.flavor = self.comp.find_flavor(flavor_name)
+ self.reuse = False
+ if self.flavor:
+ self.reuse = True
+ LOG.info("Reused flavor '%s'", flavor_name)
+ else:
+ extra_specs = flavor_dict.pop('extra_specs', None)
+
+ self.flavor = comp.create_flavor(flavor_name,
+ **flavor_dict)
+
+ LOG.info("Created flavor '%s'", flavor_name)
+ if extra_specs:
+ self.flavor.set_keys(extra_specs)
+
+ def delete(self):
+ """Delete this flavor."""
+ if not self.reuse and self.flavor:
+ self.flavor.delete()
+ LOG.info("Flavor '%s' deleted", self.name)
+
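+# Illustrative flavor_dict shape (comments only, example values, not defaults):
+# the constructor pops 'extra_specs', passes the remaining keys to
+# comp.create_flavor() and applies the extra specs with flavor.set_keys():
+#
+#     flavor_dict = {'vcpus': 2, 'ram': 4096, 'disk': 0,
+#                    'extra_specs': {'hw:cpu_policy': 'dedicated'}}
+#     flavor = ChainFlavor('nfvbench.loop_vm_flavor', flavor_dict, comp)
+#     ...
+#     flavor.delete()   # no-op if the flavor was reused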
+
+class ChainVnfPort(object):
+ """A port associated to one VNF in the chain."""
+
+ def __init__(self, name, vnf, chain_network, vnic_type):
+ """Create or reuse a port on a given network.
+
+        If vnf.instance is None, the VNF instance is not reused and this ChainVnfPort instance
+        must create a new port.
+        Otherwise vnf.instance is a reused VNF instance and this ChainVnfPort instance must
+        find an existing port to reuse that matches the port requirements: same attached network,
+        instance, name, vnic type.
+
+        name: name for this port
+        vnf: ChainVnf instance that owns this port
+ chain_network: ChainNetwork instance where this port should attach
+ vnic_type: required vnic type for this port
+ """
+ self.name = name
+ self.vnf = vnf
+ self.manager = vnf.manager
+ self.reuse = False
+ self.port = None
+ self.floating_ip = None
+ if vnf.instance:
+ # VNF instance is reused, we need to find an existing port that matches this instance
+ # and network
+ # discover ports attached to this instance
+ port_list = self.manager.get_ports_from_network(chain_network)
+ for port in port_list:
+ if port['name'] != name:
+ continue
+ if port['binding:vnic_type'] != vnic_type:
+ continue
+ if port['device_id'] == vnf.get_uuid():
+ self.port = port
+ LOG.info('Reusing existing port %s mac=%s', name, port['mac_address'])
+ break
+ else:
+ raise ChainException('Cannot find matching port')
+ else:
+ # VNF instance is not created yet, we need to create a new port
+ body = {
+ "port": {
+ 'name': name,
+ 'network_id': chain_network.get_uuid(),
+ 'binding:vnic_type': vnic_type
+ }
+ }
+ subnet_id = chain_network.get_subnet_uuid()
+ if subnet_id:
+ body['port']['fixed_ips'] = [{'subnet_id': subnet_id}]
+
+ port = self.manager.neutron_client.create_port(body)
+ self.port = port['port']
+ LOG.info('Created port %s', name)
+ try:
+ self.manager.neutron_client.update_port(self.port['id'], {
+ 'port': {
+ 'security_groups': [],
+ 'port_security_enabled': False,
+ }
+ })
+ LOG.info('Security disabled on port %s', name)
+ except Exception:
+ LOG.info('Failed to disable security on port %s (ignored)', name)
+
+ def get_mac(self):
+ """Get the MAC address for this port."""
+ return self.port['mac_address']
+
+ def get_ip(self):
+ """Get the IP address for this port."""
+ return self.port['fixed_ips'][0]['ip_address']
+
+ def set_floating_ip(self, chain_network):
+ # create and add floating ip to port
+ try:
+ self.floating_ip = self.manager.neutron_client.create_floatingip({
+ 'floatingip': {
+ 'floating_network_id': chain_network.get_uuid(),
+ 'port_id': self.port['id'],
+ 'description': 'nfvbench floating ip for port:' + self.port['name'],
+ }})['floatingip']
+ LOG.info('Floating IP %s created and associated on port %s',
+ self.floating_ip['floating_ip_address'], self.name)
+ return self.floating_ip['floating_ip_address']
+ except Exception:
+            LOG.info('Failed to create and associate floating IP on port %s (ignored)', self.name)
+ return self.port['fixed_ips'][0]['ip_address']
+
+ def delete(self):
+ """Delete this port instance."""
+ if self.reuse or not self.port:
+ return
+ for _ in range(0, self.manager.config.generic_retry_count):
+ try:
+ self.manager.neutron_client.delete_port(self.port['id'])
+ LOG.info("Deleted port %s", self.name)
+ if self.floating_ip:
+ self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
+ LOG.info("Deleted floating IP %s", self.floating_ip['description'])
+ return
+ except Exception:
+ time.sleep(self.manager.config.generic_poll_sec)
+ LOG.error('Unable to delete port: %s', self.name)
+
+
+class ChainNetwork(object):
+ """Could be a shared network across all chains or a chain private network."""
+
+ def __init__(self, manager, network_config, chain_id=None, lookup_only=False,
+ suffix=None):
+ """Create a network for given chain.
+
+ network_config: a dict containing the network properties
+ (name, segmentation_id and physical_network)
+ chain_id: to which chain the networks belong.
+ a None value will mean that these networks are shared by all chains
+ suffix: a suffix to add to the network name (if not None)
+ """
+ self.manager = manager
+ if chain_id is None:
+ self.name = network_config.name
+ else:
+ # the name itself can be either a string or a list of names indexed by chain ID
+ if isinstance(network_config.name, tuple):
+ self.name = network_config.name[chain_id]
+ else:
+ # network_config.name is a prefix string
+ self.name = network_config.name + str(chain_id)
+ if suffix:
+ self.name = self.name + suffix
+ self.segmentation_id = self._get_item(network_config.segmentation_id,
+ chain_id, auto_index=True)
+ self.subnet_name = self._get_item(network_config.subnet, chain_id)
+ self.physical_network = self._get_item(network_config.physical_network, chain_id)
+
+ self.reuse = False
+ self.network = None
+ self.vlan = None
+ self.router_name = None
+ if manager.config.l3_router and hasattr(network_config, 'router_name'):
+ self.router_name = network_config.router_name
+ try:
+ self._setup(network_config, lookup_only)
+ except Exception:
+ if lookup_only:
+ LOG.error("Cannot find network %s", self.name)
+ else:
+ LOG.error("Error creating network %s", self.name)
+ self.delete()
+ raise
+
+ def _get_item(self, item_field, index, auto_index=False):
+ """Retrieve an item from a list or a single value.
+
+        item_field: can be None, a tuple or a single value
+        index: if None, same as 0, else the index for a chain
+        auto_index: if true, will automatically compute the final value by adding the
+                    index to the base value (if a full list is not provided)
+
+        If the item_field is not a tuple, it is considered the same as a tuple with that
+        value at any index.
+        If a list is provided, its length must be > index.
+ """
+ if not item_field:
+ return None
+ if index is None:
+ index = 0
+ if isinstance(item_field, tuple):
+ try:
+ return item_field[index]
+ except IndexError:
+ raise ChainException("List %s is too short for chain index %d" %
+ (str(item_field), index)) from IndexError
+ # single value is configured
+ if auto_index:
+ return item_field + index
+ return item_field
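+
+    # Worked example (comments only) of the resolution rules above, assuming
+    # chain index 2:
+    #
+    #     _get_item(('net_a', 'net_b', 'net_c'), 2)   -> 'net_c'
+    #     _get_item('physnet_sriov', 2)               -> 'physnet_sriov'
+    #     _get_item(1000, 2, auto_index=True)         -> 1002
+    #     _get_item(None, 2)                          -> None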
+
+ def _setup(self, network_config, lookup_only):
+ # Lookup if there is a matching network with same name
+ networks = self.manager.neutron_client.list_networks(name=self.name)
+ if networks['networks']:
+ network = networks['networks'][0]
+ # a network of same name already exists, we need to verify it has the same
+ # characteristics
+ if self.segmentation_id:
+ if network['provider:segmentation_id'] != self.segmentation_id:
+ raise ChainException("Mismatch of 'segmentation_id' for reused "
+ "network '{net}'. Network has id '{seg_id1}', "
+ "configuration requires '{seg_id2}'."
+ .format(net=self.name,
+ seg_id1=network['provider:segmentation_id'],
+ seg_id2=self.segmentation_id))
+
+ if self.physical_network:
+ if network['provider:physical_network'] != self.physical_network:
+ raise ChainException("Mismatch of 'physical_network' for reused "
+ "network '{net}'. Network has '{phys1}', "
+ "configuration requires '{phys2}'."
+ .format(net=self.name,
+ phys1=network['provider:physical_network'],
+ phys2=self.physical_network))
+
+ LOG.info('Reusing existing network %s', self.name)
+ self.reuse = True
+ self.network = network
+ else:
+ if lookup_only:
+ raise ChainException('Network %s not found' % self.name)
+ body = {
+ 'network': {
+ 'name': self.name,
+ 'admin_state_up': True
+ }
+ }
+ if network_config.network_type:
+ body['network']['provider:network_type'] = network_config.network_type
+ if self.segmentation_id:
+ body['network']['provider:segmentation_id'] = self.segmentation_id
+ if self.physical_network:
+ body['network']['provider:physical_network'] = self.physical_network
+ self.network = self.manager.neutron_client.create_network(body)['network']
+ # create associated subnet, all subnets have the same name (which is ok since
+ # we do not need to address them directly by name)
+ body = {
+ 'subnet': {'name': network_config.subnet,
+ 'cidr': network_config.cidr,
+ 'network_id': self.network['id'],
+ 'enable_dhcp': False,
+ 'ip_version': 4,
+ 'dns_nameservers': []}
+ }
+ subnet = self.manager.neutron_client.create_subnet(body)['subnet']
+ # add subnet id to the network dict since it has just been added
+ self.network['subnets'] = [subnet['id']]
+ LOG.info('Created network: %s', self.name)
+
+ def get_uuid(self):
+ """
+ Extract UUID of this network.
+
+ :return: UUID of this network
+ """
+ return self.network['id']
+
+ def get_subnet_uuid(self):
+ """
+ Extract UUID of this subnet network.
+
+ :return: UUID of this subnet network
+ """
+ for subnet in self.network['subnets']:
+ if self.subnet_name == self.manager.neutron_client \
+ .show_subnet(subnet)['subnet']['name']:
+ return subnet
+ return None
+
+ def get_vlan(self):
+ """
+ Extract vlan for this network.
+
+ :return: vlan ID for this network
+ """
+ if self.network['provider:network_type'] != 'vlan':
+ raise ChainException('Trying to retrieve VLAN id for non VLAN network')
+ return self.network['provider:segmentation_id']
+
+ def get_vxlan(self):
+ """
+ Extract VNI for this network.
+
+ :return: VNI ID for this network
+ """
+
+ return self.network['provider:segmentation_id']
+
+ def get_mpls_inner_label(self):
+ """
+ Extract MPLS VPN Label for this network.
+
+ :return: MPLS VPN Label for this network
+ """
+
+ return self.network['provider:segmentation_id']
+
+ def delete(self):
+ """Delete this network."""
+ if not self.reuse and self.network:
+ for retry in range(0, self.manager.config.generic_retry_count):
+ try:
+ self.manager.neutron_client.delete_network(self.network['id'])
+ LOG.info("Deleted network: %s", self.name)
+ return
+ except Exception:
+ LOG.info('Error deleting network %s (retry %d/%d)...',
+ self.name,
+ retry + 1,
+ self.manager.config.generic_retry_count)
+ time.sleep(self.manager.config.generic_poll_sec)
+ LOG.error('Unable to delete network: %s', self.name)
+
+
+class ChainVnf(object):
+ """A class to represent a VNF in a chain."""
+
+ def __init__(self, chain, vnf_id, networks):
+ """Reuse a VNF instance with same characteristics or create a new VNF instance.
+
+ chain: the chain where this vnf belongs
+ vnf_id: indicates the index of this vnf in its chain (first vnf=0)
+ networks: the list of all networks (ChainNetwork) of the current chain
+ """
+ self.manager = chain.manager
+ self.chain = chain
+ self.vnf_id = vnf_id
+ self.name = self.manager.config.loop_vm_name + str(chain.chain_id)
+ if len(networks) > 2:
+ # we will have more than 1 VM in each chain
+ self.name += '-' + str(vnf_id)
+ # A list of ports for this chain
+ # There are normally 2 ports carrying traffic (index 0, and index 1) and
+ # potentially multiple idle ports not carrying traffic (index 2 and up)
+ # For example if 7 idle interfaces are requested, the corresp. ports will be
+ # at index 2 to 8
+ self.ports = []
+ self.management_port = None
+ self.routers = []
+ self.status = None
+ self.instance = None
+ self.reuse = False
+ self.host_ip = None
+ self.idle_networks = []
+ self.idle_ports = []
+ try:
+ # the vnf_id is conveniently also the starting index in networks
+ # for the left and right networks associated to this VNF
+ if self.manager.config.l3_router:
+ self._setup(networks[vnf_id:vnf_id + 4])
+ else:
+ self._setup(networks[vnf_id:vnf_id + 2])
+ except Exception:
+ LOG.error("Error creating VNF %s", self.name)
+ self.delete()
+ raise
+
+ def _get_vm_config(self, remote_mac_pair):
+ config = self.manager.config
+ devices = self.manager.generator_config.devices
+
+ if config.l3_router:
+ tg_gateway1_ip = self.routers[LEFT].ports[1]['fixed_ips'][0][
+ 'ip_address'] # router edge ip left
+ tg_gateway2_ip = self.routers[RIGHT].ports[1]['fixed_ips'][0][
+ 'ip_address'] # router edge ip right
+ tg_mac1 = self.routers[LEFT].ports[1]['mac_address'] # router edge mac left
+ tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
+ # edge cidr mask left
+ vnf_gateway1_cidr = \
+ self.ports[LEFT].get_ip() + self.__get_network_mask(
+ self.manager.config.edge_networks.left.cidr)
+ # edge cidr mask right
+ vnf_gateway2_cidr = \
+ self.ports[RIGHT].get_ip() + self.__get_network_mask(
+ self.manager.config.edge_networks.right.cidr)
+ if config.vm_forwarder != 'vpp':
+ raise ChainException(
+                    'L3 router mode requires VPP as the VM forwarder. '
+                    'Please update your config file with: vm_forwarder: vpp')
+ else:
+ tg_gateway1_ip = devices[LEFT].tg_gateway_ip_addrs
+ tg_gateway2_ip = devices[RIGHT].tg_gateway_ip_addrs
+ if not config.loop_vm_arp:
+ tg_mac1 = remote_mac_pair[0]
+ tg_mac2 = remote_mac_pair[1]
+ else:
+ tg_mac1 = ""
+ tg_mac2 = ""
+
+ g1cidr = devices[LEFT].get_gw_ip(
+ self.chain.chain_id) + self.__get_network_mask(
+ self.manager.config.internal_networks.left.cidr)
+ g2cidr = devices[RIGHT].get_gw_ip(
+ self.chain.chain_id) + self.__get_network_mask(
+ self.manager.config.internal_networks.right.cidr)
+
+ vnf_gateway1_cidr = g1cidr
+ vnf_gateway2_cidr = g2cidr
+
+ with open(BOOT_SCRIPT_PATHNAME, 'r', encoding="utf-8") as boot_script:
+ content = boot_script.read()
+ vm_config = {
+ 'forwarder': config.vm_forwarder,
+ 'intf_mac1': self.ports[LEFT].get_mac(),
+ 'intf_mac2': self.ports[RIGHT].get_mac(),
+ 'tg_gateway1_ip': tg_gateway1_ip,
+ 'tg_gateway2_ip': tg_gateway2_ip,
+ 'tg_net1': devices[LEFT].ip_addrs,
+ 'tg_net2': devices[RIGHT].ip_addrs,
+ 'vnf_gateway1_cidr': vnf_gateway1_cidr,
+ 'vnf_gateway2_cidr': vnf_gateway2_cidr,
+ 'tg_mac1': tg_mac1,
+ 'tg_mac2': tg_mac2,
+ 'vif_mq_size': config.vif_multiqueue_size,
+ 'num_mbufs': config.num_mbufs
+ }
+ if self.manager.config.use_management_port:
+ mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
+ mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
+ vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
+ vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
+ vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
+ else:
+ # Interface management config left empty to avoid error in VM spawn
+ # if nfvbench config has values for management network but use_management_port=false
+ vm_config['intf_mgmt_cidr'] = ''
+ vm_config['intf_mgmt_ip_gw'] = ''
+ vm_config['intf_mac_mgmt'] = ''
+ return content.format(**vm_config)
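+
+    # Note: the boot script template at BOOT_SCRIPT_PATHNAME is rendered with
+    # str.format(), so it is expected to contain the vm_config keys as
+    # placeholders, e.g. lines of the form (illustrative, not the actual template):
+    #
+    #     FORWARDER={forwarder}
+    #     INTF_MAC1={intf_mac1}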
+
+ @staticmethod
+ def __get_network_mask(network):
+ return '/' + network.split('/')[1]
+
+ def _get_vnic_type(self, port_index):
+ """Get the right vnic type for given port indexself.
+
+ If SR-IOV is specified, middle ports in multi-VNF chains
+ can use vswitch or SR-IOV based on config.use_sriov_middle_net
+ """
+ if self.manager.config.sriov:
+ chain_length = self.chain.get_length()
+ if self.manager.config.use_sriov_middle_net or chain_length == 1:
+ return 'direct'
+ if self.vnf_id == 0 and port_index == 0:
+ # first VNF in chain must use sriov for left port
+ return 'direct'
+ if (self.vnf_id == chain_length - 1) and (port_index == 1):
+ # last VNF in chain must use sriov for right port
+ return 'direct'
+ return 'normal'
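+
+    # Worked example (comments only): for a 2-VNF chain (PVVP) with sriov=True and
+    # use_sriov_middle_net=False, the vnic types resolve to:
+    #
+    #     VNF0 port0 -> 'direct'  (left edge)
+    #     VNF0 port1 -> 'normal'  (middle net)
+    #     VNF1 port0 -> 'normal'  (middle net)
+    #     VNF1 port1 -> 'direct'  (right edge)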
+
+ def _get_idle_networks_ports(self):
+ """Get the idle networks for PVP or PVVP chain (non shared net only)
+
+ For EXT packet path or shared net, returns empty list.
+ For PVP, PVVP these networks will be created if they do not exist.
+ chain_id: to which chain the networks belong.
+ a None value will mean that these networks are shared by all chains
+ """
+ networks = []
+ ports = []
+ config = self.manager.config
+ chain_id = self.chain.chain_id
+ idle_interfaces_per_vm = config.idle_interfaces_per_vm
+ if config.service_chain == ChainType.EXT or chain_id is None or \
+ idle_interfaces_per_vm == 0:
+ return
+
+ # Make a copy of the idle networks dict as we may have to modify the
+ # segmentation ID
+ idle_network_cfg = AttrDict(config.idle_networks)
+ if idle_network_cfg.segmentation_id:
+ segmentation_id = idle_network_cfg.segmentation_id + \
+ chain_id * idle_interfaces_per_vm
+ else:
+ segmentation_id = None
+ try:
+ # create as many idle networks and ports as requested
+ for idle_index in range(idle_interfaces_per_vm):
+ if config.service_chain == ChainType.PVP:
+ suffix = '.%d' % (idle_index)
+ else:
+ suffix = '.%d.%d' % (self.vnf_id, idle_index)
+ port_name = self.name + '-idle' + str(idle_index)
+ # update the segmentation id based on chain id and idle index
+ if segmentation_id:
+ idle_network_cfg.segmentation_id = segmentation_id + idle_index
+ port_name = port_name + "." + str(segmentation_id)
+
+ networks.append(ChainNetwork(self.manager,
+ idle_network_cfg,
+ chain_id,
+ suffix=suffix))
+ ports.append(ChainVnfPort(port_name,
+ self,
+ networks[idle_index],
+ 'normal'))
+ except Exception:
+ # need to cleanup all successful networks
+ for net in networks:
+ net.delete()
+ for port in ports:
+ port.delete()
+ raise
+ self.idle_networks = networks
+ self.idle_ports = ports
+
+ def _setup(self, networks):
+ flavor_id = self.manager.flavor.flavor.id
+ # Check if we can reuse an instance with same name
+ for instance in self.manager.existing_instances:
+ if instance.name == self.name:
+ instance_left = LEFT
+ instance_right = RIGHT
+ # In case of L3 traffic instance use edge networks
+ if self.manager.config.l3_router:
+ instance_left = EDGE_LEFT
+ instance_right = EDGE_RIGHT
+ # Verify that other instance characteristics match
+ if instance.flavor['id'] != flavor_id:
+ self._reuse_exception('Flavor mismatch')
+ if instance.status != "ACTIVE":
+ self._reuse_exception('Matching instance is not in ACTIVE state')
+ # The 2 networks for this instance must also be reused
+ if not networks[instance_left].reuse:
+ self._reuse_exception('network %s is new' % networks[instance_left].name)
+ if not networks[instance_right].reuse:
+ self._reuse_exception('network %s is new' % networks[instance_right].name)
+ # instance.networks have the network names as keys:
+ # {'nfvbench-rnet0': ['192.168.2.10'], 'nfvbench-lnet0': ['192.168.1.8']}
+ if networks[instance_left].name not in instance.networks:
+ self._reuse_exception('Left network mismatch')
+ if networks[instance_right].name not in instance.networks:
+ self._reuse_exception('Right network mismatch')
+
+ self.reuse = True
+ self.instance = instance
+ LOG.info('Reusing existing instance %s on %s',
+ self.name, self.get_hypervisor_name())
+ # create management port if needed
+ if self.manager.config.use_management_port:
+ self.management_port = ChainVnfPort(self.name + '-mgmt', self,
+ self.manager.management_network, 'normal')
+ ip = self.management_port.port['fixed_ips'][0]['ip_address']
+ if self.manager.config.use_floating_ip:
+ ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
+ LOG.info("Management interface will be active using IP: %s, "
+ "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
+ # create or reuse/discover 2 ports per instance
+ if self.manager.config.l3_router:
+ for index in [0, 1]:
+ self.ports.append(ChainVnfPort(self.name + '-' + str(index),
+ self,
+ networks[index + 2],
+ self._get_vnic_type(index)))
+ else:
+ for index in [0, 1]:
+ self.ports.append(ChainVnfPort(self.name + '-' + str(index),
+ self,
+ networks[index],
+ self._get_vnic_type(index)))
+
+ # create idle networks and ports only if instance is not reused
+ # if reused, we do not care about idle networks/ports
+ if not self.reuse:
+ self._get_idle_networks_ports()
+
+ # Create neutron routers for L3 traffic use case
+ if self.manager.config.l3_router and self.manager.openstack:
+ internal_nets = networks[:2]
+ if self.manager.config.service_chain == ChainType.PVP:
+ edge_nets = networks[2:]
+ else:
+ edge_nets = networks[3:]
+ subnets_left = [internal_nets[0], edge_nets[0]]
+ routes_left = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
+ 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
+ 0]},
+ {'destination': self.manager.config.traffic_generator.ip_addrs[1],
+ 'nexthop': self.ports[0].get_ip()}]
+ self.routers.append(
+ ChainRouter(self.manager, edge_nets[0].router_name, subnets_left, routes_left))
+ subnets_right = [internal_nets[1], edge_nets[1]]
+ routes_right = [{'destination': self.manager.config.traffic_generator.ip_addrs[0],
+ 'nexthop': self.ports[1].get_ip()},
+ {'destination': self.manager.config.traffic_generator.ip_addrs[1],
+ 'nexthop': self.manager.config.traffic_generator.tg_gateway_ip_addrs[
+ 1]}]
+ self.routers.append(
+ ChainRouter(self.manager, edge_nets[1].router_name, subnets_right, routes_right))
+ # Overload gateway_ips property with router ip address for ARP and traffic calls
+ self.manager.generator_config.devices[LEFT].set_gw_ip(
+                self.routers[LEFT].ports[0]['fixed_ips'][0]['ip_address'])  # router edge ip left
+ self.manager.generator_config.devices[RIGHT].set_gw_ip(
+                self.routers[RIGHT].ports[0]['fixed_ips'][0]['ip_address'])  # router edge ip right
+
+        # if not reused, the actual VM creation is deferred until after all ports in the chain
+        # are created since we need to know the next MAC in a multi-VNF chain
+
+ def create_vnf(self, remote_mac_pair):
+ """Create the VNF instance if it does not already exist."""
+ if self.instance is None:
+ port_ids = []
+ if self.manager.config.use_management_port:
+ port_ids.append({'port-id': self.management_port.port['id']})
+ port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
+ # add idle ports
+ for idle_port in self.idle_ports:
+ port_ids.append({'port-id': idle_port.port['id']})
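+            # port_ids is a list of {'port-id': <uuid>} dicts: management port first
+            # (if enabled), then the left/right data ports, then any idle ports;
+            # the list order also determines the NIC order seen by the guest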
+ vm_config = self._get_vm_config(remote_mac_pair)
+ az = self.manager.placer.get_required_az()
+ server = self.manager.comp.create_server(self.name,
+ self.manager.image_instance,
+ self.manager.flavor.flavor,
+ None,
+ port_ids,
+ None,
+ avail_zone=az,
+ user_data=None,
+ config_drive=True,
+ files={NFVBENCH_CFG_VM_PATHNAME: vm_config})
+ if server:
+ self.instance = server
+ if self.manager.placer.is_resolved():
+ LOG.info('Created instance %s on %s', self.name, az)
+ else:
+ # the location is undetermined at this point
+ # self.get_hypervisor_name() will return None
+ LOG.info('Created instance %s - waiting for placement resolution...', self.name)
+ # here we MUST wait until this instance is resolved otherwise subsequent
+ # VNF creation can be placed in other hypervisors!
+ config = self.manager.config
+ max_retries = int((config.check_traffic_time_sec +
+ config.generic_poll_sec - 1) / config.generic_poll_sec)
+ retry = 0
+ for retry in range(max_retries):
+ status = self.get_status()
+ if status == 'ACTIVE':
+ hyp_name = self.get_hypervisor_name()
+ LOG.info('Instance %s is active and has been placed on %s',
+ self.name, hyp_name)
+ self.manager.placer.register_full_name(hyp_name)
+ break
+ if status == 'ERROR':
+ raise ChainException('Instance %s creation error: %s' %
+ (self.name,
+ self.instance.fault['message']))
+ LOG.info('Waiting for instance %s to become active (retry %d/%d)...',
+                             self.name, retry + 1, max_retries)
+ time.sleep(config.generic_poll_sec)
+ else:
+ # timing out
+ LOG.error('Instance %s creation timed out', self.name)
+ raise ChainException('Instance %s creation timed out' % self.name)
+ self.reuse = False
+ else:
+ raise ChainException('Unable to create instance: %s' % (self.name))
+
+ def _reuse_exception(self, reason):
+ raise ChainException('Instance %s cannot be reused (%s)' % (self.name, reason))
+
+ def get_status(self):
+        """Get the status of this instance."""
+ if self.instance.status != 'ACTIVE':
+ self.instance = self.manager.comp.poll_server(self.instance)
+ return self.instance.status
+
+ def get_hostname(self):
+ """Get the hypervisor host name running this VNF instance."""
+ if self.manager.is_admin:
+ hypervisor_hostname = getattr(self.instance, 'OS-EXT-SRV-ATTR:hypervisor_hostname')
+ else:
+ hypervisor_hostname = self.manager.config.hypervisor_hostname
+ if not hypervisor_hostname:
+ raise ChainException('Hypervisor hostname parameter is mandatory')
+ return hypervisor_hostname
+
+ def get_host_ip(self):
+ """Get the IP address of the host where this instance runs.
+
+ return: the IP address
+ """
+ if not self.host_ip:
+ self.host_ip = self.manager.comp.get_hypervisor(self.get_hostname()).host_ip
+ return self.host_ip
+
+ def get_hypervisor_name(self):
+ """Get hypervisor name (az:hostname) for this VNF instance."""
+ if self.instance:
+ if self.manager.is_admin:
+ az = getattr(self.instance, 'OS-EXT-AZ:availability_zone')
+ else:
+ az = self.manager.config.availability_zone
+ if not az:
+ raise ChainException('Availability zone parameter is mandatory')
+ hostname = self.get_hostname()
+ if az:
+ return az + ':' + hostname
+ return hostname
+ return None
+
+ def get_uuid(self):
+ """Get the uuid for this instance."""
+ return self.instance.id
+
+ def delete(self, forced=False):
+ """Delete this VNF instance."""
+ if self.reuse:
+ LOG.info("Instance %s not deleted (reused)", self.name)
+ else:
+ if self.instance:
+ self.manager.comp.delete_server(self.instance)
+ LOG.info("Deleted instance %s", self.name)
+ if self.manager.config.use_management_port:
+ self.management_port.delete()
+ for port in self.ports:
+ port.delete()
+ for port in self.idle_ports:
+ port.delete()
+ for network in self.idle_networks:
+ network.delete()
+
+
+class Chain(object):
+ """A class to manage a single chain.
+
+ Can handle any type of chain (EXT, PVP, PVVP)
+ """
+
+ def __init__(self, chain_id, manager):
+ """Create a new chain.
+
+ chain_id: chain index (first chain is 0)
+ manager: the chain manager that owns all chains
+ """
+ self.chain_id = chain_id
+ self.manager = manager
+ self.encaps = manager.encaps
+ self.networks = []
+ self.instances = []
+ try:
+ self.networks = manager.get_networks(chain_id)
+            # For external chains, VNFs can only be discovered from their MAC addresses
+ # either from config or from ARP
+ if manager.config.service_chain != ChainType.EXT:
+ for chain_instance_index in range(self.get_length()):
+ self.instances.append(ChainVnf(self,
+ chain_instance_index,
+ self.networks))
+                # at this point new VNFs are not created yet, but we can already
+                # verify that all discovered VNFs are on the same hypervisor
+ self._check_hypervisors()
+ # now that all VNF ports are created we need to calculate the
+ # left/right remote MAC for each VNF in the chain
+ # before actually creating the VNF itself
+ rem_mac_pairs = self._get_remote_mac_pairs()
+ for instance in self.instances:
+ rem_mac_pair = rem_mac_pairs.pop(0)
+ instance.create_vnf(rem_mac_pair)
+ except Exception:
+ self.delete()
+ raise
+
+ def _check_hypervisors(self):
+ common_hypervisor = None
+ for instance in self.instances:
+            # get the full hypervisor name (az:compute)
+ hname = instance.get_hypervisor_name()
+ if hname:
+ if common_hypervisor:
+ if hname != common_hypervisor:
+ raise ChainException('Discovered instances on different hypervisors:'
+ ' %s and %s' % (hname, common_hypervisor))
+ else:
+ common_hypervisor = hname
+ if common_hypervisor:
+            # check that the common hypervisor name matches the requested hypervisor name
+ # and set the name to be used by all future instances (if any)
+ if not self.manager.placer.register_full_name(common_hypervisor):
+ raise ChainException('Discovered hypervisor placement %s is incompatible' %
+ common_hypervisor)
+
+ def get_length(self):
+ """Get the number of VNF in the chain."""
+ # Take into account 2 edge networks for routers
+ return len(self.networks) - 3 if self.manager.config.l3_router else len(self.networks) - 1
+
+ def _get_remote_mac_pairs(self):
+ """Get the list of remote mac pairs for every VNF in the chain.
+
+ Traverse the chain from left to right and establish the
+        left/right remote MAC for each VNF in the chain.
+
+ PVP case is simpler:
+ mac sequence: tg_src_mac, vm0-mac0, vm0-mac1, tg_dst_mac
+ must produce [[tg_src_mac, tg_dst_mac]] or looking at index in mac sequence: [[0, 3]]
+ the mac pair is what the VNF at that position (index 0) sees as next hop mac left and right
+
+ PVVP:
+ tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, tg_dst_mac
+ Must produce the following list:
+ [[tg_src_mac, vm1-mac0], [vm0-mac1, tg_dst_mac]] or index: [[0, 3], [2, 5]]
+
+ General case with 3 VMs in chain, the list of consecutive macs (left to right):
+ tg_src_mac, vm0-mac0, vm0-mac1, vm1-mac0, vm1-mac1, vm2-mac0, vm2-mac1, tg_dst_mac
+ Must produce the following list:
+ [[tg_src_mac, vm1-mac0], [vm0-mac1, vm2-mac0], [vm1-mac1, tg_dst_mac]]
+ or index: [[0, 3], [2, 5], [4, 7]]
+
+        The series pattern is pretty clear: [[n, n+3], ...] where n is a multiple of 2
+ """
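+        # Illustrative sketch (not part of the original logic), mirroring the
+        # [[0, 3], [2, 5], [4, 7]] indices above for a 3-VNF chain:
+        #   >>> mac_seq = ['tg_src', 'm0L', 'm0R', 'm1L', 'm1R', 'm2L', 'm2R', 'tg_dst']
+        #   >>> [[mac_seq[n], mac_seq[n + 3]] for n in range(0, 2 * 3, 2)]
+        #   [['tg_src', 'm1L'], ['m0R', 'm2L'], ['m1R', 'tg_dst']]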
+ # line up all mac from left to right
+ mac_seq = [self.manager.generator_config.devices[LEFT].mac]
+ for instance in self.instances:
+ mac_seq.append(instance.ports[0].get_mac())
+ mac_seq.append(instance.ports[1].get_mac())
+ mac_seq.append(self.manager.generator_config.devices[RIGHT].mac)
+ base = 0
+ rem_mac_pairs = []
+ for _ in self.instances:
+ rem_mac_pairs.append([mac_seq[base], mac_seq[base + 3]])
+ base += 2
+ return rem_mac_pairs
+
+ def get_instances(self):
+ """Return all instances for this chain."""
+ return self.instances
+
+ def get_vlan(self, port_index):
+ """Get the VLAN id on a given port.
+
+ port_index: left port is 0, right port is 1
+ return: the vlan_id or None if there is no vlan tagging
+ """
+ # for port 1 we need to return the VLAN of the last network in the chain
+ # The networks array contains 2 networks for PVP [left, right]
+        # and 3 networks in the case of PVVP [left, middle, right]
+ if port_index:
+ # this will pick the last item in array
+ port_index = -1
+ # This string filters networks connected to TG, in case of
+ # l3-router feature we have 4 networks instead of 2
+ networks = [x for x in self.networks if not x.router_name]
+ return networks[port_index].get_vlan()
+
+ def get_vxlan(self, port_index):
+ """Get the VXLAN id on a given port.
+
+ port_index: left port is 0, right port is 1
+ return: the vxlan_id or None if there is no vxlan
+ """
+        # for port 1 we need to return the VXLAN VNI of the last network in the chain
+        # The networks array contains 2 networks for PVP [left, right]
+        # and 3 networks in the case of PVVP [left, middle, right]
+ if port_index:
+ # this will pick the last item in array
+ port_index = -1
+ return self.networks[port_index].get_vxlan()
+
+ def get_mpls_inner_label(self, port_index):
+ """Get the MPLS VPN Label on a given port.
+
+ port_index: left port is 0, right port is 1
+ return: the mpls_label_id or None if there is no mpls
+ """
+ # for port 1 we need to return the MPLS Label of the last network in the chain
+ # The networks array contains 2 networks for PVP [left, right]
+        # and 3 networks in the case of PVVP [left, middle, right]
+ if port_index:
+ # this will pick the last item in array
+ port_index = -1
+ return self.networks[port_index].get_mpls_inner_label()
+
+ def get_dest_mac(self, port_index):
+ """Get the dest MAC on a given port.
+
+ port_index: left port is 0, right port is 1
+ return: the dest MAC
+ """
+ if port_index:
+            # for the right port, use the right port MAC of the last (rightmost) VNF in the chain
+ return self.instances[-1].ports[1].get_mac()
+        # for the left port, use the left port MAC of the first (leftmost) VNF in the chain
+ return self.instances[0].ports[0].get_mac()
+
+ def get_network_uuids(self):
+ """Get UUID of networks in this chain from left to right (order is important).
+
+ :return: list of UUIDs of networks (2 or 3 elements)
+ """
+ return [net['id'] for net in self.networks]
+
+ def get_host_ips(self):
+        """Return the IP address(es) of the host compute nodes used for this chain.
+
+ :return: a list of 1 or 2 IP addresses
+ """
+ return [vnf.get_host_ip() for vnf in self.instances]
+
+ def get_compute_nodes(self):
+ """Return the name of the host compute nodes used for this chain.
+
+        :return: a list of 1 or 2 host names in the az:host format
+ """
+ # Since all chains go through the same compute node(s) we can just retrieve the
+ # compute node name(s) for the first chain
+ return [vnf.get_hypervisor_name() for vnf in self.instances]
+
+ def delete(self):
+ """Delete this chain."""
+ for instance in self.instances:
+ instance.delete()
+ # only delete if these are chain private networks (not shared)
+ if not self.manager.config.service_chain_shared_net:
+ for network in self.networks:
+ network.delete()
+
+
+class InstancePlacer(object):
+ """A class to manage instance placement for all VNFs in all chains.
+
+ A full az string is made of 2 parts AZ and hypervisor.
+ The placement is resolved when both parts az and hypervisor names are known.
+ """
+
+ def __init__(self, req_az, req_hyp):
+ """Create a new instance placer.
+
+ req_az: requested AZ (can be None or empty if no preference)
+        req_hyp: requested hypervisor name (can be None or empty if no preference)
+ can be any of 'nova:', 'comp1', 'nova:comp1'
+ if it is a list, only the first item is used (backward compatibility in config)
+
+ req_az is ignored if req_hyp has an az part
+ all other parts beyond the first 2 are ignored in req_hyp
+ """
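+        # For example (illustrative values):
+        #   req_az='nova', req_hyp='comp1'      -> required_az 'nova:comp1' (resolved)
+        #   req_az='nova', req_hyp=None         -> required_az 'nova:'      (not resolved)
+        #   req_az=None,   req_hyp='comp1'      -> required_az ':comp1'     (not resolved)
+        #   req_az='az1',  req_hyp='nova:comp1' -> required_az 'nova:comp1' ('az1' is ignored)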
+ # if passed a list just pick the first item
+ if req_hyp and isinstance(req_hyp, list):
+ req_hyp = req_hyp[0]
+ # only pick first part of az
+ if req_az and ':' in req_az:
+ req_az = req_az.split(':')[0]
+ if req_hyp:
+ # check if requested hypervisor string has an AZ part
+ split_hyp = req_hyp.split(':')
+ if len(split_hyp) > 1:
+ # override the AZ part and hypervisor part
+ req_az = split_hyp[0]
+ req_hyp = split_hyp[1]
+ self.requested_az = req_az if req_az else ''
+ self.requested_hyp = req_hyp if req_hyp else ''
+ # Nova can accept AZ only (e.g. 'nova:', use any hypervisor in that AZ)
+ # or hypervisor only (e.g. ':comp1')
+ # or both (e.g. 'nova:comp1')
+ if req_az:
+ self.required_az = req_az + ':' + self.requested_hyp
+ else:
+ # need to insert a ':' so nova knows this is the hypervisor name
+ self.required_az = ':' + self.requested_hyp if req_hyp else ''
+ # placement is resolved when both AZ and hypervisor names are known and set
+ self.resolved = self.requested_az != '' and self.requested_hyp != ''
+
+ def get_required_az(self):
+ """Return the required az (can be resolved or not)."""
+ return self.required_az
+
+ def register_full_name(self, discovered_az):
+ """Verify compatibility and register a discovered hypervisor full name.
+
+ discovered_az: a discovered AZ in az:hypervisor format
+ return: True if discovered_az is compatible and set
+ False if discovered_az is not compatible
+ """
+ if self.resolved:
+ return discovered_az == self.required_az
+
+ # must be in full az format
+ split_daz = discovered_az.split(':')
+ if len(split_daz) != 2:
+ return False
+ if self.requested_az and self.requested_az != split_daz[0]:
+ return False
+ if self.requested_hyp and self.requested_hyp != split_daz[1]:
+ return False
+ self.required_az = discovered_az
+ self.resolved = True
+ return True
+
+ def is_resolved(self):
+ """Check if the full AZ is resolved.
+
+ return: True if resolved
+ """
+ return self.resolved
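+    # Illustrative usage of the discovery flow (names are assumptions):
+    #   >>> placer = InstancePlacer('nova', None)
+    #   >>> placer.is_resolved()
+    #   False
+    #   >>> placer.register_full_name('nova:comp1')
+    #   True
+    #   >>> placer.get_required_az()
+    #   'nova:comp1'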
+
+
+class ChainManager(object):
+ """A class for managing all chains for a given run.
+
+ Supports openstack or no openstack.
+ Supports EXT, PVP and PVVP chains.
+ """
+
+ def __init__(self, chain_runner):
+ """Create a chain manager to take care of discovering or bringing up the requested chains.
+
+ A new instance must be created every time a new config is used.
+ config: the nfvbench config to use
+        cred: openstack credentials to use or None if there is no openstack
+ """
+ self.chain_runner = chain_runner
+ self.config = chain_runner.config
+ self.generator_config = chain_runner.traffic_client.generator_config
+ self.chains = []
+ self.image_instance = None
+ self.image_name = None
+ # Left and right networks shared across all chains (only if shared)
+ self.networks = []
+ self.encaps = None
+ self.flavor = None
+ self.comp = None
+ self.nova_client = None
+ self.neutron_client = None
+ self.glance_client = None
+ self.existing_instances = []
+ # existing ports keyed by the network uuid they belong to
+ self._existing_ports = {}
+ config = self.config
+ self.openstack = (chain_runner.cred is not None) and not config.l2_loopback
+ self.chain_count = config.service_chain_count
+ self.az = None
+ if self.openstack:
+ # openstack only
+ session = chain_runner.cred.get_session()
+ self.is_admin = chain_runner.cred.is_admin
+ self.nova_client = Client(2, session=session)
+ self.neutron_client = neutronclient.Client('2.0', session=session)
+ self.glance_client = glanceclient.Client('2', session=session)
+ self.comp = compute.Compute(self.nova_client,
+ self.glance_client,
+ config)
+ try:
+ if config.service_chain != ChainType.EXT:
+ self.placer = InstancePlacer(config.availability_zone, config.compute_nodes)
+ self._setup_image()
+ self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
+ # Get list of all existing instances to check if some instances can be reused
+ self.existing_instances = self.comp.get_server_list()
+ # If management port is requested for VMs, create management network (shared)
+ if self.config.use_management_port:
+ self.management_network = ChainNetwork(self, self.config.management_network,
+ None, False)
+ # If floating IP is used for management, create and share
+ # across chains the floating network
+ if self.config.use_floating_ip:
+ self.floating_ip_network = ChainNetwork(self,
+ self.config.floating_network,
+ None, False)
+ else:
+ # For EXT chains, the external_networks left and right fields in the config
+ # must be either a prefix string or a list of at least chain-count strings
+ self._check_extnet('left', config.external_networks.left)
+ self._check_extnet('right', config.external_networks.right)
+
+ # If networks are shared across chains, get the list of networks
+ if config.service_chain_shared_net:
+ self.networks = self.get_networks()
+ # Reuse/create chains
+ for chain_id in range(self.chain_count):
+ self.chains.append(Chain(chain_id, self))
+ if config.service_chain == ChainType.EXT:
+ # if EXT and no ARP or VxLAN we need to read dest MACs from config
+ if config.no_arp or config.vxlan:
+ self._get_dest_macs_from_config()
+ else:
+ # Make sure all instances are active before proceeding
+ self._ensure_instances_active()
+                # network API calls do not show VLAN IDs if not admin, read them from config
+ if not self.is_admin and config.vlan_tagging:
+ self._get_config_vlans()
+ except Exception:
+ self.delete()
+ raise
+ else:
+ # no openstack, no need to create chains
+ if not config.l2_loopback and config.no_arp:
+ self._get_dest_macs_from_config()
+ if config.vlan_tagging:
+                # make sure there are at least as many entries as chains in each left/right list
+ if len(config.vlans) != 2:
+ raise ChainException('The config vlans property must be a list '
+ 'with 2 lists of VLAN IDs')
+ self._get_config_vlans()
+ if config.vxlan:
+ raise ChainException('VxLAN is only supported with OpenStack')
+
+ def _check_extnet(self, side, name):
+ if not name:
+ raise ChainException('external_networks.%s must contain a valid network'
+ ' name prefix or a list of network names' % side)
+ if isinstance(name, tuple) and len(name) < self.chain_count:
+ raise ChainException('external_networks.%s %s'
+ ' must have at least %d names' % (side, name, self.chain_count))
+
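+    # Expected vlans config format (illustrative, for 2 chains):
+    #   vlans: [[100, 101], [200, 201]]
+    # where index 0 holds the left side VLAN IDs and index 1 the right side VLAN IDs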
+ def _get_config_vlans(self):
+ re_vlan = "[0-9]*$"
+ try:
+ self.vlans = [self._check_list('vlans[0]', self.config.vlans[0], re_vlan),
+ self._check_list('vlans[1]', self.config.vlans[1], re_vlan)]
+ except IndexError:
+ raise ChainException(
+ 'vlans parameter is mandatory. Set valid value in config file') from IndexError
+
+ def _get_dest_macs_from_config(self):
+ re_mac = "[0-9a-fA-F]{2}([-:])[0-9a-fA-F]{2}(\\1[0-9a-fA-F]{2}){4}$"
+ tg_config = self.config.traffic_generator
+ self.dest_macs = [self._check_list("mac_addrs_left",
+ tg_config.mac_addrs_left, re_mac),
+ self._check_list("mac_addrs_right",
+ tg_config.mac_addrs_right, re_mac)]
+
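+    # _check_list examples (illustrative, assuming chain_count=3):
+    #   _check_list('vlans[0]', 100, re_vlan) -> [100, 100, 100] if networks are shared
+    #   _check_list('vlans[0]', [100, 101, 102], re_vlan) -> [100, 101, 102]
+    #   _check_list('vlans[0]', [100, 101], re_vlan) raises ChainException (not enough
+    #   elements for 3 chains)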
+ def _check_list(self, list_name, ll, pattern):
+ # if it is a single int or mac, make it a list of 1 int
+ if isinstance(ll, (int, str)):
+ ll = [ll]
+ else:
+ ll = list(ll)
+ for item in ll:
+ if not re.match(pattern, str(item)):
+ raise ChainException("Invalid format '{item}' specified in {fname}"
+ .format(item=item, fname=list_name))
+ # must have at least 1 element
+ if not ll:
+ raise ChainException('%s cannot be empty' % (list_name))
+ # for shared network, if 1 element is passed, replicate it as many times
+ # as chains
+ if self.config.service_chain_shared_net and len(ll) == 1:
+ ll = [ll[0]] * self.chain_count
+
+        # the number of elements must be at least the number of chains
+ elif len(ll) < self.chain_count:
+ raise ChainException('%s=%s must be a list with %d elements per chain' %
+ (list_name, ll, self.chain_count))
+ return ll
+
+ def _setup_image(self):
+ # To avoid reuploading image in server mode, check whether image_name is set or not
+ if self.image_name:
+ self.image_instance = self.comp.find_image(self.image_name)
+ if self.image_instance:
+ LOG.info("Reusing image %s", self.image_name)
+ else:
+ image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*).qcow2'
+ if self.config.vm_image_file:
+ match = re.search(image_name_search_pattern, self.config.vm_image_file)
+ if match:
+ self.image_name = match.group(1)
+ LOG.info('Using provided VM image file %s', self.config.vm_image_file)
+ else:
+ raise ChainException('Provided VM image file name %s must start with '
+ '"nfvbenchvm-<version>"' % self.config.vm_image_file)
+ else:
+ pkg_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ for f in os.listdir(pkg_root):
+ if re.search(image_name_search_pattern, f):
+ self.config.vm_image_file = pkg_root + '/' + f
+ self.image_name = f.replace('.qcow2', '')
+ LOG.info('Found built-in VM image file %s', f)
+ break
+ else:
+ raise ChainException('Cannot find any built-in VM image file.')
+ if self.image_name:
+ self.image_instance = self.comp.find_image(self.image_name)
+ if not self.image_instance:
+ LOG.info('Uploading %s', self.image_name)
+ res = self.comp.upload_image_via_url(self.image_name,
+ self.config.vm_image_file)
+
+ if not res:
+ raise ChainException('Error uploading image %s from %s. ABORTING.' %
+ (self.image_name, self.config.vm_image_file))
+ LOG.info('Image %s successfully uploaded.', self.image_name)
+ self.image_instance = self.comp.find_image(self.image_name)
+
+ # image multiqueue property must be set according to the vif_multiqueue_size
+ # config value (defaults to 1 or disabled)
+ self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
+
+ def _ensure_instances_active(self):
+ instances = []
+ for chain in self.chains:
+ instances.extend(chain.get_instances())
+ initial_instance_count = len(instances)
+ max_retries = (self.config.check_traffic_time_sec + (initial_instance_count - 1) * 10 +
+ self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
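+        # e.g. (illustrative) with check_traffic_time_sec=200, 2 instances and
+        # generic_poll_sec=10: max_retries = (200 + 10 + 10 - 1) / 10 = 21.9,
+        # i.e. ~22 poll cycles (~220 s) before giving up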
+ retry = 0
+ while instances:
+ remaining_instances = []
+ for instance in instances:
+ status = instance.get_status()
+ if status == 'ACTIVE':
+ LOG.info('Instance %s is ACTIVE on %s',
+ instance.name, instance.get_hypervisor_name())
+ continue
+ if status == 'ERROR':
+ raise ChainException('Instance %s creation error: %s' %
+ (instance.name,
+ instance.instance.fault['message']))
+ remaining_instances.append(instance)
+ if not remaining_instances:
+ break
+ retry += 1
+ if retry >= max_retries:
+ raise ChainException('Time-out: %d/%d instances still not active' %
+ (len(remaining_instances), initial_instance_count))
+            LOG.info('Waiting for %d/%d instances to become active (retry %d/%d)...',
+ len(remaining_instances), initial_instance_count,
+ retry, max_retries)
+ instances = remaining_instances
+ time.sleep(self.config.generic_poll_sec)
+ if initial_instance_count:
+ LOG.info('All instances are active')
+
+ def get_networks(self, chain_id=None):
+        """Get the networks for a given EXT, PVP or PVVP chain.
+
+ For EXT packet path, these networks must pre-exist.
+ For PVP, PVVP these networks will be created if they do not exist.
+ chain_id: to which chain the networks belong.
+ a None value will mean that these networks are shared by all chains
+ """
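+        # Resulting network order (illustrative summary of the logic below):
+        #   EXT:               [external_left, external_right] (looked up, never created)
+        #   PVP:               [left, right]
+        #   PVVP:              [left, middle, right]
+        #   PVP + l3_router:   [left, right, edge_left, edge_right]
+        #   PVVP + l3_router:  [left, middle, right, edge_left, edge_right]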
+ if self.networks:
+ # the only case where self.networks exists is when the networks are shared
+ # across all chains
+ return self.networks
+ if self.config.service_chain == ChainType.EXT:
+ lookup_only = True
+ ext_net = self.config.external_networks
+ net_cfg = [AttrDict({'name': name,
+ 'subnet': None,
+ 'segmentation_id': None,
+ 'physical_network': None})
+ for name in [ext_net.left, ext_net.right]]
+ # segmentation id and subnet should be discovered from neutron
+ else:
+ lookup_only = False
+ int_nets = self.config.internal_networks
+ # VLAN and VxLAN
+ if self.config.service_chain == ChainType.PVP:
+ net_cfg = [int_nets.left, int_nets.right]
+ else:
+ net_cfg = [int_nets.left, int_nets.middle, int_nets.right]
+ if self.config.l3_router:
+ edge_nets = self.config.edge_networks
+ net_cfg.append(edge_nets.left)
+ net_cfg.append(edge_nets.right)
+ networks = []
+ try:
+ for cfg in net_cfg:
+ networks.append(ChainNetwork(self, cfg, chain_id, lookup_only=lookup_only))
+ except Exception:
+ # need to cleanup all successful networks prior to bailing out
+ for net in networks:
+ net.delete()
+ raise
+ return networks
+
+ def get_existing_ports(self):
+ """Get the list of existing ports.
+
+ Lazy retrieval of ports as this can be costly if there are lots of ports and
+ is only needed when VM and network are being reused.
+
+ return: a dict of list of neutron ports indexed by the network uuid they are attached to
+
+ Each port is a dict with fields such as below:
+ {'allowed_address_pairs': [], 'extra_dhcp_opts': [],
+ 'updated_at': '2018-10-06T07:15:35Z', 'device_owner': 'compute:nova',
+ 'revision_number': 10, 'port_security_enabled': False, 'binding:profile': {},
+ 'fixed_ips': [{'subnet_id': '6903a3b3-49a1-4ba4-8259-4a90e7a44b21',
+ 'ip_address': '192.168.1.4'}], 'id': '3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
+ 'security_groups': [],
+ 'binding:vif_details': {'vhostuser_socket': '/tmp/3dcb9cfa-d82a-4dd1-85a1-fd8284b52d72',
+ 'vhostuser_mode': 'server'},
+ 'binding:vif_type': 'vhostuser',
+ 'mac_address': 'fa:16:3e:3c:63:04',
+ 'project_id': '977ac76a63d7492f927fa80e86baff4c',
+ 'status': 'ACTIVE',
+ 'binding:host_id': 'a20-champagne-compute-1',
+ 'description': '',
+ 'device_id': 'a98e2ad2-5371-4aa5-a356-8264a970ce4b',
+ 'name': 'nfvbench-loop-vm0-0', 'admin_state_up': True,
+ 'network_id': '3ea5fd88-278f-4d9d-b24d-1e443791a055',
+ 'tenant_id': '977ac76a63d7492f927fa80e86baff4c',
+ 'created_at': '2018-10-06T07:15:10Z',
+ 'binding:vnic_type': 'normal'}
+ """
+ if not self._existing_ports:
+ LOG.info('Loading list of all ports...')
+ existing_ports = self.neutron_client.list_ports()['ports']
+ # place all ports in the dict keyed by the port network uuid
+ for port in existing_ports:
+ port_list = self._existing_ports.setdefault(port['network_id'], [])
+ port_list.append(port)
+ LOG.info("Loaded %d ports attached to %d networks",
+ len(existing_ports), len(self._existing_ports))
+ return self._existing_ports
+
+ def get_ports_from_network(self, chain_network):
+ """Get the list of existing ports that belong to a network.
+
+ Lazy retrieval of ports as this can be costly if there are lots of ports and
+ is only needed when VM and network are being reused.
+
+        chain_network: a ChainNetwork instance for which attached ports need to be retrieved
+ return: list of neutron ports attached to requested network
+ """
+ return self.get_existing_ports().get(chain_network.get_uuid(), None)
+
+ def get_hypervisor_from_mac(self, mac):
+ """Get the hypervisor that hosts a VM MAC.
+
+ mac: MAC address to look for
+ return: the hypervisor where the matching port runs or None if not found
+ """
+ # _existing_ports is a dict of list of ports indexed by network id
+ for port_list in list(self.get_existing_ports().values()):
+ for port in port_list:
+ try:
+ if port['mac_address'] == mac:
+ host_id = port['binding:host_id']
+ return self.comp.get_hypervisor(host_id)
+ except KeyError:
+ pass
+ return None
+
+ def get_host_ip_from_mac(self, mac):
+ """Get the host IP address matching a MAC.
+
+ mac: MAC address to look for
+ return: the IP address of the host where the matching port runs or None if not found
+ """
+ hypervisor = self.get_hypervisor_from_mac(mac)
+ if hypervisor:
+ return hypervisor.host_ip
+ return None
+
+ def get_chain_vlans(self, port_index):
+ """Get the list of per chain VLAN id on a given port.
+
+ port_index: left port is 0, right port is 1
+ return: a VLAN ID list indexed by the chain index or None if no vlan tagging
+ """
+ if self.chains and self.is_admin:
+ return [self.chains[chain_index].get_vlan(port_index)
+ for chain_index in range(self.chain_count)]
+ # no openstack
+ return self.vlans[port_index]
+
+ def get_chain_vxlans(self, port_index):
+        """Get the list of per chain VXLAN VNIs on a given port.
+
+        port_index: left port is 0, right port is 1
+        return: a VNI list indexed by the chain index or None if no VXLAN
+ """
+ if self.chains and self.is_admin:
+ return [self.chains[chain_index].get_vxlan(port_index)
+ for chain_index in range(self.chain_count)]
+ # no openstack
+ raise ChainException('VxLAN is only supported with OpenStack and with admin user')
+
+ def get_chain_mpls_inner_labels(self, port_index):
+ """Get the list of per chain MPLS VPN Labels on a given port.
+
+ port_index: left port is 0, right port is 1
+        return: an MPLS label list indexed by the chain index or None if no MPLS
+ """
+ if self.chains and self.is_admin:
+ return [self.chains[chain_index].get_mpls_inner_label(port_index)
+ for chain_index in range(self.chain_count)]
+ # no openstack
+ raise ChainException('MPLS is only supported with OpenStack and with admin user')
+
+ def get_dest_macs(self, port_index):
+ """Get the list of per chain dest MACs on a given port.
+
+ Should not be called if EXT+ARP is used (in that case the traffic gen will
+ have the ARP responses back from VNFs with the dest MAC to use).
+
+ port_index: left port is 0, right port is 1
+ return: a list of dest MACs indexed by the chain index
+ """
+ if self.chains and self.config.service_chain != ChainType.EXT:
+ return [self.chains[chain_index].get_dest_mac(port_index)
+ for chain_index in range(self.chain_count)]
+ # no openstack or EXT+no-arp
+ return self.dest_macs[port_index]
+
+ def get_host_ips(self):
+        """Return the IP address(es) of the host compute nodes used for this run.
+
+        :return: a list of 0, 1 or 2 IP addresses
+ """
+ # Since all chains go through the same compute node(s) we can just retrieve the
+ # compute node(s) for the first chain
+ if self.chains:
+ if self.config.service_chain != ChainType.EXT:
+ return self.chains[0].get_host_ips()
+ # in the case of EXT, the compute node must be retrieved from the port
+ # associated to any of the dest MACs
+ dst_macs = self.generator_config.get_dest_macs()
+ # dest MAC on port 0, chain 0
+ dst_mac = dst_macs[0][0]
+ host_ip = self.get_host_ip_from_mac(dst_mac)
+ if host_ip:
+ LOG.info('Found compute node IP for EXT chain: %s', host_ip)
+ return [host_ip]
+ return []
+
+ def get_compute_nodes(self):
+ """Return the name of the host compute nodes used for this run.
+
+        :return: a list of 0, 1 or 2 host names in the az:host format
+ """
+ # Since all chains go through the same compute node(s) we can just retrieve the
+ # compute node name(s) for the first chain
+ if self.chains:
+ if self.config.service_chain != ChainType.EXT:
+ return self.chains[0].get_compute_nodes()
+ # in the case of EXT, the compute node must be retrieved from the port
+ # associated to any of the dest MACs
+ dst_macs = self.generator_config.get_dest_macs()
+ # dest MAC on port 0, chain 0
+ dst_mac = dst_macs[0][0]
+ hypervisor = self.get_hypervisor_from_mac(dst_mac)
+ if hypervisor:
+ LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
+ return [':' + hypervisor.hypervisor_hostname]
+ # no openstack = no chains
+ return []
+
+ def delete(self):
+ """Delete resources for all chains."""
+ for chain in self.chains:
+ chain.delete()
+ for network in self.networks:
+ network.delete()
+ if self.config.use_management_port and hasattr(self, 'management_network'):
+ self.management_network.delete()
+ if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
+ self.floating_ip_network.delete()
+ if self.flavor:
+ self.flavor.delete()
diff --git a/nfvbench/cleanup.py b/nfvbench/cleanup.py
index 246be3f..cefdcfa 100644
--- a/nfvbench/cleanup.py
+++ b/nfvbench/cleanup.py
@@ -15,15 +15,16 @@
#
import sys
-import time
from neutronclient.neutron import client as nclient
from novaclient.client import Client
from novaclient.exceptions import NotFound
from tabulate import tabulate
-import credentials as credentials
-from log import LOG
+from . import credentials
+from .log import LOG
+from . import utils
+
class ComputeCleaner(object):
"""A cleaner for compute resources."""
@@ -35,85 +36,165 @@ class ComputeCleaner(object):
self.servers = [server for server in all_servers
if server.name.startswith(instance_prefix)]
- def instance_exists(self, server):
- try:
- self.nova_client.servers.get(server.id)
- except NotFound:
- return False
- return True
-
def get_resource_list(self):
return [["Instance", server.name, server.id] for server in self.servers]
- def clean(self):
- if self.servers:
- for server in self.servers:
- try:
- LOG.info('Deleting instance %s...', server.name)
- self.nova_client.servers.delete(server.id)
- except Exception:
- LOG.exception("Instance %s deletion failed", server.name)
- LOG.info(' Waiting for %d instances to be fully deleted...', len(self.servers))
- retry_count = 5 + len(self.servers) * 2
- while True:
- retry_count -= 1
- self.servers = [server for server in self.servers if self.instance_exists(server)]
- if not self.servers:
- break
+ def get_cleaner_code(self):
+ return "instances"
- if retry_count:
- LOG.info(' %d yet to be deleted by Nova, retries left=%d...',
- len(self.servers), retry_count)
- time.sleep(2)
- else:
- LOG.warning(' instance deletion verification timed out: %d not removed',
- len(self.servers))
- break
+ def clean_needed(self, clean_options):
+ if clean_options is None:
+ return True
+ code = self.get_cleaner_code()
+ return code[0] in clean_options
+
+ def clean(self, clean_options):
+ if self.clean_needed(clean_options):
+ if self.servers:
+ for server in self.servers:
+ utils.delete_server(self.nova_client, server)
+ utils.waiting_servers_deletion(self.nova_client, self.servers)
class NetworkCleaner(object):
"""A cleaner for network resources."""
- def __init__(self, neutron_client, network_names):
+ def __init__(self, neutron_client, network_name_prefixes):
self.neutron_client = neutron_client
LOG.info('Discovering networks...')
all_networks = self.neutron_client.list_networks()['networks']
self.networks = []
+ net_ids = []
for net in all_networks:
- try:
- network_names.remove(net['name'])
- self.networks.append(net)
- except ValueError:
- pass
- if not network_names:
- break
- net_ids = [net['id'] for net in self.networks]
+ netname = net['name']
+ for prefix in network_name_prefixes:
+ if prefix and netname.startswith(prefix):
+ self.networks.append(net)
+ net_ids.append(net['id'])
+ break
if net_ids:
LOG.info('Discovering ports...')
all_ports = self.neutron_client.list_ports()['ports']
self.ports = [port for port in all_ports if port['network_id'] in net_ids]
+ LOG.info('Discovering floating ips...')
+ all_floating_ips = self.neutron_client.list_floatingips()['floatingips']
+ self.floating_ips = [floating_ip for floating_ip in all_floating_ips if
+ floating_ip['floating_network_id'] in net_ids and "nfvbench" in
+ floating_ip['description']]
else:
self.ports = []
+ self.floating_ips = []
def get_resource_list(self):
res_list = [["Network", net['name'], net['id']] for net in self.networks]
res_list.extend([["Port", port['name'], port['id']] for port in self.ports])
+ res_list.extend(
+ [["Floating IP", floating_ip['description'], floating_ip['id']] for floating_ip in
+ self.floating_ips])
+ return res_list
+
+ def get_cleaner_code(self):
+ return "networks, ports and floating ips"
+
+ def clean_needed(self, clean_options):
+ if clean_options is None:
+ return True
+ code = self.get_cleaner_code()
+ return code[0] in clean_options
+
+ def clean(self, clean_options):
+ if self.clean_needed(clean_options):
+ for port in self.ports:
+ LOG.info("Deleting port %s...", port['id'])
+ try:
+ self.neutron_client.delete_port(port['id'])
+ except Exception:
+ LOG.exception("Port deletion failed")
+ for floating_ip in self.floating_ips:
+ LOG.info("Deleting floating ip %s...", floating_ip['id'])
+ try:
+ self.neutron_client.delete_floatingip(floating_ip['id'])
+ except Exception:
+ LOG.exception("Floating IP deletion failed")
+ # associated subnets are automatically deleted by neutron
+ for net in self.networks:
+ LOG.info("Deleting network %s...", net['name'])
+ try:
+ self.neutron_client.delete_network(net['id'])
+ except Exception:
+ LOG.exception("Network deletion failed")
+
+
+class RouterCleaner(object):
+ """A cleaner for router resources."""
+
+ def __init__(self, neutron_client, router_names):
+ self.neutron_client = neutron_client
+ LOG.info('Discovering routers...')
+ all_routers = self.neutron_client.list_routers()['routers']
+ self.routers = []
+ self.ports = []
+ self.routes = []
+ rtr_ids = []
+ for rtr in all_routers:
+ rtrname = rtr['name']
+ for name in router_names:
+ if rtrname == name:
+ self.routers.append(rtr)
+ rtr_ids.append(rtr['id'])
+
+ LOG.info('Discovering router routes for router %s...', rtr['name'])
+ all_routes = rtr['routes']
+ for route in all_routes:
+ LOG.info("destination: %s, nexthop: %s", route['destination'],
+ route['nexthop'])
+
+ LOG.info('Discovering router ports for router %s...', rtr['name'])
+ self.ports.extend(self.neutron_client.list_ports(device_id=rtr['id'])['ports'])
+ break
+
+ def get_resource_list(self):
+ res_list = [["Router", rtr['name'], rtr['id']] for rtr in self.routers]
return res_list
- def clean(self):
- for port in self.ports:
- LOG.info("Deleting port %s...", port['id'])
- try:
- self.neutron_client.delete_port(port['id'])
- except Exception:
- LOG.exception("Port deletion failed")
-
- for net in self.networks:
- LOG.info("Deleting network %s...", net['name'])
- try:
- self.neutron_client.delete_network(net['id'])
- except Exception:
- LOG.exception("Network deletion failed")
+ def get_cleaner_code(self):
+ return "router"
+
+ def clean_needed(self, clean_options):
+ if clean_options is None:
+ return True
+ code = self.get_cleaner_code()
+ return code[0] in clean_options
+
+ def clean(self, clean_options):
+ if self.clean_needed(clean_options):
+            # associated routes need to be deleted before deleting routers
+ for rtr in self.routers:
+ LOG.info("Deleting routes for %s...", rtr['name'])
+ try:
+ body = {
+ 'router': {
+ 'routes': []
+ }
+ }
+ self.neutron_client.update_router(rtr['id'], body)
+ except Exception:
+ LOG.exception("Router routes deletion failed")
+ LOG.info("Deleting ports for %s...", rtr['name'])
+ try:
+ for port in self.ports:
+ body = {
+ 'port_id': port['id']
+ }
+ self.neutron_client.remove_interface_router(rtr['id'], body)
+ except Exception:
+ LOG.exception("Router ports deletion failed")
+ LOG.info("Deleting router %s...", rtr['name'])
+ try:
+ self.neutron_client.delete_router(rtr['id'])
+ except Exception:
+ LOG.exception("Router deletion failed")
+
class FlavorCleaner(object):
"""Cleaner for NFVbench flavor."""
@@ -131,26 +212,45 @@ class FlavorCleaner(object):
return [['Flavor', self.name, self.flavor.id]]
return None
- def clean(self):
- if self.flavor:
- LOG.info("Deleting flavor %s...", self.flavor.name)
- try:
- self.flavor.delete()
- except Exception:
- LOG.exception("Flavor deletion failed")
+ def get_cleaner_code(self):
+ return "flavor"
+
+ def clean_needed(self, clean_options):
+ if clean_options is None:
+ return True
+ code = self.get_cleaner_code()
+ return code[0] in clean_options
+
+ def clean(self, clean_options):
+ if self.clean_needed(clean_options):
+ if self.flavor:
+ LOG.info("Deleting flavor %s...", self.flavor.name)
+ try:
+ self.flavor.delete()
+ except Exception:
+ LOG.exception("Flavor deletion failed")
+
class Cleaner(object):
"""Cleaner for all NFVbench resources."""
def __init__(self, config):
- cred = credentials.Credentials(config.openrc_file, None, False)
+ cred = credentials.Credentials(config.openrc_file, config.clouds_detail, None, False)
session = cred.get_session()
self.neutron_client = nclient.Client('2.0', session=session)
self.nova_client = Client(2, session=session)
network_names = [inet['name'] for inet in config.internal_networks.values()]
+ network_names.extend([inet['name'] for inet in config.edge_networks.values()])
+ network_names.append(config.management_network['name'])
+ network_names.append(config.floating_network['name'])
+ router_names = [rtr['router_name'] for rtr in config.edge_networks.values()]
+ # add idle networks as well
+ if config.idle_networks.name:
+ network_names.append(config.idle_networks.name)
self.cleaners = [ComputeCleaner(self.nova_client, config.loop_vm_name),
FlavorCleaner(self.nova_client, config.flavor_type),
- NetworkCleaner(self.neutron_client, network_names)]
+ NetworkCleaner(self.neutron_client, network_names),
+ RouterCleaner(self.neutron_client, router_names)]
def show_resources(self):
"""Show all NFVbench resources."""
@@ -161,19 +261,45 @@ class Cleaner(object):
table.extend(res_list)
count = len(table) - 1
if count:
- LOG.info('Discovered %d NFVbench resources:', count)
- print tabulate(table, headers="firstrow", tablefmt="psql")
+ LOG.info('Discovered %d NFVbench resources:\n%s', count,
+ tabulate(table, headers="firstrow", tablefmt="psql"))
else:
LOG.info('No matching NFVbench resources found')
return count
def clean(self, prompt):
"""Clean all resources."""
- LOG.info("NFVbench will delete all resources shown...")
+ LOG.info("NFVbench will delete resources shown...")
+ clean_options = None
if prompt:
- answer = raw_input("Are you sure? (y/n) ")
+            answer = input("Do you want to delete all resources? (y/n) ")
if answer.lower() != 'y':
- LOG.info("Exiting without deleting any resource")
- sys.exit(0)
+ print("What kind of resources do you want to delete?")
+ all_option = ""
+ all_option_codes = []
+ for cleaner in self.cleaners:
+ code = cleaner.get_cleaner_code()
+ print(("%s: %s" % (code[0], code)))
+ all_option += code[0]
+ all_option_codes.append(code)
+ print(("a: all resources - a shortcut for '%s'" % all_option))
+ all_option_codes.append("all resources")
+ print("q: quit")
+ answer_res = input(":").lower()
+                # check only the first character: matching 'a' anywhere would wrongly accept codes like "flavor"
+ if answer_res[0] == "a":
+ clean_options = all_option
+ elif answer_res[0] != 'q':
+                # if the user typed the complete code instead of the shortcut,
+                # keep only its first character to avoid false clean requests:
+                # e.g. "networks, ports and floating ips" contains the letter 'r', so passing
+                # the full string would also trigger the router cleaner
+ if answer_res in all_option_codes:
+ clean_options = answer_res[0]
+ else:
+ clean_options = answer_res
+ else:
+ LOG.info("Exiting without deleting any resource")
+ sys.exit(0)
for cleaner in self.cleaners:
- cleaner.clean()
+ cleaner.clean(clean_options)
diff --git a/nfvbench/compute.py b/nfvbench/compute.py
index af1a0d6..883dc28 100644
--- a/nfvbench/compute.py
+++ b/nfvbench/compute.py
@@ -11,8 +11,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Module for Openstack compute operations"""
-import os
+"""Module to interface with nova and glance."""
+
import time
import traceback
@@ -23,18 +23,23 @@ except ImportError:
from glanceclient.v1.apiclient.exceptions import NotFound as GlanceImageNotFound
import keystoneauth1
import novaclient
+from novaclient.exceptions import NotFound
-from log import LOG
+from .log import LOG
+from . import utils
class Compute(object):
- def __init__(self, nova_client, glance_client, neutron_client, config):
+ """Class to interface with nova and glance."""
+
+ def __init__(self, nova_client, glance_client, config):
+ """Create a new compute instance to interact with nova and glance."""
self.novaclient = nova_client
self.glance_client = glance_client
- self.neutronclient = neutron_client
self.config = config
def find_image(self, image_name):
+ """Find an image by name."""
try:
return next(self.glance_client.images.list(filters={'name': image_name}), None)
except (novaclient.exceptions.NotFound, keystoneauth1.exceptions.http.NotFound,
@@ -43,13 +48,11 @@ class Compute(object):
return None
def upload_image_via_url(self, final_image_name, image_file, retry_count=60):
- '''
- Directly uploads image to Nova via URL if image is not present
- '''
+ """Directly upload image to Nova via URL if image is not present."""
retry = 0
try:
# check image is file/url based.
- with open(image_file) as f_image:
+ with open(image_file, 'rb') as f_image:
img = self.glance_client.images.create(name=str(final_image_name),
disk_format="qcow2",
container_format="bare",
@@ -83,6 +86,7 @@ class Compute(object):
return True
def delete_image(self, img_name):
+ """Delete an image by name."""
try:
LOG.log("Deleting image %s...", img_name)
img = self.find_image(image_name=img_name)
@@ -93,68 +97,37 @@ class Compute(object):
return True
- # Remove keypair name from openstack if exists
- def remove_public_key(self, name):
- keypair_list = self.novaclient.keypairs.list()
- for key in keypair_list:
- if key.name == name:
- self.novaclient.keypairs.delete(name)
- LOG.info('Removed public key %s', name)
- break
-
- # Test if keypair file is present if not create it
- def create_keypair(self, name, private_key_pair_file):
- self.remove_public_key(name)
- keypair = self.novaclient.keypairs.create(name)
- # Now write the keypair to the file if requested
- if private_key_pair_file:
- kpf = os.open(private_key_pair_file,
- os.O_WRONLY | os.O_CREAT, 0o600)
- with os.fdopen(kpf, 'w') as kpf:
- kpf.write(keypair.private_key)
- return keypair
-
- # Add an existing public key to openstack
- def add_public_key(self, name, public_key_file):
- self.remove_public_key(name)
- # extract the public key from the file
- public_key = None
+ def image_multiqueue_enabled(self, img):
+ """Check if multiqueue property is enabled on given image."""
try:
- with open(os.path.expanduser(public_key_file)) as pkf:
- public_key = pkf.read()
- except IOError as exc:
- LOG.error('Cannot open public key file %s: %s', public_key_file, exc)
- return None
- keypair = self.novaclient.keypairs.create(name, public_key)
- return keypair
-
- def init_key_pair(self, kp_name, ssh_access):
- '''Initialize the key pair for all test VMs
- if a key pair is specified in access, use that key pair else
- create a temporary key pair
- '''
- if ssh_access.public_key_file:
- return self.add_public_key(kp_name, ssh_access.public_key_file)
- keypair = self.create_keypair(kp_name, None)
- ssh_access.private_key = keypair.private_key
- return keypair
+ return img['hw_vif_multiqueue_enabled'] == 'true'
+ except KeyError:
+ return False
- def find_network(self, label):
- net = self.novaclient.networks.find(label=label)
- return net
+ def image_set_multiqueue(self, img, enabled):
+ """Set multiqueue property as enabled or disabled on given image."""
+ cur_mqe = self.image_multiqueue_enabled(img)
+ LOG.info('Image %s hw_vif_multiqueue_enabled property is "%s"',
+ img.name, str(cur_mqe).lower())
+ if cur_mqe != enabled:
+ mqe = str(enabled).lower()
+ self.glance_client.images.update(img.id, hw_vif_multiqueue_enabled=mqe)
+ img['hw_vif_multiqueue_enabled'] = mqe
+ LOG.info('Image %s hw_vif_multiqueue_enabled property changed to "%s"', img.name, mqe)
# Create a server instance with name vmname
# and check that it gets into the ACTIVE state
def create_server(self, vmname, image, flavor, key_name,
nic, sec_group, avail_zone=None, user_data=None,
config_drive=None, files=None):
-
+ """Create a new server."""
if sec_group:
security_groups = [sec_group['id']]
else:
security_groups = None
# Also attach the created security group for the test
+ LOG.info('Creating instance %s with AZ: "%s"', vmname, avail_zone)
instance = self.novaclient.servers.create(name=vmname,
image=image,
flavor=flavor,
@@ -168,306 +141,45 @@ class Compute(object):
return instance
def poll_server(self, instance):
+ """Poll a server from its reference."""
return self.novaclient.servers.get(instance.id)
def get_server_list(self):
+ """Get the list of all servers."""
servers_list = self.novaclient.servers.list()
return servers_list
- def find_floating_ips(self):
- floating_ip = self.novaclient.floating_ips.list()
- return floating_ip
-
- def create_floating_ips(self, pool):
- return self.novaclient.floating_ips.create(pool)
-
- # Return the server network for a server
- def find_server_network(self, vmname):
- servers_list = self.get_server_list()
- for server in servers_list:
- if server.name == vmname and server.status == "ACTIVE":
- return server.networks
- return None
-
- # Returns True if server is present false if not.
- # Retry for a few seconds since after VM creation sometimes
- # it takes a while to show up
- def find_server(self, vmname, retry_count):
- for retry_attempt in range(retry_count):
- servers_list = self.get_server_list()
- for server in servers_list:
- if server.name == vmname and server.status == "ACTIVE":
- return True
- # Sleep between retries
- LOG.debug("[%s] VM not yet found, retrying %s of %s...",
- vmname, (retry_attempt + 1), retry_count)
- time.sleep(self.config.generic_poll_sec)
- LOG.error("[%s] VM not found, after %s attempts", vmname, retry_count)
- return False
-
- # Returns True if server is found and deleted/False if not,
- # retry the delete if there is a delay
- def delete_server_by_name(self, vmname):
- servers_list = self.get_server_list()
- for server in servers_list:
- if server.name == vmname:
- LOG.info('Deleting server %s', server)
- self.novaclient.servers.delete(server)
- return True
- return False
+ def instance_exists(self, server):
+ try:
+ self.novaclient.servers.get(server)
+ except NotFound:
+ return False
+ return True
def delete_server(self, server):
- self.novaclient.servers.delete(server)
+ """Delete a server from its object reference."""
+ utils.delete_server(self.novaclient, server)
+ utils.waiting_servers_deletion(self.novaclient, [server])
def find_flavor(self, flavor_type):
+ """Find a flavor by name."""
try:
flavor = self.novaclient.flavors.find(name=flavor_type)
return flavor
except Exception:
return None
- def create_flavor(self, name, ram, vcpus, disk, ephemeral=0, override=False):
- if override:
- self.delete_flavor(name)
+ def create_flavor(self, name, ram, vcpus, disk, ephemeral=0):
+ """Create a flavor."""
return self.novaclient.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk,
ephemeral=ephemeral)
- def delete_flavor(self, flavor=None, name=None):
- try:
- if not flavor:
- flavor = self.find_flavor(name)
- flavor.delete()
- return True
- except Exception:
- return False
-
- def normalize_az_host(self, az, host):
- if not az:
- az = self.config.availability_zone
- return az + ':' + host
-
- def auto_fill_az(self, host_list, host):
- '''
- no az provided, if there is a host list we can auto-fill the az
- else we use the configured az if available
- else we return an error
- '''
- if host_list:
- for hyp in host_list:
- if hyp.host == host:
- return self.normalize_az_host(hyp.zone, host)
- # no match on host
- LOG.error('Passed host name does not exist: %s', host)
- return None
- if self.config.availability_zone:
- return self.normalize_az_host(None, host)
- LOG.error('--hypervisor passed without an az and no az configured')
- return None
-
- def sanitize_az_host(self, host_list, az_host):
- '''
- host_list: list of hosts as retrieved from openstack (can be empty)
- az_host: either a host or a az:host string
- if a host, will check host is in the list, find the corresponding az and
- return az:host
- if az:host is passed will check the host is in the list and az matches
- if host_list is empty, will return the configured az if there is no
- az passed
- '''
- if ':' in az_host:
- # no host_list, return as is (no check)
- if not host_list:
- return az_host
- # if there is a host_list, extract and verify the az and host
- az_host_list = az_host.split(':')
- zone = az_host_list[0]
- host = az_host_list[1]
- for hyp in host_list:
- if hyp.host == host:
- if hyp.zone == zone:
- # matches
- return az_host
- # else continue - another zone with same host name?
- # no match
- LOG.error('No match for availability zone and host %s', az_host)
- return None
- else:
- return self.auto_fill_az(host_list, az_host)
-
- #
- # Return a list of 0, 1 or 2 az:host
- #
- # The list is computed as follows:
- # The list of all hosts is retrieved first from openstack
- # if this fails, checks and az auto-fill are disabled
- #
- # If the user provides a list of hypervisors (--hypervisor)
- # that list is checked and returned
- #
- # If the user provides a configured az name (config.availability_zone)
- # up to the first 2 hosts from the list that match the az are returned
- #
- # If the user did not configure an az name
- # up to the first 2 hosts from the list are returned
- # Possible return values:
- # [ az ]
- # [ az:hyp ]
- # [ az1:hyp1, az2:hyp2 ]
- # [] if an error occurred (error message printed to console)
- #
- def get_az_host_list(self):
- avail_list = []
- host_list = []
-
- try:
- host_list = self.novaclient.services.list()
- except novaclient.exceptions.Forbidden:
- LOG.warning('Operation Forbidden: could not retrieve list of hosts'
- ' (likely no permission)')
-
- for host in host_list:
- # this host must be a compute node
- if host.binary != 'nova-compute' or host.state != 'up':
- continue
- candidate = None
- if self.config.availability_zone:
- if host.zone == self.config.availability_zone:
- candidate = self.normalize_az_host(None, host.host)
- else:
- candidate = self.normalize_az_host(host.zone, host.host)
- if candidate:
- avail_list.append(candidate)
- # pick first 2 matches at most
- if len(avail_list) == 2:
- break
-
- # if empty we insert the configured az
- if not avail_list:
-
- if not self.config.availability_zone:
- LOG.error('Availability_zone must be configured')
- elif host_list:
- LOG.error('No host matching the selection for availability zone: %s',
- self.config.availability_zone)
- avail_list = []
- else:
- avail_list = [self.config.availability_zone]
- return avail_list
-
- def get_enabled_az_host_list(self, required_count=1):
- """
- Check which hypervisors are enabled and on which compute nodes they are running.
- Pick required count of hosts.
+ def get_hypervisor(self, hyper_name):
+ """Get the hypervisor from its name.
- :param required_count: count of compute-nodes to return
- :return: list of enabled available compute nodes
+ Can raise novaclient.exceptions.NotFound
"""
- host_list = []
- hypervisor_list = []
-
- try:
- hypervisor_list = self.novaclient.hypervisors.list()
- host_list = self.novaclient.services.list()
- except novaclient.exceptions.Forbidden:
- LOG.warning('Operation Forbidden: could not retrieve list of hypervisors'
- ' (likely no permission)')
-
- hypervisor_list = [h for h in hypervisor_list if h.status == 'enabled' and h.state == 'up']
- if self.config.availability_zone:
- host_list = [h for h in host_list if h.zone == self.config.availability_zone]
-
- if self.config.compute_nodes:
- host_list = [h for h in host_list if h.host in self.config.compute_nodes]
-
- hosts = [h.hypervisor_hostname for h in hypervisor_list]
- host_list = [h for h in host_list if h.host in hosts]
-
- avail_list = []
- for host in host_list:
- candidate = self.normalize_az_host(host.zone, host.host)
- if candidate:
- avail_list.append(candidate)
- if len(avail_list) == required_count:
- return avail_list
-
- return avail_list
-
- def get_hypervisor(self, hyper_name):
- # can raise novaclient.exceptions.NotFound
# first get the id from name
hyper = self.novaclient.hypervisors.search(hyper_name)[0]
# get full hypervisor object
return self.novaclient.hypervisors.get(hyper.id)
-
- # Given 2 VMs test if they are running on same Host or not
- def check_vm_placement(self, vm_instance1, vm_instance2):
- try:
- server_instance_1 = self.novaclient.servers.get(vm_instance1)
- server_instance_2 = self.novaclient.servers.get(vm_instance2)
- return bool(server_instance_1.hostId == server_instance_2.hostId)
- except novaclient.exceptions:
- LOG.warning("Exception in retrieving the hostId of servers")
-
- # Create a new security group with appropriate rules
- def security_group_create(self):
- # check first the security group exists
- sec_groups = self.neutronclient.list_security_groups()['security_groups']
- group = [x for x in sec_groups if x['name'] == self.config.security_group_name]
- if group:
- return group[0]
-
- body = {
- 'security_group': {
- 'name': self.config.security_group_name,
- 'description': 'PNS Security Group'
- }
- }
- group = self.neutronclient.create_security_group(body)['security_group']
- self.security_group_add_rules(group)
-
- return group
-
- # Delete a security group
- def security_group_delete(self, group):
- if group:
- LOG.info("Deleting security group")
- self.neutronclient.delete_security_group(group['id'])
-
- # Add rules to the security group
- def security_group_add_rules(self, group):
- body = {
- 'security_group_rule': {
- 'direction': 'ingress',
- 'security_group_id': group['id'],
- 'remote_group_id': None
- }
- }
- if self.config.ipv6_mode:
- body['security_group_rule']['ethertype'] = 'IPv6'
- body['security_group_rule']['remote_ip_prefix'] = '::/0'
- else:
- body['security_group_rule']['ethertype'] = 'IPv4'
- body['security_group_rule']['remote_ip_prefix'] = '0.0.0.0/0'
-
- # Allow ping traffic
- body['security_group_rule']['protocol'] = 'icmp'
- body['security_group_rule']['port_range_min'] = None
- body['security_group_rule']['port_range_max'] = None
- self.neutronclient.create_security_group_rule(body)
-
- # Allow SSH traffic
- body['security_group_rule']['protocol'] = 'tcp'
- body['security_group_rule']['port_range_min'] = 22
- body['security_group_rule']['port_range_max'] = 22
- self.neutronclient.create_security_group_rule(body)
-
- # Allow TCP/UDP traffic for perf tools like iperf/nuttcp
- # 5001: Data traffic (standard iperf data port)
- # 5002: Control traffic (non standard)
- # note that 5000/tcp is already picked by openstack keystone
- body['security_group_rule']['protocol'] = 'tcp'
- body['security_group_rule']['port_range_min'] = 5001
- body['security_group_rule']['port_range_max'] = 5002
- self.neutronclient.create_security_group_rule(body)
- body['security_group_rule']['protocol'] = 'udp'
- self.neutronclient.create_security_group_rule(body)
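For reference, the compute helper retained above resolves a hypervisor in two steps: search by name to get an id, then fetch the full record. Below is a minimal usage sketch, assuming a nova client built from a keystone session; the auth URL, credentials and hypervisor name are placeholders, not values from this patch.

    # Sketch only: credentials, auth URL and hypervisor name are placeholders.
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from novaclient import client as nova_client

    auth = v3.Password(auth_url='http://keystone:5000/v3',
                       username='admin', password='secret',
                       project_name='admin',
                       user_domain_name='Default', project_domain_name='Default')
    nova = nova_client.Client('2', session=session.Session(auth=auth))

    # search() resolves the name to a (partial) hypervisor object with an id...
    hyper = nova.hypervisors.search('compute-0')[0]
    # ...get() then returns the full hypervisor record (state, status, ...)
    full = nova.hypervisors.get(hyper.id)
    print(full.hypervisor_hostname, full.state, full.status)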
diff --git a/nfvbench/config.py b/nfvbench/config.py
index 5feeda5..8e77127 100644
--- a/nfvbench/config.py
+++ b/nfvbench/config.py
@@ -16,19 +16,19 @@
from attrdict import AttrDict
import yaml
-from log import LOG
+from .log import LOG
def config_load(file_name, from_cfg=None, whitelist_keys=None):
"""Load a yaml file into a config dict, merge with from_cfg if not None
The config file content taking precedence in case of duplicate
"""
try:
- with open(file_name) as fileobj:
+ with open(file_name, encoding="utf-8") as fileobj:
cfg = AttrDict(yaml.safe_load(fileobj))
except IOError:
raise Exception("Configuration file at '{}' was not found. Please use correct path "
"and verify it is visible to container if you run nfvbench in container."
- .format(file_name))
+ .format(file_name)) from IOError
if from_cfg:
if not whitelist_keys:
@@ -43,10 +43,16 @@ def config_loads(cfg_text, from_cfg=None, whitelist_keys=None):
"""Same as config_load but load from a string
"""
try:
- cfg = AttrDict(yaml.load(cfg_text))
+ cfg = AttrDict(yaml.safe_load(cfg_text))
except TypeError:
# empty string
cfg = AttrDict()
+ except ValueError as e:
+        # Raised when the string is not well-formed yaml/json (e.g. a wrong or unreadable file path)
+ LOG.error("String %s is not well formatted. Please verify your yaml/json string. "
+ "If string is a file path, file was not found. Please use correct path and "
+ "verify it is visible to container if you run nfvbench in container.", cfg_text)
+ raise Exception(e) from e
if from_cfg:
if not whitelist_keys:
whitelist_keys = []
@@ -58,7 +64,7 @@ def config_loads(cfg_text, from_cfg=None, whitelist_keys=None):
def _validate_config(subset, superset, whitelist_keys):
def get_err_config(subset, superset):
result = {}
- for k, v in subset.items():
+ for k, v in list(subset.items()):
if k not in whitelist_keys:
if k not in superset:
result.update({k: v})
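As a standalone sketch of the load-and-merge behavior implemented by config_load()/config_loads(): the override text is parsed with yaml.safe_load and applied on top of a copy of the default AttrDict, with the override taking precedence (keys and values below are made up).

    # Standalone sketch of the merge semantics (made-up keys/values).
    from attrdict import AttrDict
    import yaml

    default_cfg = AttrDict({'duration_sec': 60, 'flow_count': 2})
    override_txt = 'duration_sec: 10\nflow_count: 10k\n'

    cfg = AttrDict(dict(default_cfg))                    # work on a copy
    cfg.update(AttrDict(yaml.safe_load(override_txt)))   # override wins on duplicates
    print(cfg.duration_sec, cfg.flow_count)              # -> 10 10k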
diff --git a/nfvbench/config_plugin.py b/nfvbench/config_plugin.py
index f6654eb..86e5505 100644
--- a/nfvbench/config_plugin.py
+++ b/nfvbench/config_plugin.py
@@ -13,44 +13,47 @@
# License for the specific language governing permissions and limitations
# under the License.
#
+"""Configuration Plugin.
+This module is used to override the configuration with platform-specific constraints and extensions.
+"""
import abc
-import specs
+from . import specs
-class ConfigPluginBase(object):
- """Base class for config plugins. Need to implement public interfaces."""
- __metaclass__ = abc.ABCMeta
+class ConfigPluginBase(object, metaclass=abc.ABCMeta):
+ """Base class for config plugins."""
class InitializationFailure(Exception):
- pass
+ """Used in case of any init failure."""
def __init__(self, config):
+ """Save configuration."""
if not config:
raise ConfigPluginBase.InitializationFailure(
'Initialization parameters need to be assigned.')
-
self.config = config
@abc.abstractmethod
def get_config(self):
- """Returns updated default configuration file."""
+ """Return updated default configuration file."""
def set_config(self, config):
- """This method is called when the config has changed after this instance was initialized.
+ """Set a new configuration.
- This is needed in teh frequent case where the main config is changed in a copy and to
+ This method is called when the config has changed after this instance was initialized.
+ This is needed in the frequent case where the main config is changed in a copy and to
prevent this instance to keep pointing to the old copy of the config
"""
self.config = config
@abc.abstractmethod
def get_openstack_spec(self):
- """Returns OpenStack specs for host."""
+ """Return OpenStack specs for host."""
@abc.abstractmethod
def get_run_spec(self, config, openstack_spec):
- """Returns RunSpec for given platform."""
+ """Return RunSpec for given platform."""
@abc.abstractmethod
def validate_config(self, cfg, openstack_spec):
@@ -58,19 +61,22 @@ class ConfigPluginBase(object):
@abc.abstractmethod
def prepare_results_config(self, cfg):
- """This function is called before running configuration is copied.
+ """Insert any plugin specific information to the results.
+
+ This function is called before running configuration is copied.
Example usage is to remove sensitive information like switch credentials.
"""
@abc.abstractmethod
def get_version(self):
- """Returns platform version."""
+ """Return platform version."""
class ConfigPlugin(ConfigPluginBase):
"""No-op config plugin class. Does not change anything."""
def __init__(self, config):
+ """Invoke the base class constructor."""
ConfigPluginBase.__init__(self, config)
def get_config(self):
@@ -78,18 +84,20 @@ class ConfigPlugin(ConfigPluginBase):
return self.config
def get_openstack_spec(self):
- """Returns OpenStack specs for host."""
+ """Return OpenStack specs for host."""
return specs.OpenStackSpec()
def get_run_spec(self, config, openstack_spec):
- """Returns RunSpec for given platform."""
+ """Return RunSpec for given platform."""
return specs.RunSpec(config.no_vswitch_access, openstack_spec)
- def validate_config(self, config, openstack_spec):
- pass
+ def validate_config(self, cfg, openstack_spec):
+ """Nothing to validate by default."""
def prepare_results_config(self, cfg):
+ """Nothing to add the results by default."""
return cfg
def get_version(self):
+ """Return an empty version."""
return {}
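A platform integration would typically subclass ConfigPlugin and override only the hooks it needs, for example to scrub credentials before the running config is archived with the results. A hypothetical sketch follows; the 'switch_password' field name is invented for illustration.

    # Hypothetical plugin; 'switch_password' is an invented config field.
    from nfvbench.config_plugin import ConfigPlugin

    class MyPlatformConfigPlugin(ConfigPlugin):
        """Example plugin that scrubs sensitive data and reports a version."""

        def prepare_results_config(self, cfg):
            # remove sensitive information before the config is copied to results
            cfg.pop('switch_password', None)
            return cfg

        def get_version(self):
            return {'my_platform': '1.0'}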
diff --git a/nfvbench/credentials.py b/nfvbench/credentials.py
index 530ad69..7c48879 100644
--- a/nfvbench/credentials.py
+++ b/nfvbench/credentials.py
@@ -21,32 +21,40 @@ import getpass
from keystoneauth1.identity import v2
from keystoneauth1.identity import v3
from keystoneauth1 import session
-from log import LOG
+import openstack
+from keystoneclient.exceptions import HTTPClientError
+
+from .log import LOG
class Credentials(object):
def get_session(self):
- dct = {
- 'username': self.rc_username,
- 'password': self.rc_password,
- 'auth_url': self.rc_auth_url
- }
- auth = None
-
- if self.rc_identity_api_version == 3:
- dct.update({
- 'project_name': self.rc_project_name,
- 'project_domain_name': self.rc_project_domain_name,
- 'user_domain_name': self.rc_user_domain_name
- })
- auth = v3.Password(**dct)
+
+ if self.clouds_detail:
+ connection = openstack.connect(cloud=self.clouds_detail)
+ cred_session = connection.session
else:
- dct.update({
- 'tenant_name': self.rc_tenant_name
- })
- auth = v2.Password(**dct)
- return session.Session(auth=auth, verify=self.rc_cacert)
+ dct = {
+ 'username': self.rc_username,
+ 'password': self.rc_password,
+ 'auth_url': self.rc_auth_url
+ }
+
+ if self.rc_identity_api_version == 3:
+ dct.update({
+ 'project_name': self.rc_project_name,
+ 'project_domain_name': self.rc_project_domain_name,
+ 'user_domain_name': self.rc_user_domain_name
+ })
+ auth = v3.Password(**dct)
+ else:
+ dct.update({
+ 'tenant_name': self.rc_tenant_name
+ })
+ auth = v2.Password(**dct)
+ cred_session = session.Session(auth=auth, verify=self.rc_cacert)
+ return cred_session
def __parse_openrc(self, file):
export_re = re.compile('export OS_([A-Z_]*)="?(.*)')
@@ -91,11 +99,28 @@ class Credentials(object):
elif name == "PROJECT_DOMAIN_NAME":
self.rc_project_domain_name = value
+    # The /users URL returns an exception (HTTP 403) if the user is not admin.
+    # Try first without the API version in case the session endpoint already includes it.
+    # Returns HTTP 200 if the user is admin.
+ def __user_is_admin(self, url):
+ is_admin = False
+ try:
+ # check if user has admin role in OpenStack project
+ filter = {'service_type': 'identity',
+ 'interface': 'public'}
+ self.get_session().get(url, endpoint_filter=filter)
+ is_admin = True
+ except HTTPClientError as exc:
+ if exc.http_status == 403:
+ LOG.warning(
+ "User is not admin, no permission to list user roles. Exception: %s", exc)
+ return is_admin
+
#
# Read a openrc file and take care of the password
# The 2 args are passed from the command line and can be None
#
- def __init__(self, openrc_file, pwd=None, no_env=False):
+ def __init__(self, openrc_file, clouds_detail, pwd=None, no_env=False):
self.rc_password = None
self.rc_username = None
self.rc_tenant_name = None
@@ -105,19 +130,22 @@ class Credentials(object):
self.rc_user_domain_name = None
self.rc_project_domain_name = None
self.rc_project_name = None
- self.rc_identity_api_version = 2
+ self.rc_identity_api_version = 3
+ self.is_admin = False
+ self.clouds_detail = clouds_detail
success = True
if openrc_file:
if isinstance(openrc_file, str):
if os.path.exists(openrc_file):
- self.__parse_openrc(open(openrc_file))
+ with open(openrc_file, encoding="utf-8") as rc_file:
+ self.__parse_openrc(rc_file)
else:
LOG.error('Error: rc file does not exist %s', openrc_file)
success = False
else:
self.__parse_openrc(openrc_file)
- elif not no_env:
+ elif not clouds_detail and not no_env:
# no openrc file passed - we assume the variables have been
# sourced by the calling shell
# just check that they are present
@@ -152,15 +180,27 @@ class Credentials(object):
# always override with CLI argument if provided
- if pwd:
- self.rc_password = pwd
- # if password not know, check from env variable
- elif self.rc_auth_url and not self.rc_password and success:
- if 'OS_PASSWORD' in os.environ and not no_env:
- self.rc_password = os.environ['OS_PASSWORD']
- else:
- # interactively ask for password
- self.rc_password = getpass.getpass(
- 'Please enter your OpenStack Password: ')
- if not self.rc_password:
- self.rc_password = ""
+ if not clouds_detail:
+ if pwd:
+ self.rc_password = pwd
+ # if password not know, check from env variable
+ elif self.rc_auth_url and not self.rc_password and success:
+ if 'OS_PASSWORD' in os.environ and not no_env:
+ self.rc_password = os.environ['OS_PASSWORD']
+ else:
+ # interactively ask for password
+ self.rc_password = getpass.getpass(
+ 'Please enter your OpenStack Password: ')
+ if not self.rc_password:
+ self.rc_password = ""
+
+
+ try:
+            # The /users URL returns an exception (HTTP 403) if the user is not admin.
+            # Try first without the API version in case the session endpoint already includes it.
+            # Returns HTTP 200 if the user is admin.
+ self.is_admin = self.__user_is_admin('/users') or self.__user_is_admin(
+ '/v2/users') or self.__user_is_admin('/v3/users')
+ except Exception as e:
+ LOG.warning("Error occurred during Openstack API access. "
+ "Unable to check user is admin. Exception: %s", e)
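The two session paths introduced here can be condensed into the sketch below: a clouds.yaml entry handled by openstacksdk, or the classic openrc-style keystoneauth1 password flow, followed by the /users admin probe. The clouds.yaml entry name, auth URL and credentials are placeholders, not values from this patch.

    # Condensed sketch of the two authentication paths (placeholder values).
    import openstack
    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    def make_session(clouds_detail=None):
        if clouds_detail:
            # clouds.yaml entry, e.g. 'mycloud' in ~/.config/openstack/clouds.yaml
            return openstack.connect(cloud=clouds_detail).session
        auth = v3.Password(auth_url='http://keystone:5000/v3',
                           username='nfvbench', password='secret',
                           project_name='nfvbench',
                           user_domain_name='Default',
                           project_domain_name='Default')
        return session.Session(auth=auth)

    # admin probe: HTTP 200 means admin, a 403 (raised as an exception) means no admin role
    sess = make_session()
    sess.get('/users', endpoint_filter={'service_type': 'identity',
                                        'interface': 'public'})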
diff --git a/nfvbench/factory.py b/nfvbench/factory.py
index 1461036..0d4b042 100644
--- a/nfvbench/factory.py
+++ b/nfvbench/factory.py
@@ -13,57 +13,19 @@
# License for the specific language governing permissions and limitations
# under the License.
#
+"""Factory for creating worker and config plugin instances."""
-from chain_clients import EXTStageClient
-from chain_clients import PVPStageClient
-from chain_clients import PVVPStageClient
-from chain_managers import EXTStatsManager
-from chain_managers import PVPStatsManager
-from chain_managers import PVVPStatsManager
-import chain_workers as workers
-from config_plugin import ConfigPlugin
-from specs import ChainType
-import tor_client
+from . import chain_workers as workers
+from .config_plugin import ConfigPlugin
class BasicFactory(object):
- chain_classes = [ChainType.EXT, ChainType.PVP, ChainType.PVVP]
-
- chain_stats_classes = {
- ChainType.EXT: EXTStatsManager,
- ChainType.PVP: PVPStatsManager,
- ChainType.PVVP: PVVPStatsManager,
- }
-
- stage_clients_classes = {
- ChainType.EXT: EXTStageClient,
- ChainType.PVP: PVPStageClient,
- ChainType.PVVP: PVVPStageClient,
- }
-
- def get_stats_class(self, service_chain):
- CLASS = self.chain_stats_classes.get(service_chain, None)
- if CLASS is None:
- raise Exception("Service chain '{}' not supported.".format(service_chain))
-
- return CLASS
-
- def get_stage_class(self, service_chain):
- CLASS = self.stage_clients_classes.get(service_chain, None)
- if CLASS is None:
- raise Exception("VM Client for chain '{}' not supported.".format(service_chain))
-
- return CLASS
+ """Basic factory class to be overridden for advanced customization."""
def get_chain_worker(self, encaps, service_chain):
+ """Get a chain worker based on encaps and service chain type."""
return workers.BasicWorker
- def get_tor_class(self, tor_type, no_tor_access):
- if no_tor_access or not tor_type:
- # if no TOR access is required, use basic no-op client
- tor_type = 'BasicTORClient'
-
- return getattr(tor_client, tor_type)
-
def get_config_plugin_class(self):
+ """Get a config plugin."""
return ConfigPlugin
diff --git a/nfvbench/fluentd.py b/nfvbench/fluentd.py
index ad0ea34..535d640 100644
--- a/nfvbench/fluentd.py
+++ b/nfvbench/fluentd.py
@@ -114,7 +114,7 @@ class FluentLogHandler(logging.Handler):
def __get_highest_level(self):
if self.__error_counter > 0:
return logging.ERROR
- elif self.__warning_counter > 0:
+ if self.__warning_counter > 0:
return logging.WARNING
return logging.INFO
@@ -122,7 +122,7 @@ class FluentLogHandler(logging.Handler):
highest_level = self.__get_highest_level()
if highest_level == logging.INFO:
return "GOOD RUN"
- elif highest_level == logging.WARNING:
+ if highest_level == logging.WARNING:
return "RUN WITH WARNINGS"
return "RUN WITH ERRORS"
diff --git a/nfvbench/network.py b/nfvbench/network.py
deleted file mode 100644
index 6c02f04..0000000
--- a/nfvbench/network.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-
-class Interface(object):
- """A class to hold the RX and TX counters for a virtual or physical interface."""
-
- def __init__(self, name, device, tx_packets, rx_packets):
- """Create a new interface instance."""
- self.name = name
- self.device = device
- self.packets = {
- 'tx': tx_packets,
- 'rx': rx_packets
- }
-
- def set_packets(self, tx, rx):
- """Set tx and rx counters for this interface."""
- self.packets = {
- 'tx': tx,
- 'rx': rx
- }
-
- def set_packets_diff(self, tx, rx):
- """Subtract current counters from new set of counters and update with results."""
- self.packets = {
- 'tx': tx - self.packets['tx'],
- 'rx': rx - self.packets['rx'],
- }
-
- def is_no_op(self):
- """Check if this interface is a no-opn interface."""
- return self.name is None
-
- def get_packet_count(self, traffic_type):
- """Get packet count for given direction."""
- return self.packets.get(traffic_type, 0)
-
- @staticmethod
- def no_op():
- """Return an interface that doe snot pass any traffic."""
- return Interface(None, None, 0, 0)
-
-
-class Network(object):
- """This class holds all interfaces that make up a logical neutron network.
-
- A loopback packet path has exactly 2 networks.
- The first interface is always one of the 2 traffic gen interface.
- Subsequent interfaces are sorted along the path from the TG to the loopback point
- which could be interfaces in a switch, a vswitch or a VM.
- """
-
- def __init__(self, interfaces=None, reverse=False):
- """Create a network with initial interface list and direction.
-
- :param interfaces: initial interface list
- :param reverse: specifies the order of interfaces returned by get_interfaces
- """
- if interfaces is None:
- interfaces = []
- self.interfaces = interfaces
- self.reverse = reverse
-
- def add_interface(self, interface):
- """Add one more interface to this network.
-
- Order if important as interfaces must be added from traffic generator ports towards then
- looping back device.
- """
- self.interfaces.append(interface)
-
- def get_interfaces(self):
- """Get interfaces associated to this network.
-
- Returned interface list is ordered from traffic generator port towards looping device if
- reverse is false. Else returms the list in the reverse order.
- """
- return self.interfaces[::-1] if self.reverse else self.interfaces
diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py
index 1cb5a9c..891b2bb 100644
--- a/nfvbench/nfvbench.py
+++ b/nfvbench/nfvbench.py
@@ -15,7 +15,6 @@
#
import argparse
-from collections import defaultdict
import copy
import datetime
import importlib
@@ -25,25 +24,24 @@ import sys
import traceback
from attrdict import AttrDict
+from logging import FileHandler
import pbr.version
from pkg_resources import resource_string
-from __init__ import __version__
-from chain_runner import ChainRunner
-from cleanup import Cleaner
-from config import config_load
-from config import config_loads
-import credentials as credentials
-from factory import BasicFactory
-from fluentd import FluentLogHandler
-import log
-from log import LOG
-from nfvbenchd import WebSocketIoServer
-from specs import ChainType
-from specs import Specs
-from summarizer import NFVBenchSummarizer
-from traffic_client import TrafficGeneratorFactory
-import utils
+from .__init__ import __version__
+from .chain_runner import ChainRunner
+from .cleanup import Cleaner
+from .config import config_load
+from .config import config_loads
+from . import credentials
+from .fluentd import FluentLogHandler
+from . import log
+from .log import LOG
+from .nfvbenchd import WebServer
+from .specs import ChainType
+from .specs import Specs
+from .summarizer import NFVBenchSummarizer
+from . import utils
fluent_logger = None
@@ -55,33 +53,31 @@ class NFVBench(object):
STATUS_ERROR = 'ERROR'
def __init__(self, config, openstack_spec, config_plugin, factory, notifier=None):
+ # the base config never changes for a given NFVbench instance
self.base_config = config
+ # this is the running config, updated at every run()
self.config = None
self.config_plugin = config_plugin
self.factory = factory
self.notifier = notifier
- self.cred = credentials.Credentials(config.openrc_file, None, False) \
- if config.openrc_file else None
+ self.cred = credentials.Credentials(config.openrc_file, config.clouds_detail, None, False) \
+ if config.openrc_file or config.clouds_detail else None
self.chain_runner = None
self.specs = Specs()
self.specs.set_openstack_spec(openstack_spec)
- self.clients = defaultdict(lambda: None)
self.vni_ports = []
sys.stdout.flush()
- def setup(self):
- self.specs.set_run_spec(self.config_plugin.get_run_spec(self.config, self.specs.openstack))
- self.chain_runner = ChainRunner(self.config,
- self.clients,
- self.cred,
- self.specs,
- self.factory,
- self.notifier)
-
def set_notifier(self, notifier):
self.notifier = notifier
- def run(self, opts, args):
+ def run(self, opts, args, dry_run=False):
+ """This run() method is called for every NFVbench benchmark request.
+
+ In CLI mode, this method is called only once per invocation.
+ In REST server mode, this is called once per REST POST request
+ On dry_run, show the running config in json format then exit
+ """
status = NFVBench.STATUS_OK
result = None
message = ''
@@ -91,21 +87,41 @@ class NFVBench(object):
fluent_logger.start_new_run()
LOG.info(args)
try:
- self.update_config(opts)
- self.setup()
+ # recalc the running config based on the base config and options for this run
+ self._update_config(opts)
+
+ if dry_run:
+ print((json.dumps(self.config, sort_keys=True, indent=4)))
+ sys.exit(0)
+
+ # check that an empty openrc file (no OpenStack) is only allowed
+ # with EXT chain
+ if (not self.config.openrc_file and not self.config.clouds_detail) and \
+ self.config.service_chain != ChainType.EXT:
+ raise Exception("openrc_file or clouds_detail in the configuration is required"
+ " for PVP/PVVP chains")
+
+ self.specs.set_run_spec(self.config_plugin.get_run_spec(self.config,
+ self.specs.openstack))
+ self.chain_runner = ChainRunner(self.config,
+ self.cred,
+ self.specs,
+ self.factory,
+ self.notifier)
new_frame_sizes = []
- min_packet_size = "68" if self.config.vlan_tagging else "64"
+ # make sure that the min frame size is 64
+ min_packet_size = 64
for frame_size in self.config.frame_sizes:
try:
- if int(frame_size) < int(min_packet_size):
- new_frame_sizes.append(min_packet_size)
- LOG.info("Adjusting frame size %s Bytes to minimum size %s Bytes due to " +
- "traffic generator restriction", frame_size, min_packet_size)
- else:
+ if int(frame_size) < min_packet_size:
+ frame_size = str(min_packet_size)
+ LOG.info("Adjusting frame size %s bytes to minimum size %s bytes",
+ frame_size, min_packet_size)
+ if frame_size not in new_frame_sizes:
new_frame_sizes.append(frame_size)
except ValueError:
- new_frame_sizes.append(frame_size)
- self.config.actual_frame_sizes = tuple(new_frame_sizes)
+ new_frame_sizes.append(frame_size.upper())
+ self.config.frame_sizes = new_frame_sizes
result = {
"date": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
"nfvbench_version": __version__,
@@ -132,7 +148,7 @@ class NFVBench(object):
self.chain_runner.close()
if status == NFVBench.STATUS_OK:
- result = utils.dict_to_json_dict(result)
+ # result2 = utils.dict_to_json_dict(result)
return {
'status': status,
'result': result
@@ -156,102 +172,152 @@ class NFVBench(object):
self.config.service_chain,
self.config.service_chain_count,
self.config.flow_count,
- self.config.frame_sizes)
+ self.config.frame_sizes,
+ self.config.user_id,
+ self.config.group_id)
+
+ def _update_config(self, opts):
+ """Recalculate the running config based on the base config and opts.
- def update_config(self, opts):
+ Sanity check on the config is done here as well.
+ """
self.config = AttrDict(dict(self.base_config))
+ # Update log file handler if needed after a config update (REST mode)
+ if 'log_file' in opts:
+ if opts['log_file']:
+ (path, _filename) = os.path.split(opts['log_file'])
+ if not os.path.exists(path):
+ LOG.warning(
+ 'Path %s does not exist. Please verify root path is shared with host. Path '
+ 'will be created.', path)
+ os.makedirs(path)
+ LOG.info('%s is created.', path)
+ if not any(isinstance(h, FileHandler) for h in log.getLogger().handlers):
+ log.add_file_logger(opts['log_file'])
+ else:
+ for h in log.getLogger().handlers:
+ if isinstance(h, FileHandler) and h.baseFilename != opts['log_file']:
+ # clean log file handler
+ log.getLogger().removeHandler(h)
+ log.add_file_logger(opts['log_file'])
+
self.config.update(opts)
+ config = self.config
+
+ config.service_chain = config.service_chain.upper()
+ config.service_chain_count = int(config.service_chain_count)
+ if config.l2_loopback:
+ # force the number of chains to be 1 in case of untagged l2 loopback
+ # (on the other hand, multiple L2 vlan tagged service chains are allowed)
+ if not config.vlan_tagging:
+ config.service_chain_count = 1
+ config.service_chain = ChainType.EXT
+ config.no_arp = True
+ LOG.info('Running L2 loopback: using EXT chain/no ARP')
+
+ # allow oversized vlan lists, just clip them
+ try:
+ vlans = [list(v) for v in config.vlans]
+ for v in vlans:
+ del v[config.service_chain_count:]
+ config.vlans = vlans
+ except Exception:
+ pass
- self.config.service_chain = self.config.service_chain.upper()
- self.config.service_chain_count = int(self.config.service_chain_count)
- self.config.flow_count = utils.parse_flow_count(self.config.flow_count)
- required_flow_count = self.config.service_chain_count * 2
- if self.config.flow_count < required_flow_count:
+ # traffic profile override options
+ if 'frame_sizes' in opts:
+ unidir = False
+ if 'unidir' in opts:
+ unidir = opts['unidir']
+ override_custom_traffic(config, opts['frame_sizes'], unidir)
+ LOG.info("Frame size has been set to %s for current configuration", opts['frame_sizes'])
+
+ config.flow_count = utils.parse_flow_count(config.flow_count)
+ required_flow_count = config.service_chain_count * 2
+ if config.flow_count < required_flow_count:
LOG.info("Flow count %d has been set to minimum value of '%d' "
- "for current configuration", self.config.flow_count,
+ "for current configuration", config.flow_count,
required_flow_count)
- self.config.flow_count = required_flow_count
-
- if self.config.flow_count % 2 != 0:
- self.config.flow_count += 1
-
- self.config.duration_sec = float(self.config.duration_sec)
- self.config.interval_sec = float(self.config.interval_sec)
-
- # Get traffic generator profile config
- if not self.config.generator_profile:
- self.config.generator_profile = self.config.traffic_generator.default_profile
-
- generator_factory = TrafficGeneratorFactory(self.config)
- self.config.generator_config = \
- generator_factory.get_generator_config(self.config.generator_profile)
-
- # Check length of mac_addrs_left/right for serivce_chain EXT with no_arp
- if self.config.service_chain == ChainType.EXT and self.config.no_arp:
- if not (self.config.generator_config.mac_addrs_left is None and
- self.config.generator_config.mac_addrs_right is None):
- if (self.config.generator_config.mac_addrs_left is None or
- self.config.generator_config.mac_addrs_right is None):
- raise Exception("mac_addrs_left and mac_addrs_right must either "
- "both be None or have a number of entries matching "
- "service_chain_count")
- if not (len(self.config.generator_config.mac_addrs_left) ==
- self.config.service_chain_count and
- len(self.config.generator_config.mac_addrs_right) ==
- self.config.service_chain_count):
- raise Exception("length of mac_addrs_left ({a}) and/or mac_addrs_right ({b}) "
- "does not match service_chain_count ({c})"
- .format(a=len(self.config.generator_config.mac_addrs_left),
- b=len(self.config.generator_config.mac_addrs_right),
- c=self.config.service_chain_count))
-
- if not any(self.config.generator_config.pcis):
- raise Exception("PCI addresses configuration for selected traffic generator profile "
- "({tg_profile}) are missing. Please specify them in configuration file."
- .format(tg_profile=self.config.generator_profile))
-
- if self.config.traffic is None or not self.config.traffic:
- raise Exception("No traffic profile found in traffic configuration, "
- "please fill 'traffic' section in configuration file.")
-
- if isinstance(self.config.traffic, tuple):
- self.config.traffic = self.config.traffic[0]
-
- self.config.frame_sizes = generator_factory.get_frame_sizes(self.config.traffic.profile)
-
- self.config.ipv6_mode = False
- self.config.no_dhcp = True
- self.config.same_network_only = True
- if self.config.openrc_file:
- self.config.openrc_file = os.path.expanduser(self.config.openrc_file)
-
- self.config.ndr_run = (not self.config.no_traffic and
- 'ndr' in self.config.rate.strip().lower().split('_'))
- self.config.pdr_run = (not self.config.no_traffic and
- 'pdr' in self.config.rate.strip().lower().split('_'))
- self.config.single_run = (not self.config.no_traffic and
- not (self.config.ndr_run or self.config.pdr_run))
-
- if self.config.vlans and len(self.config.vlans) != 2:
- raise Exception('Number of configured VLAN IDs for VLAN tagging must be exactly 2.')
-
- self.config.json_file = self.config.json if self.config.json else None
- if self.config.json_file:
- (path, _filename) = os.path.split(self.config.json)
+ config.flow_count = required_flow_count
+
+ if config.flow_count % 2:
+ config.flow_count += 1
+
+ # Possibly adjust the cache size
+ if config.cache_size < 0:
+ config.cache_size = config.flow_count
+
+ # The size must be capped to 10000 (where does this limit come from?)
+ config.cache_size = min(config.cache_size, 10000)
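The flow_count and cache_size adjustments above reduce to a few arithmetic rules; a standalone sketch follows (parse_flow_count, which expands suffixes like '10k', lives in nfvbench.utils and is not repeated here).

    # Standalone sketch of the flow_count / cache_size normalization.
    def normalize_counts(flow_count, service_chain_count, cache_size):
        required = service_chain_count * 2
        if flow_count < required:
            flow_count = required        # at least 2 flows per chain
        if flow_count % 2:
            flow_count += 1              # keep it even
        if cache_size < 0:
            cache_size = flow_count      # negative means "use flow count"
        return flow_count, min(cache_size, 10000)

    print(normalize_counts(5, 4, -1))    # -> (8, 8)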
+
+ config.duration_sec = float(config.duration_sec)
+ config.interval_sec = float(config.interval_sec)
+ config.pause_sec = float(config.pause_sec)
+
+ if config.traffic is None or not config.traffic:
+ raise Exception("Missing traffic property in configuration")
+
+ if config.openrc_file:
+ config.openrc_file = os.path.expanduser(config.openrc_file)
+ if config.flavor.vcpus < 2:
+ raise Exception("Flavor vcpus must be >= 2")
+
+ config.ndr_run = (not config.no_traffic and
+ 'ndr' in config.rate.strip().lower().split('_'))
+ config.pdr_run = (not config.no_traffic and
+ 'pdr' in config.rate.strip().lower().split('_'))
+ config.single_run = (not config.no_traffic and
+ not (config.ndr_run or config.pdr_run))
+
+ config.json_file = config.json if config.json else None
+ if config.json_file:
+ (path, _filename) = os.path.split(config.json)
if not os.path.exists(path):
raise Exception('Please provide existing path for storing results in JSON file. '
'Path used: {path}'.format(path=path))
- self.config.std_json_path = self.config.std_json if self.config.std_json else None
- if self.config.std_json_path:
- if not os.path.exists(self.config.std_json):
+ config.std_json_path = config.std_json if config.std_json else None
+ if config.std_json_path:
+ if not os.path.exists(config.std_json):
raise Exception('Please provide existing path for storing results in JSON file. '
- 'Path used: {path}'.format(path=self.config.std_json_path))
+ 'Path used: {path}'.format(path=config.std_json_path))
+
+ # Check that multiqueue is between 1 and 8 (8 is the max allowed by libvirt/qemu)
+ if config.vif_multiqueue_size < 1 or config.vif_multiqueue_size > 8:
+ raise Exception('vif_multiqueue_size (%d) must be in [1..8]' %
+ config.vif_multiqueue_size)
+
+ # VxLAN and MPLS sanity checks
+ if config.vxlan or config.mpls:
+ if config.vlan_tagging:
+ config.vlan_tagging = False
+ config.no_latency_streams = True
+ config.no_latency_stats = True
+ config.no_flow_stats = True
+ LOG.info('VxLAN or MPLS: vlan_tagging forced to False '
+ '(inner VLAN tagging must be disabled)')
+
+ self.config_plugin.validate_config(config, self.specs.openstack)
- self.config_plugin.validate_config(self.config, self.specs.openstack)
+def bool_arg(x):
+ """Argument type to be used in parser.add_argument()
+    When a boolean-like value is expected to be given
+ """
+ return (str(x).lower() != 'false') \
+ and (str(x).lower() != 'no') \
+ and (str(x).lower() != '0')
-def parse_opts_from_cli():
+
+def int_arg(x):
+ """Argument type to be used in parser.add_argument()
+    When an integer value is expected to be given
+    (plain decimal or hexadecimal '0x...' notation accepted)
+ """
+ return int(x, 0)
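bool_arg and int_arg are meant to be passed as the 'type' callable of parser.add_argument(); a short usage sketch with two of the options defined below:

    # Usage sketch of the two argparse converters.
    import argparse

    def bool_arg(x):
        return str(x).lower() not in ('false', 'no', '0')   # same behaviour as bool_arg above

    def int_arg(x):
        return int(x, 0)     # base 0: '123' as well as '0x1f' are accepted

    parser = argparse.ArgumentParser()
    parser.add_argument('--vlan-tagging', type=bool_arg, default=None)
    parser.add_argument('--debug-mask', type=int_arg, default=None)
    opts = parser.parse_args(['--vlan-tagging', 'no', '--debug-mask', '0x3'])
    print(opts.vlan_tagging, opts.debug_mask)   # -> False 3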
+
+
+def _parse_opts_from_cli():
parser = argparse.ArgumentParser()
parser.add_argument('--status', dest='status',
@@ -267,10 +333,8 @@ def parse_opts_from_cli():
parser.add_argument('--server', dest='server',
default=None,
- action='store',
- metavar='<http_root_pathname>',
- help='Run nfvbench in server mode and pass'
- ' the HTTP root folder full pathname')
+ action='store_true',
+ help='Run nfvbench in server mode')
parser.add_argument('--host', dest='host',
action='store',
@@ -283,7 +347,7 @@ def parse_opts_from_cli():
help='Port on which server will be listening (default 7555)')
parser.add_argument('-sc', '--service-chain', dest='service_chain',
- choices=BasicFactory.chain_classes,
+ choices=ChainType.names,
action='store',
help='Service chain to run')
@@ -315,7 +379,7 @@ def parse_opts_from_cli():
parser.add_argument('--inter-node', dest='inter_node',
default=None,
action='store_true',
- help='run VMs in different compute nodes (PVVP only)')
+ help='(deprecated)')
parser.add_argument('--sriov', dest='sriov',
default=None,
@@ -337,6 +401,17 @@ def parse_opts_from_cli():
action='store',
help='Traffic generator profile to use')
+ parser.add_argument('-l3', '--l3-router', dest='l3_router',
+ default=None,
+ action='store_true',
+ help='Use L3 neutron routers to handle traffic')
+
+ parser.add_argument('-garp', '--gratuitous-arp', dest='periodic_gratuitous_arp',
+ default=None,
+ action='store_true',
+ help='Use gratuitous ARP to maintain session between TG '
+ 'and L3 routers to handle traffic')
+
parser.add_argument('-0', '--no-traffic', dest='no_traffic',
default=None,
action='store_true',
@@ -348,25 +423,26 @@ def parse_opts_from_cli():
help='Do not use ARP to find MAC addresses, '
'instead use values in config file')
- parser.add_argument('--no-reset', dest='no_reset',
+ parser.add_argument('--loop-vm-arp', dest='loop_vm_arp',
default=None,
action='store_true',
- help='Do not reset counters prior to running')
+ help='Use ARP to find MAC addresses '
+ 'instead of using values from TRex ports (VPP forwarder only)')
- parser.add_argument('--no-int-config', dest='no_int_config',
+ parser.add_argument('--no-vswitch-access', dest='no_vswitch_access',
default=None,
action='store_true',
- help='Skip interfaces config on EXT service chain')
+ help='Skip vswitch configuration and retrieving of stats')
- parser.add_argument('--no-tor-access', dest='no_tor_access',
+ parser.add_argument('--vxlan', dest='vxlan',
default=None,
action='store_true',
- help='Skip TOR switch configuration and retrieving of stats')
+ help='Enable VxLan encapsulation')
- parser.add_argument('--no-vswitch-access', dest='no_vswitch_access',
+ parser.add_argument('--mpls', dest='mpls',
default=None,
action='store_true',
- help='Skip vswitch configuration and retrieving of stats')
+ help='Enable MPLS encapsulation')
parser.add_argument('--no-cleanup', dest='no_cleanup',
default=None,
@@ -383,6 +459,11 @@ def parse_opts_from_cli():
action='store_true',
help='Cleanup NFVbench resources (do not prompt)')
+ parser.add_argument('--restart', dest='restart',
+ default=None,
+ action='store_true',
+ help='Restart TRex server')
+
parser.add_argument('--json', dest='json',
action='store',
help='store results in json format file',
@@ -400,10 +481,15 @@ def parse_opts_from_cli():
action='store_true',
help='print the default config in yaml format (unedited)')
+ parser.add_argument('--show-pre-config', dest='show_pre_config',
+ default=None,
+ action='store_true',
+ help='print the config in json format (cfg file applied)')
+
parser.add_argument('--show-config', dest='show_config',
default=None,
action='store_true',
- help='print the running config in json format')
+ help='print the running config in json format (final)')
parser.add_argument('-ss', '--show-summary', dest='summary',
action='store',
@@ -434,10 +520,116 @@ def parse_opts_from_cli():
action='store',
help='Custom label for performance records')
+ parser.add_argument('--hypervisor', dest='hypervisor',
+ action='store',
+ metavar='<hypervisor name>',
+ help='Where chains must run ("compute", "az:", "az:compute")')
+
parser.add_argument('--l2-loopback', '--l2loopback', dest='l2_loopback',
action='store',
- metavar='<vlan>',
- help='Port to port or port to switch to port L2 loopback with VLAN id')
+ metavar='<vlan(s)|no-tag|true|false>',
+ help='Port to port or port to switch to port L2 loopback '
+ 'tagged with given VLAN id(s) or not (given \'no-tag\') '
+ '\'true\': use current vlans; \'false\': disable this mode.')
+
+ parser.add_argument('--i40e-mixed', dest='i40e_mixed',
+ action='store',
+ default=None,
+ metavar='<ignore,check,unbind>',
+                        help='TRex behavior when dealing with an i40e network card driver'
+ ' [ https://trex-tgn.cisco.com/youtrack/issue/trex-528 ]')
+
+ parser.add_argument('--user-info', dest='user_info',
+ action='append',
+ metavar='<data>',
+ help='Custom data to be included as is '
+ 'in the json report config branch - '
+                             'example (note: no spaces allowed): '
+ '--user-info=\'{"status":"explore","description":'
+ '{"target":"lab","ok":true,"version":2020}}\' - '
+ 'this option may be repeated; given data will be merged.')
+
+ parser.add_argument('--vlan-tagging', dest='vlan_tagging',
+ type=bool_arg,
+ metavar='<boolean>',
+ action='store',
+ default=None,
+ help='Override the NFVbench \'vlan_tagging\' parameter')
+
+ parser.add_argument('--intf-speed', dest='intf_speed',
+ metavar='<speed>',
+ action='store',
+ default=None,
+ help='Override the NFVbench \'intf_speed\' '
+ 'parameter (e.g. 10Gbps, auto, 16.72Gbps)')
+
+ parser.add_argument('--cores', dest='cores',
+ type=int_arg,
+ metavar='<number>',
+ action='store',
+ default=None,
+ help='Override the T-Rex \'cores\' parameter')
+
+ parser.add_argument('--cache-size', dest='cache_size',
+ type=int_arg,
+ metavar='<size>',
+ action='store',
+ default=None,
+ help='Specify the FE cache size (default: 0, flow-count if < 0)')
+
+ parser.add_argument('--service-mode', dest='service_mode',
+ action='store_true',
+ default=None,
+ help='Enable T-Rex service mode (for debugging purpose)')
+
+ parser.add_argument('--no-e2e-check', dest='no_e2e_check',
+ action='store_true',
+ default=None,
+ help='Skip "end to end" connectivity check (on test purpose)')
+
+ parser.add_argument('--no-flow-stats', dest='no_flow_stats',
+ action='store_true',
+ default=None,
+ help='Disable additional flow stats (on high load traffic)')
+
+ parser.add_argument('--no-latency-stats', dest='no_latency_stats',
+ action='store_true',
+ default=None,
+ help='Disable flow stats for latency traffic')
+
+ parser.add_argument('--no-latency-streams', dest='no_latency_streams',
+ action='store_true',
+ default=None,
+ help='Disable latency measurements (no streams)')
+
+ parser.add_argument('--user-id', dest='user_id',
+ type=int_arg,
+ metavar='<uid>',
+ action='store',
+ default=None,
+ help='Change json/log files ownership with this user (int)')
+
+ parser.add_argument('--group-id', dest='group_id',
+ type=int_arg,
+ metavar='<gid>',
+ action='store',
+ default=None,
+ help='Change json/log files ownership with this group (int)')
+
+ parser.add_argument('--show-trex-log', dest='show_trex_log',
+ default=None,
+ action='store_true',
+ help='Show the current TRex local server log file contents'
+ ' => diagnostic/help in case of configuration problems')
+
+ parser.add_argument('--debug-mask', dest='debug_mask',
+ type=int_arg,
+ metavar='<mask>',
+ action='store',
+ default=None,
+ help='General purpose register (debugging flags), '
+                             'the hexadecimal notation (0x...) is accepted. '
+ 'Designed for development needs (default: 0).')
opts, unknown_opts = parser.parse_known_args()
return opts, unknown_opts
@@ -503,41 +695,62 @@ def main():
log.setup()
# load default config file
config, default_cfg = load_default_config()
+ # possibly override the default user_id & group_id values
+ if 'USER_ID' in os.environ:
+ config.user_id = int(os.environ['USER_ID'])
+ if 'GROUP_ID' in os.environ:
+ config.group_id = int(os.environ['GROUP_ID'])
+
# create factory for platform specific classes
try:
factory_module = importlib.import_module(config['factory_module'])
factory = getattr(factory_module, config['factory_class'])()
except AttributeError:
raise Exception("Requested factory module '{m}' or class '{c}' was not found."
- .format(m=config['factory_module'], c=config['factory_class']))
+ .format(m=config['factory_module'],
+ c=config['factory_class'])) from AttributeError
# create config plugin for this platform
config_plugin = factory.get_config_plugin_class()(config)
config = config_plugin.get_config()
- opts, unknown_opts = parse_opts_from_cli()
+ opts, unknown_opts = _parse_opts_from_cli()
log.set_level(debug=opts.debug)
if opts.version:
- print pbr.version.VersionInfo('nfvbench').version_string_with_vcs()
+ print((pbr.version.VersionInfo('nfvbench').version_string_with_vcs()))
sys.exit(0)
if opts.summary:
- with open(opts.summary) as json_data:
+ with open(opts.summary, encoding="utf-8") as json_data:
result = json.load(json_data)
if opts.user_label:
result['config']['user_label'] = opts.user_label
- print NFVBenchSummarizer(result, fluent_logger)
+ print((NFVBenchSummarizer(result, fluent_logger)))
sys.exit(0)
# show default config in text/yaml format
if opts.show_default_config:
- print default_cfg
+ print((default_cfg.decode("utf-8")))
+ sys.exit(0)
+
+ # dump the contents of the trex log file
+ if opts.show_trex_log:
+ try:
+ with open('/tmp/trex.log', encoding="utf-8") as trex_log_file:
+ print(trex_log_file.read(), end="")
+ except FileNotFoundError:
+ print("No TRex log file found!")
sys.exit(0)
+ # mask info logging in case of further config dump
+ if opts.show_config or opts.show_pre_config:
+ LOG.setLevel(log.logging.WARNING)
+
config.name = ''
if opts.config:
# do not check extra_specs in flavor as it can contain any key/value pairs
- whitelist_keys = ['extra_specs']
+ # the same principle applies also to the optional user_info open property
+ whitelist_keys = ['extra_specs', 'user_info']
# override default config options with start config at path parsed from CLI
# check if it is an inline yaml/json config or a file name
if os.path.isfile(opts.config):
@@ -548,6 +761,11 @@ def main():
LOG.info('Loading configuration string: %s', opts.config)
config = config_loads(opts.config, config, whitelist_keys)
+ # show current config in json format (before CLI overriding)
+ if opts.show_pre_config:
+ print((json.dumps(config, sort_keys=True, indent=4)))
+ sys.exit(0)
+
# setup the fluent logger as soon as possible right after the config plugin is called,
# if there is any logging or result tag is set then initialize the fluent logger
for fluentd in config.fluentd:
@@ -559,35 +777,124 @@ def main():
# traffic profile override options
override_custom_traffic(config, opts.frame_sizes, opts.unidir)
- # copy over cli options that are used in config
+    # Copy over some of the CLI options that are used in config.
+    # This explicit copy is sometimes necessary because some early
+    # evaluation depends on them and cannot wait for _update_config(),
+    # which runs later. It is then good practice to set them to None
+    # (<=> already handled), and it is even required when a specific
+    # conversion is performed here that would otherwise be overwritten
+    # by the default update (simple copy). On the other hand, some
+    # redundant assignments have been removed from here, since
+    # _update_config() already handles them.
+
config.generator_profile = opts.generator_profile
- if opts.sriov:
+ if opts.sriov is not None:
config.sriov = True
- if opts.log_file:
+ opts.sriov = None
+ if opts.log_file is not None:
config.log_file = opts.log_file
- if opts.service_chain:
+ opts.log_file = None
+ if opts.user_id is not None:
+ config.user_id = opts.user_id
+ opts.user_id = None
+ if opts.group_id is not None:
+ config.group_id = opts.group_id
+ opts.group_id = None
+ if opts.service_chain is not None:
config.service_chain = opts.service_chain
- if opts.service_chain_count:
- config.service_chain_count = opts.service_chain_count
- if opts.no_vswitch_access:
- config.no_vswitch_access = opts.no_vswitch_access
- if opts.no_int_config:
- config.no_int_config = opts.no_int_config
-
- if opts.l2_loopback:
- if config.service_chain != ChainType.EXT:
- LOG.info('Changing service chain type to EXT')
- config.service_chain = ChainType.EXT
- if not config.no_arp:
- LOG.info('Disabling ARP')
- config.no_arp = True
- config.vlans = [int(opts.l2_loopback), int(opts.l2_loopback)]
- LOG.info('Running L2 loopback: using EXT chain and no ARP')
-
- if opts.use_sriov_middle_net:
- if (not config.sriov) or (config.service_chain != ChainType.PVVP):
- raise Exception("--use-sriov-middle-net is only valid for PVVP with SRIOV")
- config.use_sriov_middle_net = True
+ opts.service_chain = None
+ if opts.hypervisor is not None:
+ # can be any of 'comp1', 'nova:', 'nova:comp1'
+ config.compute_nodes = opts.hypervisor
+ opts.hypervisor = None
+ if opts.debug_mask is not None:
+ config.debug_mask = opts.debug_mask
+ opts.debug_mask = None
+
+    # convert 'user_info' opt from json string to dictionary
+    # and merge the result with the current config dictionary
+ if opts.user_info is not None:
+ for user_info_json in opts.user_info:
+ user_info_dict = json.loads(user_info_json)
+ if config.user_info:
+ config.user_info = config.user_info + user_info_dict
+ else:
+ config.user_info = user_info_dict
+ opts.user_info = None
+
+ # port to port loopback (direct or through switch)
+ # we accept the following syntaxes for the CLI argument
+ # 'false' : mode not enabled
+ # 'true' : mode enabled with currently defined vlan IDs
+ # 'no-tag' : mode enabled with no vlan tagging
+ # <vlan IDs>: mode enabled using the given (pair of) vlan ID lists
+    #   - If present, a '_' char will separate the left and right port lists
+ # e.g. 'a_x' => vlans: [[a],[x]]
+ # 'a,b,c_x,y,z' => [[a,b,c],[x,y,z]]
+ # - Otherwise the given vlan ID list applies to both sides
+ # e.g. 'a' => vlans: [[a],[a]]
+ # 'a,b' => [[a,b],[a,b]]
+ # - Vlan lists size needs to be at least the actual SCC value
+    #   - Unless overridden in CLI opts, config.service_chain_count
+ # is adjusted to the size of the VLAN ID lists given here.
+
+ if opts.l2_loopback is not None:
+ arg_pair = opts.l2_loopback.lower().split('_')
+ if arg_pair[0] == 'false':
+ config.l2_loopback = False
+ else:
+ config.l2_loopback = True
+ if config.service_chain != ChainType.EXT:
+ LOG.info('Changing service chain type to EXT')
+ config.service_chain = ChainType.EXT
+ if not config.no_arp:
+ LOG.info('Disabling ARP')
+ config.no_arp = True
+ if arg_pair[0] == 'true':
+ pass
+ else:
+ # here explicit (not)tagging is not CLI overridable
+ opts.vlan_tagging = None
+ if arg_pair[0] == 'no-tag':
+ config.vlan_tagging = False
+ else:
+ config.vlan_tagging = True
+ if len(arg_pair) == 1 or not arg_pair[1]:
+ arg_pair = [arg_pair[0], arg_pair[0]]
+ vlans = [[], []]
+
+ def append_vlan(port, vlan_id):
+ # a vlan tag value must be in [0..4095]
+ if vlan_id not in range(0, 4096):
+ raise ValueError
+ vlans[port].append(vlan_id)
+ try:
+ for port in [0, 1]:
+ vlan_ids = arg_pair[port].split(',')
+ for vlan_id in vlan_ids:
+ append_vlan(port, int(vlan_id))
+ if len(vlans[0]) != len(vlans[1]):
+ raise ValueError
+ except ValueError:
+ # at least one invalid tag => no tagging
+ config.vlan_tagging = False
+ if config.vlan_tagging:
+ config.vlans = vlans
+                    # force service chain count if not CLI overridden
+ if opts.service_chain_count is None:
+ config.service_chain_count = len(vlans[0])
+ opts.l2_loopback = None
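To make the accepted --l2-loopback VLAN syntaxes above concrete, here is a standalone sketch of just the list parsing ('a,b,c_x,y,z' style arguments); it mirrors the logic of the block above but raises instead of silently disabling tagging.

    # Standalone sketch of the VLAN pair parsing described in the comment above.
    def parse_l2_loopback_vlans(arg):
        arg_pair = arg.lower().split('_')
        if len(arg_pair) == 1 or not arg_pair[1]:
            arg_pair = [arg_pair[0], arg_pair[0]]     # same list on both ports
        vlans = [[], []]
        for port in [0, 1]:
            for vlan_id in arg_pair[port].split(','):
                vlan_id = int(vlan_id)
                if vlan_id not in range(0, 4096):     # valid tag range
                    raise ValueError('invalid vlan id: %d' % vlan_id)
                vlans[port].append(vlan_id)
        if len(vlans[0]) != len(vlans[1]):
            raise ValueError('left and right VLAN lists must have the same size')
        return vlans

    print(parse_l2_loopback_vlans('100,101_200,201'))  # [[100, 101], [200, 201]]
    print(parse_l2_loopback_vlans('100,101'))          # [[100, 101], [100, 101]]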
+
+ if config.i40e_mixed is None:
+ config.i40e_mixed = 'ignore'
+ if config.use_sriov_middle_net is None:
+ config.use_sriov_middle_net = False
+ if opts.use_sriov_middle_net is not None:
+ config.use_sriov_middle_net = opts.use_sriov_middle_net
+ opts.use_sriov_middle_net = None
+ if (config.use_sriov_middle_net and (
+ (not config.sriov) or (config.service_chain != ChainType.PVVP))):
+ raise Exception("--use-sriov-middle-net is only valid for PVVP with SRIOV")
if config.sriov and config.service_chain != ChainType.EXT:
# if sriov is requested (does not apply to ext chains)
@@ -597,19 +904,6 @@ def main():
if config.service_chain == ChainType.PVVP and config.use_sriov_middle_net:
check_physnet("middle", config.internal_networks.middle)
- # show running config in json format
- if opts.show_config:
- print json.dumps(config, sort_keys=True, indent=4)
- sys.exit(0)
-
- # check that an empty openrc file (no OpenStack) is only allowed
- # with EXT chain
- if not config.openrc_file:
- if config.service_chain == ChainType.EXT:
- LOG.info('EXT chain with OpenStack mode disabled')
- else:
- raise Exception("openrc_file is empty in the configuration and is required")
-
# update the config in the config plugin as it might have changed
# in a copy of the dict (config plugin still holds the original dict)
config_plugin.set_config(config)
@@ -620,6 +914,13 @@ def main():
# add file log if requested
if config.log_file:
log.add_file_logger(config.log_file)
+ # possibly change file ownership
+ uid = config.user_id
+ gid = config.group_id
+ if gid is None:
+ gid = uid
+ if uid is not None:
+ os.chown(config.log_file, uid, gid)
openstack_spec = config_plugin.get_openstack_spec() if config.openrc_file \
else None
@@ -627,19 +928,16 @@ def main():
nfvbench_instance = NFVBench(config, openstack_spec, config_plugin, factory)
if opts.server:
- if os.path.isdir(opts.server):
- server = WebSocketIoServer(opts.server, nfvbench_instance, fluent_logger)
- nfvbench_instance.set_notifier(server)
- try:
- port = int(opts.port)
- except ValueError:
- server.run(host=opts.host)
- else:
- server.run(host=opts.host, port=port)
+ server = WebServer(nfvbench_instance, fluent_logger)
+ try:
+ port = int(opts.port)
+ except ValueError:
+ server.run(host=opts.host)
else:
- print 'Invalid HTTP root directory: ' + opts.server
- sys.exit(1)
+ server.run(host=opts.host, port=port)
+ # server.run() should never return
else:
+ dry_run = opts.show_config
with utils.RunLock():
run_summary_required = True
if unknown_opts:
@@ -648,10 +946,10 @@ def main():
raise Exception(err_msg)
# remove unfilled values
- opts = {k: v for k, v in vars(opts).iteritems() if v is not None}
+ opts = {k: v for k, v in list(vars(opts).items()) if v is not None}
# get CLI args
params = ' '.join(str(e) for e in sys.argv[1:])
- result = nfvbench_instance.run(opts, params)
+ result = nfvbench_instance.run(opts, params, dry_run=dry_run)
if 'error_message' in result:
raise Exception(result['error_message'])
@@ -664,7 +962,7 @@ def main():
'status': NFVBench.STATUS_ERROR,
'error_message': traceback.format_exc()
})
- print str(exc)
+ print((str(exc)))
finally:
if fluent_logger:
# only send a summary record if there was an actual nfvbench run or
diff --git a/nfvbench/nfvbenchd.py b/nfvbench/nfvbenchd.py
index fa781af..07f1eea 100644
--- a/nfvbench/nfvbenchd.py
+++ b/nfvbench/nfvbenchd.py
@@ -15,38 +15,31 @@
#
import json
-import Queue
+import queue
+from threading import Thread
import uuid
from flask import Flask
from flask import jsonify
-from flask import render_template
from flask import request
-from flask_socketio import emit
-from flask_socketio import SocketIO
-from summarizer import NFVBenchSummarizer
+from .summarizer import NFVBenchSummarizer
-from log import LOG
-from utils import byteify
-from utils import RunLock
+from .log import LOG
+from .utils import RunLock
-# this global cannot reside in Ctx because of the @app and @socketio decorators
-app = None
-socketio = None
+from .__init__ import __version__
STATUS_OK = 'OK'
STATUS_ERROR = 'ERROR'
STATUS_PENDING = 'PENDING'
STATUS_NOT_FOUND = 'NOT_FOUND'
-
def result_json(status, message, request_id=None):
body = {
'status': status,
'error_message': message
}
-
if request_id is not None:
body['request_id'] = request_id
@@ -54,7 +47,7 @@ def result_json(status, message, request_id=None):
def load_json(data):
- return json.loads(json.dumps(data), object_hook=byteify)
+ return json.loads(json.dumps(data))
def get_uuid():
@@ -63,18 +56,16 @@ def get_uuid():
class Ctx(object):
MAXLEN = 5
- run_queue = Queue.Queue()
+ run_queue = queue.Queue()
busy = False
result = None
- request_from_socketio = False
results = {}
ids = []
current_id = None
@staticmethod
- def enqueue(config, request_id, from_socketio=False):
+ def enqueue(config, request_id):
Ctx.busy = True
- Ctx.request_from_socketio = from_socketio
config['request_id'] = request_id
Ctx.run_queue.put(config)
@@ -109,16 +100,15 @@ class Ctx(object):
res = Ctx.results[request_id]
except KeyError:
return None
-
+ # pylint: disable=unsubscriptable-object
if Ctx.result and request_id == Ctx.result['request_id']:
Ctx.result = None
-
- return res
- else:
- res = Ctx.result
- if res:
- Ctx.result = None
return res
+ # pylint: enable=unsubscriptable-object
+ res = Ctx.result
+ if res:
+ Ctx.result = None
+ return res
@staticmethod
def is_busy():
@@ -129,40 +119,18 @@ class Ctx(object):
return Ctx.current_id
-def setup_flask(root_path):
- global socketio
- global app
+def setup_flask():
app = Flask(__name__)
- app.root_path = root_path
- socketio = SocketIO(app, async_mode='threading')
busy_json = result_json(STATUS_ERROR, 'there is already an NFVbench request running')
not_busy_json = result_json(STATUS_ERROR, 'no pending NFVbench run')
not_found_msg = 'results not found'
pending_msg = 'NFVbench run still pending'
- # --------- socketio requests ------------
-
- @socketio.on('start_run')
- def _socketio_start_run(config):
- if not Ctx.is_busy():
- Ctx.enqueue(config, get_uuid(), from_socketio=True)
- else:
- emit('error', {'reason': 'there is already an NFVbench request running'})
-
- @socketio.on('echo')
- def _socketio_echo(config):
- emit('echo', config)
-
# --------- HTTP requests ------------
- @app.route('/')
- def _index():
- return render_template('index.html')
-
- @app.route('/echo', methods=['GET'])
- def _echo():
- config = request.json
- return jsonify(config)
+ @app.route('/version', methods=['GET'])
+ def _version():
+ return __version__
@app.route('/start_run', methods=['POST'])
def _start_run():
@@ -189,35 +157,34 @@ def setup_flask(root_path):
return jsonify(res)
# result for given request_id not found
return jsonify(result_json(STATUS_NOT_FOUND, not_found_msg, request_id))
- else:
- if Ctx.is_busy():
- # task still pending, return with request_id
- return jsonify(result_json(STATUS_PENDING,
- pending_msg,
- Ctx.get_current_request_id()))
-
- res = Ctx.get_result()
- if res:
- return jsonify(res)
- return jsonify(not_busy_json)
+ if Ctx.is_busy():
+ # task still pending, return with request_id
+ return jsonify(result_json(STATUS_PENDING,
+ pending_msg,
+ Ctx.get_current_request_id()))
+ res = Ctx.get_result()
+ if res:
+ return jsonify(res)
+ return jsonify(not_busy_json)
-class WebSocketIoServer(object):
- """This class takes care of the web socketio server, accepts websocket events, and sends back
- notifications using websocket events (send_ methods). Caller should simply create an instance
+ return app
+
+class WebServer(object):
+ """This class takes care of the web server. Caller should simply create an instance
of this class and pass a runner object then invoke the run method
"""
- def __init__(self, http_root, runner, fluent_logger):
+ def __init__(self, runner, fluent_logger):
self.nfvbench_runner = runner
- setup_flask(http_root)
+ self.app = setup_flask()
self.fluent_logger = fluent_logger
- def run(self, host='127.0.0.1', port=7556):
+ def run(self, host, port):
- # socketio.run will not return so we need to run it in a background thread so that
+ # app.run will not return so we need to run it in a background thread so that
# the calling thread (main thread) can keep doing work
- socketio.start_background_task(target=socketio.run, app=app, host=host, port=port)
+ Thread(target=self.app.run, args=(host, port)).start()
# wait for run requests
# the runner must be executed from the main thread (Trex client library requirement)
@@ -229,7 +196,7 @@ class WebSocketIoServer(object):
# print config
try:
# remove unfilled values as we do not want them to override default values with None
- config = {k: v for k, v in config.items() if v is not None}
+ config = {k: v for k, v in list(config.items()) if v is not None}
with RunLock():
if self.fluent_logger:
self.fluent_logger.start_new_run()
@@ -238,14 +205,13 @@ class WebSocketIoServer(object):
results = result_json(STATUS_ERROR, str(exc))
LOG.exception('NFVbench runner exception:')
- if Ctx.request_from_socketio:
- socketio.emit('run_end', results)
- else:
- # this might overwrite a previously unfetched result
- Ctx.set_result(results)
+ # this might overwrite a previously unfetched result
+ Ctx.set_result(results)
try:
summary = NFVBenchSummarizer(results['result'], self.fluent_logger)
LOG.info(str(summary))
+ if 'json' in config and 'result' in results and results['status']:
+ self.nfvbench_runner.save(results['result'])
except KeyError:
# in case of error, 'result' might be missing
if 'error_message' in results:
@@ -255,13 +221,3 @@ class WebSocketIoServer(object):
Ctx.release()
if self.fluent_logger:
self.fluent_logger.send_run_summary(True)
-
- def send_interval_stats(self, time_ms, tx_pps, rx_pps, drop_pct):
- stats = {'time_ms': time_ms, 'tx_pps': tx_pps, 'rx_pps': rx_pps, 'drop_pct': drop_pct}
- socketio.emit('run_interval_stats', stats)
-
- def send_ndr_found(self, ndr_pps):
- socketio.emit('ndr_found', {'rate_pps': ndr_pps})
-
- def send_pdr_found(self, pdr_pps):
- socketio.emit('pdr_found', {'rate_pps': pdr_pps})
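With the socketio layer removed, clients talk to the server over plain HTTP. A client-side polling sketch follows; only /version and /start_run are visible in this patch, so the '/status' polling URL used below is an assumption, and host/port are the CLI defaults.

    # Client-side sketch; the '/status' URL is an assumption (not shown in this patch).
    import time
    import requests

    base = 'http://127.0.0.1:7555'            # default host/port from the CLI options
    print(requests.get(base + '/version').text)

    run = requests.post(base + '/start_run', json={'rate': 'ndr_pdr'}).json()
    print('request id:', run.get('request_id'))

    res = requests.get(base + '/status').json()         # assumed endpoint
    while res.get('status') == 'PENDING':
        time.sleep(10)
        res = requests.get(base + '/status').json()
    print(res)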
diff --git a/nfvbench/nfvbenchvm/nfvbenchvm.conf b/nfvbench/nfvbenchvm/nfvbenchvm.conf
index 3bc6ace..8f5e7e9 100644
--- a/nfvbench/nfvbenchvm/nfvbenchvm.conf
+++ b/nfvbench/nfvbenchvm/nfvbenchvm.conf
@@ -9,3 +9,8 @@ TG_NET1={tg_net1}
TG_NET2={tg_net2}
TG_GATEWAY1_IP={tg_gateway1_ip}
TG_GATEWAY2_IP={tg_gateway2_ip}
+VIF_MQ_SIZE={vif_mq_size}
+NUM_MBUFS={num_mbufs}
+INTF_MGMT_CIDR={intf_mgmt_cidr}
+INTF_MGMT_IP_GW={intf_mgmt_ip_gw}
+INTF_MAC_MGMT={intf_mac_mgmt} \ No newline at end of file
diff --git a/nfvbench/packet_analyzer.py b/nfvbench/packet_analyzer.py
deleted file mode 100644
index 5d72bc9..0000000
--- a/nfvbench/packet_analyzer.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from collections import OrderedDict
-from log import LOG
-
-
-class PacketAnalyzer(object):
- """Analyze packet drop counter in a chain"""
-
- def __init__(self):
- self.last_packet_count = 0
- self.chain = []
-
- def record(self, interface, traffic_type):
- """Records the counter of the next interface with the corresponding traffic type"""
- if interface.is_no_op():
- return
- packet_count = interface.get_packet_count(traffic_type)
- packet_drop_count = self.last_packet_count - packet_count
- path_data = OrderedDict()
- path_data['interface'] = interface.name
- path_data['device'] = interface.device
- path_data['packet_count'] = packet_count
-
- if self.chain:
- path_data['packet_drop_count'] = packet_drop_count
-
- self.chain.append(path_data)
- self.last_packet_count = packet_count
-
- def get_analysis(self):
- """Gets the analysis of packet drops"""
- transmitted_packets = self.chain[0]['packet_count']
-
- for (index, path_data) in enumerate(self.chain):
- LOG.info('[Packet Analyze] Interface: %s', path_data['interface'])
- LOG.info('[Packet Analyze] > Count: %d', path_data['packet_count'])
-
- if index:
- if transmitted_packets:
- self.chain[index]['packet_drop_percentage'] = \
- 100.0 * path_data['packet_drop_count'] / transmitted_packets
- else:
- self.chain[index]['packet_drop_percentage'] = float('nan')
- LOG.info('[Packet Analyze] > Packet Drops: %d',
- path_data['packet_drop_count'])
- LOG.info('[Packet Analyze] > Percentage: %s',
- path_data['packet_drop_percentage'])
-
- return self.chain
diff --git a/nfvbench/packet_stats.py b/nfvbench/packet_stats.py
new file mode 100644
index 0000000..d3ec78a
--- /dev/null
+++ b/nfvbench/packet_stats.py
@@ -0,0 +1,341 @@
+# Copyright 2018 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""Manage all classes related to counting packet stats.
+
+InterfaceStats counts RX/TX packet counters for one interface.
+PacketPathStats manages all InterfaceStats instances for a given chain.
+PacketPathStatsManager manages all packet path stats for all chains.
+"""
+
+import copy
+
+from hdrh.histogram import HdrHistogram
+from .traffic_gen.traffic_base import Latency
+
+class InterfaceStats(object):
+ """A class to hold the RX and TX counters for a virtual or physical interface.
+
+ An interface stats instance can represent a real interface (e.g. traffic gen port or
+ vhost interface) or can represent an aggegation of multiple interfaces when packets
+ are faned out (e.g. one vlan subinterface can fan out to multiple vhost interfaces
+ in the case of multi-chaining and when the network is shared across chains).
+ """
+
+ TX = 0
+ RX = 1
+
+ def __init__(self, name, device, shared=False):
+ """Create a new interface instance.
+
+ name: interface name specific to each chain (e.g. "trex port 0 chain 0")
+ device: on which device this interface resides (e.g. "trex server")
+ shared: if true this interface stats is shared across all chains
+ """
+ self.name = name
+ self.device = device
+ self.shared = shared
+ # RX and TX counters for this interface
+ # A None value can be set to mean that the data is not available
+ self.tx = 0
+ self.rx = 0
+ # This is a special field to hold an optional total rx count that is only
+        # used for column aggregation to compute a total interface stats
+        # Set to a non-zero value so it is picked up by add_if_stats for the rx total
+ self.rx_total = None
+
+ def get_packet_count(self, direction):
+ """Get packet count for given direction.
+
+ direction: InterfaceStats.TX or InterfaceStats.RX
+ """
+ return self.tx if direction == InterfaceStats.TX else self.rx
+
+ @staticmethod
+ def get_reverse_direction(direction):
+ """Get the reverse direction of a given direction.
+
+ direction: InterfaceStats.TX or InterfaceStats.RX
+        return: RX if TX given, or TX if RX given
+ """
+ return 1 - direction
+
+ @staticmethod
+ def get_direction_name(direction):
+ """Get the rdisplay name of a given direction.
+
+ direction: InterfaceStats.TX or InterfaceStats.RX
+ return: "TX" or "RX"
+ """
+ if direction == InterfaceStats.TX:
+ return 'TX'
+ return 'RX'
+
+ def add_if_stats(self, if_stats):
+ """Add another ifstats to this instance."""
+ def added_counter(old_value, new_value_to_add):
+ if new_value_to_add:
+ if old_value is None:
+ return new_value_to_add
+ return old_value + new_value_to_add
+ return old_value
+
+ self.tx = added_counter(self.tx, if_stats.tx)
+ self.rx = added_counter(self.rx, if_stats.rx)
+ # Add special rx total value if set
+ self.rx = added_counter(self.rx, if_stats.rx_total)
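+        # Minimal sketch (hypothetical names and counter values) of how per-chain stats
+        # fold into an aggregate using only the methods defined above:
+        #   agg = InterfaceStats('trex port 0', 'trex server')
+        #   agg.tx, agg.rx = 1000, 990
+        #   other = InterfaceStats('trex port 0 chain 1', 'trex server')
+        #   other.tx, other.rx = 1000, 980
+        #   agg.add_if_stats(other)   # agg.tx == 2000, agg.rx == 1970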
+
+ def update_stats(self, tx, rx, diff):
+ """Update stats for this interface.
+
+ tx: new TX packet count
+ rx: new RX packet count
+ diff: if True, perform a diff of new value with previous baselined value,
+ otherwise store the new value
+ """
+ if diff:
+ self.tx = tx - self.tx
+ self.rx = rx - self.rx
+ else:
+ self.tx = tx
+ self.rx = rx
+
+ def get_display_name(self, dir, name=None, aggregate=False):
+ """Get the name to use to display stats for this interface stats.
+
+ dir: direction InterfaceStats.TX or InterfaceStats.RX
+ name: override self.name
+ aggregate: true if this is for an aggregate of multiple chains
+ """
+ if name is None:
+ name = self.name
+ return self.device + '.' + InterfaceStats.get_direction_name(dir) + '.' + name
+
+
+class PacketPathStats(object):
+ """Manage the packet path stats for 1 chain in both directions.
+
+ A packet path stats instance manages an ordered list of InterfaceStats objects
+ that can be traversed in the forward and reverse direction to display packet
+ counters in each direction.
+ The requirement is that RX and TX counters must always alternate as we travel
+ along one direction. For example with 4 interfaces per chain:
+ [ifstat0, ifstat1, ifstat2, ifstat3]
+ Packet counters in the forward direction are:
+ [ifstat0.TX, ifstat1.RX, ifstat2.TX, ifstat3.RX]
+ Packet counters in the reverse direction are:
+ [ifstat3.TX, ifstat2.RX, ifstat1.TX, ifstat0.RX]
+
+ A packet path stats also carries the latency data for each direction of the
+ chain.
+ """
+
+ def __init__(self, config, if_stats, aggregate=False):
+ """Create a packet path stats intance with the list of associated if stats.
+
+ if_stats: a list of interface stats that compose this packet path stats
+ aggregate: True if this is an aggregate packet path stats
+
+        Aggregate packet path stats are the only ones that should show counters for shared
+        interface stats.
+ """
+ self.config = config
+ self.if_stats = if_stats
+ # latency for packets sent from port 0 and 1
+ self.latencies = [Latency(), Latency()]
+ self.aggregate = aggregate
+
+
+ def add_packet_path_stats(self, pps):
+ """Add another packet path stat to this instance.
+
+ pps: the other packet path stats to add to this instance
+
+ This is used only for aggregating/collapsing multiple pps into 1
+ to form a "total" pps
+ """
+ for index, ifstats in enumerate(self.if_stats):
+ # shared interface stats must not be self added
+ if not ifstats.shared:
+ ifstats.add_if_stats(pps.if_stats[index])
+
+ @staticmethod
+ def get_agg_packet_path_stats(config, pps_list):
+ """Get the aggregated packet path stats from a list of packet path stats.
+
+ Interface counters are added, latency stats are updated.
+ """
+ agg_pps = None
+ for pps in pps_list:
+ if agg_pps is None:
+ # Get a clone of the first in the list
+ agg_pps = PacketPathStats(config, pps.get_cloned_if_stats(), aggregate=True)
+ else:
+ agg_pps.add_packet_path_stats(pps)
+ # aggregate all latencies
+ agg_pps.latencies = [Latency([pps.latencies[port] for pps in pps_list])
+ for port in [0, 1]]
+ return agg_pps
+
+ def get_if_stats(self, reverse=False):
+ """Get interface stats for given direction.
+
+ reverse: if True, get the list of interface stats in the reverse direction
+                 else (default) gets the list in the forward direction.
+ return: the list of interface stats indexed by the chain index
+ """
+ return self.if_stats[::-1] if reverse else self.if_stats
+
+ def get_cloned_if_stats(self):
+ """Get a clone copy of the interface stats list."""
+ return [copy.copy(ifstat) for ifstat in self.if_stats]
+
+
+ def get_header_labels(self, reverse=False, aggregate=False):
+ """Get the list of header labels for this packet path stats."""
+ labels = []
+ dir = InterfaceStats.TX
+ for ifstat in self.get_if_stats(reverse):
+ # starts at TX then RX then TX again etc...
+ labels.append(ifstat.get_display_name(dir, aggregate=aggregate))
+ dir = InterfaceStats.get_reverse_direction(dir)
+ return labels
+
+ def get_stats(self, reverse=False):
+ """Get the list of packet counters and latency data for this packet path stats.
+
+ return: a dict of packet counters and latency stats
+
+ {'packets': [2000054, 1999996, 1999996],
+ 'min_usec': 10, 'max_usec': 187, 'avg_usec': 45},
+ """
+ counters = []
+ dir = InterfaceStats.TX
+ for ifstat in self.get_if_stats(reverse):
+ # starts at TX then RX then TX again etc...
+ if ifstat.shared and not self.aggregate:
+                # shared if stats counters are only shown in aggregate pps
+ counters.append('')
+ else:
+ counters.append(ifstat.get_packet_count(dir))
+ dir = InterfaceStats.get_reverse_direction(dir)
+
+ # latency: use port 0 latency for forward, port 1 latency for reverse
+ latency = self.latencies[1] if reverse else self.latencies[0]
+
+ if latency.available():
+ results = {'lat_min_usec': latency.min_usec,
+ 'lat_max_usec': latency.max_usec,
+ 'lat_avg_usec': latency.avg_usec}
+ if latency.hdrh_available():
+ results['hdrh'] = latency.hdrh
+ decoded_histogram = HdrHistogram.decode(latency.hdrh)
+ results['lat_percentile'] = {}
+ # override min max and avg from hdrh (only if histogram is valid)
+ if decoded_histogram.get_total_count() != 0:
+ results['lat_min_usec'] = decoded_histogram.get_min_value()
+ results['lat_max_usec'] = decoded_histogram.get_max_value()
+ results['lat_avg_usec'] = decoded_histogram.get_mean_value()
+ for percentile in self.config.lat_percentiles:
+ results['lat_percentile'][percentile] = decoded_histogram.\
+ get_value_at_percentile(percentile)
+ else:
+ for percentile in self.config.lat_percentiles:
+ results['lat_percentile'][percentile] = 'n/a'
+ else:
+ results = {}
+ results['packets'] = counters
+ return results
+
+
+class PacketPathStatsManager(object):
+ """Manages all the packet path stats for all chains.
+
+ Each run will generate packet path stats for 1 or more chains.
+ """
+
+ def __init__(self, config, pps_list):
+ """Create a packet path stats intance with the list of associated if stats.
+
+ pps_list: a list of packet path stats indexed by the chain id.
+ All packet path stats must have the same length.
+ """
+ self.config = config
+ self.pps_list = pps_list
+
+ def insert_pps_list(self, chain_index, if_stats):
+ """Insert a list of interface stats for given chain right after the first in the list.
+
+ chain_index: index of chain where to insert
+ if_stats: list of interface stats to insert
+ """
+ # use slicing to insert the list
+ self.pps_list[chain_index].if_stats[1:1] = if_stats
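+        # Illustration of the slice insert (hypothetical items): with if_stats == [b, c],
+        # "[a, d][1:1] = [b, c]" turns [a, d] into [a, b, c, d]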
+
+ def _get_if_agg_name(self, reverse):
+ """Get the aggegated name for all interface stats across all pps.
+
+ return: a list of aggregated names for each position of the chain for all chains
+
+        The aggregated name is the interface stats name if there is only 1 chain.
+ Otherwise it is the common prefix for all interface stats names at same position in the
+ chain.
+ """
+ # if there is only one chain, use the if_stats names directly
+ return self.pps_list[0].get_header_labels(reverse, aggregate=(len(self.pps_list) > 1))
+
+ def _get_results(self, reverse=False):
+ """Get the digested stats for the forward or reverse directions.
+
+ return: a dict with all the labels, total and per chain counters
+ """
+ chains = {}
+ # insert the aggregated row if applicable
+ if len(self.pps_list) > 1:
+ agg_pps = PacketPathStats.get_agg_packet_path_stats(self.config, self.pps_list)
+ chains['total'] = agg_pps.get_stats(reverse)
+
+ for index, pps in enumerate(self.pps_list):
+ chains[str(index)] = pps.get_stats(reverse)
+ return {'interfaces': self._get_if_agg_name(reverse),
+ 'chains': chains}
+
+ def get_results(self):
+ """Get the digested stats for the forward and reverse directions.
+
+ return: a dictionary of results for each direction and each chain
+
+ Example:
+
+ {
+ 'Forward': {
+ 'interfaces': ['Port0', 'vhost0', 'Port1'],
+ 'chains': {
+ '0': {'packets': [2000054, 1999996, 1999996],
+ 'min_usec': 10,
+ 'max_usec': 187,
+ 'avg_usec': 45},
+ '1': {...},
+ 'total': {...}
+ }
+ },
+ 'Reverse': {...
+ }
+ }
+
+ """
+ results = {'Forward': self._get_results(),
+ 'Reverse': self._get_results(reverse=True)}
+ return results
diff --git a/nfvbench/service_chain.py b/nfvbench/service_chain.py
deleted file mode 100644
index 7ec1511..0000000
--- a/nfvbench/service_chain.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from collections import OrderedDict
-import time
-
-from chain_managers import StageManager
-from log import LOG
-from specs import ChainType
-
-
-class ServiceChain(object):
-
- def __init__(self, config, clients, cred, specs, factory, notifier=None):
- self.config = config
- self.clients = clients
- self.cred = cred
- self.specs = specs
- self.factory = factory
- self.notifier = notifier
- self.chain_name = self.config.service_chain
- self.vlans = None
- self.stage_manager = None
- self.stats_manager = None
- LOG.info('ServiceChain initialized.')
-
- def __set_helpers(self):
- self.stage_manager = StageManager(self.config, self.cred, self.factory)
- self.clients['vm'] = self.stage_manager
- self.vlans = self.stage_manager.get_vlans()
-
- STATS_CLASS = self.factory.get_stats_class(self.config.service_chain)
- self.stats_manager = STATS_CLASS(self.config,
- self.clients,
- self.specs,
- self.factory,
- self.vlans,
- self.notifier)
-
- def __set_vlan_tags(self):
- if self.config.vlan_tagging:
- # override with user-specified vlans if configured
- vlans = self.config.vlans if self.config.vlans else self.vlans[:2]
- for vlan, device in zip(vlans, self.config.generator_config.devices):
- self.stats_manager.set_vlan_tag(device, vlan)
-
- def __get_result_per_frame_size(self, frame_size, actual_frame_size, bidirectional):
- start_time = time.time()
- traffic_result = {
- frame_size: {}
- }
- result = {}
- if not self.config.no_traffic:
- self.clients['traffic'].set_traffic(actual_frame_size, bidirectional)
-
- if self.config.single_run:
- result = self.stats_manager.run()
- else:
- results = self.clients['traffic'].get_ndr_and_pdr()
-
- for dr in ['pdr', 'ndr']:
- if dr in results:
- if frame_size != actual_frame_size:
- results[dr]['l2frame_size'] = frame_size
- results[dr]['actual_l2frame_size'] = actual_frame_size
- traffic_result[frame_size][dr] = results[dr]
- if 'warning' in results[dr]['stats'] and results[dr]['stats']['warning']:
- traffic_result['warning'] = results[dr]['stats']['warning']
- traffic_result[frame_size]['iteration_stats'] = results['iteration_stats']
-
- result['analysis_duration_sec'] = time.time() - start_time
- if self.config.single_run:
- result['run_config'] = self.clients['traffic'].get_run_config(result)
- required = result['run_config']['direction-total']['orig']['rate_pps']
- actual = result['stats']['total_tx_rate']
- if frame_size != actual_frame_size:
- result['actual_l2frame_size'] = actual_frame_size
- warning = self.clients['traffic'].compare_tx_rates(required, actual)
- if warning is not None:
- result['run_config']['warning'] = warning
-
- traffic_result[frame_size].update(result)
- return traffic_result
-
- def __get_chain_result(self):
- result = OrderedDict()
- for fs, actual_fs in zip(self.config.frame_sizes, self.config.actual_frame_sizes):
- result.update(self.__get_result_per_frame_size(fs,
- actual_fs,
- self.config.traffic.bidirectional))
-
- chain_result = {
- 'flow_count': self.config.flow_count,
- 'service_chain_count': self.config.service_chain_count,
- 'bidirectional': self.config.traffic.bidirectional,
- 'profile': self.config.traffic.profile,
- 'compute_nodes': self.stats_manager.get_compute_nodes_bios(),
- 'result': result
- }
-
- return chain_result
-
- def __setup_traffic(self):
- self.clients['traffic'].setup()
- if not self.config.no_traffic:
- if self.config.service_chain == ChainType.EXT and not self.config.no_arp:
- self.clients['traffic'].ensure_arp_successful()
- self.clients['traffic'].ensure_end_to_end()
-
- def run(self):
- LOG.info('Starting %s chain...', self.chain_name)
- LOG.info('Dry run: %s', self.config.no_traffic)
- results = {}
-
- self.__set_helpers()
- self.__set_vlan_tags()
- self.stage_manager.set_vm_macs()
- self.__setup_traffic()
- results[self.chain_name] = {'result': self.__get_chain_result()}
-
- if self.config.service_chain == ChainType.PVVP:
- results[self.chain_name]['mode'] = 'inter-node' \
- if self.config.inter_node else 'intra-node'
-
- LOG.info("Service chain '%s' run completed.", self.chain_name)
- return results
-
- def get_version(self):
- return self.stats_manager.get_version()
-
- def close(self):
- if self.stage_manager:
- self.stage_manager.close()
- if self.stats_manager:
- self.stats_manager.close()
diff --git a/nfvbench/specs.py b/nfvbench/specs.py
index a84a55f..ec5e24e 100644
--- a/nfvbench/specs.py
+++ b/nfvbench/specs.py
@@ -17,11 +17,14 @@
class Encaps(object):
VLAN = "VLAN"
VxLAN = "VxLAN"
- BASIC = "BASIC"
+ MPLS = "MPLS"
+ NO_ENCAPS = "NONE"
encaps_mapping = {
'VLAN': VLAN,
- 'VXLAN': VxLAN
+ 'VXLAN': VxLAN,
+ 'MPLS': MPLS,
+ 'NONE': NO_ENCAPS
}
@classmethod
@@ -33,22 +36,13 @@ class ChainType(object):
PVP = "PVP"
PVVP = "PVVP"
EXT = "EXT"
-
- chain_mapping = {
- 'PVP': PVP,
- 'PVVP': PVVP,
- 'EXT': EXT
- }
-
- @classmethod
- def get_chain_type(cls, chain):
- return cls.chain_mapping.get(chain.upper(), None)
+ names = [EXT, PVP, PVVP]
class OpenStackSpec(object):
def __init__(self):
self.__vswitch = "BASIC"
- self.__encaps = Encaps.BASIC
+ self.__encaps = Encaps.NO_ENCAPS
@property
def vswitch(self):
diff --git a/nfvbench/stats_collector.py b/nfvbench/stats_collector.py
index 964d704..dc750db 100644
--- a/nfvbench/stats_collector.py
+++ b/nfvbench/stats_collector.py
@@ -56,9 +56,7 @@ class IntervalCollector(StatsCollector):
self.notifier = notifier
def add(self, stats):
- if self.notifier:
- current_stats = self.__compute_tx_rx_diff(stats)
- self.notifier.send_interval_stats(**current_stats)
+ pass
def reset(self):
# don't reset time!
@@ -66,52 +64,7 @@ class IntervalCollector(StatsCollector):
self.last_tx_pkts = 0
def add_ndr_pdr(self, tag, stats):
- if self.notifier:
-
- current_time = self._get_current_time_diff()
- rx_pps = self._get_rx_pps(stats['tx_pps'], stats['drop_percentage'])
-
- self.last_tx_pkts = stats['tx_pps'] / 1000 * (current_time - self.last_time)
- self.last_rx_pkts = rx_pps / 1000 * (current_time - self.last_time)
- self.last_time = current_time
-
- # 'drop_pct' key is an unfortunate name, since in iteration stats it means
- # number of the packets. More suitable would be 'drop_percentage'.
- # FDS frontend depends on this key
- current_stats = {
- '{}_pps'.format(tag): stats['tx_pps'],
- 'tx_pps': stats['tx_pps'],
- 'rx_pps': rx_pps,
- 'drop_pct': stats['drop_percentage'],
- 'time_ms': current_time
- }
-
- self.notifier.send_interval_stats(time_ms=current_stats['time_ms'],
- tx_pps=current_stats['tx_pps'],
- rx_pps=current_stats['rx_pps'],
- drop_pct=current_stats['drop_pct'])
- if tag == 'ndr':
- self.notifier.send_ndr_found(stats['tx_pps'])
- else:
- self.notifier.send_pdr_found(stats['tx_pps'])
-
- def __compute_tx_rx_diff(self, stats):
- current_time = self._get_current_time_diff()
- tx_diff = stats['overall']['tx']['total_pkts'] - self.last_tx_pkts
- tx_pps = (tx_diff * 1000) / (current_time - self.last_time)
- rx_diff = stats['overall']['rx']['total_pkts'] - self.last_rx_pkts
- rx_pps = (rx_diff * 1000) / (current_time - self.last_time)
-
- self.last_rx_pkts = stats['overall']['rx']['total_pkts']
- self.last_tx_pkts = stats['overall']['tx']['total_pkts']
- self.last_time = current_time
-
- return {
- 'tx_pps': tx_pps,
- 'rx_pps': rx_pps,
- 'drop_pct': max(0.0, (1 - (float(rx_pps) / tx_pps)) * 100),
- 'time_ms': current_time
- }
+ pass
class IterationCollector(StatsCollector):
diff --git a/nfvbench/stats_manager.py b/nfvbench/stats_manager.py
new file mode 100644
index 0000000..6fa98bd
--- /dev/null
+++ b/nfvbench/stats_manager.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# Copyright 2016 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+import time
+
+from .log import LOG
+from .packet_stats import PacketPathStatsManager
+from .stats_collector import IntervalCollector
+
+
+class StatsManager(object):
+ """A class to collect detailed stats and handle fixed rate runs for all chain types."""
+
+ def __init__(self, chain_runner):
+ self.chain_runner = chain_runner
+ self.config = chain_runner.config
+ self.traffic_client = chain_runner.traffic_client
+ self.specs = chain_runner.specs
+ self.notifier = chain_runner.notifier
+ self.interval_collector = None
+ self.factory = chain_runner.factory
+ # create a packet path stats manager for fixed rate runs only
+ if self.config.single_run:
+ pps_list = []
+ self.traffic_client.insert_interface_stats(pps_list)
+ self.pps_mgr = PacketPathStatsManager(self.config, pps_list)
+ else:
+ self.pps_mgr = None
+ self.worker = None
+
+ def create_worker(self):
+ """Create a worker to fetch custom data.
+
+ This is done late as we need to know the dest MAC for all VNFs, which can happen
+ as late as after ARP discovery.
+ """
+ if not self.worker and self.specs.openstack:
+ WORKER_CLASS = self.factory.get_chain_worker(self.specs.openstack.encaps,
+ self.config.service_chain)
+ self.worker = WORKER_CLASS(self)
+
+ def _generate_traffic(self):
+ if self.config.no_traffic:
+ return {}
+
+ self.interval_collector = IntervalCollector(time.time())
+ self.interval_collector.attach_notifier(self.notifier)
+ LOG.info('Starting to generate traffic...')
+ stats = {}
+ for stats in self.traffic_client.run_traffic():
+ self.interval_collector.add(stats)
+
+ LOG.info('...traffic generating ended.')
+ return stats
+
+ def get_stats(self):
+ return self.interval_collector.get() if self.interval_collector else []
+
+ def get_version(self):
+ return self.worker.get_version() if self.worker else {}
+
+ def _update_interface_stats(self, diff=False):
+ """Update interface stats for both the traffic generator and the worker."""
+ self.traffic_client.update_interface_stats(diff)
+ if self.worker:
+ self.worker.update_interface_stats(diff)
+
+ def run_fixed_rate(self):
+ """Run a fixed rate and analyze results."""
+ # Baseline the packet path stats
+ self._update_interface_stats()
+
+ in_flight_stats = self._generate_traffic()
+ result = {
+ 'stats': in_flight_stats
+ }
+        # New analysis code with packet path stats:
+        # diff all interface stats against the baseline and return the packet path analysis
+ self._update_interface_stats(diff=True)
+ result['packet_path_stats'] = self.pps_mgr.get_results()
+ return result
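+        # Usage sketch (hypothetical caller, not part of this module): the chain runner is
+        # expected to drive this roughly as
+        #   stats_manager = StatsManager(chain_runner)
+        #   result = stats_manager.run_fixed_rate()   # pps_mgr only exists when config.single_run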
+
+ def get_compute_nodes_bios(self):
+ return self.worker.get_compute_nodes_bios() if self.worker else {}
+
+ def close(self):
+ if self.worker:
+ self.worker.close()
diff --git a/nfvbench/summarizer.py b/nfvbench/summarizer.py
index b27ed6f..7c69f52 100644
--- a/nfvbench/summarizer.py
+++ b/nfvbench/summarizer.py
@@ -22,11 +22,64 @@ import bitmath
import pytz
from tabulate import tabulate
-from specs import ChainType
-
+def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
+ """Transform a plain chain stats into an annotated one.
+
+ Example:
+ {
+ 0: {'packets': [2000054, 1999996, 1999996, 1999996],
+ 'lat_min_usec': 10,
+ 'lat_max_usec': 187,
+ 'lat_avg_usec': 45},
+ 1: {...},
+ 'total': {...}
+ }
+ should become:
+ {
+        0: {'packets': [2000054, '-58 (-0.0029%)', '=>', 1999996],
+ 'lat_min_usec': 10,
+ 'lat_max_usec': 187,
+ 'lat_avg_usec': 45},
+ 1: {...},
+ 'total': {...}
+ }
+
+ In the case of shared net, some columns in packets array can have ''.
+    Some columns can also be None which means the data is not available.
+ """
+ for stats in list(chain_stats.values()):
+ packets = stats['packets']
+ count = len(packets)
+ if count > 1:
+ # keep the first counter
+ annotated_packets = [packets[0]]
+ # modify all remaining counters
+ prev_count = packets[0]
+ for index in range(1, count):
+ cur_count = packets[index]
+ if cur_count == '':
+ # an empty string indicates an unknown counter for a shared interface
+ # do not annotate those
+ annotated_value = ''
+ elif cur_count is None:
+ # Not available
+ annotated_value = 'n/a'
+ else:
+ drop = cur_count - prev_count
+ if drop:
+ dr = (drop * 100.0) / prev_count if prev_count else 0
+ annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
+ else:
+ # no drop
+ # if last column we display the value
+ annotated_value = cur_count if index == count - 1 else nodrop_marker
+ prev_count = cur_count
+ annotated_packets.append(annotated_value)
+
+ stats['packets'] = annotated_packets
class Formatter(object):
- """Collection of string formatter methods"""
+ """Collection of string formatter methods."""
@staticmethod
def fixed(data):
@@ -44,7 +97,7 @@ class Formatter(object):
def standard(data):
if isinstance(data, int):
return Formatter.int(data)
- elif isinstance(data, float):
+ if isinstance(data, float):
return Formatter.float(4)(data)
return Formatter.fixed(data)
@@ -77,16 +130,16 @@ class Formatter(object):
def percentage(data):
if data is None:
return ''
- elif math.isnan(data):
+ if math.isnan(data):
return '-'
return Formatter.suffix('%')(Formatter.float(4)(data))
class Table(object):
- """ASCII readable table class"""
+ """ASCII readable table class."""
def __init__(self, header):
- header_row, self.formatters = zip(*header)
+ header_row, self.formatters = list(zip(*header))
self.data = [header_row]
self.columns = len(header_row)
@@ -108,7 +161,7 @@ class Table(object):
class Summarizer(object):
- """Generic summarizer class"""
+ """Generic summarizer class."""
indent_per_level = 2
@@ -142,7 +195,7 @@ class Summarizer(object):
def _put_dict(self, data):
with self._create_block(False):
- for key, value in data.iteritems():
+ for key, value in list(data.items()):
if isinstance(value, dict):
self._put(key + ':')
self._put_dict(value)
@@ -164,58 +217,60 @@ class Summarizer(object):
class NFVBenchSummarizer(Summarizer):
- """Summarize nfvbench json result"""
-
- ndr_pdr_header = [
- ('-', Formatter.fixed),
- ('L2 Frame Size', Formatter.standard),
- ('Rate (fwd+rev)', Formatter.bits),
- ('Rate (fwd+rev)', Formatter.suffix(' pps')),
- ('Avg Drop Rate', Formatter.suffix('%')),
- ('Avg Latency (usec)', Formatter.standard),
- ('Min Latency (usec)', Formatter.standard),
- ('Max Latency (usec)', Formatter.standard)
- ]
-
- single_run_header = [
- ('L2 Frame Size', Formatter.standard),
- ('Drop Rate', Formatter.suffix('%')),
- ('Avg Latency (usec)', Formatter.standard),
- ('Min Latency (usec)', Formatter.standard),
- ('Max Latency (usec)', Formatter.standard)
- ]
-
- config_header = [
- ('Direction', Formatter.standard),
- ('Requested TX Rate (bps)', Formatter.bits),
- ('Actual TX Rate (bps)', Formatter.bits),
- ('RX Rate (bps)', Formatter.bits),
- ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
- ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
- ('RX Rate (pps)', Formatter.suffix(' pps'))
- ]
-
- chain_analysis_header = [
- ('Interface', Formatter.standard),
- ('Device', Formatter.standard),
- ('Packets (fwd)', Formatter.standard),
- ('Drops (fwd)', Formatter.standard),
- ('Drop% (fwd)', Formatter.percentage),
- ('Packets (rev)', Formatter.standard),
- ('Drops (rev)', Formatter.standard),
- ('Drop% (rev)', Formatter.percentage)
- ]
+ """Summarize nfvbench json result."""
direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
direction_names = ['Forward', 'Reverse', 'Total']
def __init__(self, result, sender):
+ """Create a summarizer instance."""
Summarizer.__init__(self)
self.result = result
self.config = self.result['config']
self.record_header = None
self.record_data = None
self.sender = sender
+
+ self.ndr_pdr_header = [
+ ('-', Formatter.fixed),
+ ('L2 Frame Size', Formatter.standard),
+ ('Rate (fwd+rev)', Formatter.bits),
+ ('Rate (fwd+rev)', Formatter.suffix(' pps')),
+ ('Avg Drop Rate', Formatter.suffix('%')),
+ ('Avg Latency (usec)', Formatter.standard),
+ ('Min Latency (usec)', Formatter.standard),
+ ('Max Latency (usec)', Formatter.standard)
+ ]
+
+ self.single_run_header = [
+ ('L2 Frame Size', Formatter.standard),
+ ('Drop Rate', Formatter.suffix('%')),
+ ('Avg Latency (usec)', Formatter.standard),
+ ('Min Latency (usec)', Formatter.standard),
+ ('Max Latency (usec)', Formatter.standard)
+ ]
+
+ self.config_header = [
+ ('Direction', Formatter.standard),
+ ('Requested TX Rate (bps)', Formatter.bits),
+ ('Actual TX Rate (bps)', Formatter.bits),
+ ('RX Rate (bps)', Formatter.bits),
+ ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
+ ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
+ ('RX Rate (pps)', Formatter.suffix(' pps'))
+ ]
+
+ # add percentiles headers if hdrh enabled
+ if not self.config.disable_hdrh:
+ for percentile in self.config.lat_percentiles:
+ # 'append' expects a single parameter => double parentheses
+ self.ndr_pdr_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
+ self.single_run_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
+
+ if self.config.periodic_gratuitous_arp:
+ self.direction_keys.insert(2, 'garp-direction-total')
+ self.direction_names.insert(2, 'Gratuitous ARP')
+
# if sender is available initialize record
if self.sender:
self.__record_init()
@@ -247,17 +302,14 @@ class NFVBenchSummarizer(Summarizer):
self._put('Components:')
with self._create_block():
- self._put('TOR:')
- with self._create_block(False):
- self._put('Type:', self.config['tor']['type'])
self._put('Traffic Generator:')
with self._create_block(False):
- self._put('Profile:', self.config['generator_config']['name'])
- self._put('Tool:', self.config['generator_config']['tool'])
+ self._put('Profile:', self.config['tg-name'])
+ self._put('Tool:', self.config['tg-tool'])
if network_benchmark['versions']:
self._put('Versions:')
with self._create_block():
- for component, version in network_benchmark['versions'].iteritems():
+ for component, version in list(network_benchmark['versions'].items()):
self._put(component + ':', version)
if self.config['ndr_run'] or self.config['pdr_run']:
@@ -268,15 +320,12 @@ class NFVBenchSummarizer(Summarizer):
if self.config['pdr_run']:
self._put('PDR:', self.config['measurement']['PDR'])
self._put('Service chain:')
- for result in network_benchmark['service_chain'].iteritems():
+ for result in list(network_benchmark['service_chain'].items()):
with self._create_block():
self.__chain_summarize(*result)
def __chain_summarize(self, chain_name, chain_benchmark):
self._put(chain_name + ':')
- if chain_name == ChainType.PVVP:
- self._put('Mode:', chain_benchmark.get('mode'))
- chain_name += "-" + chain_benchmark.get('mode')
self.__record_header_put('service_chain', chain_name)
with self._create_block():
self._put('Traffic:')
@@ -288,13 +337,13 @@ class NFVBenchSummarizer(Summarizer):
self._put('Bidirectional:', traffic_benchmark['bidirectional'])
self._put('Flow count:', traffic_benchmark['flow_count'])
self._put('Service chains count:', traffic_benchmark['service_chain_count'])
- self._put('Compute nodes:', traffic_benchmark['compute_nodes'].keys())
+ self._put('Compute nodes:', list(traffic_benchmark['compute_nodes'].keys()))
self.__record_header_put('profile', traffic_benchmark['profile'])
self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
- self.__record_header_put('compute_nodes', traffic_benchmark['compute_nodes'].keys())
+ self.__record_header_put('compute_nodes', list(traffic_benchmark['compute_nodes'].keys()))
with self._create_block(False):
self._put()
if not self.config['no_traffic']:
@@ -308,7 +357,7 @@ class NFVBenchSummarizer(Summarizer):
except KeyError:
pass
- for entry in traffic_benchmark['result'].iteritems():
+ for entry in list(traffic_benchmark['result'].items()):
if 'warning' in entry:
continue
self.__chain_analysis_summarize(*entry)
@@ -317,17 +366,6 @@ class NFVBenchSummarizer(Summarizer):
def __chain_analysis_summarize(self, frame_size, analysis):
self._put()
self._put('L2 frame size:', frame_size)
- if 'actual_l2frame_size' in analysis:
- self._put('Actual l2 frame size:', analysis['actual_l2frame_size'])
- elif self.config['ndr_run'] and 'actual_l2frame_size' in analysis['ndr']:
- self._put('Actual l2 frame size:', analysis['ndr']['actual_l2frame_size'])
- elif self.config['pdr_run'] and 'actual_l2frame_size' in analysis['pdr']:
- self._put('Actual l2 frame size:', analysis['pdr']['actual_l2frame_size'])
- if 'analysis_duration_sec' in analysis:
- self._put('Chain analysis duration:',
- Formatter.float(3)(analysis['analysis_duration_sec']), 'seconds')
- self.__record_data_put(frame_size, {'chain_analysis_duration': Formatter.float(3)(
- analysis['analysis_duration_sec'])})
if self.config['ndr_run']:
self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
'seconds')
@@ -350,12 +388,13 @@ class NFVBenchSummarizer(Summarizer):
self._put(analysis['run_config']['warning'])
self._put()
- if 'packet_analysis' in analysis:
- self._put('Chain Analysis:')
- self._put()
- with self._create_block(False):
- self._put_table(self.__get_chain_analysis_table(analysis['packet_analysis']))
+ if 'packet_path_stats' in analysis:
+ for dir in ['Forward', 'Reverse']:
+ self._put(dir + ' Chain Packet Counters and Latency:')
self._put()
+ with self._create_block(False):
+ self._put_table(self._get_chain_table(analysis['packet_path_stats'][dir]))
+ self._put()
def __get_summary_table(self, traffic_result):
if self.config['single_run']:
@@ -364,10 +403,11 @@ class NFVBenchSummarizer(Summarizer):
summary_table = Table(self.ndr_pdr_header)
if self.config['ndr_run']:
- for frame_size, analysis in traffic_result.iteritems():
+ for frame_size, analysis in list(traffic_result.items()):
if frame_size == 'warning':
continue
- summary_table.add_row([
+
+ row_data = [
'NDR',
frame_size,
analysis['ndr']['rate_bps'],
@@ -376,21 +416,34 @@ class NFVBenchSummarizer(Summarizer):
analysis['ndr']['stats']['overall']['avg_delay_usec'],
analysis['ndr']['stats']['overall']['min_delay_usec'],
analysis['ndr']['stats']['overall']['max_delay_usec']
- ])
- self.__record_data_put(frame_size, {'ndr': {
+ ]
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['ndr']['stats']['overall']['lat_percentile'], row_data)
+ summary_table.add_row(row_data)
+
+ ndr_data = {
'type': 'NDR',
'rate_bps': analysis['ndr']['rate_bps'],
'rate_pps': analysis['ndr']['rate_pps'],
+ 'offered_tx_rate_bps': analysis['ndr']['stats']['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': analysis['ndr']['stats']['theoretical_tx_rate_pps'],
+ 'theoretical_tx_rate_bps': analysis['ndr']['stats']['theoretical_tx_rate_bps'],
'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
- }})
+ }
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['ndr']['stats']['overall']['lat_percentile'], ndr_data, True)
+ self.__record_data_put(frame_size, {'ndr': ndr_data})
if self.config['pdr_run']:
- for frame_size, analysis in traffic_result.iteritems():
+ for frame_size, analysis in list(traffic_result.items()):
if frame_size == 'warning':
continue
- summary_table.add_row([
+
+ row_data = [
'PDR',
frame_size,
analysis['pdr']['rate_bps'],
@@ -399,34 +452,73 @@ class NFVBenchSummarizer(Summarizer):
analysis['pdr']['stats']['overall']['avg_delay_usec'],
analysis['pdr']['stats']['overall']['min_delay_usec'],
analysis['pdr']['stats']['overall']['max_delay_usec']
- ])
- self.__record_data_put(frame_size, {'pdr': {
+ ]
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['pdr']['stats']['overall']['lat_percentile'], row_data)
+ summary_table.add_row(row_data)
+
+ pdr_data = {
'type': 'PDR',
'rate_bps': analysis['pdr']['rate_bps'],
'rate_pps': analysis['pdr']['rate_pps'],
+ 'offered_tx_rate_bps': analysis['pdr']['stats']['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': analysis['pdr']['stats']['theoretical_tx_rate_pps'],
+ 'theoretical_tx_rate_bps': analysis['pdr']['stats']['theoretical_tx_rate_bps'],
'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
- }})
+ }
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['pdr']['stats']['overall']['lat_percentile'], pdr_data, True)
+ self.__record_data_put(frame_size, {'pdr': pdr_data})
if self.config['single_run']:
- for frame_size, analysis in traffic_result.iteritems():
- summary_table.add_row([
+ for frame_size, analysis in list(traffic_result.items()):
+ row_data = [
frame_size,
analysis['stats']['overall']['drop_rate_percent'],
analysis['stats']['overall']['rx']['avg_delay_usec'],
analysis['stats']['overall']['rx']['min_delay_usec'],
analysis['stats']['overall']['rx']['max_delay_usec']
- ])
- self.__record_data_put(frame_size, {'single_run': {
+ ]
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['stats']['overall']['rx']['lat_percentile'], row_data)
+ summary_table.add_row(row_data)
+
+ single_run_data = {
'type': 'single_run',
+ 'offered_tx_rate_bps': analysis['stats']['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': analysis['stats']['theoretical_tx_rate_pps'],
+ 'theoretical_tx_rate_bps': analysis['stats']['theoretical_tx_rate_bps'],
'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
- }})
+ }
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['stats']['overall']['rx']['lat_percentile'], single_run_data, True)
+ self.__record_data_put(frame_size, {'single_run': single_run_data})
return summary_table
+ def extract_hdrh_percentiles(self, lat_percentile, data, add_key=False):
+ if add_key:
+ data['lat_percentile'] = {}
+ for percentile in self.config.lat_percentiles:
+ if add_key:
+ try:
+ data['lat_percentile_' + str(percentile)] = lat_percentile[percentile]
+ except TypeError:
+ data['lat_percentile_' + str(percentile)] = "n/a"
+ else:
+ try:
+ data.append(lat_percentile[percentile])
+ except TypeError:
+ data.append("n/a")
+
def __get_config_table(self, run_config, frame_size):
config_table = Table(self.config_header)
for key, name in zip(self.direction_keys, self.direction_names):
@@ -452,23 +544,64 @@ class NFVBenchSummarizer(Summarizer):
})
return config_table
- def __get_chain_analysis_table(self, packet_analysis):
- chain_analysis_table = Table(self.chain_analysis_header)
- forward_analysis = packet_analysis['direction-forward']
- reverse_analysis = packet_analysis['direction-reverse']
- reverse_analysis.reverse()
- for fwd, rev in zip(forward_analysis, reverse_analysis):
- chain_analysis_table.add_row([
- fwd['interface'],
- fwd['device'],
- fwd['packet_count'],
- fwd.get('packet_drop_count', None),
- fwd.get('packet_drop_percentage', None),
- rev['packet_count'],
- rev.get('packet_drop_count', None),
- rev.get('packet_drop_percentage', None),
- ])
- return chain_analysis_table
+ def _get_chain_table(self, chain_stats):
+ """Retrieve the table for a direction.
+
+ chain_stats: {
+            'interfaces': ['Port0', 'drop %', 'vhost0', 'Port1'],
+ 'chains': {
+ '0': {'packets': [2000054, '-0.023%', 1999996, 1999996],
+ 'lat_min_usec': 10,
+ 'lat_max_usec': 187,
+ 'lat_avg_usec': 45},
+ '1': {...},
+ 'total': {...}
+ }
+ }
+ """
+ chains = chain_stats['chains']
+ _annotate_chain_stats(chains)
+ header = [('Chain', Formatter.standard)] + \
+ [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
+ # add latency columns if available Avg, Min, Max and percentiles
+ lat_keys = []
+ lat_map = {'lat_avg_usec': 'Avg lat.',
+ 'lat_min_usec': 'Min lat.',
+ 'lat_max_usec': 'Max lat.'}
+ if 'lat_avg_usec' in chains['0']:
+ lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']
+
+ if not self.config.disable_hdrh:
+ lat_keys.append('lat_percentile')
+ for percentile in self.config.lat_percentiles:
+ lat_map['lat_' + str(percentile) + '_percentile'] = \
+ str(percentile) + ' %ile lat.'
+
+ for lat_value in lat_map.values():
+ # 'append' expects a single parameter => double parentheses
+ header.append((lat_value, Formatter.standard))
+
+ table = Table(header)
+ for chain in sorted(list(chains.keys()), key=str):
+ row = [chain] + chains[chain]['packets']
+ for lat_key in lat_keys:
+
+ if lat_key != 'lat_percentile':
+ if chains[chain].get(lat_key, None):
+ row.append(Formatter.standard(chains[chain][lat_key]))
+ else:
+ row.append('n/a')
+ else:
+ if not self.config.disable_hdrh:
+ if chains[chain].get(lat_key, None):
+ for percentile in chains[chain][lat_key]:
+ row.append(Formatter.standard(
+ chains[chain][lat_key][percentile]))
+ else:
+ for _ in self.config.lat_percentiles:
+ row.append('n/a')
+ table.add_row(row)
+ return table
def __record_header_put(self, key, value):
if self.sender:
@@ -500,9 +633,9 @@ class NFVBenchSummarizer(Summarizer):
run_specific_data['pdr'] = data['pdr']
run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
del data['pdr']
- for key in run_specific_data:
+ for data_value in run_specific_data.values():
data_to_send = data.copy()
- data_to_send.update(run_specific_data[key])
+ data_to_send.update(data_value)
self.sender.record_send(data_to_send)
self.__record_init()
diff --git a/nfvbench/tor_client.py b/nfvbench/tor_client.py
deleted file mode 100644
index c8214c8..0000000
--- a/nfvbench/tor_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-
-class TORClientException(Exception):
- pass
-
-
-class BasicTORClient(object):
-
- def __init__(self, config):
- pass
-
- def get_int_counters(self):
- return {}
-
- def get_vni_counters(self, vni):
- return {}
-
- def get_vni_interface(self, vni, counters):
- return None
-
- def get_vni_for_vlan(self, vlans):
- return []
-
- def attach_tg_interfaces(self, network_vlans, switch_ports):
- pass
-
- def clear_nve(self):
- pass
-
- def clear_interface(self, vni):
- pass
-
- def close(self):
- pass
-
- def get_version(self):
- return {}
diff --git a/nfvbench/traffic_client.py b/nfvbench/traffic_client.py
index 5305da9..47af265 100755
--- a/nfvbench/traffic_client.py
+++ b/nfvbench/traffic_client.py
@@ -12,67 +12,88 @@
# License for the specific language governing permissions and limitations
# under the License.
-from datetime import datetime
-import re
+"""Interface to the traffic generator clients including NDR/PDR binary search."""
import socket
import struct
import time
+import sys
from attrdict import AttrDict
import bitmath
+from hdrh.histogram import HdrHistogram
from netaddr import IPNetwork
# pylint: disable=import-error
-from trex_stl_lib.api import STLError
+from trex.stl.api import Ether
+from trex.stl.api import STLError
+from trex.stl.api import UDP
+# pylint: disable=wrong-import-order
+from scapy.contrib.mpls import MPLS # flake8: noqa
+# pylint: enable=wrong-import-order
# pylint: enable=import-error
-from log import LOG
-from network import Interface
-from specs import ChainType
-from stats_collector import IntervalCollector
-from stats_collector import IterationCollector
-import traffic_gen.traffic_utils as utils
-from utils import cast_integer
-
+from .log import LOG
+from .packet_stats import InterfaceStats
+from .packet_stats import PacketPathStats
+from .stats_collector import IntervalCollector
+from .stats_collector import IterationCollector
+from .traffic_gen import traffic_utils as utils
+from .utils import cast_integer, find_max_size, find_tuples_equal_to_lcm_value, get_divisors, lcm
class TrafficClientException(Exception):
"""Generic traffic client exception."""
- pass
-
-
class TrafficRunner(object):
"""Serialize various steps required to run traffic."""
- def __init__(self, client, duration_sec, interval_sec=0):
+ def __init__(self, client, duration_sec, interval_sec=0, service_mode=False):
+ """Create a traffic runner."""
self.client = client
self.start_time = None
self.duration_sec = duration_sec
self.interval_sec = interval_sec
+ self.service_mode = service_mode
def run(self):
+ """Clear stats and instruct the traffic generator to start generating traffic."""
+ if self.is_running():
+ return None
LOG.info('Running traffic generator')
self.client.gen.clear_stats()
+        # Debug use only: the service_mode flag may have been set in the configuration
+        # to enable 'service' mode in the trex generator before starting the traffic (run).
+        # From this point, a T-rex console (launched in readonly mode) can capture
+        # the transmitted and/or received traffic.
+ self.client.gen.set_service_mode(enabled=self.service_mode)
+ LOG.info('Service mode is %sabled', 'en' if self.service_mode else 'dis')
self.client.gen.start_traffic()
self.start_time = time.time()
return self.poll_stats()
def stop(self):
+ """Stop the current run and instruct the traffic generator to stop traffic."""
if self.is_running():
self.start_time = None
self.client.gen.stop_traffic()
def is_running(self):
+ """Check if a run is still pending."""
return self.start_time is not None
def time_elapsed(self):
+ """Return time elapsed since start of run."""
if self.is_running():
return time.time() - self.start_time
return self.duration_sec
def poll_stats(self):
+ """Poll latest stats from the traffic generator at fixed interval - sleeps if necessary.
+
+ return: latest stats or None if traffic is stopped
+ """
if not self.is_running():
return None
- if self.client.skip_sleep:
+ if self.client.skip_sleep():
self.stop()
return self.client.get_stats()
time_elapsed = self.time_elapsed()
@@ -96,7 +117,10 @@ class IpBlock(object):
"""Manage a block of IP addresses."""
def __init__(self, base_ip, step_ip, count_ip):
+ """Create an IP block."""
self.base_ip_int = Device.ip_to_int(base_ip)
+ if step_ip == 'random':
+ step_ip = '0.0.0.1'
self.step = Device.ip_to_int(step_ip)
self.max_available = count_ip
self.next_free = 0
@@ -104,11 +128,18 @@ class IpBlock(object):
def get_ip(self, index=0):
"""Return the IP address at given index."""
if index < 0 or index >= self.max_available:
- raise IndexError('Index out of bounds')
+ raise IndexError('Index out of bounds: %d (max=%d)' % (index, self.max_available))
return Device.int_to_ip(self.base_ip_int + index * self.step)
+ def get_ip_from_chain_first_ip(self, first_ip, index=0):
+ """Return the IP address at given index starting from chain first ip."""
+ if index < 0 or index >= self.max_available:
+ raise IndexError('Index out of bounds: %d (max=%d)' % (index, self.max_available))
+ return Device.int_to_ip(first_ip + index * self.step)
+
def reserve_ip_range(self, count):
- """Reserve a range of count consecutive IP addresses spaced by step."""
+ """Reserve a range of count consecutive IP addresses spaced by step.
+ """
if self.next_free + count > self.max_available:
raise IndexError('No more IP addresses next free=%d max_available=%d requested=%d' %
(self.next_free,
@@ -120,316 +151,652 @@ class IpBlock(object):
return (first_ip, last_ip)
def reset_reservation(self):
+ """Reset all reservations and restart with a completely unused IP block."""
self.next_free = 0
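+    # IpBlock usage sketch (hypothetical base, step and count values):
+    #   block = IpBlock('10.0.0.0', '0.0.0.1', 256)
+    #   block.get_ip(5)              # '10.0.0.5'
+    #   block.reserve_ip_range(10)   # ('10.0.0.0', '10.0.0.9')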
+class UdpPorts(object):
+
+ def __init__(self, src_min, src_max, dst_min, dst_max, udp_src_size, udp_dst_size, step):
+
+ self.src_min = int(src_min)
+ self.src_max = int(src_max)
+ self.dst_min = int(dst_min)
+ self.dst_max = int(dst_max)
+ self.udp_src_size = udp_src_size
+ self.udp_dst_size = udp_dst_size
+ self.step = step
+
+ def get_src_max(self, index=0):
+ """Return the UDP src port at given index."""
+ return int(self.src_min) + index * int(self.step)
+
+ def get_dst_max(self, index=0):
+ """Return the UDP dst port at given index."""
+ return int(self.dst_min) + index * int(self.step)
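+
+    # Minimal sketch (hypothetical range 53..1024 with step 1, i.e. a size of 972 ports):
+    #   ports = UdpPorts(53, 1024, 53, 1024, 972, 972, 1)
+    #   ports.get_src_max(3)   # 56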
+
+
class Device(object):
- """Represent a port device and all information associated to it."""
-
- def __init__(self, port, pci, switch_port=None, vtep_vlan=None, ip=None, tg_gateway_ip=None,
- gateway_ip=None, ip_addrs_step=None, tg_gateway_ip_addrs_step=None,
- gateway_ip_addrs_step=None, udp_src_port=None, udp_dst_port=None,
- dst_mac=None, chain_count=1, flow_count=1, vlan_tagging=False):
- self.chain_count = chain_count
- self.flow_count = flow_count
- self.dst = None
+ """Represent a port device and all information associated to it.
+
+ In the curent version we only support 2 port devices for the traffic generator
+ identified as port 0 or port 1.
+ """
+
+ def __init__(self, port, generator_config):
+ """Create a new device for a given port."""
+ self.generator_config = generator_config
+ self.chain_count = generator_config.service_chain_count
+ if generator_config.bidirectional:
+ self.flow_count = generator_config.flow_count / 2
+ else:
+ self.flow_count = generator_config.flow_count
+
self.port = port
- self.switch_port = switch_port
- self.vtep_vlan = vtep_vlan
- self.vlan_tag = None
- self.vlan_tagging = vlan_tagging
- self.pci = pci
+ self.switch_port = generator_config.interfaces[port].get('switch_port', None)
+ self.vtep_vlan = None
+ self.vtep_src_mac = None
+ self.vxlan = False
+ self.mpls = False
+ self.inner_labels = None
+ self.outer_labels = None
+ self.pci = generator_config.interfaces[port].pci
self.mac = None
- self.dst_mac = dst_mac
- self.vm_mac_list = None
- subnet = IPNetwork(ip)
- self.ip = subnet.ip.format()
- self.ip_prefixlen = subnet.prefixlen
- self.ip_addrs_step = ip_addrs_step
- self.tg_gateway_ip_addrs_step = tg_gateway_ip_addrs_step
- self.gateway_ip_addrs_step = gateway_ip_addrs_step
- self.gateway_ip = gateway_ip
- self.tg_gateway_ip = tg_gateway_ip
- self.ip_block = IpBlock(self.ip, ip_addrs_step, flow_count)
- self.gw_ip_block = IpBlock(gateway_ip,
- gateway_ip_addrs_step,
- chain_count)
- self.tg_gw_ip_block = IpBlock(tg_gateway_ip,
- tg_gateway_ip_addrs_step,
- chain_count)
- self.udp_src_port = udp_src_port
- self.udp_dst_port = udp_dst_port
+ self.dest_macs = None
+ self.vtep_dst_mac = None
+ self.vtep_dst_ip = None
+ if generator_config.vteps is None:
+ self.vtep_src_ip = None
+ else:
+ self.vtep_src_ip = generator_config.vteps[port]
+ self.vnis = None
+ self.vlans = None
+ self.ip_addrs = generator_config.ip_addrs[port]
+ self.ip_src_static = generator_config.ip_src_static
+ self.ip_addrs_step = generator_config.ip_addrs_step
+ if self.ip_addrs_step == 'random':
+ # Set step to 1 to calculate the IP range size (see check_range_size below)
+ step = '0.0.0.1'
+ else:
+ step = self.ip_addrs_step
+ self.ip_size = self.check_range_size(IPNetwork(self.ip_addrs).size, Device.ip_to_int(step))
+ self.ip = str(IPNetwork(self.ip_addrs).network)
+ ip_addrs_left = generator_config.ip_addrs[0]
+ ip_addrs_right = generator_config.ip_addrs[1]
+ self.ip_addrs_size = {
+ 'left': self.check_range_size(IPNetwork(ip_addrs_left).size, Device.ip_to_int(step)),
+ 'right': self.check_range_size(IPNetwork(ip_addrs_right).size, Device.ip_to_int(step))}
+ udp_src_port = generator_config.gen_config.udp_src_port
+ if udp_src_port is None:
+ udp_src_port = 53
+ udp_dst_port = generator_config.gen_config.udp_dst_port
+ if udp_dst_port is None:
+ udp_dst_port = 53
+ src_max, src_min = self.define_udp_range(udp_src_port, 'udp_src_port')
+ dst_max, dst_min = self.define_udp_range(udp_dst_port, 'udp_dst_port')
+ if generator_config.gen_config.udp_port_step == 'random':
+ # Set step to 1 to calculate the UDP range size
+ udp_step = 1
+ else:
+ udp_step = int(generator_config.gen_config.udp_port_step)
+ udp_src_size = self.check_range_size(int(src_max) - int(src_min) + 1, udp_step)
+ udp_dst_size = self.check_range_size(int(dst_max) - int(dst_min) + 1, udp_step)
+ lcm_port = lcm(udp_src_size, udp_dst_size)
+ if self.ip_src_static is True:
+ lcm_ip = lcm(1, min(self.ip_addrs_size['left'], self.ip_addrs_size['right']))
+ else:
+ lcm_ip = lcm(self.ip_addrs_size['left'], self.ip_addrs_size['right'])
+ flow_max = lcm(lcm_port, lcm_ip)
+ if self.flow_count > flow_max:
+ raise TrafficClientException('Trying to set unachievable traffic (%d > %d)' %
+ (self.flow_count, flow_max))
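+        # Worked example (hypothetical sizes): with UDP src/dst range sizes of 3 and 7 ports,
+        # lcm_port = lcm(3, 7) = 21, and with 100 usable IPs on each side lcm_ip = 100,
+        # so flow_max = lcm(21, 100) = 2100 and any flow_count above 2100 is rejected here.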
+
+ self.udp_ports = UdpPorts(src_min, src_max, dst_min, dst_max, udp_src_size, udp_dst_size,
+ generator_config.gen_config.udp_port_step)
+
+ self.ip_block = IpBlock(self.ip, step, self.ip_size)
+
+ self.gw_ip_block = IpBlock(generator_config.gateway_ips[port],
+ generator_config.gateway_ip_addrs_step,
+ self.chain_count)
+ self.tg_gateway_ip_addrs = generator_config.tg_gateway_ip_addrs[port]
+ self.tg_gw_ip_block = IpBlock(self.tg_gateway_ip_addrs,
+ generator_config.tg_gateway_ip_addrs_step,
+ self.chain_count)
+
+ def limit_ip_udp_ranges(self, peer_ip_size, cur_chain_flow_count):
+ # init to min value in case no matching values are found with the LCM calculation
+ new_src_ip_size = 1
+ new_peer_ip_size = 1
+ new_src_udp_size = 1
+ new_dst_udp_size = 1
+
+ if self.ip_src_static is True:
+ src_ip_size = 1
+ else:
+ src_ip_size = self.ip_size
+ ip_src_divisors = list(get_divisors(src_ip_size))
+ ip_dst_divisors = list(get_divisors(peer_ip_size))
+ udp_src_divisors = list(get_divisors(self.udp_ports.udp_src_size))
+ udp_dst_divisors = list(get_divisors(self.udp_ports.udp_dst_size))
+ fc = int(cur_chain_flow_count)
+ tuples_ip = list(find_tuples_equal_to_lcm_value(ip_src_divisors, ip_dst_divisors, fc))
+ tuples_udp = list(find_tuples_equal_to_lcm_value(udp_src_divisors, udp_dst_divisors, fc))
+
+ if tuples_ip:
+ new_src_ip_size = tuples_ip[-1][0]
+ new_peer_ip_size = tuples_ip[-1][1]
+
+ if tuples_udp:
+ new_src_udp_size = tuples_udp[-1][0]
+ new_dst_udp_size = tuples_udp[-1][1]
+
+ tuples_src = []
+ tuples_dst = []
+ if not tuples_ip and not tuples_udp:
+ # in case there are no common divisors matching the LCM value (i.e. the requested
+ # flow count), try to find an accurate UDP range that fits the requested flow count
+ udp_src_int = range(self.udp_ports.src_min, self.udp_ports.src_max)
+ udp_dst_int = range(self.udp_ports.dst_min, self.udp_ports.dst_max)
+ tuples_src = list(find_tuples_equal_to_lcm_value(ip_src_divisors, udp_src_int, fc))
+ tuples_dst = list(find_tuples_equal_to_lcm_value(ip_dst_divisors, udp_dst_int, fc))
+
+ if not tuples_src and not tuples_dst:
+ # iterate IP and UDP ranges to find a tuple that matches the flow count value
+ src_ip_range = range(1, src_ip_size)
+ dst_ip_range = range(1, peer_ip_size)
+ tuples_src = list(find_tuples_equal_to_lcm_value(src_ip_range, udp_src_int, fc))
+ tuples_dst = list(find_tuples_equal_to_lcm_value(dst_ip_range, udp_dst_int, fc))
+
+ if tuples_src or tuples_dst:
+ if tuples_src:
+ new_src_ip_size = tuples_src[-1][0]
+ new_src_udp_size = tuples_src[-1][1]
+ if tuples_dst:
+ new_peer_ip_size = tuples_dst[-1][0]
+ new_dst_udp_size = tuples_dst[-1][1]
+ else:
+ if not tuples_ip:
+ if src_ip_size != 1:
+ if src_ip_size > fc:
+ new_src_ip_size = fc
+ else:
+ new_src_ip_size = find_max_size(src_ip_size, tuples_udp, fc)
+ if peer_ip_size != 1:
+ if peer_ip_size > fc:
+ new_peer_ip_size = fc
+ else:
+ new_peer_ip_size = find_max_size(peer_ip_size, tuples_udp, fc)
+
+ if not tuples_udp:
+ if self.udp_ports.udp_src_size != 1:
+ if self.udp_ports.udp_src_size > fc:
+ new_src_udp_size = fc
+ else:
+ new_src_udp_size = find_max_size(self.udp_ports.udp_src_size,
+ tuples_ip, fc)
+ if self.udp_ports.udp_dst_size != 1:
+ if self.udp_ports.udp_dst_size > fc:
+ new_dst_udp_size = fc
+ else:
+ new_dst_udp_size = find_max_size(self.udp_ports.udp_dst_size,
+ tuples_ip, fc)
+ max_possible_flows = lcm(lcm(new_src_ip_size, new_peer_ip_size),
+ lcm(new_src_udp_size, new_dst_udp_size))
+
+ LOG.debug("IP dst size: %d", new_peer_ip_size)
+ LOG.debug("LCM IP: %d", lcm(new_src_ip_size, new_peer_ip_size))
+ LOG.debug("LCM UDP: %d", lcm(new_src_udp_size, new_dst_udp_size))
+ LOG.debug("Global LCM: %d", max_possible_flows)
+ LOG.debug("IP src size: %d, IP dst size: %d, UDP src size: %d, UDP dst size: %d",
+ new_src_ip_size, new_peer_ip_size, self.udp_ports.udp_src_size,
+ self.udp_ports.udp_dst_size)
+ if not max_possible_flows == cur_chain_flow_count:
+ if (self.ip_addrs_step != '0.0.0.1' or self.udp_ports.step != '1') and not (
+ self.ip_addrs_step == 'random' and self.udp_ports.step == 'random'):
+ LOG.warning("Current values of the ip_addrs_step and/or udp_port_step properties "
+ "do not allow an accurate flow count to be enforced. "
+ "Values will be overridden as follows:")
+ if self.ip_addrs_step != '0.0.0.1':
+ LOG.info("ip_addrs_step='0.0.0.1' (previous value: ip_addrs_step='%s')",
+ self.ip_addrs_step)
+ self.ip_addrs_step = '0.0.0.1'
+
+ if self.udp_ports.step != '1':
+ LOG.info("udp_port_step='1' (previous value: udp_port_step='%s')",
+ self.udp_ports.step)
+ self.udp_ports.step = '1'
+ # override the config so that trex_gen.py does not log the random step warning message
+ self.generator_config.gen_config.udp_port_step = self.udp_ports.step
+ else:
+ LOG.error("Current values of the ip_addrs_step and udp_port_step properties "
+ "do not allow an accurate flow count to be enforced.")
+ else:
+ src_ip_size = new_src_ip_size
+ peer_ip_size = new_peer_ip_size
+ self.udp_ports.udp_src_size = new_src_udp_size
+ self.udp_ports.udp_dst_size = new_dst_udp_size
+ return src_ip_size, peer_ip_size
+
+ @staticmethod
+ def define_udp_range(udp_port, property_name):
+ if isinstance(udp_port, int):
+ min = udp_port
+ max = min
+ elif isinstance(udp_port, tuple):
+ min = udp_port[0]
+ max = udp_port[1]
+ else:
+ raise TrafficClientException('Invalid %s property value (53 or [\'53\',\'1024\'])'
+ % property_name)
+ return max, min
+
+ @staticmethod
+ def check_range_size(range_size, step):
+ """Return the number of usable IPs or UDP ports in the range, given the step."""
+ try:
+ if range_size % step == 0:
+ value = range_size // step
+ else:
+ value = range_size // step + 1
+ return value
+ except ZeroDivisionError:
+ raise ZeroDivisionError("step can't be zero!") from ZeroDivisionError
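A small usage sketch of the two static helpers above; the port numbers are illustrative values only:

# a single port value degenerates into a range of size 1
assert Device.define_udp_range(53, 'udp_src_port') == (53, 53)
# a (min, max) tuple is returned as (max, min)
assert Device.define_udp_range((1024, 2047), 'udp_dst_port') == (2047, 1024)
# 1024 ports walked with a step of 3 give 342 usable values (rounded up)
assert Device.check_range_size(2047 - 1024 + 1, 3) == 342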
def set_mac(self, mac):
+ """Set the local MAC for this port device."""
if mac is None:
raise TrafficClientException('Trying to set traffic generator MAC address as None')
self.mac = mac
- LOG.info("Port %d: src MAC %s", self.port, self.mac)
- def set_destination(self, dst):
- self.dst = dst
+ def get_peer_device(self):
+ """Get the peer device (device 0 -> device 1, or device 1 -> device 0)."""
+ return self.generator_config.devices[1 - self.port]
- def set_vm_mac_list(self, vm_mac_list):
- self.vm_mac_list = map(str, vm_mac_list)
+ def set_vtep_dst_mac(self, dest_macs):
+ """Set the list of VTEP dest MACs indexed by the chain id.
- def set_vlan_tag(self, vlan_tag):
- if self.vlan_tagging and vlan_tag is None:
- raise TrafficClientException('Trying to set VLAN tag as None')
- self.vlan_tag = vlan_tag
- LOG.info("Port %d: VLAN %d", self.port, self.vlan_tag)
+ This is only called in 2 cases:
+ - VM macs discovered using openstack API
+ - dest MACs provisioned in config file
+ """
+ self.vtep_dst_mac = list(map(str, dest_macs))
+
+ def set_dest_macs(self, dest_macs):
+ """Set the list of dest MACs indexed by the chain id.
+
+ This is only called in 2 cases:
+ - VM macs discovered using openstack API
+ - dest MACs provisioned in config file
+ """
+ self.dest_macs = list(map(str, dest_macs))
+
+ def get_dest_macs(self):
+ """Get the list of dest macs for this device.
+
+ If set_dest_macs was never called, assume l2-loopback and return
+ a list of peer MACs (one per chain, normally only 1 chain)
+ """
+ if self.dest_macs:
+ return self.dest_macs
+ # assume this is l2-loopback
+ return [self.get_peer_device().mac] * self.chain_count
+
+ def set_vlans(self, vlans):
+ """Set the list of vlans to use indexed by the chain id."""
+ self.vlans = vlans
+ LOG.info("Port %d: VLANs %s", self.port, self.vlans)
+
+ def set_vtep_vlan(self, vlan):
+ """Set the VTEP VLAN to use on this port."""
+ self.vtep_vlan = vlan
+ self.vxlan = True
+ self.vlan_tagging = None
+ LOG.info("Port %d: VTEP VLANs %s", self.port, self.vtep_vlan)
+
+ def set_vxlan_endpoints(self, src_ip, dst_ip):
+ self.vtep_dst_ip = dst_ip
+ self.vtep_src_ip = src_ip
+ LOG.info("Port %d: src_vtep %s, dst_vtep %s", self.port,
+ self.vtep_src_ip, self.vtep_dst_ip)
+
+ def set_mpls_peers(self, src_ip, dst_ip):
+ self.mpls = True
+ self.vtep_dst_ip = dst_ip
+ self.vtep_src_ip = src_ip
+ LOG.info("Port %d: src_mpls_vtep %s, mpls_peer_ip %s", self.port,
+ self.vtep_src_ip, self.vtep_dst_ip)
+
+ def set_vxlans(self, vnis):
+ self.vnis = vnis
+ LOG.info("Port %d: VNIs %s", self.port, self.vnis)
+
+ def set_mpls_inner_labels(self, labels):
+ self.inner_labels = labels
+ LOG.info("Port %d: MPLS Inner Labels %s", self.port, self.inner_labels)
+
+ def set_mpls_outer_labels(self, labels):
+ self.outer_labels = labels
+ LOG.info("Port %d: MPLS Outer Labels %s", self.port, self.outer_labels)
+
+ def set_gw_ip(self, gateway_ip):
+ self.gw_ip_block = IpBlock(gateway_ip,
+ self.generator_config.gateway_ip_addrs_step,
+ self.chain_count)
def get_gw_ip(self, chain_index):
"""Retrieve the IP address assigned for the gateway of a given chain."""
return self.gw_ip_block.get_ip(chain_index)
- def get_stream_configs(self, service_chain):
+ def get_stream_configs(self):
+ """Get the stream configs for all chains on this device.
+
+ Called by the traffic generator driver to program the traffic generator properly
+ before generating traffic.
+ """
configs = []
# exact flow count for each chain is calculated as follows:
# - all chains except the first will have the same flow count
# calculated as (total_flows + chain_count - 1) / chain_count
# - the first chain will have the remainder
# example 11 flows and 3 chains => 3, 4, 4
- flows_per_chain = (self.flow_count + self.chain_count - 1) / self.chain_count
- cur_chain_flow_count = self.flow_count - flows_per_chain * (self.chain_count - 1)
+ flows_per_chain = int((self.flow_count + self.chain_count - 1) / self.chain_count)
+ cur_chain_flow_count = int(self.flow_count - flows_per_chain * (self.chain_count - 1))
+ peer = self.get_peer_device()
self.ip_block.reset_reservation()
- self.dst.ip_block.reset_reservation()
-
- for chain_idx in xrange(self.chain_count):
- src_ip_first, src_ip_last = self.ip_block.reserve_ip_range(cur_chain_flow_count)
- dst_ip_first, dst_ip_last = self.dst.ip_block.reserve_ip_range(cur_chain_flow_count)
-
- dst_mac = self.dst_mac[chain_idx] if self.dst_mac is not None else self.dst.mac
- if not re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", dst_mac.lower()):
- raise TrafficClientException("Invalid MAC address '{mac}' specified in "
- "mac_addrs_left/right".format(mac=dst_mac))
+ peer.ip_block.reset_reservation()
+ dest_macs = self.get_dest_macs()
+
+ # limit the UDP port and IP ranges to avoid generating more flows than requested
+ peer_size = peer.ip_size // self.chain_count
+
+ for chain_idx in range(self.chain_count):
+ src_ip_size, peer_ip_size = self.limit_ip_udp_ranges(peer_size, cur_chain_flow_count)
+
+ src_ip_first, src_ip_last = self.ip_block.reserve_ip_range(src_ip_size)
+ dst_ip_first, dst_ip_last = peer.ip_block.reserve_ip_range(peer_ip_size)
+
+ if self.ip_addrs_step != 'random':
+ src_ip_last = self.ip_block.get_ip_from_chain_first_ip(
+ Device.ip_to_int(src_ip_first), src_ip_size - 1)
+ dst_ip_last = peer.ip_block.get_ip_from_chain_first_ip(
+ Device.ip_to_int(dst_ip_first), peer_ip_size - 1)
+ if self.udp_ports.step != 'random':
+ self.udp_ports.src_max = self.udp_ports.get_src_max(self.udp_ports.udp_src_size - 1)
+ self.udp_ports.dst_max = self.udp_ports.get_dst_max(self.udp_ports.udp_dst_size - 1)
+ if self.ip_src_static:
+ src_ip_last = src_ip_first
+
+ LOG.info("Port %d, chain %d: IP src range [%s,%s]", self.port, chain_idx,
+ src_ip_first, src_ip_last)
+ LOG.info("Port %d, chain %d: IP dst range [%s,%s]", self.port, chain_idx,
+ dst_ip_first, dst_ip_last)
+ LOG.info("Port %d, chain %d: UDP src range [%s,%s]", self.port, chain_idx,
+ self.udp_ports.src_min, self.udp_ports.src_max)
+ LOG.info("Port %d, chain %d: UDP dst range [%s,%s]", self.port, chain_idx,
+ self.udp_ports.dst_min, self.udp_ports.dst_max)
configs.append({
'count': cur_chain_flow_count,
'mac_src': self.mac,
- 'mac_dst': dst_mac if service_chain == ChainType.EXT else self.vm_mac_list[
- chain_idx],
+ 'mac_dst': dest_macs[chain_idx],
'ip_src_addr': src_ip_first,
'ip_src_addr_max': src_ip_last,
- 'ip_src_count': cur_chain_flow_count,
+ 'ip_src_count': src_ip_size,
'ip_dst_addr': dst_ip_first,
'ip_dst_addr_max': dst_ip_last,
- 'ip_dst_count': cur_chain_flow_count,
+ 'ip_dst_count': peer_ip_size,
'ip_addrs_step': self.ip_addrs_step,
- 'udp_src_port': self.udp_src_port,
- 'udp_dst_port': self.udp_dst_port,
+ 'ip_src_static': self.ip_src_static,
+ 'udp_src_port': self.udp_ports.src_min,
+ 'udp_src_port_max': self.udp_ports.src_max,
+ 'udp_src_count': self.udp_ports.udp_src_size,
+ 'udp_dst_port': self.udp_ports.dst_min,
+ 'udp_dst_port_max': self.udp_ports.dst_max,
+ 'udp_dst_count': self.udp_ports.udp_dst_size,
+ 'udp_port_step': self.udp_ports.step,
'mac_discovery_gw': self.get_gw_ip(chain_idx),
'ip_src_tg_gw': self.tg_gw_ip_block.get_ip(chain_idx),
- 'ip_dst_tg_gw': self.dst.tg_gw_ip_block.get_ip(chain_idx),
- 'vlan_tag': self.vlan_tag if self.vlan_tagging else None
+ 'ip_dst_tg_gw': peer.tg_gw_ip_block.get_ip(chain_idx),
+ 'vlan_tag': self.vlans[chain_idx] if self.vlans else None,
+ 'vxlan': self.vxlan,
+ 'vtep_vlan': self.vtep_vlan if self.vtep_vlan else None,
+ 'vtep_src_mac': self.mac if (self.vxlan or self.mpls) else None,
+ 'vtep_dst_mac': self.vtep_dst_mac if (self.vxlan or self.mpls) else None,
+ 'vtep_dst_ip': self.vtep_dst_ip if self.vxlan is True else None,
+ 'vtep_src_ip': self.vtep_src_ip if self.vxlan is True else None,
+ 'net_vni': self.vnis[chain_idx] if self.vxlan is True else None,
+ 'mpls': self.mpls,
+ 'mpls_outer_label': self.outer_labels[chain_idx] if self.mpls is True else None,
+ 'mpls_inner_label': self.inner_labels[chain_idx] if self.mpls is True else None
})
# after first chain, fall back to the flow count for all other chains
cur_chain_flow_count = flows_per_chain
-
return configs
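A quick standalone check of the per-chain flow split documented above (11 flows over 3 chains), for illustration:

flow_count, chain_count = 11, 3
flows_per_chain = int((flow_count + chain_count - 1) / chain_count)        # 4
first_chain_flows = int(flow_count - flows_per_chain * (chain_count - 1))  # 3
assert (first_chain_flows, flows_per_chain, flows_per_chain) == (3, 4, 4)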
- def ip_range_overlaps(self):
- """Check if this device ip range is overlapping with the dst device ip range."""
- src_base_ip = Device.ip_to_int(self.ip)
- dst_base_ip = Device.ip_to_int(self.dst.ip)
- src_last_ip = src_base_ip + self.flow_count - 1
- dst_last_ip = dst_base_ip + self.flow_count - 1
- return dst_last_ip >= src_base_ip and src_last_ip >= dst_base_ip
-
- @staticmethod
- def mac_to_int(mac):
- return int(mac.translate(None, ":.- "), 16)
-
- @staticmethod
- def int_to_mac(i):
- mac = format(i, 'x').zfill(12)
- blocks = [mac[x:x + 2] for x in xrange(0, len(mac), 2)]
- return ':'.join(blocks)
-
@staticmethod
def ip_to_int(addr):
+ """Convert an IP address from string to numeric."""
return struct.unpack("!I", socket.inet_aton(addr))[0]
@staticmethod
def int_to_ip(nvalue):
- return socket.inet_ntoa(struct.pack("!I", nvalue))
+ """Convert an IP address from numeric to string."""
+ return socket.inet_ntoa(struct.pack("!I", int(nvalue)))
-class RunningTrafficProfile(object):
+class GeneratorConfig(object):
"""Represents traffic configuration for currently running traffic profile."""
DEFAULT_IP_STEP = '0.0.0.1'
DEFAULT_SRC_DST_IP_STEP = '0.0.0.1'
- def __init__(self, config, generator_profile):
- generator_config = self.__match_generator_profile(config.traffic_generator,
- generator_profile)
- self.generator_config = generator_config
+ def __init__(self, config):
+ """Create a generator config."""
+ self.config = config
+ # name of the generator profile (normally trex or dummy)
+ # pick the default one if not specified explicitly from cli options
+ if not config.generator_profile:
+ config.generator_profile = config.traffic_generator.default_profile
+ # pick up the profile dict based on the name
+ gen_config = self.__match_generator_profile(config.traffic_generator,
+ config.generator_profile)
+ self.gen_config = gen_config
+ # copy over fields from the dict
+ self.tool = gen_config.tool
+ self.ip = gen_config.ip
+ # overrides on config.cores and config.mbuf_factor
+ if config.cores:
+ self.cores = config.cores
+ else:
+ self.cores = gen_config.get('cores', 1)
+ # let's report the value actually used in the end
+ config.cores_used = self.cores
+ self.mbuf_factor = config.mbuf_factor
+ self.mbuf_64 = config.mbuf_64
+ self.hdrh = not config.disable_hdrh
+ if config.intf_speed:
+ # interface speed is overridden from the command line
+ self.intf_speed = config.intf_speed
+ elif gen_config.intf_speed:
+ # interface speed is overridden from the generator config
+ self.intf_speed = gen_config.intf_speed
+ else:
+ self.intf_speed = "auto"
+ if self.intf_speed in ("auto", "0"):
+ # interface speed is discovered/provided by the traffic generator
+ self.intf_speed = 0
+ else:
+ self.intf_speed = bitmath.parse_string(self.intf_speed.replace('ps', '')).bits
+ self.name = gen_config.name
+ self.zmq_pub_port = gen_config.get('zmq_pub_port', 4500)
+ self.zmq_rpc_port = gen_config.get('zmq_rpc_port', 4501)
+ self.limit_memory = gen_config.get('limit_memory', 1024)
+ self.software_mode = gen_config.get('software_mode', False)
+ self.interfaces = gen_config.interfaces
+ if self.interfaces[0].port != 0 or self.interfaces[1].port != 1:
+ raise TrafficClientException('Invalid port order/id in generator_profile.interfaces')
self.service_chain = config.service_chain
self.service_chain_count = config.service_chain_count
self.flow_count = config.flow_count
- self.host_name = generator_config.host_name
- self.name = generator_config.name
- self.tool = generator_config.tool
- self.cores = generator_config.get('cores', 1)
- self.ip_addrs_step = generator_config.ip_addrs_step or self.DEFAULT_SRC_DST_IP_STEP
+ self.host_name = gen_config.host_name
+ self.bidirectional = config.traffic.bidirectional
+ self.tg_gateway_ip_addrs = gen_config.tg_gateway_ip_addrs
+ self.ip_addrs = gen_config.ip_addrs
+ self.ip_addrs_step = gen_config.ip_addrs_step or self.DEFAULT_SRC_DST_IP_STEP
self.tg_gateway_ip_addrs_step = \
- generator_config.tg_gateway_ip_addrs_step or self.DEFAULT_IP_STEP
- self.gateway_ip_addrs_step = generator_config.gateway_ip_addrs_step or self.DEFAULT_IP_STEP
- self.gateway_ips = generator_config.gateway_ip_addrs
- self.ip = generator_config.ip
- self.intf_speed = bitmath.parse_string(generator_config.intf_speed.replace('ps', '')).bits
+ gen_config.tg_gateway_ip_addrs_step or self.DEFAULT_IP_STEP
+ self.gateway_ip_addrs_step = gen_config.gateway_ip_addrs_step or self.DEFAULT_IP_STEP
+ self.gateway_ips = gen_config.gateway_ip_addrs
+ self.ip_src_static = gen_config.ip_src_static
+ self.vteps = gen_config.get('vteps')
+ self.devices = [Device(port, self) for port in [0, 1]]
+ # This should normally always be [0, 1]
+ self.ports = [device.port for device in self.devices]
+
+ # check that pci is not empty
+ if not gen_config.interfaces[0].get('pci', None) or \
+ not gen_config.interfaces[1].get('pci', None):
+ raise TrafficClientException("configuration interfaces pci fields cannot be empty")
+
+ self.pcis = [tgif['pci'] for tgif in gen_config.interfaces]
self.vlan_tagging = config.vlan_tagging
- self.no_arp = config.no_arp
- self.src_device = None
- self.dst_device = None
- self.vm_mac_list = None
- self.mac_addrs_left = generator_config.mac_addrs_left
- self.mac_addrs_right = generator_config.mac_addrs_right
- self.__prep_interfaces(generator_config)
-
- def to_json(self):
- return dict(self.generator_config)
-
- def set_vm_mac_list(self, vm_mac_list):
- self.src_device.set_vm_mac_list(vm_mac_list[0])
- self.dst_device.set_vm_mac_list(vm_mac_list[1])
-
- @staticmethod
- def __match_generator_profile(traffic_generator, generator_profile):
- generator_config = AttrDict(traffic_generator)
- generator_config.pop('default_profile')
- generator_config.pop('generator_profile')
- matching_profile = [profile for profile in traffic_generator.generator_profile if
- profile.name == generator_profile]
- if len(matching_profile) != 1:
- raise Exception('Traffic generator profile not found: ' + generator_profile)
-
- generator_config.update(matching_profile[0])
-
- return generator_config
-
- def __prep_interfaces(self, generator_config):
- src_config = {
- 'chain_count': self.service_chain_count,
- 'flow_count': self.flow_count / 2,
- 'ip': generator_config.ip_addrs[0],
- 'ip_addrs_step': self.ip_addrs_step,
- 'gateway_ip': self.gateway_ips[0],
- 'gateway_ip_addrs_step': self.gateway_ip_addrs_step,
- 'tg_gateway_ip': generator_config.tg_gateway_ip_addrs[0],
- 'tg_gateway_ip_addrs_step': self.tg_gateway_ip_addrs_step,
- 'udp_src_port': generator_config.udp_src_port,
- 'udp_dst_port': generator_config.udp_dst_port,
- 'vlan_tagging': self.vlan_tagging,
- 'dst_mac': generator_config.mac_addrs_left
- }
- dst_config = {
- 'chain_count': self.service_chain_count,
- 'flow_count': self.flow_count / 2,
- 'ip': generator_config.ip_addrs[1],
- 'ip_addrs_step': self.ip_addrs_step,
- 'gateway_ip': self.gateway_ips[1],
- 'gateway_ip_addrs_step': self.gateway_ip_addrs_step,
- 'tg_gateway_ip': generator_config.tg_gateway_ip_addrs[1],
- 'tg_gateway_ip_addrs_step': self.tg_gateway_ip_addrs_step,
- 'udp_src_port': generator_config.udp_src_port,
- 'udp_dst_port': generator_config.udp_dst_port,
- 'vlan_tagging': self.vlan_tagging,
- 'dst_mac': generator_config.mac_addrs_right
- }
- self.src_device = Device(**dict(src_config, **generator_config.interfaces[0]))
- self.dst_device = Device(**dict(dst_config, **generator_config.interfaces[1]))
- self.src_device.set_destination(self.dst_device)
- self.dst_device.set_destination(self.src_device)
+ # needed for result/summarizer
+ config['tg-name'] = gen_config.name
+ config['tg-tool'] = self.tool
- if self.service_chain == ChainType.EXT and not self.no_arp \
- and self.src_device.ip_range_overlaps():
- raise Exception('Overlapping IP address ranges src=%s dst=%d flows=%d' %
- self.src_device.ip,
- self.dst_device.ip,
- self.flow_count)
-
- @property
- def devices(self):
- return [self.src_device, self.dst_device]
+ def to_json(self):
+ """Get json form to display the content into the overall result dict."""
+ return dict(self.gen_config)
- @property
- def vlans(self):
- return [self.src_device.vtep_vlan, self.dst_device.vtep_vlan]
+ def set_dest_macs(self, port_index, dest_macs):
+ """Set the list of dest MACs indexed by the chain id on the given port.
- @property
- def ports(self):
- return [self.src_device.port, self.dst_device.port]
+ port_index: the port for which dest macs must be set
+ dest_macs: a list of dest MACs indexed by chain id
+ """
+ if len(dest_macs) < self.config.service_chain_count:
+ raise TrafficClientException('Dest MAC list %s must have %d entries' %
+ (dest_macs, self.config.service_chain_count))
+ # only pass the first service_chain_count dest MACs
+ self.devices[port_index].set_dest_macs(dest_macs[:self.config.service_chain_count])
+ LOG.info('Port %d: dst MAC %s', port_index, [str(mac) for mac in dest_macs])
+
+ def set_vtep_dest_macs(self, port_index, dest_macs):
+ """Set the list of dest MACs indexed by the chain id on the given port.
+
+ port_index: the port for which dest macs must be set
+ dest_macs: a list of dest MACs indexed by chain id
+ """
+ if len(dest_macs) != self.config.service_chain_count:
+ raise TrafficClientException('Dest MAC list %s must have %d entries' %
+ (dest_macs, self.config.service_chain_count))
+ self.devices[port_index].set_vtep_dst_mac(dest_macs)
+ LOG.info('Port %d: vtep dst MAC %s', port_index, {str(mac) for mac in dest_macs})
- @property
- def switch_ports(self):
- return [self.src_device.switch_port, self.dst_device.switch_port]
+ def get_dest_macs(self):
+ """Return the list of dest macs indexed by port."""
+ return [dev.get_dest_macs() for dev in self.devices]
- @property
- def pcis(self):
- return [self.src_device.pci, self.dst_device.pci]
+ def set_vlans(self, port_index, vlans):
+ """Set the list of vlans to use indexed by the chain id on the given port.
+ port_index: the port for which VLANs must be set
+ vlans: a list of vlan lists indexed by chain id
+ """
+ if len(vlans) != self.config.service_chain_count:
+ raise TrafficClientException('VLAN list %s must have %d entries' %
+ (vlans, self.config.service_chain_count))
+ self.devices[port_index].set_vlans(vlans)
-class TrafficGeneratorFactory(object):
- """Factory class to generate a traffic generator."""
+ def set_vxlans(self, port_index, vxlans):
+ """Set the list of vxlans (VNIs) to use indexed by the chain id on the given port.
- def __init__(self, config):
- self.config = config
+ port_index: the port for which VXLANs must be set
+ vxlans: a list of VNI lists indexed by chain id
+ """
+ if len(vxlans) != self.config.service_chain_count:
+ raise TrafficClientException('VXLAN list %s must have %d entries' %
+ (vxlans, self.config.service_chain_count))
+ self.devices[port_index].set_vxlans(vxlans)
- def get_tool(self):
- return self.config.generator_config.tool
+ def set_mpls_inner_labels(self, port_index, labels):
+ """Set the list of MPLS labels to use indexed by the chain id on the given port.
- def get_generator_client(self):
- tool = self.get_tool().lower()
- if tool == 'trex':
- from traffic_gen import trex
- return trex.TRex(self.config)
- elif tool == 'dummy':
- from traffic_gen import dummy
- return dummy.DummyTG(self.config)
- return None
+ port_index: the port for which Labels must be set
+ labels: a list of label lists indexed by chain id
+ """
+ if len(labels) != self.config.service_chain_count:
+ raise TrafficClientException('Inner MPLS list %s must have %d entries' %
+ (labels, self.config.service_chain_count))
+ self.devices[port_index].set_mpls_inner_labels(labels)
- def list_generator_profile(self):
- return [profile.name for profile in self.config.traffic_generator.generator_profile]
+ def set_mpls_outer_labels(self, port_index, labels):
+ """Set the list of MPLS labels to use indexed by the chain id on the given port.
- def get_generator_config(self, generator_profile):
- return RunningTrafficProfile(self.config, generator_profile)
+ port_index: the port for which Labels must be set
+ labels: a list of label lists indexed by chain id
+ """
+ if len(labels) != self.config.service_chain_count:
+ raise TrafficClientException('Outer MPLS list %s must have %d entries' %
+ (labels, self.config.service_chain_count))
+ self.devices[port_index].set_mpls_outer_labels(labels)
+
+ def set_vtep_vlan(self, port_index, vlan):
+ """Set the VTEP VLAN to use on the given port.
+ port_index: the port for which the VLAN must be set
+ """
+ self.devices[port_index].set_vtep_vlan(vlan)
- def get_matching_profile(self, traffic_profile_name):
- matching_profile = [profile for profile in self.config.traffic_profile if
- profile.name == traffic_profile_name]
+ def set_vxlan_endpoints(self, port_index, src_ip, dst_ip):
+ self.devices[port_index].set_vxlan_endpoints(src_ip, dst_ip)
- if len(matching_profile) > 1:
- raise Exception('Multiple traffic profiles with the same name found.')
- elif not matching_profile:
- raise Exception('No traffic profile found.')
+ def set_mpls_peers(self, port_index, src_ip, dst_ip):
+ self.devices[port_index].set_mpls_peers(src_ip, dst_ip)
- return matching_profile[0]
+ @staticmethod
+ def __match_generator_profile(traffic_generator, generator_profile):
+ gen_config = AttrDict(traffic_generator)
+ gen_config.pop('default_profile')
+ gen_config.pop('generator_profile')
+ matching_profile = [profile for profile in traffic_generator.generator_profile if
+ profile.name == generator_profile]
+ if len(matching_profile) != 1:
+ raise Exception('Traffic generator profile not found: ' + generator_profile)
- def get_frame_sizes(self, traffic_profile):
- matching_profile = self.get_matching_profile(traffic_profile)
- return matching_profile.l2frame_size
+ gen_config.update(matching_profile[0])
+ return gen_config
class TrafficClient(object):
- """Traffic generator client."""
+ """Traffic generator client with NDR/PDR binary search."""
PORTS = [0, 1]
- def __init__(self, config, notifier=None, skip_sleep=False):
- generator_factory = TrafficGeneratorFactory(config)
- self.gen = generator_factory.get_generator_client()
- self.tool = generator_factory.get_tool()
+ def __init__(self, config, notifier=None):
+ """Create a new TrafficClient instance.
+
+ config: nfvbench config
+ notifier: notifier (optional)
+
+ A new instance is created every time the nfvbench config may have changed.
+ """
self.config = config
+ self.generator_config = GeneratorConfig(config)
+ self.tool = self.generator_config.tool
+ self.gen = self._get_generator()
self.notifier = notifier
self.interval_collector = None
self.iteration_collector = None
- self.runner = TrafficRunner(self, self.config.duration_sec, self.config.interval_sec)
- if self.gen is None:
- raise TrafficClientException('%s is not a supported traffic generator' % self.tool)
-
+ self.runner = TrafficRunner(self, self.config.duration_sec, self.config.interval_sec,
+ self.config.service_mode)
+ self.config.frame_sizes = self._get_frame_sizes()
self.run_config = {
'l2frame_size': None,
'duration_sec': self.config.duration_sec,
@@ -439,23 +806,73 @@ class TrafficClient(object):
self.current_total_rate = {'rate_percent': '10'}
if self.config.single_run:
self.current_total_rate = utils.parse_rate_str(self.config.rate)
- # UT with dummy TG can bypass all sleeps
- self.skip_sleep = skip_sleep
+ self.ifstats = None
+ # Speed is either discovered when connecting to TG or set from config
+ # This variable is 0 if not yet discovered from TG or must be the speed of
+ # each interface in bits per second
+ self.intf_speed = self.generator_config.intf_speed
+
+ def _get_generator(self):
+ tool = self.tool.lower()
+ if tool == 'trex':
+ from .traffic_gen import trex_gen
+ return trex_gen.TRex(self)
+ if tool == 'dummy':
+ from .traffic_gen import dummy
+ return dummy.DummyTG(self)
+ raise TrafficClientException('Unsupported generator tool name: ' + self.tool)
- def set_macs(self):
- for mac, device in zip(self.gen.get_macs(), self.config.generator_config.devices):
- device.set_mac(mac)
+ def skip_sleep(self):
+ """Skip all sleeps when doing unit testing with dummy TG.
+
+ Must be overridden using mock.patch
+ """
+ return False
+
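As a hedged example of the override mentioned in the docstring, a unit test could patch the method with the standard mock library ('config' below stands for any valid nfvbench config and is an assumption of this sketch):

from unittest import mock

with mock.patch.object(TrafficClient, 'skip_sleep', return_value=True):
    client = TrafficClient(config)   # dummy TG run without any real sleep
    ...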
+ def _get_frame_sizes(self):
+ traffic_profile_name = self.config.traffic.profile
+ matching_profiles = [profile for profile in self.config.traffic_profile if
+ profile.name == traffic_profile_name]
+ if len(matching_profiles) > 1:
+ raise TrafficClientException('Multiple traffic profiles with name: ' +
+ traffic_profile_name)
+ if not matching_profiles:
+ raise TrafficClientException('Cannot find traffic profile: ' + traffic_profile_name)
+ return matching_profiles[0].l2frame_size
def start_traffic_generator(self):
- self.gen.init()
+ """Start the traffic generator process (traffic not started yet)."""
self.gen.connect()
+ # pick up the interface speed if it is not set from config
+ intf_speeds = self.gen.get_port_speed_gbps()
+ # convert Gbps unit into bps
+ tg_if_speed = bitmath.parse_string(str(intf_speeds[0]) + 'Gb').bits
+ if self.intf_speed:
+ # interface speed is overridden from config
+ if self.intf_speed != tg_if_speed:
+ # Warn the user if the speed in the config is different
+ LOG.warning(
+ 'Interface speed provided (%g Gbps) is different from actual speed (%d Gbps)',
+ self.intf_speed / 1000000000.0, intf_speeds[0])
+ else:
+ # interface speed not provisioned by config
+ self.intf_speed = tg_if_speed
+ # also update the speed in the tg config
+ self.generator_config.intf_speed = tg_if_speed
+ # let's report detected and actually used interface speed
+ self.config.intf_speed_detected = tg_if_speed
+ self.config.intf_speed_used = self.intf_speed
+
+ # Save the traffic generator local MAC
+ for mac, device in zip(self.gen.get_macs(), self.generator_config.devices):
+ device.set_mac(mac)
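For illustration, the two unit conversions used above rely on bitmath, which this module already imports:

import bitmath
# configured speed string, e.g. intf_speed: '25Gbps' -> bits per second
assert bitmath.parse_string('25Gbps'.replace('ps', '')).bits == 25e9
# speed reported by the generator in Gbps -> bits per second
assert bitmath.parse_string(str(10) + 'Gb').bits == 10e9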
def setup(self):
- self.gen.set_mode()
- self.gen.config_interface()
+ """Set up the traffic client."""
self.gen.clear_stats()
def get_version(self):
+ """Get the traffic generator version."""
return self.gen.get_version()
def ensure_end_to_end(self):
@@ -478,45 +895,96 @@ class TrafficClient(object):
all 10 VMs are in operational state.
"""
LOG.info('Starting traffic generator to ensure end-to-end connectivity')
- rate_pps = {'rate_pps': str(self.config.service_chain_count * 1)}
- self.gen.create_traffic('64', [rate_pps, rate_pps], bidirectional=True, latency=False)
-
+ # send 2pps on each chain and each direction
+ rate_pps = {'rate_pps': str(self.config.service_chain_count * 2)}
+ self.gen.create_traffic('64', [rate_pps, rate_pps], bidirectional=True, latency=False,
+ e2e=True)
# ensures enough traffic is coming back
- retry_count = (self.config.check_traffic_time_sec +
- self.config.generic_poll_sec - 1) / self.config.generic_poll_sec
- mac_addresses = set()
- ln = 0
- for it in xrange(retry_count):
+ retry_count = int((self.config.check_traffic_time_sec +
+ self.config.generic_poll_sec - 1) / self.config.generic_poll_sec)
+
+ # we expect to see packets coming from 2 unique MACs per chain.
+ # Because there can be flooding in the case of a shared net,
+ # we must verify that packets from the right VMs are received
+ # and not just count unique src MACs
+ # create a dict of (port, chain) tuples indexed by dest mac
+ mac_map = {}
+ for port, dest_macs in enumerate(self.generator_config.get_dest_macs()):
+ for chain, mac in enumerate(dest_macs):
+ mac_map[mac] = (port, chain)
+ unique_src_mac_count = len(mac_map)
+ if self.config.vxlan and self.config.traffic_generator.vtep_vlan:
+ get_mac_id = lambda packet: packet['binary'][60:66]
+ elif self.config.vxlan:
+ get_mac_id = lambda packet: packet['binary'][56:62]
+ elif self.config.mpls:
+ get_mac_id = lambda packet: packet['binary'][24:30]
+ # mpls_transport_label = lambda packet: packet['binary'][14:18]
+ else:
+ get_mac_id = lambda packet: packet['binary'][6:12]
+ for it in range(retry_count):
self.gen.clear_stats()
self.gen.start_traffic()
self.gen.start_capture()
- LOG.info('Waiting for packets to be received back... (%d / %d)', it + 1, retry_count)
- if not self.skip_sleep:
+ LOG.info('Captured unique src mac %d/%d, capturing return packets (retry %d/%d)...',
+ unique_src_mac_count - len(mac_map), unique_src_mac_count,
+ it + 1, retry_count)
+ if not self.skip_sleep():
time.sleep(self.config.generic_poll_sec)
self.gen.stop_traffic()
self.gen.fetch_capture_packets()
self.gen.stop_capture()
-
for packet in self.gen.packet_list:
- mac_addresses.add(packet['binary'][6:12])
- if ln != len(mac_addresses):
- ln = len(mac_addresses)
- LOG.info('Flows passing traffic %d / %d', ln,
- self.config.service_chain_count * 2)
- if len(mac_addresses) == self.config.service_chain_count * 2:
- LOG.info('End-to-end connectivity ensured')
+ mac_id = get_mac_id(packet).decode('latin-1')
+ src_mac = ':'.join(["%02x" % ord(x) for x in mac_id])
+ if self.config.mpls:
+ if src_mac in mac_map and self.is_mpls(packet):
+ port, chain = mac_map[src_mac]
+ LOG.info('Received mpls packet from mac: %s (chain=%d, port=%d)',
+ src_mac, chain, port)
+ mac_map.pop(src_mac, None)
+ else:
+ if src_mac in mac_map and self.is_udp(packet):
+ port, chain = mac_map[src_mac]
+ LOG.info('Received udp packet from mac: %s (chain=%d, port=%d)',
+ src_mac, chain, port)
+ mac_map.pop(src_mac, None)
+
+ if not mac_map:
+ LOG.info('End-to-end connectivity established')
return
+ if self.config.l3_router and not self.config.no_arp:
+ # In case of L3 traffic mode, routers are not able to route traffic
+ # until VM interfaces are up and ARP requests are done
+ LOG.info('Waiting for the loopback service to be fully started...')
+ LOG.info('Sending ARP requests to ensure end-to-end connectivity is established')
+ self.ensure_arp_successful()
+ raise TrafficClientException('End-to-end connectivity cannot be ensured')
- if not self.skip_sleep:
- time.sleep(self.config.generic_poll_sec)
+ def is_udp(self, packet):
+ pkt = Ether(packet['binary'])
+ return UDP in pkt
- raise TrafficClientException('End-to-end connectivity cannot be ensured')
+ def is_mpls(self, packet):
+ pkt = Ether(packet['binary'])
+ return MPLS in pkt
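A minimal sanity sketch of the capture parsing used in ensure_end_to_end for the plain (non-encapsulated) case; it only assumes the standard scapy layer classes:

from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Ether

raw = bytes(Ether(src='00:11:22:33:44:55', dst='ff:ff:ff:ff:ff:ff') / IP() / UDP())
assert raw[6:12] == bytes.fromhex('001122334455')   # bytes 6..12 hold the source MAC
assert UDP in Ether(raw)                            # the check performed by is_udp()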
def ensure_arp_successful(self):
- if not self.gen.resolve_arp():
+ """Resolve all IPs using ARP and throw an exception in case of failure."""
+ dest_macs = self.gen.resolve_arp()
+ if dest_macs:
+ # all dest macs are discovered, save them into the generator config
+ if self.config.vxlan or self.config.mpls:
+ self.generator_config.set_vtep_dest_macs(0, dest_macs[0])
+ self.generator_config.set_vtep_dest_macs(1, dest_macs[1])
+ else:
+ self.generator_config.set_dest_macs(0, dest_macs[0])
+ self.generator_config.set_dest_macs(1, dest_macs[1])
+ else:
raise TrafficClientException('ARP cannot be resolved')
def set_traffic(self, frame_size, bidirectional):
+ """Reconfigure the traffic generator for a new frame size."""
self.run_config['bidirectional'] = bidirectional
self.run_config['l2frame_size'] = frame_size
self.run_config['rates'] = [self.get_per_direction_rate()]
@@ -532,9 +1000,15 @@ class TrafficClient(object):
self.run_config['rates'][idx] = {'rate_pps': self.__convert_rates(rate)['rate_pps']}
self.gen.clear_streamblock()
- self.gen.create_traffic(frame_size, self.run_config['rates'], bidirectional, latency=True)
- def modify_load(self, load):
+ if self.config.no_latency_streams:
+ LOG.info("Latency streams are disabled")
+ # in service mode, we must disable flow stats (e2e=True)
+ self.gen.create_traffic(frame_size, self.run_config['rates'], bidirectional,
+ latency=not self.config.no_latency_streams,
+ e2e=self.runner.service_mode)
+
+ def _modify_load(self, load):
self.current_total_rate = {'rate_percent': str(load)}
rate_per_direction = self.get_per_direction_rate()
@@ -545,6 +1019,7 @@ class TrafficClient(object):
self.run_config['rates'][1] = rate_per_direction
def get_ndr_and_pdr(self):
+ """Start the NDR/PDR iteration and return the results."""
dst = 'Bidirectional' if self.run_config['bidirectional'] else 'Unidirectional'
targets = {}
if self.config.ndr_run:
@@ -587,31 +1062,38 @@ class TrafficClient(object):
return float(dropped_pkts) / total_pkts * 100
def get_stats(self):
- stats = self.gen.get_stats()
- retDict = {'total_tx_rate': stats['total_tx_rate']}
- for port in self.PORTS:
- retDict[port] = {'tx': {}, 'rx': {}}
+ """Collect final stats for previous run."""
+ stats = self.gen.get_stats(self.ifstats)
+ retDict = {'total_tx_rate': stats['total_tx_rate'],
+ 'offered_tx_rate_bps': stats['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_bps': stats['theoretical_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': stats['theoretical_tx_rate_pps']}
+
+ if self.config.periodic_gratuitous_arp:
+ retDict['garp_total_tx_rate'] = stats['garp_total_tx_rate']
tx_keys = ['total_pkts', 'total_pkt_bytes', 'pkt_rate', 'pkt_bit_rate']
rx_keys = tx_keys + ['dropped_pkts']
for port in self.PORTS:
+ port_stats = {'tx': {}, 'rx': {}}
for key in tx_keys:
- retDict[port]['tx'][key] = int(stats[port]['tx'][key])
+ port_stats['tx'][key] = int(stats[port]['tx'][key])
for key in rx_keys:
try:
- retDict[port]['rx'][key] = int(stats[port]['rx'][key])
+ port_stats['rx'][key] = int(stats[port]['rx'][key])
except ValueError:
- retDict[port]['rx'][key] = 0
- retDict[port]['rx']['avg_delay_usec'] = cast_integer(
+ port_stats['rx'][key] = 0
+ port_stats['rx']['avg_delay_usec'] = cast_integer(
stats[port]['rx']['avg_delay_usec'])
- retDict[port]['rx']['min_delay_usec'] = cast_integer(
+ port_stats['rx']['min_delay_usec'] = cast_integer(
stats[port]['rx']['min_delay_usec'])
- retDict[port]['rx']['max_delay_usec'] = cast_integer(
+ port_stats['rx']['max_delay_usec'] = cast_integer(
stats[port]['rx']['max_delay_usec'])
- retDict[port]['drop_rate_percent'] = self.__get_dropped_rate(retDict[port])
+ port_stats['drop_rate_percent'] = self.__get_dropped_rate(port_stats)
+ retDict[str(port)] = port_stats
- ports = sorted(retDict.keys())
+ ports = sorted(list(retDict.keys()), key=str)
if self.run_config['bidirectional']:
retDict['overall'] = {'tx': {}, 'rx': {}}
for key in tx_keys:
@@ -637,12 +1119,28 @@ class TrafficClient(object):
else:
retDict['overall'] = retDict[ports[0]]
retDict['overall']['drop_rate_percent'] = self.__get_dropped_rate(retDict['overall'])
+
+ if 'overall_hdrh' in stats:
+ retDict['overall']['hdrh'] = stats.get('overall_hdrh', None)
+ decoded_histogram = HdrHistogram.decode(retDict['overall']['hdrh'])
+ retDict['overall']['rx']['lat_percentile'] = {}
+ # override min max and avg from hdrh (only if histogram is valid)
+ if decoded_histogram.get_total_count() != 0:
+ retDict['overall']['rx']['min_delay_usec'] = decoded_histogram.get_min_value()
+ retDict['overall']['rx']['max_delay_usec'] = decoded_histogram.get_max_value()
+ retDict['overall']['rx']['avg_delay_usec'] = decoded_histogram.get_mean_value()
+ for percentile in self.config.lat_percentiles:
+ retDict['overall']['rx']['lat_percentile'][percentile] = \
+ decoded_histogram.get_value_at_percentile(percentile)
+ else:
+ for percentile in self.config.lat_percentiles:
+ retDict['overall']['rx']['lat_percentile'][percentile] = 'n/a'
return retDict
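An illustrative sketch of the hdrh handling above, using the same hdrh API; the recorded value is arbitrary and the encoded form is what HdrHistogram.decode() consumes:

from hdrh.histogram import HdrHistogram

histogram = HdrHistogram(1, 60000000, 2)       # 1 usec .. 60 sec, 2 significant digits
histogram.record_value(120)                    # one 120 usec latency sample
encoded = histogram.encode()
decoded = HdrHistogram.decode(encoded)
assert decoded.get_total_count() == 1
print(decoded.get_value_at_percentile(99.9))   # ~120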
def __convert_rates(self, rate):
return utils.convert_rates(self.run_config['l2frame_size'],
rate,
- self.config.generator_config.intf_speed)
+ self.intf_speed)
def __ndr_pdr_found(self, tag, load):
rates = self.__convert_rates({'rate_percent': load})
@@ -652,6 +1150,7 @@ class TrafficClient(object):
def __format_output_stats(self, stats):
for key in self.PORTS + ['overall']:
+ key = str(key)
interface = stats[key]
stats[key] = {
'tx_pkts': interface['tx']['total_pkts'],
@@ -663,10 +1162,26 @@ class TrafficClient(object):
'min_delay_usec': interface['rx']['min_delay_usec'],
}
+ if key == 'overall':
+ if 'hdrh' in interface:
+ stats[key]['hdrh'] = interface.get('hdrh', None)
+ decoded_histogram = HdrHistogram.decode(stats[key]['hdrh'])
+ stats[key]['lat_percentile'] = {}
+ # override min max and avg from hdrh (only if histogram is valid)
+ if decoded_histogram.get_total_count() != 0:
+ stats[key]['min_delay_usec'] = decoded_histogram.get_min_value()
+ stats[key]['max_delay_usec'] = decoded_histogram.get_max_value()
+ stats[key]['avg_delay_usec'] = decoded_histogram.get_mean_value()
+ for percentile in self.config.lat_percentiles:
+ stats[key]['lat_percentile'][percentile] = decoded_histogram.\
+ get_value_at_percentile(percentile)
+ else:
+ for percentile in self.config.lat_percentiles:
+ stats[key]['lat_percentile'][percentile] = 'n/a'
return stats
def __targets_found(self, rate, targets, results):
- for tag, target in targets.iteritems():
+ for tag, target in list(targets.items()):
LOG.info('Found %s (%s) load: %s', tag, target, rate)
self.__ndr_pdr_found(tag, rate)
results[tag]['timestamp_sec'] = time.time()
@@ -702,7 +1217,7 @@ class TrafficClient(object):
# Split target dicts based on the avg drop rate
left_targets = {}
right_targets = {}
- for tag, target in targets.iteritems():
+ for tag, target in list(targets.items()):
if stats['overall']['drop_rate_percent'] <= target:
# record the best possible rate found for this target
results[tag] = rates
@@ -742,8 +1257,22 @@ class TrafficClient(object):
self.__range_search(middle, right, right_targets, results)
def __run_search_iteration(self, rate):
- # set load
- self.modify_load(rate)
+ """Run one iteration at the given rate level.
+
+ rate: the rate to send on each port in percent (0 to 100)
+ """
+ self._modify_load(rate)
+
+ # There used to be an inconsistency in case of interface speed override.
+ # The emulated 'intf_speed' value is unknown to the T-Rex generator, which
+ # refers to the detected line rate for converting relative traffic loads.
+ # Therefore, we need to convert actual rates here, in terms of packets/s.
+
+ for idx, str_rate in enumerate(self.gen.rates):
+ if str_rate.endswith('%'):
+ float_rate = float(str_rate.replace('%', '').strip())
+ pps_rate = self.__convert_rates({'rate_percent': float_rate})['rate_pps']
+ self.gen.rates[idx] = str(pps_rate) + 'pps'
# poll interval stats and collect them
for stats in self.run_traffic():
@@ -751,11 +1280,13 @@ class TrafficClient(object):
time_elapsed_ratio = self.runner.time_elapsed() / self.run_config['duration_sec']
if time_elapsed_ratio >= 1:
self.cancel_traffic()
+ if not self.skip_sleep():
+ time.sleep(self.config.pause_sec)
self.interval_collector.reset()
# get stats from the run
stats = self.runner.client.get_stats()
- current_traffic_config = self.get_traffic_config()
+ current_traffic_config = self._get_traffic_config()
warning = self.compare_tx_rates(current_traffic_config['direction-total']['rate_pps'],
stats['total_tx_rate'])
if warning is not None:
@@ -764,26 +1295,34 @@ class TrafficClient(object):
# save reliable stats from whole iteration
self.iteration_collector.add(stats, current_traffic_config['direction-total']['rate_pps'])
LOG.info('Average drop rate: %f', stats['overall']['drop_rate_percent'])
-
return stats, current_traffic_config['direction-total']
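A worked example of the percent-to-pps conversion performed at the top of this method, assuming a 10 Gbps port and 64-byte L2 frames (preamble, SFD and inter-frame gap add 20 bytes per frame on the wire):

line_rate_bps = 10e9
l2_frame_size = 64
pps_at_100_percent = line_rate_bps / ((l2_frame_size + 20) * 8)
assert round(pps_at_100_percent) == 14880952    # so a 50% load maps to ~7.44 Mpps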
- @staticmethod
- def log_stats(stats):
- report = {
- 'datetime': str(datetime.now()),
- 'tx_packets': stats['overall']['tx']['total_pkts'],
- 'rx_packets': stats['overall']['rx']['total_pkts'],
- 'drop_packets': stats['overall']['rx']['dropped_pkts'],
- 'drop_rate_percent': stats['overall']['drop_rate_percent']
- }
- LOG.info('TX: %(tx_packets)d; '
- 'RX: %(rx_packets)d; '
- 'Dropped: %(drop_packets)d; '
- 'Drop rate: %(drop_rate_percent).4f%%',
- report)
+ def log_stats(self, stats):
+ """Log estimated stats during run."""
+ # Calculate a rolling drop rate based on differential to
+ # the previous reading
+ cur_tx = stats['overall']['tx']['total_pkts']
+ cur_rx = stats['overall']['rx']['total_pkts']
+ delta_tx = cur_tx - self.prev_tx
+ delta_rx = cur_rx - self.prev_rx
+ drops = delta_tx - delta_rx
+ if delta_tx == 0:
+ LOG.info("\x1b[1mConfiguration issue!\x1b[0m (no transmission)")
+ sys.exit(0)
+ drop_rate_pct = 100 * (delta_tx - delta_rx)/delta_tx
+ self.prev_tx = cur_tx
+ self.prev_rx = cur_rx
+ LOG.info('TX: %15s; RX: %15s; (Est.) Dropped: %12s; Drop rate: %8.4f%%',
+ format(cur_tx, ',d'),
+ format(cur_rx, ',d'),
+ format(drops, ',d'),
+ drop_rate_pct)
def run_traffic(self):
+ """Start traffic and return intermediate stats for each interval."""
stats = self.runner.run()
+ self.prev_tx = 0
+ self.prev_rx = 0
while self.runner.is_running:
self.log_stats(stats)
yield stats
@@ -795,16 +1334,10 @@ class TrafficClient(object):
yield stats
def cancel_traffic(self):
+ """Stop traffic."""
self.runner.stop()
- def get_interface(self, port_index, stats):
- port = self.gen.port_handle[port_index]
- tx, rx = 0, 0
- if stats and port in stats:
- tx, rx = int(stats[port]['tx']['total_pkts']), int(stats[port]['rx']['total_pkts'])
- return Interface('traffic-generator', self.tool.lower(), tx, rx)
-
- def get_traffic_config(self):
+ def _get_traffic_config(self):
config = {}
load_total = 0.0
bps_total = 0.0
@@ -835,25 +1368,116 @@ class TrafficClient(object):
# because we want each direction to have the far end RX rates,
# use the far end index (1-idx) to retrieve the RX rates
for idx, key in enumerate(["direction-forward", "direction-reverse"]):
- tx_rate = results["stats"][idx]["tx"]["total_pkts"] / self.config.duration_sec
- rx_rate = results["stats"][1 - idx]["rx"]["total_pkts"] / self.config.duration_sec
+ tx_rate = results["stats"][str(idx)]["tx"]["total_pkts"] / self.config.duration_sec
+ rx_rate = results["stats"][str(1 - idx)]["rx"]["total_pkts"] / self.config.duration_sec
+
+ orig_rate = self.run_config['rates'][idx]
+ if self.config.periodic_gratuitous_arp:
+ orig_rate['rate_pps'] = float(
+ orig_rate['rate_pps']) - self.config.gratuitous_arp_pps
+
r[key] = {
- "orig": self.__convert_rates(self.run_config['rates'][idx]),
+ "orig": self.__convert_rates(orig_rate),
"tx": self.__convert_rates({'rate_pps': tx_rate}),
"rx": self.__convert_rates({'rate_pps': rx_rate})
}
+ if self.config.periodic_gratuitous_arp:
+ r['garp-direction-total'] = {
+ "orig": self.__convert_rates({'rate_pps': self.config.gratuitous_arp_pps * 2}),
+ "tx": self.__convert_rates({'rate_pps': results["stats"]["garp_total_tx_rate"]}),
+ "rx": self.__convert_rates({'rate_pps': 0})
+ }
+
total = {}
for direction in ['orig', 'tx', 'rx']:
total[direction] = {}
for unit in ['rate_percent', 'rate_bps', 'rate_pps']:
- total[direction][unit] = sum([float(x[direction][unit]) for x in r.values()])
+ total[direction][unit] = sum([float(x[direction][unit]) for x in list(r.values())])
r['direction-total'] = total
+
return r
+ def insert_interface_stats(self, pps_list):
+ """Insert interface stats to a list of packet path stats.
+
+ pps_list: a list of packet path stats instances indexed by chain index
+
+ This function will insert the packet path stats for the traffic gen ports 0 and 1
+ with itemized per chain tx/rx counters.
+ There will be as many packet path stats as chains.
+ Each packet path stats will have exactly 2 InterfaceStats for port 0 and port 1
+ self.pps_list:
+ [
+ PacketPathStats(InterfaceStats(chain 0, port 0), InterfaceStats(chain 0, port 1)),
+ PacketPathStats(InterfaceStats(chain 1, port 0), InterfaceStats(chain 1, port 1)),
+ ...
+ ]
+ """
+ def get_if_stats(chain_idx):
+ return [InterfaceStats('p' + str(port), self.tool)
+ for port in range(2)]
+ # keep the list of list of interface stats indexed by the chain id
+ self.ifstats = [get_if_stats(chain_idx)
+ for chain_idx in range(self.config.service_chain_count)]
+ # note that we need to make a copy of the ifs list so that any modification in the
+ # list from pps will not change the list saved in self.ifstats
+ self.pps_list = [PacketPathStats(self.config, list(ifs)) for ifs in self.ifstats]
+ # insert the corresponding pps in the passed list
+ pps_list.extend(self.pps_list)
+
+ def update_interface_stats(self, diff=False):
+ """Update all interface stats.
+
+ diff: if False, simply refresh the interface stats values with latest values
+ if True, diff the interface stats with the latest values
+ Make sure that the interface stats inserted in insert_interface_stats() are updated
+ with proper values.
+ self.ifstats:
+ [
+ [InterfaceStats(chain 0, port 0), InterfaceStats(chain 0, port 1)],
+ [InterfaceStats(chain 1, port 0), InterfaceStats(chain 1, port 1)],
+ ...
+ ]
+ """
+ if diff:
+ stats = self.gen.get_stats(self.ifstats)
+ for chain_idx, ifs in enumerate(self.ifstats):
+ # each ifs has exactly 2 InterfaceStats and 2 Latency instances
+ # corresponding to the
+ # port 0 and port 1 for the given chain_idx
+ # Note that we cannot use self.pps_list[chain_idx].if_stats to pick the
+ # interface stats for the pps because it could have been modified to contain
+ # additional interface stats
+ self.gen.get_stream_stats(stats, ifs, self.pps_list[chain_idx].latencies, chain_idx)
+ # special handling for vxlan:
+ # in case of vxlan, flow stats are not available so all per-chain rx counters will be
+ # zero even when the total rx port counter is non zero.
+ # in that case, fall back to the port level counters (see below)
+ for port in range(2):
+ total_rx = 0
+ for ifs in self.ifstats:
+ total_rx += ifs[port].rx
+ if total_rx == 0:
+ # check if the total port rx from Trex is also zero
+ port_rx = stats[port]['rx']['total_pkts']
+ if port_rx:
+ # the total rx for all chains from port level stats is non zero
+ # which means that the per-chain stats are not available
+ if len(self.ifstats) == 1:
+ # only one chain, simply report the port level rx to the chain rx stats
+ self.ifstats[0][port].rx = port_rx
+ else:
+ for ifs in self.ifstats:
+ # mark this data as unavailable
+ ifs[port].rx = None
+ # pitch in the total rx only in the last chain pps
+ self.ifstats[-1][port].rx_total = port_rx
+
@staticmethod
def compare_tx_rates(required, actual):
+ """Compare the actual TX rate to the required TX rate."""
threshold = 0.9
are_different = False
try:
@@ -872,6 +1496,7 @@ class TrafficClient(object):
return None
def get_per_direction_rate(self):
+ """Get the rate for each direction."""
divisor = 2 if self.run_config['bidirectional'] else 1
if 'rate_percent' in self.current_total_rate:
# don't split rate if it's percentage
@@ -880,6 +1505,7 @@ class TrafficClient(object):
return utils.divide_rate(self.current_total_rate, divisor)
def close(self):
+ """Close this instance."""
try:
self.gen.stop_traffic()
except Exception:
diff --git a/nfvbench/traffic_gen/dummy.py b/nfvbench/traffic_gen/dummy.py
index 788a53f..95147ab 100644
--- a/nfvbench/traffic_gen/dummy.py
+++ b/nfvbench/traffic_gen/dummy.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from traffic_base import AbstractTrafficGenerator
-import traffic_utils as utils
+from nfvbench.log import LOG
+from .traffic_base import AbstractTrafficGenerator
+from . import traffic_utils as utils
class DummyTG(AbstractTrafficGenerator):
@@ -23,33 +24,26 @@ class DummyTG(AbstractTrafficGenerator):
Useful for unit testing without actually generating any traffic.
"""
- def __init__(self, config):
- AbstractTrafficGenerator.__init__(self, config)
+ def __init__(self, traffic_client):
+ AbstractTrafficGenerator.__init__(self, traffic_client)
self.port_handle = []
self.rates = []
self.l2_frame_size = 0
- self.duration_sec = self.config.duration_sec
- self.intf_speed = config.generator_config.intf_speed
+ self.duration_sec = traffic_client.config.duration_sec
+ self.intf_speed = traffic_client.generator_config.intf_speed
self.set_response_curve()
- self.packet_list = [{
- "binary": "01234567890123456789"
- }, {
- "binary": "98765432109876543210"
- }]
+ self.packet_list = None
def get_version(self):
return "0.1"
- def init(self):
- pass
-
def get_tx_pps_dropped_pps(self, tx_rate):
- '''Get actual tx packets based on requested tx rate
+ """Get actual tx packets based on requested tx rate.
:param tx_rate: requested TX rate with unit ('40%', '1Mbps', '1000pps')
:return: the actual TX pps and the dropped pps corresponding to the requested TX rate
- '''
+ """
dr, tx = self.__get_dr_actual_tx(tx_rate)
actual_tx_bps = utils.load_to_bps(tx, self.intf_speed)
avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
@@ -61,14 +55,14 @@ class DummyTG(AbstractTrafficGenerator):
return int(tx_packets), int(dropped)
def set_response_curve(self, lr_dr=0, ndr=100, max_actual_tx=100, max_11_tx=100):
- '''Set traffic gen response characteristics
+ """Set traffic gen response characteristics.
Specifies the drop rate curve and the actual TX curve
:param float lr_dr: The actual drop rate at TX line rate (in %, 0..100)
:param float ndr: The true NDR (0 packet drop) in % (0..100) of line rate
:param float max_actual_tx: highest actual TX when requested TX is 100%
:param float max_11_tx: highest requested TX that results in same actual TX
- '''
+ """
self.target_ndr = ndr
if ndr < 100:
self.dr_slope = float(lr_dr) / (100 - ndr)
@@ -82,10 +76,11 @@ class DummyTG(AbstractTrafficGenerator):
self.tx_slope = 0
def __get_dr_actual_tx(self, requested_tx_rate):
- '''Get drop rate at given requested tx rate
+ """Get drop rate at given requested tx rate.
+
:param float requested_tx_rate: requested tx rate in % (0..100)
:return: the drop rate and actual tx rate at that requested_tx_rate in % (0..100)
- '''
+ """
if requested_tx_rate <= self.max_11_tx:
actual_tx = requested_tx_rate
else:
@@ -97,24 +92,18 @@ class DummyTG(AbstractTrafficGenerator):
return dr, actual_tx
def connect(self):
- ports = list(self.config.generator_config.ports)
+ ports = list(self.traffic_client.generator_config.ports)
self.port_handle = ports
- def is_arp_successful(self):
- return True
-
- def config_interface(self):
- pass
-
- def create_traffic(self, l2frame_size, rates, bidirectional, latency=True):
+ def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
self.rates = [utils.to_rate_str(rate) for rate in rates]
self.l2_frame_size = l2frame_size
def clear_streamblock(self):
pass
- def get_stats(self):
- '''Get stats from current run.
+ def get_stats(self, ifstats=None):
+ """Get stats from current run.
The binary search mainly looks at 2 results to make the decision:
actual tx packets
@@ -122,7 +111,7 @@ class DummyTG(AbstractTrafficGenerator):
From the Requested TX rate - we get the Actual TX rate and the RX drop rate
From the Run duration and actual TX rate - we get the actual total tx packets
From the Actual tx packets and RX drop rate - we get the RX dropped packets
- '''
+ """
result = {}
total_tx_pps = 0
@@ -158,10 +147,31 @@ class DummyTG(AbstractTrafficGenerator):
total_tx_pps += tx_pps
# actual total tx rate in pps
result['total_tx_rate'] = total_tx_pps
+ # actual offered tx rate in bps
+ avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
+ total_tx_bps = utils.pps_to_bps(total_tx_pps, avg_packet_size)
+ result['offered_tx_rate_bps'] = total_tx_bps
+
+ result.update(self.get_theoretical_rates(avg_packet_size))
return result
+ def get_stream_stats(self, tg_stats, if_stats, latencies, chain_idx):
+ for port in range(2):
+ if_stats[port].tx = 1000
+ if_stats[port].rx = 1000
+ latencies[port].min_usec = 10
+ latencies[port].max_usec = 100
+ latencies[port].avg_usec = 50
+
def get_macs(self):
- return ['00.00.00.00.00.01', '00.00.00.00.00.02']
+ return ['00:00:00:00:00:01', '00:00:00:00:00:02']
+
+ def get_port_speed_gbps(self):
+ """Return the local port speeds.
+
+ return: a list of speed in Gbps indexed by the port#
+ return: a list of speeds in Gbps indexed by the port#
+ return [10, 10]
def clear_stats(self):
pass
@@ -170,7 +180,17 @@ class DummyTG(AbstractTrafficGenerator):
pass
def fetch_capture_packets(self):
- pass
+ def _get_packet_capture(mac):
+ # convert text to binary
+ src_mac = bytearray.fromhex(mac.replace(':', '')).decode()
+ return {'binary': bytes('SSSSSS' + src_mac, 'ascii')}
+
+ # for packet capture, generate 2*scc random packets
+ # normally we should generate packets coming from the right dest macs
+ self.packet_list = []
+ for dest_macs in self.traffic_client.generator_config.get_dest_macs():
+ for mac in dest_macs:
+ self.packet_list.append(_get_packet_capture(mac))
def stop_traffic(self):
pass
@@ -187,5 +207,14 @@ class DummyTG(AbstractTrafficGenerator):
def set_mode(self):
pass
+ def set_service_mode(self, enabled=True):
+ pass
+
def resolve_arp(self):
- return True
+ """Resolve ARP sucessfully."""
+ def get_macs(port, scc):
+ return ['00:00:00:00:%02x:%02x' % (port, chain) for chain in range(scc)]
+ scc = self.traffic_client.generator_config.service_chain_count
+ res = [get_macs(port, scc) for port in range(2)]
+ LOG.info('Dummy TG ARP: %s', str(res))
+ return res
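As a quick illustration of the data shape returned by the new dummy resolve_arp() above (a per-port list of per-chain dest MACs), here is a small sketch, not part of the patch, that replays the list comprehension for an assumed service_chain_count of 2:

# Illustrative sketch only, assuming service_chain_count = 2
scc = 2
res = [['00:00:00:00:%02x:%02x' % (port, chain) for chain in range(scc)]
       for port in range(2)]
# res == [['00:00:00:00:00:00', '00:00:00:00:00:01'],   # port 0, chains 0..1
#         ['00:00:00:00:01:00', '00:00:00:00:01:01']]   # port 1, chains 0..1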
diff --git a/nfvbench/traffic_gen/traffic_base.py b/nfvbench/traffic_gen/traffic_base.py
index 81537b3..30aec6e 100644
--- a/nfvbench/traffic_gen/traffic_base.py
+++ b/nfvbench/traffic_gen/traffic_base.py
@@ -13,30 +13,67 @@
# under the License.
import abc
+import sys
from nfvbench.log import LOG
-import traffic_utils
-
+from . import traffic_utils
+from hdrh.histogram import HdrHistogram
+from functools import reduce
+
+
+class Latency(object):
+ """A class to hold latency data."""
+
+ def __init__(self, latency_list=None):
+ """Create a latency instance.
+
+ latency_list: aggregate all latency values from list if not None
+ """
+ self.min_usec = sys.maxsize
+ self.max_usec = 0
+ self.avg_usec = 0
+ self.hdrh = None
+ if latency_list:
+ hdrh_list = []
+ for lat in latency_list:
+ if lat.available():
+ self.min_usec = min(self.min_usec, lat.min_usec)
+ self.max_usec = max(self.max_usec, lat.max_usec)
+ self.avg_usec += lat.avg_usec
+ if lat.hdrh_available():
+ hdrh_list.append(HdrHistogram.decode(lat.hdrh))
+
+ # aggregate histograms if any
+ if hdrh_list:
+ def add_hdrh(x, y):
+ x.add(y)
+ return x
+ decoded_hdrh = reduce(add_hdrh, hdrh_list)
+ self.hdrh = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
+
+ # round to nearest usec
+ self.avg_usec = int(round(float(self.avg_usec) / len(latency_list)))
+
+ def available(self):
+ """Return True if latency information is available."""
+ return self.min_usec != sys.maxsize
+
+ def hdrh_available(self):
+ """Return True if latency histogram information is available."""
+ return self.hdrh is not None
class TrafficGeneratorException(Exception):
- pass
-
+ """Exception for traffic generator."""
class AbstractTrafficGenerator(object):
- def __init__(self, config):
- self.config = config
- self.imix_l2_sizes = [64, 594, 1518]
- self.imix_ratios = [7, 4, 1]
- self.imix_avg_l2_size = 0
- self.adjust_imix_min_size(64)
- @abc.abstractmethod
- def get_version(self):
- # Must be implemented by sub classes
- return None
+ def __init__(self, traffic_client):
+ self.traffic_client = traffic_client
+ self.generator_config = traffic_client.generator_config
+ self.config = traffic_client.config
@abc.abstractmethod
- def init(self):
+ def get_version(self):
# Must be implemented by sub classes
return None
@@ -46,32 +83,23 @@ class AbstractTrafficGenerator(object):
return None
@abc.abstractmethod
- def config_interface(self):
- # Must be implemented by sub classes
- return None
-
- @abc.abstractmethod
- def create_traffic(self, l2frame_size, rates, bidirectional, latency=True):
+ def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
# Must be implemented by sub classes
return None
def modify_rate(self, rate, reverse):
+ """Change the rate per port.
+
+ rate: new rate in % (0 to 100)
+ reverse: 0 for port 0, 1 for port 1
+ """
port_index = int(reverse)
port = self.port_handle[port_index]
self.rates[port_index] = traffic_utils.to_rate_str(rate)
- LOG.info('Modified traffic stream for %s, new rate=%s.', port,
- traffic_utils.to_rate_str(rate))
-
- def modify_traffic(self):
- # Must be implemented by sub classes
- return None
+ LOG.info('Modified traffic stream for port %s, new rate=%s.', port, self.rates[port_index])
@abc.abstractmethod
- def get_stats(self):
- # Must be implemented by sub classes
- return None
-
- def clear_traffic(self):
+ def get_stats(self, ifstats):
# Must be implemented by sub classes
return None
@@ -87,13 +115,53 @@ class AbstractTrafficGenerator(object):
@abc.abstractmethod
def cleanup(self):
- # Must be implemented by sub classes
+ """Cleanup the traffic generator."""
return None
- def adjust_imix_min_size(self, min_size):
- # assume the min size is always the first entry
- self.imix_l2_sizes[0] = min_size
- self.imix_avg_l2_size = sum(
- [1.0 * imix[0] * imix[1] for imix in zip(self.imix_l2_sizes, self.imix_ratios)]) / sum(
- self.imix_ratios)
- traffic_utils.imix_avg_l2_size = self.imix_avg_l2_size
+ def clear_streamblock(self):
+ """Clear all streams from the traffic generator."""
+
+ @abc.abstractmethod
+ def resolve_arp(self):
+ """Resolve all configured remote IP addresses.
+
+ return: None if ARP failed to resolve for all IP addresses
+ else a dict of lists of dest macs indexed by port#
+ the dest macs in the list are indexed by the chain id
+ """
+
+ @abc.abstractmethod
+ def get_macs(self):
+ """Return the local port MAC addresses.
+
+ return: a list of MAC addresses indexed by the port#
+ """
+
+ @abc.abstractmethod
+ def get_port_speed_gbps(self):
+ """Return the local port speeds.
+
+ return: a list of speeds in Gbps indexed by the port#
+ """
+
+ def get_theoretical_rates(self, avg_packet_size):
+
+ result = {}
+
+ # actual interface speed? (may be a virtual override)
+ intf_speed = self.config.intf_speed_used
+
+ if hasattr(self.config, 'user_info') and self.config.user_info is not None:
+ if "extra_encapsulation_bytes" in self.config.user_info:
+ frame_size_full_encapsulation = avg_packet_size + self.config.user_info[
+ "extra_encapsulation_bytes"]
+ result['theoretical_tx_rate_pps'] = traffic_utils.bps_to_pps(
+ intf_speed, frame_size_full_encapsulation) * 2
+ result['theoretical_tx_rate_bps'] = traffic_utils.pps_to_bps(
+ result['theoretical_tx_rate_pps'], avg_packet_size)
+ else:
+ result['theoretical_tx_rate_pps'] = traffic_utils.bps_to_pps(intf_speed,
+ avg_packet_size) * 2
+ result['theoretical_tx_rate_bps'] = traffic_utils.pps_to_bps(
+ result['theoretical_tx_rate_pps'], avg_packet_size)
+ return result
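To make the intended use of the new Latency aggregation concrete, here is a minimal sketch (not part of the patch) that aggregates two per-port results whose min/max/avg fields were already filled in by a driver; the values are arbitrary:

# Illustrative sketch; import path follows the file location in this patch
from nfvbench.traffic_gen.traffic_base import Latency

lat0, lat1 = Latency(), Latency()
lat0.min_usec, lat0.max_usec, lat0.avg_usec = 10, 100, 50
lat1.min_usec, lat1.max_usec, lat1.avg_usec = 20, 80, 40
overall = Latency([lat0, lat1])
# overall.min_usec == 10, overall.max_usec == 100, overall.avg_usec == 45
# overall.hdrh stays None because neither input carries an encoded HdrHistogram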
diff --git a/nfvbench/traffic_gen/traffic_utils.py b/nfvbench/traffic_gen/traffic_utils.py
index 4a7f855..4366a6c 100644
--- a/nfvbench/traffic_gen/traffic_utils.py
+++ b/nfvbench/traffic_gen/traffic_utils.py
@@ -14,42 +14,66 @@
import bitmath
-from nfvbench.utils import multiplier_map
-imix_avg_l2_size = None
+# IMIX frame sizes including the 4-byte FCS field
+IMIX_L2_SIZES = [64, 594, 1518]
+IMIX_RATIOS = [7, 4, 1]
+# weighted average L2 frame size including the 4-byte FCS
+IMIX_AVG_L2_FRAME_SIZE = sum(
+ [1.0 * imix[0] * imix[1] for imix in zip(IMIX_L2_SIZES, IMIX_RATIOS)]) / sum(IMIX_RATIOS)
+multiplier_map = {
+ 'K': 1000,
+ 'M': 1000000,
+ 'G': 1000000000
+}
def convert_rates(l2frame_size, rate, intf_speed):
+ """Convert a given rate unit into the other rate units.
+
+ l2frame_size: size of the L2 frame in bytes (includes 32-bit FCS) or 'IMIX'
+ rate: a dict that has at least one of the following keys:
+ 'rate_pps', 'rate_bps', 'rate_percent'
+ with the corresponding input value
+ intf_speed: the line rate speed in bits per second
+ """
avg_packet_size = get_average_packet_size(l2frame_size)
if 'rate_pps' in rate:
+ # input = packets/sec
initial_rate_type = 'rate_pps'
pps = rate['rate_pps']
bps = pps_to_bps(pps, avg_packet_size)
load = bps_to_load(bps, intf_speed)
elif 'rate_bps' in rate:
+ # input = bits per second
initial_rate_type = 'rate_bps'
bps = rate['rate_bps']
load = bps_to_load(bps, intf_speed)
pps = bps_to_pps(bps, avg_packet_size)
elif 'rate_percent' in rate:
+ # input = percentage of the line rate (between 0.0 and 100.0)
initial_rate_type = 'rate_percent'
load = rate['rate_percent']
bps = load_to_bps(load, intf_speed)
pps = bps_to_pps(bps, avg_packet_size)
else:
raise Exception('Traffic config needs to have a rate type key')
-
return {
'initial_rate_type': initial_rate_type,
- 'rate_pps': int(pps),
+ 'rate_pps': int(float(pps)),
'rate_percent': load,
- 'rate_bps': int(bps)
+ 'rate_bps': int(float(bps))
}
def get_average_packet_size(l2frame_size):
+ """Retrieve the average L2 frame size
+
+ l2frame_size: an L2 frame size in bytes (including FCS) or 'IMIX'
+ return: average l2 frame size inlcuding the 32-bit FCS
+ """
if l2frame_size.upper() == 'IMIX':
- return imix_avg_l2_size
+ return IMIX_AVG_L2_FRAME_SIZE
return float(l2frame_size)
@@ -92,23 +116,22 @@ def parse_rate_str(rate_str):
rate_pps = rate_pps[:-1]
except KeyError:
multiplier = 1
- rate_pps = int(rate_pps.strip()) * multiplier
+ rate_pps = int(float(rate_pps.strip()) * multiplier)
if rate_pps <= 0:
raise Exception('%s is out of valid range' % rate_str)
return {'rate_pps': str(rate_pps)}
- elif rate_str.endswith('ps'):
+ if rate_str.endswith('ps'):
rate = rate_str.replace('ps', '').strip()
bit_rate = bitmath.parse_string(rate).bits
if bit_rate <= 0:
raise Exception('%s is out of valid range' % rate_str)
return {'rate_bps': str(int(bit_rate))}
- elif rate_str.endswith('%'):
+ if rate_str.endswith('%'):
rate_percent = float(rate_str.replace('%', '').strip())
if rate_percent <= 0 or rate_percent > 100.0:
raise Exception('%s is out of valid range (must be 1-100%%)' % rate_str)
return {'rate_percent': str(rate_percent)}
- else:
- raise Exception('Unknown rate string format %s' % rate_str)
+ raise Exception('Unknown rate string format %s' % rate_str)
def get_load_from_rate(rate_str, avg_frame_size=64, line_rate='10Gbps'):
'''From any rate string (with unit) return the corresponding load (in % unit)
@@ -151,10 +174,10 @@ def to_rate_str(rate):
if 'rate_pps' in rate:
pps = rate['rate_pps']
return '{}pps'.format(pps)
- elif 'rate_bps' in rate:
+ if 'rate_bps' in rate:
bps = rate['rate_bps']
return '{}bps'.format(bps)
- elif 'rate_percent' in rate:
+ if 'rate_percent' in rate:
load = rate['rate_percent']
return '{}%'.format(load)
assert False
@@ -164,7 +187,7 @@ def to_rate_str(rate):
def nan_replace(d):
"""Replaces every occurence of 'N/A' with float nan."""
- for k, v in d.iteritems():
+ for k, v in d.items():
if isinstance(v, dict):
nan_replace(v)
elif v == 'N/A':
@@ -179,5 +202,5 @@ def mac_to_int(mac):
def int_to_mac(i):
"""Converts integer representation of MAC address to hex string."""
mac = format(i, 'x').zfill(12)
- blocks = [mac[x:x + 2] for x in xrange(0, len(mac), 2)]
+ blocks = [mac[x:x + 2] for x in range(0, len(mac), 2)]
return ':'.join(blocks)
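Two quick sanity checks on the constants and parsing introduced above: the weighted IMIX average works out to (7*64 + 4*594 + 1*1518) / 12 ≈ 361.83 bytes, and parse_rate_str() accepts the three rate notations sketched below (expected outputs assume the usual nfvbench semantics, where the 'pps' suffix is matched before 'ps'):

# Illustrative usage sketch, not part of the patch
from nfvbench.traffic_gen import traffic_utils as utils

avg = (7 * 64 + 4 * 594 + 1 * 1518) / (7 + 4 + 1)   # 361.833... == IMIX_AVG_L2_FRAME_SIZE
print(utils.parse_rate_str('1Mpps'))     # expected: {'rate_pps': '1000000'}
print(utils.parse_rate_str('10Gbps'))    # expected: {'rate_bps': '10000000000'}
print(utils.parse_rate_str('50%'))       # expected: {'rate_percent': '50.0'}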
diff --git a/nfvbench/traffic_gen/trex.py b/nfvbench/traffic_gen/trex.py
deleted file mode 100644
index c468802..0000000
--- a/nfvbench/traffic_gen/trex.py
+++ /dev/null
@@ -1,482 +0,0 @@
-# Copyright 2016 Cisco Systems, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import random
-import time
-import traceback
-
-from collections import defaultdict
-from itertools import count
-from nfvbench.log import LOG
-from nfvbench.specs import ChainType
-from nfvbench.traffic_server import TRexTrafficServer
-from nfvbench.utils import cast_integer
-from nfvbench.utils import timeout
-from nfvbench.utils import TimeoutError
-from traffic_base import AbstractTrafficGenerator
-from traffic_base import TrafficGeneratorException
-import traffic_utils as utils
-
-# pylint: disable=import-error
-from trex_stl_lib.api import CTRexVmInsFixHwCs
-from trex_stl_lib.api import Dot1Q
-from trex_stl_lib.api import Ether
-from trex_stl_lib.api import IP
-from trex_stl_lib.api import STLClient
-from trex_stl_lib.api import STLError
-from trex_stl_lib.api import STLFlowLatencyStats
-from trex_stl_lib.api import STLFlowStats
-from trex_stl_lib.api import STLPktBuilder
-from trex_stl_lib.api import STLScVmRaw
-from trex_stl_lib.api import STLStream
-from trex_stl_lib.api import STLTXCont
-from trex_stl_lib.api import STLVmFixChecksumHw
-from trex_stl_lib.api import STLVmFlowVar
-from trex_stl_lib.api import STLVmFlowVarRepetableRandom
-from trex_stl_lib.api import STLVmWrFlowVar
-from trex_stl_lib.api import UDP
-from trex_stl_lib.services.trex_stl_service_arp import STLServiceARP
-
-
-# pylint: enable=import-error
-
-
-class TRex(AbstractTrafficGenerator):
- LATENCY_PPS = 1000
-
- def __init__(self, runner):
- AbstractTrafficGenerator.__init__(self, runner)
- self.client = None
- self.id = count()
- self.latencies = defaultdict(list)
- self.stream_ids = defaultdict(list)
- self.port_handle = []
- self.streamblock = defaultdict(list)
- self.rates = []
- self.arps = {}
- self.capture_id = None
- self.packet_list = []
-
- def get_version(self):
- return self.client.get_server_version()
-
- def extract_stats(self, in_stats):
- """Extract stats from dict returned by Trex API.
-
- :param in_stats: dict as returned by TRex api
- """
- utils.nan_replace(in_stats)
- LOG.debug(in_stats)
-
- result = {}
- # port_handles should have only 2 elements: [0, 1]
- # so (1 - ph) will be the index for the far end port
- for ph in self.port_handle:
- stats = in_stats[ph]
- far_end_stats = in_stats[1 - ph]
- result[ph] = {
- 'tx': {
- 'total_pkts': cast_integer(stats['opackets']),
- 'total_pkt_bytes': cast_integer(stats['obytes']),
- 'pkt_rate': cast_integer(stats['tx_pps']),
- 'pkt_bit_rate': cast_integer(stats['tx_bps'])
- },
- 'rx': {
- 'total_pkts': cast_integer(stats['ipackets']),
- 'total_pkt_bytes': cast_integer(stats['ibytes']),
- 'pkt_rate': cast_integer(stats['rx_pps']),
- 'pkt_bit_rate': cast_integer(stats['rx_bps']),
- # how many pkts were dropped in RX direction
- # need to take the tx counter on the far end port
- 'dropped_pkts': cast_integer(
- far_end_stats['opackets'] - stats['ipackets'])
- }
- }
-
- lat = self.__combine_latencies(in_stats, ph)
- result[ph]['rx']['max_delay_usec'] = cast_integer(
- lat['total_max']) if 'total_max' in lat else float('nan')
- result[ph]['rx']['min_delay_usec'] = cast_integer(
- lat['total_min']) if 'total_min' in lat else float('nan')
- result[ph]['rx']['avg_delay_usec'] = cast_integer(
- lat['average']) if 'average' in lat else float('nan')
- total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
- result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
- return result
-
- def __combine_latencies(self, in_stats, port_handle):
- """Traverses TRex result dictionary and combines chosen latency stats."""
- if not self.latencies[port_handle]:
- return {}
-
- result = defaultdict(float)
- result['total_min'] = float("inf")
- for lat_id in self.latencies[port_handle]:
- lat = in_stats['latency'][lat_id]
- result['dropped_pkts'] += lat['err_cntrs']['dropped']
- result['total_max'] = max(lat['latency']['total_max'], result['total_max'])
- result['total_min'] = min(lat['latency']['total_min'], result['total_min'])
- result['average'] += lat['latency']['average']
-
- result['average'] /= len(self.latencies[port_handle])
-
- return result
-
- def create_pkt(self, stream_cfg, l2frame_size):
-
- pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
- if stream_cfg['vlan_tag'] is not None:
- # 50 = 14 (Ethernet II) + 4 (Vlan tag) + 4 (CRC Checksum) + 20 (IPv4) + 8 (UDP)
- pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
- l2payload_size = int(l2frame_size) - 50
- else:
- # 46 = 14 (Ethernet II) + 4 (CRC Checksum) + 20 (IPv4) + 8 (UDP)
- l2payload_size = int(l2frame_size) - 46
- payload = 'x' * l2payload_size
- udp_args = {}
- if stream_cfg['udp_src_port']:
- udp_args['sport'] = int(stream_cfg['udp_src_port'])
- if stream_cfg['udp_dst_port']:
- udp_args['dport'] = int(stream_cfg['udp_dst_port'])
- pkt_base /= IP() / UDP(**udp_args)
-
- if stream_cfg['ip_addrs_step'] == 'random':
- src_fv = STLVmFlowVarRepetableRandom(
- name="ip_src",
- min_value=stream_cfg['ip_src_addr'],
- max_value=stream_cfg['ip_src_addr_max'],
- size=4,
- seed=random.randint(0, 32767),
- limit=stream_cfg['ip_src_count'])
- dst_fv = STLVmFlowVarRepetableRandom(
- name="ip_dst",
- min_value=stream_cfg['ip_dst_addr'],
- max_value=stream_cfg['ip_dst_addr_max'],
- size=4,
- seed=random.randint(0, 32767),
- limit=stream_cfg['ip_dst_count'])
- else:
- src_fv = STLVmFlowVar(
- name="ip_src",
- min_value=stream_cfg['ip_src_addr'],
- max_value=stream_cfg['ip_src_addr'],
- size=4,
- op="inc",
- step=stream_cfg['ip_addrs_step'])
- dst_fv = STLVmFlowVar(
- name="ip_dst",
- min_value=stream_cfg['ip_dst_addr'],
- max_value=stream_cfg['ip_dst_addr_max'],
- size=4,
- op="inc",
- step=stream_cfg['ip_addrs_step'])
-
- vm_param = [
- src_fv,
- STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP.src"),
- dst_fv,
- STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP.dst"),
- STLVmFixChecksumHw(l3_offset="IP",
- l4_offset="UDP",
- l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP)
- ]
-
- return STLPktBuilder(pkt=pkt_base / payload, vm=STLScVmRaw(vm_param))
-
- def generate_streams(self, port_handle, stream_cfg, l2frame, isg=0.0, latency=True):
- idx_lat = None
- streams = []
- if l2frame == 'IMIX':
- min_size = 64 if stream_cfg['vlan_tag'] is None else 68
- self.adjust_imix_min_size(min_size)
- for t, (ratio, l2_frame_size) in enumerate(zip(self.imix_ratios, self.imix_l2_sizes)):
- pkt = self.create_pkt(stream_cfg, l2_frame_size)
- streams.append(STLStream(packet=pkt,
- isg=0.1 * t,
- flow_stats=STLFlowStats(
- pg_id=self.stream_ids[port_handle]),
- mode=STLTXCont(pps=ratio)))
-
- if latency:
- idx_lat = self.id.next()
- pkt = self.create_pkt(stream_cfg, self.imix_avg_l2_size)
- sl = STLStream(packet=pkt,
- isg=isg,
- flow_stats=STLFlowLatencyStats(pg_id=idx_lat),
- mode=STLTXCont(pps=self.LATENCY_PPS))
- streams.append(sl)
- else:
- pkt = self.create_pkt(stream_cfg, l2frame)
- streams.append(STLStream(packet=pkt,
- flow_stats=STLFlowStats(pg_id=self.stream_ids[port_handle]),
- mode=STLTXCont()))
-
- if latency:
- idx_lat = self.id.next()
- streams.append(STLStream(packet=pkt,
- flow_stats=STLFlowLatencyStats(pg_id=idx_lat),
- mode=STLTXCont(pps=self.LATENCY_PPS)))
-
- if latency:
- self.latencies[port_handle].append(idx_lat)
-
- return streams
-
- def init(self):
- pass
-
- @timeout(5)
- def __connect(self, client):
- client.connect()
-
- def __connect_after_start(self):
- # after start, Trex may take a bit of time to initialize
- # so we need to retry a few times
- for it in xrange(self.config.generic_retry_count):
- try:
- time.sleep(1)
- self.client.connect()
- break
- except Exception as ex:
- if it == (self.config.generic_retry_count - 1):
- raise ex
- LOG.info("Retrying connection to TRex (%s)...", ex.message)
-
- def connect(self):
- LOG.info("Connecting to TRex...")
- server_ip = self.config.generator_config.ip
-
- # Connect to TRex server
- self.client = STLClient(server=server_ip)
- try:
- self.__connect(self.client)
- except (TimeoutError, STLError) as e:
- if server_ip == '127.0.0.1':
- try:
- self.__start_server()
- self.__connect_after_start()
- except (TimeoutError, STLError) as e:
- LOG.error('Cannot connect to TRex')
- LOG.error(traceback.format_exc())
- logpath = '/tmp/trex.log'
- if os.path.isfile(logpath):
- # Wait for TRex to finish writing error message
- last_size = 0
- for _ in xrange(self.config.generic_retry_count):
- size = os.path.getsize(logpath)
- if size == last_size:
- # probably not writing anymore
- break
- last_size = size
- time.sleep(1)
- with open(logpath, 'r') as f:
- message = f.read()
- else:
- message = e.message
- raise TrafficGeneratorException(message)
- else:
- raise TrafficGeneratorException(e.message)
-
- ports = list(self.config.generator_config.ports)
- self.port_handle = ports
- # Prepare the ports
- self.client.reset(ports)
-
- def set_mode(self):
- if self.config.service_chain == ChainType.EXT and not self.config.no_arp:
- self.__set_l3_mode()
- else:
- self.__set_l2_mode()
-
- def __set_l3_mode(self):
- self.client.set_service_mode(ports=self.port_handle, enabled=True)
- for port, device in zip(self.port_handle, self.config.generator_config.devices):
- try:
- self.client.set_l3_mode(port=port,
- src_ipv4=device.tg_gateway_ip,
- dst_ipv4=device.dst.gateway_ip,
- vlan=device.vlan_tag if device.vlan_tagging else None)
- except STLError:
- # TRex tries to resolve ARP already, doesn't have to be successful yet
- continue
- self.client.set_service_mode(ports=self.port_handle, enabled=False)
-
- def __set_l2_mode(self):
- self.client.set_service_mode(ports=self.port_handle, enabled=True)
- for port, device in zip(self.port_handle, self.config.generator_config.devices):
- for cfg in device.get_stream_configs(self.config.generator_config.service_chain):
- self.client.set_l2_mode(port=port, dst_mac=cfg['mac_dst'])
- self.client.set_service_mode(ports=self.port_handle, enabled=False)
-
- def __start_server(self):
- server = TRexTrafficServer()
- server.run_server(self.config.generator_config, self.config.vlan_tagging)
-
- def resolve_arp(self):
- self.client.set_service_mode(ports=self.port_handle)
- LOG.info('Polling ARP until successful')
- resolved = 0
- attempt = 0
- for port, device in zip(self.port_handle, self.config.generator_config.devices):
- ctx = self.client.create_service_ctx(port=port)
-
- arps = [
- STLServiceARP(ctx,
- src_ip=cfg['ip_src_tg_gw'],
- dst_ip=cfg['mac_discovery_gw'],
- vlan=device.vlan_tag if device.vlan_tagging else None)
- for cfg in device.get_stream_configs(self.config.generator_config.service_chain)
- ]
-
- for _ in xrange(self.config.generic_retry_count):
- attempt += 1
- try:
- ctx.run(arps)
- except STLError:
- LOG.error(traceback.format_exc())
- continue
-
- self.arps[port] = [arp.get_record().dst_mac for arp in arps
- if arp.get_record().dst_mac is not None]
-
- if len(self.arps[port]) == self.config.service_chain_count:
- resolved += 1
- LOG.info('ARP resolved successfully for port %s', port)
- break
- else:
- failed = [arp.get_record().dst_ip for arp in arps
- if arp.get_record().dst_mac is None]
- LOG.info('Retrying ARP for: %s (%d / %d)',
- failed, attempt, self.config.generic_retry_count)
- time.sleep(self.config.generic_poll_sec)
-
- self.client.set_service_mode(ports=self.port_handle, enabled=False)
- return resolved == len(self.port_handle)
-
- def config_interface(self):
- pass
-
- def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
- """Check if rate provided by user is above requirements. Applies only if latency is True."""
- intf_speed = self.config.generator_config.intf_speed
- if latency:
- if bidirectional:
- mult = 2
- total_rate = 0
- for rate in rates:
- r = utils.convert_rates(l2frame_size, rate, intf_speed)
- total_rate += int(r['rate_pps'])
- else:
- mult = 1
- total_rate = utils.convert_rates(l2frame_size, rates[0], intf_speed)
- # rate must be enough for latency stream and at least 1 pps for base stream per chain
- required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
- result = utils.convert_rates(l2frame_size,
- {'rate_pps': required_rate},
- intf_speed * mult)
- result['result'] = total_rate >= required_rate
- return result
-
- return {'result': True}
-
- def create_traffic(self, l2frame_size, rates, bidirectional, latency=True):
- r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
- if not r['result']:
- raise TrafficGeneratorException(
- 'Required rate in total is at least one of: \n{pps}pps \n{bps}bps \n{load}%.'
- .format(pps=r['rate_pps'],
- bps=r['rate_bps'],
- load=r['rate_percent']))
-
- stream_cfgs = [d.get_stream_configs(self.config.generator_config.service_chain)
- for d in self.config.generator_config.devices]
- self.rates = [utils.to_rate_str(rate) for rate in rates]
-
- for ph in self.port_handle:
- # generate one pg_id for each direction
- self.stream_ids[ph] = self.id.next()
-
- for i, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
- if self.config.service_chain == ChainType.EXT and not self.config.no_arp:
- fwd_stream_cfg['mac_dst'] = self.arps[self.port_handle[0]][i]
- rev_stream_cfg['mac_dst'] = self.arps[self.port_handle[1]][i]
-
- self.streamblock[0].extend(self.generate_streams(self.port_handle[0],
- fwd_stream_cfg,
- l2frame_size,
- latency=latency))
- if len(self.rates) > 1:
- self.streamblock[1].extend(self.generate_streams(self.port_handle[1],
- rev_stream_cfg,
- l2frame_size,
- isg=10.0,
- latency=bidirectional and latency))
-
- for ph in self.port_handle:
- self.client.add_streams(self.streamblock[ph], ports=ph)
- LOG.info('Created traffic stream for port %s.', ph)
-
- def clear_streamblock(self):
- self.streamblock = defaultdict(list)
- self.latencies = defaultdict(list)
- self.stream_ids = defaultdict(list)
- self.rates = []
- self.client.reset(self.port_handle)
- LOG.info('Cleared all existing streams.')
-
- def get_stats(self):
- stats = self.client.get_stats()
- return self.extract_stats(stats)
-
- def get_macs(self):
- return [self.client.get_port_attr(port=port)['src_mac'] for port in self.port_handle]
-
- def clear_stats(self):
- if self.port_handle:
- self.client.clear_stats()
-
- def start_traffic(self):
- for port, rate in zip(self.port_handle, self.rates):
- self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
-
- def stop_traffic(self):
- self.client.stop(ports=self.port_handle)
-
- def start_capture(self):
- if self.capture_id:
- self.stop_capture()
- self.client.set_service_mode(ports=self.port_handle)
- self.capture_id = self.client.start_capture(rx_ports=self.port_handle)
-
- def fetch_capture_packets(self):
- if self.capture_id:
- self.packet_list = []
- self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
- output=self.packet_list)
-
- def stop_capture(self):
- if self.capture_id:
- self.client.stop_capture(capture_id=self.capture_id['id'])
- self.capture_id = None
- self.client.set_service_mode(ports=self.port_handle, enabled=False)
-
- def cleanup(self):
- if self.client:
- try:
- self.client.reset(self.port_handle)
- self.client.disconnect()
- except STLError:
- # TRex does not like a reset while in disconnected state
- pass
diff --git a/nfvbench/traffic_gen/trex_gen.py b/nfvbench/traffic_gen/trex_gen.py
new file mode 100644
index 0000000..dff72ac
--- /dev/null
+++ b/nfvbench/traffic_gen/trex_gen.py
@@ -0,0 +1,1208 @@
+# Copyright 2016 Cisco Systems, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Driver module for TRex traffic generator."""
+
+import math
+import os
+import sys
+import random
+import time
+import traceback
+from functools import reduce
+
+from itertools import count
+# pylint: disable=import-error
+from scapy.contrib.mpls import MPLS # flake8: noqa
+# pylint: enable=import-error
+from nfvbench.log import LOG
+from nfvbench.specs import ChainType
+from nfvbench.traffic_server import TRexTrafficServer
+from nfvbench.utils import cast_integer
+from nfvbench.utils import timeout
+from nfvbench.utils import TimeoutError
+
+from hdrh.histogram import HdrHistogram
+
+# pylint: disable=import-error
+from trex.common.services.trex_service_arp import ServiceARP
+from trex.stl.api import ARP
+from trex.stl.api import bind_layers
+from trex.stl.api import CTRexVmInsFixHwCs
+from trex.stl.api import Dot1Q
+from trex.stl.api import Ether
+from trex.stl.api import FlagsField
+from trex.stl.api import IP
+from trex.stl.api import Packet
+from trex.stl.api import STLClient
+from trex.stl.api import STLError
+from trex.stl.api import STLFlowLatencyStats
+from trex.stl.api import STLFlowStats
+from trex.stl.api import STLPktBuilder
+from trex.stl.api import STLScVmRaw
+from trex.stl.api import STLStream
+from trex.stl.api import STLTXCont
+from trex.stl.api import STLTXMultiBurst
+from trex.stl.api import STLVmFixChecksumHw
+from trex.stl.api import STLVmFixIpv4
+from trex.stl.api import STLVmFlowVar
+from trex.stl.api import STLVmFlowVarRepeatableRandom
+from trex.stl.api import STLVmTupleGen
+from trex.stl.api import STLVmWrFlowVar
+from trex.stl.api import ThreeBytesField
+from trex.stl.api import UDP
+from trex.stl.api import XByteField
+
+# pylint: enable=import-error
+
+from .traffic_base import AbstractTrafficGenerator
+from .traffic_base import TrafficGeneratorException
+from . import traffic_utils as utils
+from .traffic_utils import IMIX_AVG_L2_FRAME_SIZE
+from .traffic_utils import IMIX_L2_SIZES
+from .traffic_utils import IMIX_RATIOS
+
+class VXLAN(Packet):
+ """VxLAN class."""
+
+ _VXLAN_FLAGS = ['R' * 27] + ['I'] + ['R' * 5]
+ name = "VXLAN"
+ fields_desc = [FlagsField("flags", 0x08000000, 32, _VXLAN_FLAGS),
+ ThreeBytesField("vni", 0),
+ XByteField("reserved", 0x00)]
+
+ def mysummary(self):
+ """Summary."""
+ return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
+
+class TRex(AbstractTrafficGenerator):
+ """TRex traffic generator driver."""
+
+ LATENCY_PPS = 1000
+ CHAIN_PG_ID_MASK = 0x007F
+ PORT_PG_ID_MASK = 0x0080
+ LATENCY_PG_ID_MASK = 0x0100
+
+ def __init__(self, traffic_client):
+ """Trex driver."""
+ AbstractTrafficGenerator.__init__(self, traffic_client)
+ self.client = None
+ self.id = count()
+ self.port_handle = []
+ self.chain_count = self.generator_config.service_chain_count
+ self.rates = []
+ self.capture_id = None
+ self.packet_list = []
+ self.l2_frame_size = 0
+
+ def get_version(self):
+ """Get the Trex version."""
+ return self.client.get_server_version() if self.client else ''
+
+ def get_pg_id(self, port, chain_id):
+ """Calculate the packet group IDs to use for a given port/stream type/chain_id.
+
+ port: 0 or 1
+ chain_id: identifies to which chain the pg_id is associated (0 to 127)
+ return: pg_id, lat_pg_id
+
+ We use a bit mask to set up the 3 fields:
+ 0x007F: chain ID (7 bits for a max of 128 chains)
+ 0x0080: port bit
+ 0x0100: latency bit
+ """
+ pg_id = port * TRex.PORT_PG_ID_MASK | chain_id
+ return pg_id, pg_id | TRex.LATENCY_PG_ID_MASK
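A worked example of the bit layout above, with arbitrarily chosen values (port 1, chain 5); the same arithmetic produces the pg_id keys (0, 1, 128) and latency keys (256, 257, 384, 385) seen in the stats excerpts further down:

# Sketch only: pg_id arithmetic for port=1, chain_id=5
pg_id = 1 * 0x0080 | 5        # 0x0085 = 133
lat_pg_id = pg_id | 0x0100    # 0x0185 = 389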
+
+ def extract_stats(self, in_stats, ifstats):
+ """Extract stats from dict returned by Trex API.
+
+ :param in_stats: dict as returned by TRex api
+ """
+ utils.nan_replace(in_stats)
+ # LOG.debug(in_stats)
+
+ result = {}
+ # port_handles should have only 2 elements: [0, 1]
+ # so (1 - ph) will be the index for the far end port
+ for ph in self.port_handle:
+ stats = in_stats[ph]
+ far_end_stats = in_stats[1 - ph]
+ result[ph] = {
+ 'tx': {
+ 'total_pkts': cast_integer(stats['opackets']),
+ 'total_pkt_bytes': cast_integer(stats['obytes']),
+ 'pkt_rate': cast_integer(stats['tx_pps']),
+ 'pkt_bit_rate': cast_integer(stats['tx_bps'])
+ },
+ 'rx': {
+ 'total_pkts': cast_integer(stats['ipackets']),
+ 'total_pkt_bytes': cast_integer(stats['ibytes']),
+ 'pkt_rate': cast_integer(stats['rx_pps']),
+ 'pkt_bit_rate': cast_integer(stats['rx_bps']),
+ # how many pkts were dropped in RX direction
+ # need to take the tx counter on the far end port
+ 'dropped_pkts': cast_integer(
+ far_end_stats['opackets'] - stats['ipackets'])
+ }
+ }
+ self.__combine_latencies(in_stats, result[ph]['rx'], ph)
+
+ total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
+
+ # in the case of GARP packets we need to compute the total_tx_pkts value from flow_stats
+ # as GARP packets have no flow stats and will not be received on the other port
+ if self.config.periodic_gratuitous_arp:
+ if not self.config.no_flow_stats and not self.config.no_latency_stats:
+ global_total_tx_pkts = total_tx_pkts
+ total_tx_pkts = 0
+ if ifstats:
+ for chain_id, _ in enumerate(ifstats):
+ for ph in self.port_handle:
+ pg_id, lat_pg_id = self.get_pg_id(ph, chain_id)
+ flows_tx_pkts = in_stats['flow_stats'][pg_id]['tx_pkts']['total'] + \
+ in_stats['flow_stats'][lat_pg_id]['tx_pkts']['total']
+ result[ph]['tx']['total_pkts'] = flows_tx_pkts
+ total_tx_pkts += flows_tx_pkts
+ else:
+ for pg_id in in_stats['flow_stats']:
+ if pg_id != 'global':
+ total_tx_pkts += in_stats['flow_stats'][pg_id]['tx_pkts']['total']
+ result["garp_total_tx_rate"] = cast_integer(
+ (global_total_tx_pkts - total_tx_pkts) / self.config.duration_sec)
+ else:
+ LOG.warning("Gratuitous ARP are not received by the other port so TRex and NFVbench"
+ " see these packets as dropped. Please do not activate no_flow_stats"
+ " and no_latency_stats properties to have a better drop rate.")
+
+ result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
+ # actual offered tx rate in bps
+ avg_packet_size = utils.get_average_packet_size(self.l2_frame_size)
+ total_tx_bps = utils.pps_to_bps(result["total_tx_rate"], avg_packet_size)
+ result['offered_tx_rate_bps'] = total_tx_bps
+
+ result.update(self.get_theoretical_rates(avg_packet_size))
+
+ result["flow_stats"] = in_stats["flow_stats"]
+ result["latency"] = in_stats["latency"]
+
+ # Merge HDRHistogram to have an overall value for all chains and ports
+ # (provided that the histogram exists in the stats returned by T-Rex)
+ # Of course, empty histograms will produce an empty (invalid) histogram.
+ try:
+ hdrh_list = []
+ if ifstats:
+ for chain_id, _ in enumerate(ifstats):
+ for ph in self.port_handle:
+ _, lat_pg_id = self.get_pg_id(ph, chain_id)
+ hdrh_list.append(
+ HdrHistogram.decode(in_stats['latency'][lat_pg_id]['latency']['hdrh']))
+ else:
+ for pg_id in in_stats['latency']:
+ if pg_id != 'global':
+ hdrh_list.append(
+ HdrHistogram.decode(in_stats['latency'][pg_id]['latency']['hdrh']))
+
+ def add_hdrh(x, y):
+ x.add(y)
+ return x
+ decoded_hdrh = reduce(add_hdrh, hdrh_list)
+ result["overall_hdrh"] = HdrHistogram.encode(decoded_hdrh).decode('utf-8')
+ except KeyError:
+ pass
+
+ return result
+
+ def get_stream_stats(self, trex_stats, if_stats, latencies, chain_idx):
+ """Extract the aggregated stats for a given chain.
+
+ trex_stats: stats as returned by get_stats()
+ if_stats: a list of 2 interface stats to update (port 0 and 1)
+ latencies: a list of 2 Latency instances to update for this chain (port 0 and 1)
+ latencies[p] is the latency for packets sent on port p
+ if there are no latency streams, the Latency instances are not modified
+ chain_idx: chain index of the interface stats
+
+ The packet counts include normal and latency streams.
+
+ Trex returns flows stats as follows:
+
+ 'flow_stats': {0: {'rx_bps': {0: 0, 1: 0, 'total': 0},
+ 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
+ 'rx_bytes': {0: nan, 1: nan, 'total': nan},
+ 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
+ 'rx_pps': {0: 0, 1: 0, 'total': 0},
+ 'tx_bps': {0: 0, 1: 0, 'total': 0},
+ 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
+ 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
+ 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
+ 'tx_pps': {0: 0, 1: 0, 'total': 0}},
+ 1: {'rx_bps': {0: 0, 1: 0, 'total': 0},
+ 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
+ 'rx_bytes': {0: nan, 1: nan, 'total': nan},
+ 'rx_pkts': {0: 0, 1: 15001, 'total': 15001},
+ 'rx_pps': {0: 0, 1: 0, 'total': 0},
+ 'tx_bps': {0: 0, 1: 0, 'total': 0},
+ 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
+ 'tx_bytes': {0: 1020068, 1: 0, 'total': 1020068},
+ 'tx_pkts': {0: 15001, 1: 0, 'total': 15001},
+ 'tx_pps': {0: 0, 1: 0, 'total': 0}},
+ 128: {'rx_bps': {0: 0, 1: 0, 'total': 0},
+ 'rx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
+ 'rx_bytes': {0: nan, 1: nan, 'total': nan},
+ 'rx_pkts': {0: 15001, 1: 0, 'total': 15001},
+ 'rx_pps': {0: 0, 1: 0, 'total': 0},
+ 'tx_bps': {0: 0, 1: 0, 'total': 0},
+ 'tx_bps_l1': {0: 0.0, 1: 0.0, 'total': 0.0},
+ 'tx_bytes': {0: 0, 1: 1020068, 'total': 1020068},
+ 'tx_pkts': {0: 0, 1: 15001, 'total': 15001},
+ 'tx_pps': {0: 0, 1: 0, 'total': 0}}, etc...
+
+ the pg_id (0, 1, 128,...) is the key of the dict and is obtained using the
+ get_pg_id() method.
+ packet counters for a given stream sent on port p are reported as:
+ - tx_pkts[p] on port p
+ - rx_pkts[1-p] on the far end port
+
+ This is a tricky/critical counter transposition operation because
+ the results are grouped by port (not by stream):
+ tx_pkts_port(p=0) comes from pg_id(port=0, chain_idx)['tx_pkts'][0]
+ rx_pkts_port(p=0) comes from pg_id(port=1, chain_idx)['rx_pkts'][0]
+ tx_pkts_port(p=1) comes from pg_id(port=1, chain_idx)['tx_pkts'][1]
+ rx_pkts_port(p=1) comes from pg_id(port=0, chain_idx)['rx_pkts'][1]
+
+ or using a more generic formula:
+ tx_pkts_port(p) comes from pg_id(port=p, chain_idx)['tx_pkts'][p]
+ rx_pkts_port(p) comes from pg_id(port=1-p, chain_idx)['rx_pkts'][p]
+
+ the second formula is equivalent to
+ rx_pkts_port(1-p) comes from pg_id(port=p, chain_idx)['rx_pkts'][1-p]
+
+ If there are latency streams, those same counters need to be added in the same way
+ """
+ def get_latency(lval):
+ try:
+ return int(round(lval))
+ except ValueError:
+ return 0
+
+ for ifs in if_stats:
+ ifs.tx = ifs.rx = 0
+ for port in range(2):
+ pg_id, lat_pg_id = self.get_pg_id(port, chain_idx)
+ for pid in [pg_id, lat_pg_id]:
+ try:
+ pg_stats = trex_stats['flow_stats'][pid]
+ if_stats[port].tx += pg_stats['tx_pkts'][port]
+ if_stats[1 - port].rx += pg_stats['rx_pkts'][1 - port]
+ except KeyError:
+ pass
+ try:
+ lat = trex_stats['latency'][lat_pg_id]['latency']
+ # dropped_pkts += lat['err_cntrs']['dropped']
+ latencies[port].max_usec = get_latency(lat['total_max'])
+ if math.isnan(lat['total_min']):
+ latencies[port].min_usec = 0
+ latencies[port].avg_usec = 0
+ else:
+ latencies[port].min_usec = get_latency(lat['total_min'])
+ latencies[port].avg_usec = get_latency(lat['average'])
+ # pick up the HDR histogram if present (otherwise will raise KeyError)
+ latencies[port].hdrh = lat['hdrh']
+ except KeyError:
+ pass
+
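To make the counter transposition concrete, here is how the chain 0 counters map onto the flow_stats excerpt shown in the docstring above (pg_ids 0 and 128; the latency pg_ids 256 and 384 are added on top in the same way):

# Transposition sketch for chain_idx=0, values taken from the docstring excerpt
#   if_stats[0].tx += flow_stats[0]['tx_pkts'][0]      # 15001
#   if_stats[1].rx += flow_stats[0]['rx_pkts'][1]      # 15001
#   if_stats[1].tx += flow_stats[128]['tx_pkts'][1]    # 15001
#   if_stats[0].rx += flow_stats[128]['rx_pkts'][0]    # 15001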
+ def __combine_latencies(self, in_stats, results, port_handle):
+ """Traverse TRex result dictionary and combines chosen latency stats.
+
+ example of latency dict returned by trex (2 chains):
+ 'latency': {256: {'err_cntrs': {'dropped': 0,
+ 'dup': 0,
+ 'out_of_order': 0,
+ 'seq_too_high': 0,
+ 'seq_too_low': 0},
+ 'latency': {'average': 26.5,
+ 'hdrh': u'HISTFAAAAEx4nJNpmSgz...bFRgxi',
+ 'histogram': {20: 303,
+ 30: 320,
+ 40: 300,
+ 50: 73,
+ 60: 4,
+ 70: 1},
+ 'jitter': 14,
+ 'last_max': 63,
+ 'total_max': 63,
+ 'total_min': 20}},
+ 257: {'err_cntrs': {'dropped': 0,
+ 'dup': 0,
+ 'out_of_order': 0,
+ 'seq_too_high': 0,
+ 'seq_too_low': 0},
+ 'latency': {'average': 29.75,
+ 'hdrh': u'HISTFAAAAEV4nJN...CALilDG0=',
+ 'histogram': {20: 261,
+ 30: 431,
+ 40: 3,
+ 50: 80,
+ 60: 225},
+ 'jitter': 23,
+ 'last_max': 67,
+ 'total_max': 67,
+ 'total_min': 20}},
+ 384: {'err_cntrs': {'dropped': 0,
+ 'dup': 0,
+ 'out_of_order': 0,
+ 'seq_too_high': 0,
+ 'seq_too_low': 0},
+ 'latency': {'average': 18.0,
+ 'hdrh': u'HISTFAAAADR4nJNpm...MjCwDDxAZG',
+ 'histogram': {20: 987, 30: 14},
+ 'jitter': 0,
+ 'last_max': 34,
+ 'total_max': 34,
+ 'total_min': 20}},
+ 385: {'err_cntrs': {'dropped': 0,
+ 'dup': 0,
+ 'out_of_order': 0,
+ 'seq_too_high': 0,
+ 'seq_too_low': 0},
+ 'latency': {'average': 19.0,
+ 'hdrh': u'HISTFAAAADR4nJNpm...NkYmJgDdagfK',
+ 'histogram': {20: 989, 30: 11},
+ 'jitter': 0,
+ 'last_max': 38,
+ 'total_max': 38,
+ 'total_min': 20}},
+ 'global': {'bad_hdr': 0, 'old_flow': 0}},
+ """
+ total_max = 0
+ average = 0
+ total_min = float("inf")
+ for chain_id in range(self.chain_count):
+ try:
+ _, lat_pg_id = self.get_pg_id(port_handle, chain_id)
+ lat = in_stats['latency'][lat_pg_id]['latency']
+ # dropped_pkts += lat['err_cntrs']['dropped']
+ total_max = max(lat['total_max'], total_max)
+ total_min = min(lat['total_min'], total_min)
+ average += lat['average']
+ except KeyError:
+ pass
+ if total_min == float("inf"):
+ total_min = 0
+ results['min_delay_usec'] = total_min
+ results['max_delay_usec'] = total_max
+ results['avg_delay_usec'] = int(average / self.chain_count)
+
+ def _bind_vxlan(self):
+ bind_layers(UDP, VXLAN, dport=4789)
+ bind_layers(VXLAN, Ether)
+
+ def _create_pkt(self, stream_cfg, l2frame_size, disable_random_latency_flow=False):
+ """Create a packet of given size.
+
+ l2frame_size: size of the L2 frame in bytes (including the 32-bit FCS)
+ """
+ # Trex will add the FCS field, so we need to remove 4 bytes from the l2 frame size
+ frame_size = int(l2frame_size) - 4
+ vm_param = []
+ if stream_cfg['vxlan'] is True:
+ self._bind_vxlan()
+ encap_level = '1'
+ pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
+ if stream_cfg['vtep_vlan'] is not None:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
+ pkt_base /= IP(src=stream_cfg['vtep_src_ip'], dst=stream_cfg['vtep_dst_ip'])
+ pkt_base /= UDP(sport=random.randint(1337, 32767), dport=4789)
+ pkt_base /= VXLAN(vni=stream_cfg['net_vni'])
+ pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
+ # need to randomize the outer header UDP src port based on flow
+ vxlan_udp_src_fv = STLVmFlowVar(
+ name="vxlan_udp_src",
+ min_value=1337,
+ max_value=32767,
+ size=2,
+ op="random")
+ vm_param = [vxlan_udp_src_fv,
+ STLVmWrFlowVar(fv_name="vxlan_udp_src", pkt_offset="UDP.sport")]
+ elif stream_cfg['mpls'] is True:
+ encap_level = '0'
+ pkt_base = Ether(src=stream_cfg['vtep_src_mac'], dst=stream_cfg['vtep_dst_mac'])
+ if stream_cfg['vtep_vlan'] is not None:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
+ if stream_cfg['mpls_outer_label'] is not None:
+ pkt_base /= MPLS(label=stream_cfg['mpls_outer_label'], cos=1, s=0, ttl=255)
+ if stream_cfg['mpls_inner_label'] is not None:
+ pkt_base /= MPLS(label=stream_cfg['mpls_inner_label'], cos=1, s=1, ttl=255)
+ # Flow stats and MPLS labels randomization TBD
+ pkt_base /= Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
+ else:
+ encap_level = '0'
+ pkt_base = Ether(src=stream_cfg['mac_src'], dst=stream_cfg['mac_dst'])
+
+ if stream_cfg['vlan_tag'] is not None:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
+
+ udp_args = {}
+ if stream_cfg['udp_src_port']:
+ udp_args['sport'] = int(stream_cfg['udp_src_port'])
+ if stream_cfg['udp_port_step'] == 'random':
+ step = 1
+ else:
+ step = stream_cfg['udp_port_step']
+ udp_args['sport_step'] = int(step)
+ udp_args['sport_max'] = int(stream_cfg['udp_src_port_max'])
+ if stream_cfg['udp_dst_port']:
+ udp_args['dport'] = int(stream_cfg['udp_dst_port'])
+ if stream_cfg['udp_port_step'] == 'random':
+ step = 1
+ else:
+ step = stream_cfg['udp_port_step']
+ udp_args['dport_step'] = int(step)
+ udp_args['dport_max'] = int(stream_cfg['udp_dst_port_max'])
+
+ pkt_base /= IP(src=stream_cfg['ip_src_addr'], dst=stream_cfg['ip_dst_addr']) / \
+ UDP(dport=udp_args['dport'], sport=udp_args['sport'])
+
+ # STLVmTupleGen needs flow count >= cores used by TRex; if FC < cores we use STLVmFlowVar
+ if stream_cfg['ip_addrs_step'] == '0.0.0.1' and stream_cfg['udp_port_step'] == '1' and \
+ stream_cfg['count'] >= self.generator_config.cores:
+ src_fv = STLVmTupleGen(ip_min=stream_cfg['ip_src_addr'],
+ ip_max=stream_cfg['ip_src_addr_max'],
+ port_min=udp_args['sport'],
+ port_max=udp_args['sport_max'],
+ name="tuple_src",
+ limit_flows=stream_cfg['count'])
+ dst_fv = STLVmTupleGen(ip_min=stream_cfg['ip_dst_addr'],
+ ip_max=stream_cfg['ip_dst_addr_max'],
+ port_min=udp_args['dport'],
+ port_max=udp_args['dport_max'],
+ name="tuple_dst",
+ limit_flows=stream_cfg['count'])
+ vm_param = [
+ src_fv,
+ STLVmWrFlowVar(fv_name="tuple_src.ip",
+ pkt_offset="IP:{}.src".format(encap_level)),
+ STLVmWrFlowVar(fv_name="tuple_src.port",
+ pkt_offset="UDP:{}.sport".format(encap_level)),
+ dst_fv,
+ STLVmWrFlowVar(fv_name="tuple_dst.ip",
+ pkt_offset="IP:{}.dst".format(encap_level)),
+ STLVmWrFlowVar(fv_name="tuple_dst.port",
+ pkt_offset="UDP:{}.dport".format(encap_level)),
+ ]
+ else:
+ if disable_random_latency_flow:
+ src_fv_ip = STLVmFlowVar(
+ name="ip_src",
+ min_value=stream_cfg['ip_src_addr'],
+ max_value=stream_cfg['ip_src_addr'],
+ size=4)
+ dst_fv_ip = STLVmFlowVar(
+ name="ip_dst",
+ min_value=stream_cfg['ip_dst_addr'],
+ max_value=stream_cfg['ip_dst_addr'],
+ size=4)
+ elif stream_cfg['ip_addrs_step'] == 'random':
+ src_fv_ip = STLVmFlowVarRepeatableRandom(
+ name="ip_src",
+ min_value=stream_cfg['ip_src_addr'],
+ max_value=stream_cfg['ip_src_addr_max'],
+ size=4,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['ip_src_count'])
+ dst_fv_ip = STLVmFlowVarRepeatableRandom(
+ name="ip_dst",
+ min_value=stream_cfg['ip_dst_addr'],
+ max_value=stream_cfg['ip_dst_addr_max'],
+ size=4,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['ip_dst_count'])
+ else:
+ src_fv_ip = STLVmFlowVar(
+ name="ip_src",
+ min_value=stream_cfg['ip_src_addr'],
+ max_value=stream_cfg['ip_src_addr_max'],
+ size=4,
+ op="inc",
+ step=stream_cfg['ip_addrs_step'])
+ dst_fv_ip = STLVmFlowVar(
+ name="ip_dst",
+ min_value=stream_cfg['ip_dst_addr'],
+ max_value=stream_cfg['ip_dst_addr_max'],
+ size=4,
+ op="inc",
+ step=stream_cfg['ip_addrs_step'])
+
+ if disable_random_latency_flow:
+ src_fv_port = STLVmFlowVar(
+ name="p_src",
+ min_value=udp_args['sport'],
+ max_value=udp_args['sport'],
+ size=2)
+ dst_fv_port = STLVmFlowVar(
+ name="p_dst",
+ min_value=udp_args['dport'],
+ max_value=udp_args['dport'],
+ size=2)
+ elif stream_cfg['udp_port_step'] == 'random':
+ src_fv_port = STLVmFlowVarRepeatableRandom(
+ name="p_src",
+ min_value=udp_args['sport'],
+ max_value=udp_args['sport_max'],
+ size=2,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['udp_src_count'])
+ dst_fv_port = STLVmFlowVarRepeatableRandom(
+ name="p_dst",
+ min_value=udp_args['dport'],
+ max_value=udp_args['dport_max'],
+ size=2,
+ seed=random.randint(0, 32767),
+ limit=stream_cfg['udp_dst_count'])
+ else:
+ src_fv_port = STLVmFlowVar(
+ name="p_src",
+ min_value=udp_args['sport'],
+ max_value=udp_args['sport_max'],
+ size=2,
+ op="inc",
+ step=udp_args['sport_step'])
+ dst_fv_port = STLVmFlowVar(
+ name="p_dst",
+ min_value=udp_args['dport'],
+ max_value=udp_args['dport_max'],
+ size=2,
+ op="inc",
+ step=udp_args['dport_step'])
+ vm_param = [
+ src_fv_ip,
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset="IP:{}.src".format(encap_level)),
+ src_fv_port,
+ STLVmWrFlowVar(fv_name="p_src", pkt_offset="UDP:{}.sport".format(encap_level)),
+ dst_fv_ip,
+ STLVmWrFlowVar(fv_name="ip_dst", pkt_offset="IP:{}.dst".format(encap_level)),
+ dst_fv_port,
+ STLVmWrFlowVar(fv_name="p_dst", pkt_offset="UDP:{}.dport".format(encap_level)),
+ ]
+ # Use HW offload to calculate the outer IP/UDP checksums
+ vm_param.append(STLVmFixChecksumHw(l3_offset="IP:0",
+ l4_offset="UDP:0",
+ l4_type=CTRexVmInsFixHwCs.L4_TYPE_UDP))
+ # Use software to fix the inner IPv4 checksum for VxLAN packets
+ if int(encap_level):
+ vm_param.append(STLVmFixIpv4(offset="IP:1"))
+ pad = max(0, frame_size - len(pkt_base)) * 'x'
+
+ return STLPktBuilder(pkt=pkt_base / pad,
+ vm=STLScVmRaw(vm_param, cache_size=int(self.config.cache_size)))
+
+ def _create_gratuitous_arp_pkt(self, stream_cfg):
+ """Create a GARP packet.
+
+ """
+ pkt_base = Ether(src=stream_cfg['mac_src'], dst="ff:ff:ff:ff:ff:ff")
+
+ if self.config.vxlan or self.config.mpls:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vtep_vlan'])
+ elif stream_cfg['vlan_tag'] is not None:
+ pkt_base /= Dot1Q(vlan=stream_cfg['vlan_tag'])
+
+ pkt_base /= ARP(psrc=stream_cfg['ip_src_tg_gw'], hwsrc=stream_cfg['mac_src'],
+ hwdst=stream_cfg['mac_src'], pdst=stream_cfg['ip_src_tg_gw'])
+
+ return STLPktBuilder(pkt=pkt_base)
+
+ def generate_streams(self, port, chain_id, stream_cfg, l2frame, latency=True,
+ e2e=False):
+ """Create a list of streams corresponding to a given chain and stream config.
+
+ port: port where the streams originate (0 or 1)
+ chain_id: the chain to which the streams are associated to
+ stream_cfg: stream configuration
+ l2frame: L2 frame size (including 4-byte FCS) or 'IMIX'
+ latency: if True also create a latency stream
+ e2e: True if performing "end to end" connectivity check
+ """
+ streams = []
+ pg_id, lat_pg_id = self.get_pg_id(port, chain_id)
+ if l2frame == 'IMIX':
+ for ratio, l2_frame_size in zip(IMIX_RATIOS, IMIX_L2_SIZES):
+ pkt = self._create_pkt(stream_cfg, l2_frame_size)
+ if e2e or stream_cfg['mpls']:
+ streams.append(STLStream(packet=pkt,
+ mode=STLTXCont(pps=ratio)))
+ else:
+ if stream_cfg['vxlan'] is True:
+ streams.append(STLStream(packet=pkt,
+ flow_stats=STLFlowStats(pg_id=pg_id,
+ vxlan=True)
+ if not self.config.no_flow_stats else None,
+ mode=STLTXCont(pps=ratio)))
+ else:
+ streams.append(STLStream(packet=pkt,
+ flow_stats=STLFlowStats(pg_id=pg_id)
+ if not self.config.no_flow_stats else None,
+ mode=STLTXCont(pps=ratio)))
+
+ if latency:
+ # for IMIX, the latency packets have the average IMIX packet size
+ if stream_cfg['ip_addrs_step'] == 'random' or \
+ stream_cfg['udp_port_step'] == 'random':
+ # Force the latency stream to a single flow to avoid creating
+ # more flows than the requested flow count
+ pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE, True)
+ else:
+ pkt = self._create_pkt(stream_cfg, IMIX_AVG_L2_FRAME_SIZE)
+
+ else:
+ l2frame_size = int(l2frame)
+ pkt = self._create_pkt(stream_cfg, l2frame_size)
+ if self.config.periodic_gratuitous_arp:
+ requested_pps = int(utils.parse_rate_str(self.rates[0])[
+ 'rate_pps']) - self.config.gratuitous_arp_pps
+ if latency:
+ requested_pps -= self.LATENCY_PPS
+ stltx_cont = STLTXCont(pps=requested_pps)
+ else:
+ stltx_cont = STLTXCont()
+ if e2e or stream_cfg['mpls']:
+ streams.append(STLStream(packet=pkt,
+ # Flow stats is disabled for MPLS now
+ # flow_stats=STLFlowStats(pg_id=pg_id),
+ mode=stltx_cont))
+ else:
+ if stream_cfg['vxlan'] is True:
+ streams.append(STLStream(packet=pkt,
+ flow_stats=STLFlowStats(pg_id=pg_id,
+ vxlan=True)
+ if not self.config.no_flow_stats else None,
+ mode=stltx_cont))
+ else:
+ streams.append(STLStream(packet=pkt,
+ flow_stats=STLFlowStats(pg_id=pg_id)
+ if not self.config.no_flow_stats else None,
+ mode=stltx_cont))
+ # for the latency stream, the minimum payload is 16 bytes even in case of vlan tagging
+ # without vlan, the min l2 frame size is 64
+ # with vlan it is 68
+ # This only applies to the latency stream
+ if latency:
+ if stream_cfg['vlan_tag'] and l2frame_size < 68:
+ l2frame_size = 68
+ if stream_cfg['ip_addrs_step'] == 'random' or \
+ stream_cfg['udp_port_step'] == 'random':
+ # Force the latency stream to a single flow to avoid creating
+ # more flows than the requested flow count
+ pkt = self._create_pkt(stream_cfg, l2frame_size, True)
+ else:
+ pkt = self._create_pkt(stream_cfg, l2frame_size)
+
+ if latency:
+ if self.config.no_latency_stats:
+ LOG.info("Latency flow statistics are disabled.")
+ if stream_cfg['vxlan'] is True:
+ streams.append(STLStream(packet=pkt,
+ flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id,
+ vxlan=True)
+ if not self.config.no_latency_stats else None,
+ mode=STLTXCont(pps=self.LATENCY_PPS)))
+ else:
+ streams.append(STLStream(packet=pkt,
+ flow_stats=STLFlowLatencyStats(pg_id=lat_pg_id)
+ if not self.config.no_latency_stats else None,
+ mode=STLTXCont(pps=self.LATENCY_PPS)))
+
+ if self.config.periodic_gratuitous_arp and (
+ self.config.l3_router or self.config.service_chain == ChainType.EXT):
+ # In the case of the L3 router feature or an EXT chain with a router,
+ # and depending on the SUT ARP stale time configuration,
+ # gratuitous ARP from the TG port to the router is needed to keep traffic up
+ garp_pkt = self._create_gratuitous_arp_pkt(stream_cfg)
+ ibg = self.config.gratuitous_arp_pps * 1000000.0
+ packets_count = int(self.config.duration_sec / self.config.gratuitous_arp_pps)
+ streams.append(
+ STLStream(packet=garp_pkt,
+ mode=STLTXMultiBurst(pkts_per_burst=1, count=packets_count, ibg=ibg)))
+ return streams
+
+ @timeout(5)
+ def __connect(self, client):
+ client.connect()
+
+ def __local_server_status(self):
+ """ The TRex server may have started but failed initializing... and stopped.
+ This piece of code is especially designed to address
+ the case when a fatal failure occurs on a DPDK init call.
+ The TRex algorihm should be revised to include some missing timeouts (?)
+ status returned:
+ 0: no error detected
+ 1: fatal error detected - should lead to exiting the run
+ 2: error detected that could be solved by starting again
+ The diagnostic is based on parsing the local trex log file (improvable)
+ """
+ status = 0
+ message = None
+ failure = None
+ exited = None
+ cause = None
+ error = None
+ before = None
+ after = None
+ last = None
+ try:
+ with open('/tmp/trex.log', 'r', encoding="utf-8") as trex_log:
+ for _line in trex_log:
+ line = _line.strip()
+ if line.startswith('Usage:'):
+ break
+ if 'ports are bound' in line:
+ continue
+ if 'please wait' in line:
+ continue
+ if 'exit' in line.lower():
+ exited = line
+ elif 'cause' in line.lower():
+ cause = line
+ elif 'fail' in line.lower():
+ failure = line
+ elif 'msg' in line.lower():
+ message = line
+ elif (error is not None) and line:
+ after = line
+ elif line.startswith('Error:') or line.startswith('ERROR'):
+ error = line
+ before = last
+ last = line
+ except FileNotFoundError:
+ pass
+ if exited is not None:
+ status = 1
+ LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', exited)
+ if cause is not None:
+ LOG.info("TRex [cont'd] %s", cause)
+ if failure is not None:
+ LOG.info("TRex [cont'd] %s", failure)
+ if message is not None:
+ LOG.info("TRex [cont'd] %s", message)
+ if 'not supported yet' in message.lower():
+ LOG.info("TRex [cont'd] Try starting again!")
+ status = 2
+ elif error is not None:
+ status = 1
+ LOG.info("\x1b[1m%s\x1b[0m %s", 'TRex failed initializing:', error)
+ if after is not None:
+ LOG.info("TRex [cont'd] %s", after)
+ elif before is not None:
+ LOG.info("TRex [cont'd] %s", before)
+ return status
+
+ def __connect_after_start(self):
+ # after start, Trex may take a bit of time to initialize
+ # so we need to retry a few times
+ # we try to capture recoverable error cases (checking status)
+ status = 0
+ for it in range(self.config.generic_retry_count):
+ try:
+ time.sleep(1)
+ self.client.connect()
+ break
+ except Exception as ex:
+ if it == (self.config.generic_retry_count - 1):
+ raise
+ status = self.__local_server_status()
+ if status > 0:
+ # No need to wait anymore, something went wrong and TRex exited
+ if status == 1:
+ LOG.info("\x1b[1m%s\x1b[0m", 'TRex failed starting!')
+ print("More information? Try the command: "
+ + "\x1b[1mnfvbench --show-trex-log\x1b[0m")
+ sys.exit(0)
+ if status == 2:
+ # a new start will follow
+ return status
+ LOG.info("Retrying connection to TRex (%s)...", ex.msg)
+ return status
+
+ def connect(self):
+ """Connect to the TRex server."""
+ status = 0
+ server_ip = self.generator_config.ip
+ LOG.info("Connecting to TRex (%s)...", server_ip)
+
+ # Connect to TRex server
+ self.client = STLClient(server=server_ip, sync_port=self.generator_config.zmq_rpc_port,
+ async_port=self.generator_config.zmq_pub_port)
+ try:
+ self.__connect(self.client)
+ if server_ip == '127.0.0.1':
+ config_updated = self.__check_config()
+ if config_updated or self.config.restart:
+ status = self.__restart()
+ except (TimeoutError, STLError) as e:
+ if server_ip == '127.0.0.1':
+ status = self.__start_local_server()
+ else:
+ raise TrafficGeneratorException(e.message) from e
+
+ if status == 2:
+ # Workaround in case of a failed TRex server initialization
+ # we try to start it again (twice maximum)
+ # which may allow low level initialization to complete.
+ if self.__start_local_server() == 2:
+ self.__start_local_server()
+
+ ports = list(self.generator_config.ports)
+ self.port_handle = ports
+ # Prepare the ports
+ self.client.reset(ports)
+ # Read HW information from each port
+ # this returns an array of dict (1 per port)
+ """
+ Example of output for Intel XL710
+ [{'arp': '-', 'src_ipv4': '-', u'supp_speeds': [40000], u'is_link_supported': True,
+ 'grat_arp': 'off', 'speed': 40, u'index': 0, 'link_change_supported': 'yes',
+ u'rx': {u'counters': 127, u'caps': [u'flow_stats', u'latency']},
+ u'is_virtual': 'no', 'prom': 'off', 'src_mac': u'3c:fd:fe:a8:24:48', 'status': 'IDLE',
+ u'description': u'Ethernet Controller XL710 for 40GbE QSFP+',
+ 'dest': u'fa:16:3e:3c:63:04', u'is_fc_supported': False, 'vlan': '-',
+ u'driver': u'net_i40e', 'led_change_supported': 'yes', 'rx_filter_mode': 'hardware match',
+ 'fc': 'none', 'link': 'UP', u'hw_mac': u'3c:fd:fe:a8:24:48', u'pci_addr': u'0000:5e:00.0',
+ 'mult': 'off', 'fc_supported': 'no', u'is_led_supported': True, 'rx_queue': 'off',
+ 'layer_mode': 'Ethernet', u'numa': 0}, ...]
+ """
+ self.port_info = self.client.get_port_info(ports)
+ LOG.info('Connected to TRex')
+ for id, port in enumerate(self.port_info):
+ LOG.info(' Port %d: %s speed=%dGbps mac=%s pci=%s driver=%s',
+ id, port['description'], port['speed'], port['src_mac'],
+ port['pci_addr'], port['driver'])
+ # Make sure the 2 ports have the same speed
+ if self.port_info[0]['speed'] != self.port_info[1]['speed']:
+ raise TrafficGeneratorException('Traffic generator ports speed mismatch: %d/%d Gbps' %
+ (self.port_info[0]['speed'],
+ self.port_info[1]['speed']))
+
+ def __start_local_server(self):
+ try:
+ LOG.info("Starting TRex ...")
+ self.__start_server()
+ status = self.__connect_after_start()
+ except (TimeoutError, STLError) as e:
+ LOG.error('Cannot connect to TRex')
+ LOG.error(traceback.format_exc())
+ logpath = '/tmp/trex.log'
+ if os.path.isfile(logpath):
+ # Wait for TRex to finish writing error message
+ last_size = 0
+ for _ in range(self.config.generic_retry_count):
+ size = os.path.getsize(logpath)
+ if size == last_size:
+ # probably not writing anymore
+ break
+ last_size = size
+ time.sleep(1)
+ with open(logpath, 'r', encoding="utf-8") as f:
+ message = f.read()
+ else:
+ message = e.message
+ raise TrafficGeneratorException(message) from e
+ return status
+
+ def __start_server(self):
+ server = TRexTrafficServer()
+ server.run_server(self.generator_config)
+
+ def __check_config(self):
+ server = TRexTrafficServer()
+ return server.check_config_updated(self.generator_config)
+
+ def __restart(self):
+ LOG.info("Restarting TRex ...")
+ self.__stop_server()
+ # Wait for the server to stop
+ for _ in range(self.config.generic_retry_count):
+ time.sleep(1)
+ if not self.client.is_connected():
+ LOG.info("TRex is stopped...")
+ break
+ # Start and report a possible failure
+ return self.__start_local_server()
+
+ def __stop_server(self):
+ if self.generator_config.ip == '127.0.0.1':
+ ports = self.client.get_acquired_ports()
+ LOG.info('Release ports %s and stopping TRex...', ports)
+ try:
+ if ports:
+ self.client.release(ports=ports)
+ self.client.server_shutdown()
+ except STLError as e:
+ LOG.warning('Unable to stop TRex. Error: %s', e)
+ else:
+ LOG.info('Using remote TRex. Unable to stop TRex')
+
+ def resolve_arp(self):
+ """Resolve all configured remote IP addresses.
+
+ return: None if ARP resolution failed for at least one IP address,
+ otherwise a dict of lists of dest MACs indexed by port#;
+ the dest MACs in each list are indexed by the chain id
+ """
+ self.client.set_service_mode(ports=self.port_handle)
+ LOG.info('Polling ARP until successful...')
+ arp_dest_macs = {}
+ for port, device in zip(self.port_handle, self.generator_config.devices):
+ # there should be 1 stream config per chain
+ stream_configs = device.get_stream_configs()
+ chain_count = len(stream_configs)
+ ctx = self.client.create_service_ctx(port=port)
+ # all dest macs on this port indexed by chain ID
+ dst_macs = [None] * chain_count
+ dst_macs_count = 0
+ # the index in the list is the chain id
+ if self.config.vxlan or self.config.mpls:
+ arps = [
+ ServiceARP(ctx,
+ src_ip=device.vtep_src_ip,
+ dst_ip=device.vtep_dst_ip,
+ vlan=device.vtep_vlan)
+ for cfg in stream_configs
+ ]
+ else:
+ arps = [
+ ServiceARP(ctx,
+ src_ip=cfg['ip_src_tg_gw'],
+ dst_ip=cfg['mac_discovery_gw'],
+ # will be None if no vlan tagging
+ vlan=cfg['vlan_tag'])
+ for cfg in stream_configs
+ ]
+
+ for attempt in range(self.config.generic_retry_count):
+ try:
+ ctx.run(arps)
+ except STLError:
+ LOG.error(traceback.format_exc())
+ continue
+
+ unresolved = []
+ for chain_id, mac in enumerate(dst_macs):
+ if not mac:
+ arp_record = arps[chain_id].get_record()
+ if arp_record.dst_mac:
+ dst_macs[chain_id] = arp_record.dst_mac
+ dst_macs_count += 1
+ LOG.info(' ARP: port=%d chain=%d src IP=%s dst IP=%s -> MAC=%s',
+ port, chain_id,
+ arp_record.src_ip,
+ arp_record.dst_ip, arp_record.dst_mac)
+ else:
+ unresolved.append(arp_record.dst_ip)
+ if dst_macs_count == chain_count:
+ arp_dest_macs[port] = dst_macs
+ LOG.info('ARP resolved successfully for port %s', port)
+ break
+
+ retry = attempt + 1
+ LOG.info('Retrying ARP for: %s (retry %d/%d)',
+ unresolved, retry, self.config.generic_retry_count)
+ if retry < self.config.generic_retry_count:
+ time.sleep(self.config.generic_poll_sec)
+ else:
+ LOG.error('ARP timed out for port %s (resolved %d out of %d)',
+ port,
+ dst_macs_count,
+ chain_count)
+ break
+
+ # A traffic capture may have been started (from a T-Rex console) at this time.
+ # If service mode was requested in the config, we keep it enabled here; otherwise we disable it.
+ # | Disabling the service mode while a capture is in progress
+ # | would cause the application to stop/crash with an error.
+ if not self.config.service_mode:
+ self.client.set_service_mode(ports=self.port_handle, enabled=False)
+ if len(arp_dest_macs) == len(self.port_handle):
+ return arp_dest_macs
+ return None
+
+ def __is_rate_enough(self, l2frame_size, rates, bidirectional, latency):
+ """Check if rate provided by user is above requirements. Applies only if latency is True."""
+ intf_speed = self.generator_config.intf_speed
+ if latency:
+ if bidirectional:
+ mult = 2
+ total_rate = 0
+ for rate in rates:
+ r = utils.convert_rates(l2frame_size, rate, intf_speed)
+ total_rate += int(r['rate_pps'])
+ else:
+ mult = 1
+ r = utils.convert_rates(l2frame_size, rates[0], intf_speed)
+ total_rate = int(r['rate_pps'])
+ # rate must be enough for latency stream and at least 1 pps for base stream per chain
+ if self.config.periodic_gratuitous_arp:
+ required_rate = (self.LATENCY_PPS + 1 + self.config.gratuitous_arp_pps) \
+ * self.config.service_chain_count * mult
+ else:
+ required_rate = (self.LATENCY_PPS + 1) * self.config.service_chain_count * mult
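+ # Worked example (assuming LATENCY_PPS is 1000): with 2 chains, bidirectional
+ # traffic (mult=2) and no periodic gratuitous ARP, the minimum total rate is
+ # (1000 + 1) * 2 * 2 = 4004 pps before conversion to bps/% below.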
+ result = utils.convert_rates(l2frame_size,
+ {'rate_pps': required_rate},
+ intf_speed * mult)
+ result['result'] = total_rate >= required_rate
+ return result
+
+ return {'result': True}
+
+ def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False):
+ """Program all the streams in Trex server.
+
+ l2frame_size: L2 frame size or IMIX
+ rates: a list of 2 rates to run each direction
+ each rate is a dict like {'rate_pps': '10kpps'}
+ bidirectional: True if bidirectional
+ latency: True if latency measurement is needed
+ e2e: True if performing "end to end" connectivity check
+ """
+ if self.config.no_flow_stats:
+ LOG.info("Traffic flow statistics are disabled.")
+ r = self.__is_rate_enough(l2frame_size, rates, bidirectional, latency)
+ if not r['result']:
+ raise TrafficGeneratorException(
+ 'The required total rate is at least {pps}pps (i.e. {bps}bps or {load}%).'
+ .format(pps=r['rate_pps'],
+ bps=r['rate_bps'],
+ load=r['rate_percent']))
+ self.l2_frame_size = l2frame_size
+ # a dict of lists of streams indexed by port#
+ # in case of fixed size, has self.chain_count * 2 * 2 streams
+ # (1 normal + 1 latency stream per direction per chain)
+ # for IMIX, has self.chain_count * 2 * 4 streams
+ # (3 normal + 1 latency stream per direction per chain)
+ streamblock = {}
+ for port in self.port_handle:
+ streamblock[port] = []
+ stream_cfgs = [d.get_stream_configs() for d in self.generator_config.devices]
+ if self.generator_config.ip_addrs_step == 'random' \
+ or self.generator_config.gen_config.udp_port_step == 'random':
+ LOG.warning("Using random step, the number of flows can be less than "
+ "the requested number of flows due to repeatable multivariate random "
+ "generation which can reproduce the same pattern of values")
+ self.rates = [utils.to_rate_str(rate) for rate in rates]
+ for chain_id, (fwd_stream_cfg, rev_stream_cfg) in enumerate(zip(*stream_cfgs)):
+ streamblock[0].extend(self.generate_streams(self.port_handle[0],
+ chain_id,
+ fwd_stream_cfg,
+ l2frame_size,
+ latency=latency,
+ e2e=e2e))
+ if len(self.rates) > 1:
+ streamblock[1].extend(self.generate_streams(self.port_handle[1],
+ chain_id,
+ rev_stream_cfg,
+ l2frame_size,
+ latency=bidirectional and latency,
+ e2e=e2e))
+
+ for port in self.port_handle:
+ if self.config.vxlan:
+ self.client.set_port_attr(ports=port, vxlan_fs=[4789])
+ else:
+ self.client.set_port_attr(ports=port, vxlan_fs=None)
+ self.client.add_streams(streamblock[port], ports=port)
+ LOG.info('Created %d traffic streams for port %s.', len(streamblock[port]), port)
+
+ def clear_streamblock(self):
+ """Clear all streams from TRex."""
+ self.rates = []
+ self.client.reset(self.port_handle)
+ LOG.info('Cleared all existing streams')
+
+ def get_stats(self, ifstats=None):
+ """Get stats from Trex."""
+ stats = self.client.get_stats()
+ return self.extract_stats(stats, ifstats)
+
+ def get_macs(self):
+ """Return the Trex local port MAC addresses.
+
+ return: a list of MAC addresses indexed by the port#
+ """
+ return [port['src_mac'] for port in self.port_info]
+
+ def get_port_speed_gbps(self):
+ """Return the Trex local port MAC addresses.
+
+ return: a list of speed in Gbps indexed by the port#
+ """
+ return [port['speed'] for port in self.port_info]
+
+ def clear_stats(self):
+ """Clear all stats in the traffic gneerator."""
+ if self.port_handle:
+ self.client.clear_stats()
+
+ def start_traffic(self):
+ """Start generating traffic in all ports."""
+ for port, rate in zip(self.port_handle, self.rates):
+ self.client.start(ports=port, mult=rate, duration=self.config.duration_sec, force=True)
+
+ def stop_traffic(self):
+ """Stop generating traffic."""
+ self.client.stop(ports=self.port_handle)
+
+ def start_capture(self):
+ """Capture all packets on both ports that are unicast to us."""
+ if self.capture_id:
+ self.stop_capture()
+ # Need to filter out unwanted packets so we do not end up counting
+ # src MACs of frames that are not unicast to us
+ src_mac_list = self.get_macs()
+ bpf_filter = "ether dst %s or ether dst %s" % (src_mac_list[0], src_mac_list[1])
+ # ports must be set in service in order to enable capture
+ self.client.set_service_mode(ports=self.port_handle)
+ self.capture_id = self.client.start_capture \
+ (rx_ports=self.port_handle, bpf_filter=bpf_filter)
+
+ def fetch_capture_packets(self):
+ """Fetch capture packets in capture mode."""
+ if self.capture_id:
+ self.packet_list = []
+ self.client.fetch_capture_packets(capture_id=self.capture_id['id'],
+ output=self.packet_list)
+
+ def stop_capture(self):
+ """Stop capturing packets."""
+ if self.capture_id:
+ self.client.stop_capture(capture_id=self.capture_id['id'])
+ self.capture_id = None
+ # A traffic capture may have been started (from a T-Rex console) at this time.
+ # If service mode was requested in the config, we keep it enabled here; otherwise we disable it.
+ # | Disabling the service mode while a capture is in progress
+ # | would cause the application to stop/crash with an error.
+ if not self.config.service_mode:
+ self.client.set_service_mode(ports=self.port_handle, enabled=False)
+
+ def cleanup(self):
+ """Cleanup Trex driver."""
+ if self.client:
+ try:
+ self.client.reset(self.port_handle)
+ self.client.disconnect()
+ except STLError:
+ # TRex does not like a reset while in disconnected state
+ pass
+
+ def set_service_mode(self, enabled=True):
+ """Enable/disable the 'service' mode."""
+ self.client.set_service_mode(ports=self.port_handle, enabled=enabled)
diff --git a/nfvbench/traffic_server.py b/nfvbench/traffic_server.py
index dcb83fb..5111b32 100644
--- a/nfvbench/traffic_server.py
+++ b/nfvbench/traffic_server.py
@@ -16,7 +16,7 @@ import os
import subprocess
import yaml
-from log import LOG
+from .log import LOG
class TrafficServerException(Exception):
@@ -34,36 +34,149 @@ class TRexTrafficServer(TrafficServer):
assert len(contents) == 1
self.trex_dir = os.path.join(trex_base_dir, contents[0])
- def run_server(self, traffic_profile, vlan_tagging, filename='/etc/trex_cfg.yaml'):
- """
- Runs TRex server for specified traffic profile.
+ def run_server(self, generator_config, filename='/etc/trex_cfg.yaml'):
+ """Run TRex server for specified traffic profile.
:param traffic_profile: traffic profile object based on config file
:param filename: path where to save TRex config file
"""
- cfg = self.__save_config(traffic_profile, filename)
- cores = traffic_profile.cores
- sw_mode = "--software" if traffic_profile.generator_config.software_mode else ""
- vlan_opt = "--vlan" if vlan_tagging else ""
- subprocess.Popen(['nohup', '/bin/bash', '-c',
- './t-rex-64 -i -c {} --iom 0 --no-scapy-server --close-at-end {} '
- '{} --cfg {} &> /tmp/trex.log & disown'.format(cores, sw_mode,
- vlan_opt, cfg)],
- cwd=self.trex_dir)
- LOG.info('TRex server is running...')
-
- def __save_config(self, traffic_profile, filename):
- ifs = ",".join([repr(pci) for pci in traffic_profile.pcis])
-
- result = """# Config generated by NFVBench tool
- - port_limit : 2
- version : 2
- interfaces : [{ifs}]""".format(ifs=ifs)
+ cfg = self.__save_config(generator_config, filename)
+ cores = generator_config.cores
+ vtep_vlan = generator_config.gen_config.get('vtep_vlan')
+ sw_mode = "--software" if generator_config.software_mode else ""
+ vlan_opt = "--vlan" if (generator_config.vlan_tagging or vtep_vlan) else ""
+ if generator_config.mbuf_factor:
+ mbuf_opt = "--mbuf-factor " + str(generator_config.mbuf_factor)
+ else:
+ mbuf_opt = ""
+ hdrh_opt = "--hdrh" if generator_config.hdrh else ""
+ # --unbind-unused-ports: for NICs that have more than 2 ports, such as the Intel X710,
+ # this will instruct trex to unbind all ports that are unused instead of
+ # erroring out with an exception (i40e only)
+ # Try: --ignore-528-issue -> neither unbind nor exit with an error,
+ # just proceed because it might work!
+ # Note that force unbinding is probably a bad choice:
+ # we can't assume for sure that the other ports are "unused".
+ # The default TRex behaviour - exit - is indeed the safer option;
+ # a message informs about the ports that should be unbound.
+ i40e_opt = ("--ignore-528-issue" if
+ generator_config.config.i40e_mixed == 'ignore' else
+ "--unbind-unused-ports" if
+ generator_config.config.i40e_mixed == 'unbind' else "")
+ cmd = ['nohup', '/bin/bash', '-c',
+ './t-rex-64 -i -c {} --iom 0 --no-scapy-server '
+ '--close-at-end {} {} {} '
+ '{} {} --cfg {} &> /tmp/trex.log & disown'.format(cores, sw_mode,
+ i40e_opt,
+ vlan_opt,
+ hdrh_opt,
+ mbuf_opt, cfg)]
+ LOG.info(' '.join(cmd))
+ with subprocess.Popen(cmd, cwd=self.trex_dir) as trex_process:
+ LOG.info('TRex server is running (PID: %s)...', trex_process.pid)
+
+ def __load_config(self, filename):
+ result = {}
+ if os.path.exists(filename):
+ with open(filename, 'r', encoding="utf-8") as stream:
+ try:
+ result = yaml.safe_load(stream)
+ except yaml.YAMLError as exc:
+ print(exc)
+ return result
+ def __save_config(self, generator_config, filename):
+ result = self.__prepare_config(generator_config)
yaml.safe_load(result)
if os.path.exists(filename):
os.remove(filename)
- with open(filename, 'w') as f:
+ with open(filename, 'w', encoding="utf-8") as f:
f.write(result)
-
return filename
+
+ def __prepare_config(self, generator_config):
+ ifs = ",".join([repr(pci) for pci in generator_config.pcis])
+
+ # For consistency and stability reasons, the T-Rex server
+ # should be forcibly restarted each time the value of a
+ # parameter, specified as one of the starting command line
+ # arguments, has been modified since the last launch.
+ # Hence we add some extra fields to the config file
+ # (nb_cores, use_vlan, mbuf_factor, i40e_mixed, hdrh)
+ # which will serve as a memory between runs -
+ # while being actually ignored by the T-Rex server.
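+ # Illustration: if a previous run wrote "nb_cores : 4" in /etc/trex_cfg.yaml and
+ # the current generator_config requests 8 cores, check_config_updated() will
+ # detect the difference and the caller will restart the T-Rex server.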
+
+ result = """# Config generated by NFVbench
+ - port_limit : 2
+ version : 2
+ zmq_pub_port : {zmq_pub_port}
+ zmq_rpc_port : {zmq_rpc_port}
+ prefix : {prefix}
+ limit_memory : {limit_memory}
+ command_line :
+ sw_mode : {sw_mode}
+ mbuf_factor: {mbuf_factor}
+ hdrh : {hdrh}
+ nb_cores : {nb_cores}
+ use_vlan : {use_vlan}
+ i40e_mixed : {i40e_mixed}
+ interfaces : [{ifs}]""".format(
+ zmq_pub_port=generator_config.zmq_pub_port,
+ zmq_rpc_port=generator_config.zmq_rpc_port,
+ prefix=generator_config.name,
+ limit_memory=generator_config.limit_memory,
+ sw_mode=generator_config.software_mode,
+ mbuf_factor=generator_config.mbuf_factor,
+ hdrh=generator_config.hdrh,
+ nb_cores=generator_config.cores,
+ use_vlan=generator_config.gen_config.get('vtep_vlan') or
+ generator_config.vlan_tagging,
+ i40e_mixed=generator_config.config.i40e_mixed,
+ ifs=ifs)
+
+ if hasattr(generator_config, 'mbuf_64') and generator_config.mbuf_64:
+ result += """
+ memory :
+ mbuf_64 : {mbuf_64}""".format(mbuf_64=generator_config.mbuf_64)
+
+ if self.__check_platform_config(generator_config):
+ try:
+ platform = """
+ platform :
+ master_thread_id : {master_thread_id}
+ latency_thread_id : {latency_thread_id}
+ dual_if:""".format(master_thread_id=generator_config.gen_config.platform.
+ master_thread_id,
+ latency_thread_id=generator_config.gen_config.platform.
+ latency_thread_id)
+ result += platform
+
+ for core in generator_config.gen_config.platform.dual_if:
+ threads = ""
+ try:
+ threads = ",".join([repr(thread) for thread in core.threads])
+ except TypeError:
+ LOG.warning("No threads defined for socket %s", core.socket)
+ core_result = """
+ - socket : {socket}
+ threads : [{threads}]""".format(socket=core.socket, threads=threads)
+ result += core_result
+ except (KeyError, AttributeError):
+ pass
+ return result + "\n"
+
+ def __check_platform_config(self, generator_config):
+ return hasattr(generator_config.gen_config, 'platform') \
+ and hasattr(generator_config.gen_config.platform, "master_thread_id") \
+ and generator_config.gen_config.platform.master_thread_id is not None \
+ and hasattr(generator_config.gen_config.platform, "latency_thread_id") \
+ and generator_config.gen_config.platform.latency_thread_id is not None
+
+ def check_config_updated(self, generator_config):
+ existing_config = self.__load_config(filename='/etc/trex_cfg.yaml')
+ new_config = yaml.safe_load(self.__prepare_config(generator_config))
+ LOG.debug("Existing config: %s", existing_config)
+ LOG.debug("New config: %s", new_config)
+ if existing_config == new_config:
+ return False
+ return True
diff --git a/nfvbench/utils.py b/nfvbench/utils.py
index cc649bd..07a38cb 100644
--- a/nfvbench/utils.py
+++ b/nfvbench/utils.py
@@ -13,6 +13,8 @@
# under the License.
import glob
+import time
+from math import gcd
from math import isnan
import os
import re
@@ -23,8 +25,9 @@ import errno
import fcntl
from functools import wraps
import json
-from log import LOG
-
+from .log import LOG
+from nfvbench.traffic_gen.traffic_utils import multiplier_map
+from novaclient.exceptions import NotFound
class TimeoutError(Exception):
pass
@@ -50,7 +53,7 @@ def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def save_json_result(result, json_file, std_json_path, service_chain, service_chain_count,
- flow_count, frame_sizes):
+ flow_count, frame_sizes, user_id=None, group_id=None):
"""Save results in json format file."""
filepaths = []
if json_file:
@@ -63,96 +66,102 @@ def save_json_result(result, json_file, std_json_path, service_chain, service_ch
if filepaths:
for file_path in filepaths:
LOG.info('Saving results in json file: %s...', file_path)
- with open(file_path, 'w') as jfp:
+ with open(file_path, 'w', encoding="utf-8") as jfp:
json.dump(result,
jfp,
indent=4,
sort_keys=True,
separators=(',', ': '),
default=lambda obj: obj.to_json())
-
-
-def byteify(data, ignore_dicts=False):
- # if this is a unicode string, return its string representation
- if isinstance(data, unicode):
- return data.encode('utf-8')
- # if this is a list of values, return list of byteified values
- if isinstance(data, list):
- return [byteify(item, ignore_dicts=ignore_dicts) for item in data]
- # if this is a dictionary, return dictionary of byteified keys and values
- # but only if we haven't already byteified it
- if isinstance(data, dict) and not ignore_dicts:
- return {byteify(key, ignore_dicts=ignore_dicts): byteify(value, ignore_dicts=ignore_dicts)
- for key, value in data.iteritems()}
- # if it's anything else, return it in its original form
- return data
+ # possibly change file ownership
+ if group_id is None:
+ group_id = user_id
+ if user_id is not None:
+ os.chown(file_path, user_id, group_id)
def dict_to_json_dict(record):
return json.loads(json.dumps(record, default=lambda obj: obj.to_json()))
-def get_intel_pci(nic_ports):
- """Returns the first two PCI addresses of sorted PCI list for Intel NIC (i40e, ixgbe)"""
+def get_intel_pci(nic_slot=None, nic_ports=None):
+ """Returns two PCI address that will be used for NFVbench
+
+ @param nic_slot: The physical PCIe slot number in motherboard
+ @param nic_ports: Array of two integers indicating the ports to use on the NIC
+
+ When nic_slot and nic_ports are both supplied, the function will just return
+ the PCI addresses for them. The logic used is:
+ (1) Run "dmidecode -t slot"
+ (2) Grep for "SlotID:" with given nic_slot, and derive the bus address;
+ (3) Based on given nic_ports, generate the pci addresses based on above
+ base address;
+
+ When either nic_slot or nic_ports is not supplied, the function will
+ traverse all Intel NICs that use the i40e or ixgbe driver, sorted by PCI
+ address, and return the first two available ports that are not bonded
+ (802.3ad).
+ """
+
+ if nic_slot and nic_ports:
+ dmidecode = subprocess.check_output(['dmidecode', '-t', 'slot'])
+ regex = r"(?<=SlotID:{}).*?(....:..:..\..)".format(nic_slot)
+ match = re.search(regex, dmidecode.decode('utf-8'), flags=re.DOTALL)
+ if not match:
+ return None
+
+ pcis = []
+ # On some servers, the "Bus Address" returned by dmidecode is not the
+ # base pci address of the NIC. So only keeping the bus part of the
+ # address for better compability.
+ bus = match.group(1)[:match.group(1).rindex(':') + 1] + "00."
+ for port in nic_ports:
+ pcis.append(bus + str(port))
+
+ return pcis
+
hx = r'[0-9a-fA-F]'
regex = r'({hx}{{4}}:({hx}{{2}}:{hx}{{2}}\.{hx}{{1}})).*(drv={driver}|.*unused=.*{driver})'
pcis = []
-
try:
trex_base_dir = '/opt/trex'
contents = os.listdir(trex_base_dir)
trex_dir = os.path.join(trex_base_dir, contents[0])
- process = subprocess.Popen(['python', 'dpdk_setup_ports.py', '-s'],
- cwd=trex_dir,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- devices, _ = process.communicate()
+ with subprocess.Popen(['python', 'dpdk_setup_ports.py', '-s'],
+ cwd=trex_dir,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) as process:
+ devices, _ = process.communicate()
except Exception:
devices = ''
for driver in ['i40e', 'ixgbe']:
- matches = re.findall(regex.format(hx=hx, driver=driver), devices)
+ matches = re.findall(regex.format(hx=hx, driver=driver), devices.decode("utf-8"))
if not matches:
continue
matches.sort()
- if nic_ports:
- if max(nic_ports) > len(matches) - 1:
- # If this is hard requirements (i.e. ports are defined
- # explictly), but there are not enough ports for the
- # current NIC, just skip the current NIC and looking for
- # next available one.
- continue
- else:
- return [matches[idx][1] for idx in nic_ports]
- else:
- for port in matches:
- intf_name = glob.glob("/sys/bus/pci/devices/%s/net/*" % port[0])
- if not intf_name:
- # Interface is not bind to kernel driver, so take it
- pcis.append(port[1])
- else:
- intf_name = intf_name[0][intf_name[0].rfind('/') + 1:]
- process = subprocess.Popen(['ip', '-o', '-d', 'link', 'show', intf_name],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ device_list = list(x[0].split('.')[0] for x in matches)
+ device_ports_list = {i: {'ports': device_list.count(i)} for i in device_list}
+ for port in matches:
+ intf_name = glob.glob("/sys/bus/pci/devices/%s/net/*" % port[0])
+ if intf_name:
+ intf_name = intf_name[0][intf_name[0].rfind('/') + 1:]
+ with subprocess.Popen(['ip', '-o', '-d', 'link', 'show', intf_name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) as process:
intf_info, _ = process.communicate()
- if not re.search('team_slave|bond_slave', intf_info):
- pcis.append(port[1])
-
- if len(pcis) == 2:
- break
+ if re.search('team_slave|bond_slave', intf_info.decode("utf-8")):
+ device_ports_list[port[0].split('.')[0]]['busy'] = True
+ for port in matches:
+ if not device_ports_list[port[0].split('.')[0]].get('busy'):
+ pcis.append(port[1])
+ if len(pcis) == 2:
+ break
return pcis
-multiplier_map = {
- 'K': 1000,
- 'M': 1000000,
- 'G': 1000000000
-}
-
-
def parse_flow_count(flow_count):
flow_count = str(flow_count)
input_fc = flow_count
@@ -164,13 +173,14 @@ def parse_flow_count(flow_count):
try:
flow_count = int(flow_count)
except ValueError:
- raise Exception("Unknown flow count format '{}'".format(input_fc))
+ raise Exception("Unknown flow count format '{}'".format(input_fc)) from ValueError
return flow_count * multiplier
def cast_integer(value):
- return int(value) if not isnan(value) else value
+ # force 0 value if NaN value from TRex to avoid error in JSON result parsing
+ return int(value) if not isnan(value) else 0
class RunLock(object):
@@ -187,8 +197,8 @@ class RunLock(object):
try:
self._fd = os.open(self._path, os.O_CREAT)
fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except (OSError, IOError):
- raise Exception('Other NFVbench process is running. Please wait')
+ except (OSError, IOError) as e:
+ raise Exception('Other NFVbench process is running. Please wait') from e
def __exit__(self, *args):
fcntl.flock(self._fd, fcntl.LOCK_UN)
@@ -200,3 +210,82 @@ class RunLock(object):
os.unlink(self._path)
except (OSError, IOError):
pass
+
+
+def get_divisors(n):
+ for i in range(1, int(n / 2) + 1):
+ if n % i == 0:
+ yield i
+ yield n
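+ # Usage example (illustrative): list(get_divisors(12)) -> [1, 2, 3, 4, 6, 12]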
+
+
+def lcm(a, b):
+ """
+ Calculate the least common multiple of the IP range size and the port range size,
+ which is used to derive the maximum possible number of flows.
+ """
+ if a != 0 and b != 0:
+ lcm_value = a * b // gcd(a, b)
+ return lcm_value
+ raise TypeError("IP size or port range can't be zero!")
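+ # Usage example (illustrative): lcm(4, 6) == 12 and lcm(253, 1000) == 253000
+ # (e.g. 253 usable IP addresses combined with 1000 UDP ports).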
+
+
+def find_tuples_equal_to_lcm_value(a, b, lcm_value):
+ """
+ Find numbers from two list matching a LCM value.
+ """
+ for x in a:
+ for y in b:
+ if lcm(x, y) == lcm_value:
+ yield (x, y)
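+ # Usage example (illustrative): list(find_tuples_equal_to_lcm_value([2, 3, 4], [6, 8], 24))
+ # -> [(3, 8)], since lcm(3, 8) == 24 and no other pair reaches that value.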
+
+
+def find_max_size(max_size, tuples, flow):
+ if tuples:
+ if max_size > tuples[-1][0]:
+ max_size = tuples[-1][0]
+ return int(max_size)
+ if max_size > tuples[-1][1]:
+ max_size = tuples[-1][1]
+ return int(max_size)
+
+ for i in range(max_size, 1, -1):
+ if flow % i == 0:
+ return int(i)
+ return 1
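+ # Usage examples (illustrative): find_max_size(10, [(3, 8)], 24) -> 3 (capped by the
+ # last matching tuple); find_max_size(10, [], 24) -> 8, the largest divisor of the
+ # flow count that does not exceed max_size.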
+
+
+def delete_server(nova_client, server):
+ try:
+ LOG.info('Deleting instance %s...', server.name)
+ nova_client.servers.delete(server.id)
+ except Exception:
+ LOG.exception("Instance %s deletion failed", server.name)
+
+
+def instance_exists(nova_client, server):
+ try:
+ nova_client.servers.get(server.id)
+ except NotFound:
+ return False
+ return True
+
+
+def waiting_servers_deletion(nova_client, servers):
+ LOG.info(' Waiting for %d instances to be fully deleted...', len(servers))
+ retry_count = 15 + len(servers) * 5
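+ # Illustration: for 3 instances, retry_count starts at 15 + 3 * 5 = 30,
+ # i.e. up to roughly 60 seconds of polling at 2-second intervals.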
+ while True:
+ retry_count -= 1
+ servers = [server for server in servers if instance_exists(nova_client, server)]
+ if not servers:
+ break
+
+ if retry_count:
+ LOG.info(' %d yet to be deleted by Nova, retries left=%d...',
+ len(servers), retry_count)
+ time.sleep(2)
+ else:
+ LOG.warning(
+ ' instance deletion verification time-out: %d still not deleted',
+ len(servers))
+ break