Diffstat (limited to 'nfvbench/cfg.default.yaml')
-rw-r--r--  nfvbench/cfg.default.yaml | 712
1 file changed, 624 insertions, 88 deletions
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index 07d48f3..c76e738 100644
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -18,18 +18,41 @@
# Fields that can be overridden at the command line are marked with the corresponding
# option, e.g. "--interval"
-# The OpenStack openrc file to use (must be a valid full pathname). If running
+
+# The OpenStack openrc file to use - must be a valid full pathname. If running
# in a container, this path must be valid in the container.
#
# The only case where this field can be empty is when measuring a system that does not run
# OpenStack or when OpenStack APIs are not accessible or OpenStack API use is not
# desirable. In that case the EXT service chain must be used.
+#
+# If the openrc user is not admin, some parameters are mandatory and must be filled in with valid values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+# WARNING: not used if clouds_detail is set
openrc_file:
+# The OpenStack clouds configuration from clouds.yaml file to use.
+# clouds.yaml file must be in one of the following paths:
+# - ~/.config/openstack
+# - /etc/openstack
+# Note: If running in a container, this path must be valid in the container.
+# The only case where this field can be empty is when measuring a system that does not run
+# OpenStack or when OpenStack APIs are not accessible or OpenStack API use is not
+# desirable. In that case the EXT service chain must be used.
+#
+# If the user is not admin, some parameters are mandatory and must be filled in with valid values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+# If a value is set, this parameter disables the use of the openrc file
+clouds_detail:
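+# Example (hypothetical cloud name, for illustration only - the name must match
+# an entry in your clouds.yaml):
+# clouds_detail: 'mycloud'
+# with e.g. ~/.config/openstack/clouds.yaml containing something like:
+# clouds:
+#   mycloud:
+#     auth:
+#       auth_url: 'http://192.168.0.10:5000/v3'
+#       username: 'admin'
+#       password: 'secret'
+#       project_name: 'admin'
+#     region_name: 'RegionOne'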
+
# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
vm_forwarder: testpmd
-# By default (empty) NFVBench will try to locate a VM image file
+# By default (empty) NFVbench will try to locate a VM image file
# from the package root directory named "nfvbench-<version>.qcow2" and
# upload that file. The image name will be "nfvbench-<version>"
# This can be overridden by specifying here a pathname of a file
@@ -45,16 +68,16 @@ vm_image_file:
# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
flavor:
- # Number of vCPUs for the flavor
+ # Number of vCPUs for the flavor, must be at least 2!
vcpus: 2
# Memory for the flavor in MB
ram: 4096
# Size of local disk in GB
disk: 0
# metadata are supported and can be added if needed, optional
- # note that if your openstack does not have NUMA optimization
+ # note that if your OpenStack does not have NUMA optimization
# (cpu pinning and huge pages)
# you must comment out extra_specs completely otherwise
# loopback VM creation will fail
@@ -62,43 +85,38 @@ flavor:
"hw:cpu_policy": dedicated
"hw:mem_page_size": large
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the test VM image will get added the property to enable
+# multiqueue (hw_vif_multiqueue_enabled='true').
+# The number of queues per interface will be set to the number of vCPUs configured for
+# the VM.
+# By default there is only 1 queue per interface
+# The maximum allowed number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)]
+# When multiqueue is used the recommended setting is the same value as the
+# number of vCPUs used - up to a max of 8 queues.
+# Setting it to a lower value than the vCPU count should also work. For example if using 4 vCPU and
+# vif_multiqueue_size is set to 2, OpenStack will create 4 queues per interface but the
+# test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
+
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a large
+# number of interfaces and worker threads, or a lot of physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
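+# Example (hypothetical sizing): a profile with many chains and several RSS queues
+# per physical interface may require doubling the default, e.g.:
+# num_mbufs: 32768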
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
-# If the selected zone contains only 1 compute node and PVVP inter-node flow is selected,
-# application will use intra-node PVVP flow.
-# List of compute nodes can be specified, must be in given availability zone if not empty
-#availability_zone: 'nova'
+# availability_zone: 'nova'
+# If the openrc user is not admin, set a valid value
availability_zone:
+# To force placement on a given hypervisor, set the name here
+# (if multiple names are provided, the first will be used)
+# Leave empty to let OpenStack pick the hypervisor
compute_nodes:
-
-
-# Credentials for SSH connection to TOR switches.
-tor:
- # Leave type empty or switch list empty to skip TOR switches configuration.
- # Preferably use 'no_tor_access' to achieve the same behavior.
- # (skipping TOR config will require the user to pre-stitch the traffic generator interfaces
- # to the service chain under test, needed only if configured in access mode)
- type:
- # Switches are only needed if type is not empty.
- # You can configure 0, 1 or 2 switches
- # no switch: in this case NFVbench will not attempt to ssh to the switch
- # and stitching of traffic must be done externally
- # 1 switch: this assumes that both traffic generator interfaces are wired to the same switch
- # 2 switches: this is the recommended setting wuth redundant switches, in this case each
- # traffic generator interface must be wired to a different switch
- switches:
- - host:
- username:
- password:
- port:
-
-# Skip TOR switch configuration and retrieving of stats
-# Can be overriden by --no-tor-access
-no_tor_access: false
-
-# Skip vswitch configuration and retrieving of stats
-# Can be overriden by --no-vswitch-access
-no_vswitch_access: false
+# If the openrc user is not admin, set a valid value for the hypervisor hostname
+# Example of value: hypervisor_hostname: "server1"
+hypervisor_hostname:
# Type of service chain to run, possible options are PVP, PVVP and EXT
# PVP - port to VM to port
@@ -112,6 +130,9 @@ service_chain: 'PVP'
# Can be overridden by --service-chain-count
service_chain_count: 1
+# Specifies if all chains share the same right/left/middle networks
+service_chain_shared_net: false
+
# Total number of traffic flows for all chains and directions generated by the traffic generator.
# Minimum is '2 * service_chain_count', it is automatically adjusted if too small
# value was configured. Must be even.
@@ -119,17 +140,21 @@ service_chain_count: 1
# Can be overridden by --flow-count
flow_count: 10000
-# Used by PVVP chain to spawn VMs on different compute nodes
-# Can be overriden by --inter-node
-inter_node: false
-
# set to true if service chains should use SRIOV
# This requires SRIOV to be available on compute nodes
sriov: false
-# Skip interfaces config on EXT service chain
-# Can be overriden by --no-int-config
-no_int_config: false
+# Perform port to port loopback (direct or through switch)
+# e.g. for unitary testing of the switch or the bench itself.
+# When selected, this mode forces EXT service chain and no ARP mode
+# Destination MAC for each port is set to the other (peer) port MAC.
+# VLAN tagging is defined by 'vlans' & 'vlan_tagging' properties.
+# Can be overridden by --l2-loopback (including vlan tagging spec).
+l2_loopback: false
+# No assumption is made about the loop implementation.
+# Multiple L2 vlan tagged service chains are allowed,
+# the vlan ID lists' size must be at least service_chain_count.
+# If vlan tagging is disabled, the service chain count is forced to 1.
# Resources created by NFVbench will not be removed
# Can be overridden by --no-cleanup
@@ -146,58 +171,245 @@ traffic_generator:
default_profile: trex-local
# IP addresses for L3 traffic.
+ # This section describes the addresses to use to fill in the UDP packets sent by the
+ # traffic generator. If your VNFs are L2 forwarders, these fields below do not need to change.
+ # If your VNFs are L3 routers, the fields below must match the static routes in your VNFs
+ # so that UDP packets can be routed back to the peer port of the traffic generator.
+
# All of the IPs are used as base for IP sequence computed based on chain or flow count.
+ # (sim-devices-left)---(tg-gateway-left)---(vnf-left)- ...
+ # -(vnf-right)---(tg-gateway-right)---(sim-devices-right)
#
# `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count
+ # these are used for addressing virtual devices simulated by the traffic generator
+ # and must be in a different subnet than tg_gateway_ip_addrs and gateway_ip_addrs
# `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
- # `tg_gateway_ip_addrs` base IPs for traffic generator ports, quantity depends on chain count
- # `tg_gateway_ip_addrs__step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
- # `gateway_ip_addrs`: base IPs of router gateways on both networks, quantity depends on chain count
- # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
- # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
- # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
ip_addrs_step: 0.0.0.1
- tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+
+ # `ip_src_static`: an attribute specifying whether the source IP remains constant or varies
+ # across the generated flows. Use True for a constant IP and False for varying IPs.
+ # default value is True
+ ip_src_static: True
+
+ # `tg_gateway_ip_addrs`: base IPs for traffic generator ports in the left and right networks to the VNFs.
+ # chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
+ # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
+ tg_gateway_ip_addrs: ['192.168.1.100', '192.168.2.100']
+ tg_gateway_ip_cidrs: ['192.168.1.0/24','192.168.2.0/24']
tg_gateway_ip_addrs_step: 0.0.0.1
- gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
+ # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
+ # must correspond to the public IP on the left and right networks
+ # for each left-most and right-most VNF of every chain.
+ # must be the same subnet but not same IP as tg_gateway_ip_addrs.
+ # chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used
+ # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
+ gateway_ip_addrs: ['192.168.1.1', '192.168.2.1']
gateway_ip_addrs_step: 0.0.0.1
+
+ # UDP DEFINED VARIABLES
+ # TRex picks the default UDP port (53) but the ranges of UDP source and destination ports can also be
+ # defined in the configuration file by using the following attributes:
+ #
+ # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
+ # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
+ # `udp_src_port` and `udp_dst_port` can be defined by a single port or a range. Example:
+ # udp_src_port: 80
+ # udp_dst_port: ['1024','65000']
+ # `udp_port_step`: the step between two generated ports, default is equal to '1'
+ #
+ # NOTICE:
+ # Following TRex behaviour, incrementation and decrementation of the source port and destination
+ # port values occur simultaneously.
+ # So, in order to reach the highest possible number of distinct src/dst port combinations, it's
+ # recommended that the sizes of the source port range and the destination port range differ by
+ # exactly 1 (consecutive sizes are coprime, so the sequence repeats only after every combination
+ # has been generated), i.e.: |range[source_port] - range[destination_port]| = 1
udp_src_port:
udp_dst_port:
+ udp_port_step: '1'
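+ # Example (illustrative values): source and destination ranges whose sizes differ
+ # by exactly 1 (10 and 9 ports, coprime sizes), so all 90 src/dst combinations
+ # are generated before the sequence repeats:
+ # udp_src_port: ['1024','1033']
+ # udp_dst_port: ['2048','2056']
+ # udp_port_step: '1'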
+
+ # VxLAN only: optionally specify what VLAN tag to use for the VxLAN overlay
+ # This is used if the vxlan tunnels are running on a specific VLAN.
+ # Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
+ # for all VxLAN tunneled traffic
+ vtep_vlan:
+ # VxLAN and MPLS only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
+ src_vteps:
+ # VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs
+ dst_vtep:
+ # The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
+ # This parameter is required to resolve the first next-hop MAC address if the next hop is not the final dst_vtep.
+ # This parameter is mandatory for MPLS only
+ vtep_gateway_ips:
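+ # Example (hypothetical addresses) of a VxLAN overlay where the local VTEPs sit
+ # on VLAN 100 and all tunnels terminate on a single remote VTEP:
+ # vtep_vlan: 100
+ # src_vteps: ['10.1.1.230', '10.1.1.231']
+ # dst_vtep: '10.1.1.100'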
+ # L2 ADDRESSING OF UDP PACKETS
+ # Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
+ # Leave empty for PVP, PVVP, EXT with ARP
+ # Only used when `service_chain` is EXT and `no_arp` is true.
+ # - If both lists are empty the far end MAC of the traffic generator will be used for left and right
+ # (this is typically used to loop back on the first hop switch or using a loopback cable)
+ # - The length of each list must match the number of chains being used!
+ # - The index of each list must correspond to the chain index to ensure proper pairing.
+ # - Below is an example of using two chains:
+ # - mac_addrs_left: ['00:00:00:00:01:00', '00:00:00:00:02:00']
+ # - mac_addrs_right: ['00:00:00:00:01:01', '00:00:00:00:02:01']
+ # UDP packets sent on port 0 will use dest MAC '00:00:00:00:01:00' for chain #0 and
+ # dest MAC '00:00:00:00:02:00' for chain #1
+ # UDP packets sent on port 1 will use dest MAC '00:00:00:00:01:01' for chain #0 and
+ # dest MAC '00:00:00:00:02:01' for chain #1
+ # It is expected that the looping device (L2 forwarder) will rewrite the src and dst MAC
+ # of the looping UDP packet so that it can reach back to the peer port of the traffic
+ # generator.
+ #
+ mac_addrs_left:
+ mac_addrs_right:
# Traffic Generator Profiles
# In case you have multiple testbeds or traffic generators,
# you can define one traffic generator profile per testbed/traffic generator.
+ # In most cases you only need to fill in the pci address for the 2 ports used by the
+ # traffic generator and leave all other fields unchanged
#
# Generator profiles are listed in the following format:
# `name`: Traffic generator profile name (use a unique name, no space or special character)
+ # Do not change this field
# `tool`: Traffic generator tool to be used (currently supported is `TRex`).
+ # Do not change this field
# `ip`: IP address of the traffic generator.
- # `cores`: Specify the number of cores for TRex traffic generator. ONLY applies to trex-local.
+ # The default loopback address is used when the traffic generator runs on the same host
+ # as NFVbench.
+ # `cores`: Specify the number of cores for running the TRex traffic generator.
+ # ONLY applies to trex-local.
# `software_mode`: Advise TRex to use software mode which provides the best compatibility. But
# note that TRex will not use any hardware acceleration technology under
# software mode, therefore the performance of TRex will be significantly
# lower. ONLY applies to trex-local.
+ # Recommended to leave the default value (false)
+ # `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limits the amount
+ # of packet memory used (passed to DPDK as the -m arg).
+ # ONLY applies to trex-local.
+ # `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
+ # ONLY applies to trex-local.
+ # `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
+ # ONLY applies to trex-local.
# `interfaces`: Configuration of traffic generator interfaces.
# `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
- # `interfaces.switch_port`: Leave empty (reserved for advanced use cases)
+ # `interfaces.switch_port`: Leave empty (deprecated)
# `interfaces.pci`: The PCI address of the intel NIC interface associated to this port
+ # This field is required and cannot be empty
+ # Use lspci to list the PCI address of all devices
+ # Example of value: "0000:5e:00.0"
# `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
+ # Empty value (default) to use the speed discovered by the traffic generator.
+ # Recommended to leave this field empty.
+ # Do not use unless you want to override the speed discovered by the
+ # traffic generator. Expected format: 10Gbps
+ #
+ # `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA.
+ # See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
+ # for more details
+ # `platform.master_thread_id`: Hardware thread_id for control thread. (Valid value is mandatory if platform property is set)
+ # `platform.latency_thread_id`: Hardware thread_id for RX thread. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if`: Section defines info for interface pairs (according to the order in “interfaces” list). (Valid value is mandatory if platform property is set)
+ # Each section, starting with “- socket” defines info for different interface pair. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (Valid value is mandatory if platform property is set)
+ # Threads are pinned to cores, so specifying threads actually determines the hardware cores.
+ # Example of values:
+ # platform:
+ # master_thread_id: 0
+ # latency_thread_id: 2
+ # dual_if:
+ # - socket: 0
+ # threads: [1]
#
generator_profile:
- name: trex-local
tool: TRex
ip: 127.0.0.1
- cores: 3
+ cores: 4
software_mode: false
+ limit_memory: 1024
+ zmq_pub_port: 4500
+ zmq_rpc_port: 4501
interfaces:
- port: 0
- switch_port:
pci:
- - port: 1
switch_port:
+ - port: 1
pci:
- intf_speed: 10Gbps
+ switch_port:
+ intf_speed:
+ platform:
+ master_thread_id:
+ latency_thread_id:
+ dual_if:
+ - socket:
+ threads:
+
+# Use 'true' to force a restart of the local TRex server before the next run
+# The local TRex server will be restarted even if the restart property is false, in case of generator config changes between runs
+restart: false
+
+# Simpler override for the TRex core count and mbuf multiplier factor
+# if empty defaults to the one specified in generator_profile.cores
+cores:
+
+# Simpler override for the interface speed
+# if empty, the current generator_profile.intf_speed parameter applies
+# if value = 'auto' the auto-detection is forced
+intf_speed:
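+# Example values (for illustration):
+# intf_speed: 10Gbps   (hypothetical override of the detected port speed)
+# intf_speed: auto     (force auto-detection)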
+
+# 'cores' and 'intf_speed' parameters can be overridden themselves
+# by respective options --cores and --intf-speed on the command-line.
+
+# By default, the real ports line rate is detected and used as
+# the reference for computing the theoretical maximum traffic load (100%).
+# Note that specifying 'intf_speed' makes it possible to artificially lower this
+# reference while not modifying the actual transmission bit rate.
+
+# The values of the following parameters are ignored on entry;
+# they are defined here in order to appear in the reported configuration.
+# They will reflect the values active at run-time (after overriding or detection)
+cores_used:
+intf_speed_used:
+intf_speed_detected:
+
+# A cache size value is passed to the TRex field engine (FE) at packet generation.
+# Can be overridden by --cache-size
+# More information for TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform configuration (see the TRex manual, 6.2.2. Memory section configuration)
+# Note that the resulting value is finally capped to 10000, whatever the requested size is (by design limitation).
+
+# Specification of the TRex behaviour dealing with the i40e network card driver issue: Trex-528
+# see https://trex-tgn.cisco.com/youtrack/issue/trex-528
+# This issue states that if other ports, in the same card,
+# are in kernel mode, they could impair traffic counting.
+# Can be overridden by --i40e-mixed
+# Values can be:
+# ignore - don't consider the case (default)
+# exit - should the case arise, exit (TRex default behaviour)
+# unbind - unbind kernel bound ports (the former NFVbench behaviour)
+# The 'ignore' option might be OK as soon as the issue has been fixed in the driver.
+# The 'unbind' option should not be used! Who knows the current use of the other ports?
+i40e_mixed:
+
+# TRex will use 1 x 64B mbuf per pre-built cached packet. Assuming 1 pre-built cached packet per flow, this means that for a very large number of flows the configured mbuf_64 value will need to be set accordingly.
+mbuf_64:
+
+# mbuf factor to use for TRex (see the TRex documentation for more details)
+mbuf_factor: 0.2
+
+# A switch to disable hdrh
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
+
+# List of latency percentiles values returned using hdrh
+# elements should be int or float between 0.0 and 100.0
+lat_percentiles: [25, 75, 99]
# -----------------------------------------------------------------------------
# These variables are not likely to be changed
@@ -214,21 +426,28 @@ generic_poll_sec: 2
# name of the loop VM
loop_vm_name: 'nfvbench-loop-vm'
-# Default names, subnets and CIDRs for PVP/PVVP networks
+# Default names, subnets and CIDRs for PVP/PVVP networks (OpenStack only)
+#
# If a network with given name already exists it will be reused.
# - PVP only uses left and right
# - PVVP uses left, middle and right
# - for EXT chains, this structure is not relevant - refer to external_networks
# Otherwise a new internal network will be created with that name, subnet and CIDR.
-#
-# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
-# will be assigned by Neutron.
-# Must be unique for each network
+#
+# network_type must be 'vlan' (for VLAN and SRIOV) or 'vxlan' (for VxLAN)
+# all 3 networks must use the same network type in this release
+# segmentation_id can be set to enforce a specific segmentation id (vlan ID or VNI if vxlan)
+# by default (empty) the segmentation id will be assigned by Neutron.
+# If specified, it must be unique for each network
+# For multi-chaining, see notes below
# physical_network can be set to pick a specific physical network - by default (empty) the
# default physical network will be picked
-# In the case of SR-IOV, both physical_network and segmentation ID must be provided
-# For example to setup PVP using 2 different SR-IOV ports, you must put the appropriate physnet
+# SR-IOV: both physical_network and VLAN segmentation ID must be provided
+# VxLAN: the VNI must generally be provided (except special Neutron VxLAN implementations)
+#
+# For example to setup 1xPVP using 2 different SR-IOV ports, you must put the appropriate physnet
# names under left.physical_network and right.physical_network.
+# For multi-chaining and non shared networks, see the notes further below.
# Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1)
# using VLAN ID 2000 and 2001:
# internal_networks:
@@ -238,56 +457,286 @@ loop_vm_name: 'nfvbench-loop-vm'
# right:
# segmentation_id: 2001
# physical_network: phys_sriov1
+#
+# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN, MPLS):
+# - the segmentation_id field if provided must be a list of values (as many as chains)
+# - segmentation_id auto-indexing:
+# the segmentation_id field can also be a single value that represents the base value from which
+# values for each chain is derived using the chain ID as an offset. For example
+# if 2000 is specified, NFVbench will use 2000 for chain 0, 2001 for chain 1 etc...
+# The ranges of all the networks must not overlap.
+# - the physical_network can be a single name (all VFs to be allocated on the same physnet)
+#   or a list of physnet names to use different PFs
+#
+# Example of 2-chain VLAN configuration:
+# internal_networks:
+# left:
+# segmentation_id: [2000, 2001]
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: [2010, 2011]
+# physical_network: phys_sriov1
+# Equivalent to (using auto-indexing):
+# internal_networks:
+# left:
+# segmentation_id: 2000
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: 2010
+# physical_network: phys_sriov1
+#
+# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true);
+#   this parameter doesn't support auto-indexing because this is not a typical scenario.
+#   A list of values in the range 256-1048575 is expected, one value per chain.
+#
+# In the below configuration example 'segmentation_id' contains the inner MPLS label for each chain
+# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain
+# Example of 2-chain MPLS configuration:
+# internal_networks:
+# left:
+# network_type: mpls
+# segmentation_id: [2000, 2001]
+# mpls_transport_labels: [10000, 10000]
+# physical_network: phys_sriov0
+# right:
+# network_type: mpls
+# segmentation_id: [2010, 2011]
+# mpls_transport_labels: [11000, 11000]
+# physical_network: phys_sriov1
+
internal_networks:
left:
- name: 'nfvbench-net0'
- subnet: 'nfvbench-subnet0'
+ name: 'nfvbench-lnet'
+ subnet: 'nfvbench-lsubnet'
cidr: '192.168.1.0/24'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
right:
- name: 'nfvbench-net1'
- subnet: 'nfvbench-subnet1'
+ name: 'nfvbench-rnet'
+ subnet: 'nfvbench-rsubnet'
cidr: '192.168.2.0/24'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
middle:
- name: 'nfvbench-net2'
- subnet: 'nfvbench-subnet2'
+ name: 'nfvbench-mnet'
+ subnet: 'nfvbench-msubnet'
cidr: '192.168.3.0/24'
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
+
+# IDLE INTERFACES: PVP, PVVP and non shared net only.
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If service_chain_shared_net is false, additional virtual interfaces can be
+# added at VM creation time, these interfaces will not carry any traffic and
+# can be used to test the impact of idle interfaces in the overall performance.
+# All these idle interfaces will use normal ports (not direct).
+# Number of idle interfaces per VM (none by default)
+idle_interfaces_per_vm: 0
+
+# A new network is created for each idle interface.
+# If service_chain_shared_net is true, the options below will be ignored
+# and no idle interfaces will be added.
+idle_networks:
+ # Prefix for all idle networks, the final name will append the chain ID and idle index
+ # e.g. "nfvbench-idle-net.0.4" chain 0 idle index 4
+ name: 'nfvbench-idle-net'
+ # Subnet name to use for all idle subnetworks
+ subnet: 'nfvbench-idle-subnet'
+ # CIDR to use for all idle networks (value should not matter)
+ cidr: '192.169.1.0/24'
+ # Type of network associated to the idle virtual interfaces (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the idle virtual interfaces
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ # Note that NFVbench will use as many consecutive segmentation IDs as needed.
+ # For example, for 4 PVP chains and 8 idle
+ # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
+ # starting from the value provided.
+ segmentation_id:
+ # physnet name to use for all idle interfaces
+ physical_network:
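+ # Example (hypothetical): 2 PVP chains with 4 idle interfaces per VM over vxlan
+ # would consume 8 consecutive VNIs starting at the base value below:
+ # idle_interfaces_per_vm: 4
+ # idle_networks:
+ #   network_type: 'vxlan'
+ #   segmentation_id: 5000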
+
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, additional virtual interface can be
+# added at VM creation time, this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface
+use_management_port: false
+
+# If a network with given name already exists it will be reused.
+# Otherwise a new network is created for management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+ name: 'nfvbench-management-net'
+ # Subnet name to use for management subnetwork
+ subnet: 'nfvbench-management-subnet'
+ # CIDR to use for management network
+ cidr: '192.168.0.0/24'
+ gateway: '192.168.0.254'
+ # Type of network associated to the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the management interface
+ physical_network:
+
+# Floating IP for management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port
+# One floating IP per loop VM will be used (floating IPs are often limited,
+# use them in a limited context, mainly for debugging). If there are 10 PVP chains, this will require 10
+# floating IPs. If 10 PVVP chains, it will require 20 floating IPs
+use_floating_ip: false
+
+# If a network with given name already exists it will be reused.
+# Set same name as management_network if you want to use a floating IP from this network
+# Otherwise set name, subnet and CIDR information from your floating IP pool network
+# Floating network used to set floating IP on management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+ name: 'nfvbench-floating-net'
+ # Subnet name to use for floating subnetwork
+ subnet: 'nfvbench-floating-subnet'
+ # CIDR to use for floating network
+ cidr: '192.168.0.0/24'
+ # Type of network associated to the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the floating network
+ physical_network:
# In the scenario of PVVP + SRIOV, there is choice of how the traffic will be
# handled in the middle network. The default (false) will use vswitch, while
# SRIOV can be used by toggling below setting.
use_sriov_middle_net: false
-# EXT chain only. Names of edge networks which will be used to send traffic via traffic generator.
+# EXT chain only. Prefix names of edge networks or list of edge network names
+# used to send traffic via traffic generator.
+#
+# If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
+#
+# If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
+# left and right can take either a string prefix or a list of arbitrary network names
+# If a string prefix is passed, an index will be appended to each network name to form the final name.
+# Example:
+# external_networks:
+# left: 'ext-lnet'
+# right: 'ext-rnet'
+# ext-lnet0 ext-rnet0 for chain #0
+# ext-lnet1 ext-rnet1 for chain #1
+# etc...
+# If a list of strings is passed, each string in the list must be the name of the network used for the
+# chain indexed by the entry position in the list.
+# The list must have at least as many entries as there are chains
+# Example:
+# external_networks:
+# left: ['ext-lnet', 'ext-lnet2']
+# right: ['ext-rnet', 'ext-rnet2']
+#
external_networks:
- left: 'nfvbench-net0'
- right: 'nfvbench-net1'
+ left:
+ right:
+
+# PVP with L3 router in the packet path only.
+# Only use when l3_router option is True (see l3_router)
+# Prefix names of edge networks which will be used to send traffic via traffic generator.
+# If a network with given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+ left:
+ name: 'nfvbench-net2'
+ router_name: 'router_left'
+ subnet: 'nfvbench-subnet2'
+ cidr: '192.168.3.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+ right:
+ name: 'nfvbench-net3'
+ router_name: 'router_right'
+ subnet: 'nfvbench-subnet3'
+ cidr: '192.168.4.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+# Use 'true' to enable VxLAN encapsulation of the traffic sent by the traffic generator
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'
+# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false
+# and vice versa
+vxlan: false
+# Use 'true' to enable MPLS encapsulation of the traffic sent by the traffic generator
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'
+# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be set to 'false'
+# and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be set to 'true' because these
+# features are not supported at the moment. In the future, when these features are supported, they will require
+# special NIC hardware. Only a 2-label stack is supported at the moment, where one label is transport and the other
+# is VPN; for more details please refer to 'mpls_transport_labels' and 'segmentation_id' in the networks configuration
+mpls: false
# Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
-# Leave empty you do not want the traffic generator to insert the VLAN tag. This is
-# needed for example if VLAN tagging is enabled on switch (trunk mode) or if you want to hook directly to a NIC
-# By default is set to true (which is the nominal use case with TOR and trunk mode to Trex)
+# Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
+# needed for example if VLAN tagging is enabled on switch (access mode) or if you want to hook
+# directly to a NIC).
+# By default it is set to true (which is the nominal use case with TOR and trunk mode to TRex ports)
+# If VxLAN or MPLS are enabled, this option should be set to false (vlan tagging for encapsulated packets
+# is not supported). Use the vtep_vlan option to enable vlan tagging for the VxLAN overlay network.
vlan_tagging: true
-# Specify only when you want to override VLAN IDs used for tagging with own values (exactly 2).
-# Default behavior of VLAN tagging is to retrieve VLAN IDs from OpenStack networks provided above.
-# In case of VxLAN this setting is ignored and only vtep_vlan from traffic generator profile is used.
-# Example: [1998, 1999]
+# Used only in the case of EXT chain and no OpenStack or not admin access to specify the VLAN IDs to use.
+# This property is ignored when OpenStack is used or when 'vlan_tagging' is disabled.
+# If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using Neutron API.
+# If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
+# If networks are not shared across chains (service_chain_shared_net=false), the list should have
+# 2 lists of vlan IDs
+# Examples:
+# [1998, 1999] left network uses vlan 1998 right network uses vlan 1999
+# [[1,2],[3,4]] chain 0 left vlan 1, right vlan 2 - chain 1 left vlan 3 right vlan 4
+# [1010, 1010] same vlan ID on both sides, for a typical l2-loopback test (*)
+# The vlan lists may be oversized, compared to the actual service chain count
+# (lowest indexes are used) but an exception is raised if they are too short.
vlans: []
-
-# Used only with EXT chain. MAC addresses of traffic generator ports are used as destination
-# if 'no_arp' is set to 'true'. Otherwise ARP requests are sent to find out destination MAC addresses.
+# (*) actually there is no restriction, left/right IDs may differ
+# for some exotic purpose - see also the l2_loopback parameter.
+
+# ARP is used to discover the MAC address of VNFs that run L3 routing.
+# Used only with EXT chain.
+# False (default): ARP requests are sent to find out dest MAC addresses.
+# True: do not send ARP but use provisioned dest macs instead
+# (see mac_addrs_left and mac_addrs_right)
no_arp: false
+# Loop VM (VPP forwarder) can use ARP to discover next hop mac address
+# False (default): do not send ARP but use statically configured device MACs instead (TRex gratuitous ARPs are not interpreted by VPP)
+# True: ARP requests are sent to find out next hop MAC addresses (for instance SDN-GW)
+loop_vm_arp: false
+
# Traffic Profiles
# You can add here more profiles as needed
# `l2frame_size` can be specified in any none zero integer value to represent the size in bytes
@@ -314,9 +763,18 @@ traffic:
# Can be overridden by --no-traffic
no_traffic: false
-# Do not reset tx/rx counters prior to running
-# Can be overriden by --no-reset
-no_reset: false
+# Use an L3 router in the packet path. If set, this option will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
+# If l3_router is true then, depending on the SUT ARP stale time configuration,
+# Gratuitous ARP (GARP) from the TG port to the router is needed to keep traffic up
+# Default value: 1 packet per second
+# This value needs to keep the GARP period below the SUT ARP stale time, to tolerate
+# GARP packet drops in case of high load traffic
+periodic_gratuitous_arp: false
+gratuitous_arp_pps: 1
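+# Example (hypothetical SUT): if the SUT ARP stale time is 30s, sending 1 GARP
+# per second keeps the router ARP entries fresh with ample margin:
+# periodic_gratuitous_arp: true
+# gratuitous_arp_pps: 1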
# Test configuration
@@ -343,6 +801,9 @@ duration_sec: 60
# Can be overridden by --interval
interval_sec: 10
+# Default pause between iterations of a binary search (NDR/PDR)
+pause_sec: 2
+
# NDR / PDR configuration
measurement:
# Drop rates represent the ratio of dropped packet to the total number of packets sent.
@@ -381,6 +842,26 @@ debug: false
# Defaults to disabled
log_file:
+# One can specify a user ID for changing ownership of output log/json files
+# - empty: depends on whether the file already exists
+#   . yes? it is replaced, owner is unchanged
+#   . no?  it is created with root as owner
+# - 0: this is the root user ID
+# - other: will correspond (or not) to an existing user/group on the host
+#   (the current user ID can be obtained with the command 'id -u')
+# Can be overridden by --user-id
+# Consider also that the default value below can be overridden by a USER_ID env variable;
+# if nfvbench is run in a container, this information can be passed at its creation.
+# The overall precedence rule is: 'default_config (this) < env < config < command_line'
+user_id:
+
+# Similarly, a group ID can be defined
+# Can be overridden by --group-id
+# Default may be set through the GROUP_ID env variable
+# Caveat: a user and a group with the same name may have different numerical IDs
+# (the current group ID can be obtained with the command 'id -g')
+group_id:
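+# Example (hypothetical IDs, as reported by 'id -u' and 'id -g' on the host):
+# user_id: 1000
+# group_id: 1000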
+
# When enabled, all results and/or logs will be sent to a fluentd servers at the requested IPs and ports
# A list of one or more fluentd servers identified by their IPs and port numbers should be given.
# For each recipient it is possible to enable both sending logs and performance
@@ -415,3 +896,58 @@ factory_class: 'BasicFactory'
# Custom label added for every perf record generated during this run.
# Can be overriden by --user-label
user_label:
+
+# Custom information to be passed to results post-processing,
+# they will be included as is in the json report 'config' branch.
+# Useful for documenting or automating further treatments.
+# The value is any yaml object (=> open usage) - example:
+# |user_info:
+# | status: explore
+# | description:
+# | generator: VM
+# | attachment: direct
+# | target: lab-pf
+# | switch: qfx3500
+# Keys may be merged/overridden using the --user-info command line option
+# (the command-line parameter value is expressed as a json object string)
+user_info:
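+# Example of a matching command-line override (hypothetical values, passed as a
+# json object string):
+# --user-info '{"status": "explore", "switch": "qfx3500"}'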
+
+
+# THESE FIELDS SHOULD BE USED VERY RARELY OR ON PURPOSE
+
+# Skip vswitch configuration and retrieving of stats
+# Can be overridden by --no-vswitch-access
+# Should be left to the default value (false)
+no_vswitch_access: false
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-streams
+# Should be left to the default value (false)
+no_latency_streams: false
+
+# Skip "end to end" connectivity check on traffic setup
+# Can be overridden by --no-e2e-check
+# Should be left to the default value (false)
+# This flag is usable for traffic generation only
+no_e2e_check: false
+
+# General purpose register (debugging flags)
+# Can be overridden by --debug-mask
+# Designed for development needs
+# The hexadecimal notation (0x...) is accepted.
+debug_mask: 0x00000000