 nfvbench/cfg.default.yaml | 511 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 469 insertions(+), 42 deletions(-)
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index fa3d807..c76e738 100755..100644
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -25,8 +25,30 @@
# The only case where this field can be empty is when measuring a system that does not run
# OpenStack or when OpenStack APIs are not accessible or OpenStack API use is not
# desirable. In that case the EXT service chain must be used.
+#
+# If the openrc user is not admin, some parameters are mandatory and must be filled with valid values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+# WARNING: Not used if clouds_detail is set
openrc_file:
+# The OpenStack clouds configuration from clouds.yaml file to use.
+# clouds.yaml file must be in one of the following paths:
+# - ~/.config/openstack
+# - /etc/openstack
+# Note: If running in a container, this path must be valid in the container.
+# The only case where this field can be empty is when measuring a system that does not run
+# OpenStack or when OpenStack APIs are not accessible or OpenStack API use is not
+# desirable. In that case the EXT service chain must be used.
+#
+# If the user is not admin, some parameters are mandatory and must be filled with valid values in the config file, such as:
+# - availability_zone
+# - hypervisor_hostname
+# - vlans
+# If a value is set, this parameter disables the use of the openrc file
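+# Example (illustrative; the name must match a cloud entry in clouds.yaml):
+# clouds_detail: 'mycloud'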
+clouds_detail:
+
# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
vm_forwarder: testpmd
@@ -46,16 +68,16 @@ vm_image_file:
# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
flavor:
- # Number of vCPUs for the flavor
+ # Number of vCPUs for the flavor, must be at least 2!
vcpus: 2
# Memory for the flavor in MB
ram: 4096
# Size of local disk in GB
disk: 0
# metadata are supported and can be added if needed, optional
- # note that if your openstack does not have NUMA optimization
+ # note that if your OpenStack does not have NUMA optimization
# (cpu pinning and huge pages)
# you must comment out extra_specs completely otherwise
# loopback VM creation will fail
@@ -63,14 +85,38 @@ flavor:
"hw:cpu_policy": dedicated
"hw:mem_page_size": large
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the test VM image will have the multiqueue property added
+# (hw_vif_multiqueue_enabled='true').
+# The number of queues per interface will be set to the number of vCPUs configured for
+# the VM.
+# By default there is only 1 queue per interface.
+# The maximum allowed number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)]
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs used - up to a max of 8 queues.
+# Setting a lower value than the vCPU count should also work. For example if using 4 vCPUs and
+# vif_multiqueue_size is set to 2, OpenStack will create 4 queues per interface but the
+# test VM will only use the first 2 queues.
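+# Example (illustrative) for a 4-vCPU flavor using one queue per vCPU:
+# vif_multiqueue_size: 4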
+vif_multiqueue_size: 1
+
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a large
+# number of interfaces and worker threads, or many physical interfaces with multiple RSS queues.
+# Value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# availability_zone: 'nova'
+# If the openrc user is not admin, set a valid value
availability_zone:
# To force placement on a given hypervisor, set the name here
# (if multiple names are provided, the first will be used)
-# Leave empty to let openstack pick the hypervisor
+# Leave empty to let OpenStack pick the hypervisor
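+# Example (illustrative): compute_nodes: ['compute-1']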
compute_nodes:
+# If the openrc user is not admin, set a valid value for the hypervisor hostname
+# Example of value: hypervisor_hostname: "server1"
+hypervisor_hostname:
# Type of service chain to run, possible options are PVP, PVVP and EXT
# PVP - port to VM to port
@@ -99,10 +145,16 @@ flow_count: 10000
sriov: false
# Perform port to port loopback (direct or through switch)
-# Should be used with EXT service chain and no ARP (no_arp: true)
-# When enabled, the vlans property must contain the same VLAN id for all chains.
-# Can be overriden by --l2-loopback
+# e.g. for unit testing of the switch or of the bench itself.
+# When selected, this mode forces EXT service chain and no ARP mode
+# Destination MAC for each port is set to the other (peer) port MAC.
+# VLAN tagging is defined by 'vlans' & 'vlan_tagging' properties.
+# Can be overridden by --l2-loopback (including vlan tagging spec).
l2_loopback: false
+# No assumption is made about the loop implementation.
+# Multiple L2 VLAN-tagged service chains are allowed;
+# the VLAN ID lists' size must be at least service_chain_count.
+# If VLAN tagging is disabled, the service chain count is forced to 1.
# Resources created by NFVbench will not be removed
# Can be overridden by --no-cleanup
@@ -134,10 +186,17 @@ traffic_generator:
# `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
ip_addrs_step: 0.0.0.1
+
+  # `ip_src_static`: indicates whether the source IP remains constant or varies
+  # during traffic generation. Use True for a constant source IP and False for varying source IPs.
+  # Default value is True.
+ ip_src_static: True
+
# `tg_gateway_ip_addrs` base IP for traffic generator ports in the left and right networks to the VNFs
# chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used
# `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. Default is 0.0.0.1
- tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+ tg_gateway_ip_addrs: ['192.168.1.100', '192.168.2.100']
+ tg_gateway_ip_cidrs: ['192.168.1.0/24','192.168.2.0/24']
tg_gateway_ip_addrs_step: 0.0.0.1
# `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count
# must correspond to the public IP on the left and right networks
@@ -145,23 +204,43 @@ traffic_generator:
# must be the same subnet but not same IP as tg_gateway_ip_addrs.
# chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used
# `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
- gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
+ gateway_ip_addrs: ['192.168.1.1', '192.168.2.1']
gateway_ip_addrs_step: 0.0.0.1
+
+ # UDP DEFINED VARIABLES
+  # TRex picks UDP port 53 by default, but the ranges of UDP source and destination ports can also
+  # be defined in the configuration file by using the following attributes:
+ #
# `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53)
# `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53)
+ # `udp_src_port` and `udp_dst_port` can be defined by a single port or a range. Example:
+ # udp_src_port: 80
+ # udp_dst_port: ['1024','65000']
+ # `udp_port_step`: the step between two generated ports, default is equal to '1'
+ #
+ # NOTICE:
+  # Due to TRex internals, source port and destination port values are incremented
+  # and decremented simultaneously.
+  # So, in order to reach the highest possible number of packets, the size of the source port
+  # range minus the size of the destination port range should not be equal to 1,
+  # i.e.: |range[source_port] - range[destination_port]| != 1
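+  # Example (illustrative): generate source ports 1024, 1026, ..., 65000 using a step of 2:
+  # udp_src_port: ['1024', '65000']
+  # udp_port_step: '2'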
udp_src_port:
udp_dst_port:
+ udp_port_step: '1'
# VxLAN only: optionally specify what VLAN tag to use for the VxLAN overlay
# This is used if the vxlan tunnels are running on a specific VLAN.
# Leave empty if there is no VLAN tagging required, or specify the VLAN id to use
# for all VxLAN tunneled traffic
vtep_vlan:
- # VxLAN only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
+ # VxLAN and MPLS only: local/source vteps IP addresses for port 0 and 1 ['10.1.1.230', '10.1.1.231']
src_vteps:
# VxLAN only: remote IP address of the remote VTEPs that terminate all tunnels originating from local VTEPs
dst_vtep:
-
+  # The encapsulated L3/MPLS packet needs to traverse the L3 or MPLS fabric to reach its final dst_vtep.
+  # This parameter is required to resolve the first next-hop MAC address when that next hop is not the final dst_vtep.
+  # This parameter is mandatory for MPLS only.
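+  # Example (illustrative, one next-hop gateway IP per port):
+  # vtep_gateway_ips: ['10.1.1.1', '10.1.1.2']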
+ vtep_gateway_ips:
# L2 ADDRESSING OF UDP PACKETS
# Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain)
# Leave empty for PVP, PVVP, EXT with ARP
@@ -192,7 +271,7 @@ traffic_generator:
#
# Generator profiles are listed in the following format:
# `name`: Traffic generator profile name (use a unique name, no space or special character)
- # DFo not change this field
+ # Do not change this field
# `tool`: Traffic generator tool to be used (currently supported is `TRex`).
# Do not change this field
# `ip`: IP address of the traffic generator.
@@ -205,6 +284,13 @@ traffic_generator:
# software mode, therefore the performance of TRex will be significantly
# lower. ONLY applies to trex-local.
# Recommended to leave the default value (false)
+ # `limit_memory`: Specify the memory reserved for running the TRex traffic generator (in MB). Limit the amount
+ # of packet memory used. (Passed to dpdk as -m arg)
+ # ONLY applies to trex-local.
+ # `zmq_pub_port`: Specify the ZMQ pub port number for the TRex traffic generator instance (default value is 4500).
+ # ONLY applies to trex-local.
+ # `zmq_rpc_port`: Specify the ZMQ rpc port for the TRex traffic generator instance (default value is 4501).
+ # ONLY applies to trex-local.
# `interfaces`: Configuration of traffic generator interfaces.
# `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
# `interfaces.switch_port`: Leave empty (deprecated)
@@ -218,12 +304,33 @@ traffic_generator:
# Do not use unless you want to override the speed discovered by the
# traffic generator. Expected format: 10Gbps
#
+  # `platform`: Optional. Used to tune the performance and allocate the cores to the right NUMA node.
+ # See https://trex-tgn.cisco.com/trex/doc/trex_manual.html (6.2.3. Platform section configuration)
+ # for more details
+ # `platform.master_thread_id`: Hardware thread_id for control thread. (Valid value is mandatory if platform property is set)
+ # `platform.latency_thread_id`: Hardware thread_id for RX thread. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if`: Section defines info for interface pairs (according to the order in “interfaces” list). (Valid value is mandatory if platform property is set)
+ # Each section, starting with “- socket” defines info for different interface pair. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if.socket`: The NUMA node from which memory will be allocated for use by the interface pair. (Valid value is mandatory if platform property is set)
+ # `platform.dual_if.threads`: Hardware threads to be used for sending packets for the interface pair. (Valid value is mandatory if platform property is set)
+ # Threads are pinned to cores, so specifying threads actually determines the hardware cores.
+ # Example of values:
+ # platform:
+ # master_thread_id: 0
+ # latency_thread_id: 2
+ # dual_if:
+ # - socket: 0
+ # threads: [1]
+ #
generator_profile:
- name: trex-local
tool: TRex
ip: 127.0.0.1
cores: 4
software_mode: false
+ limit_memory: 1024
+ zmq_pub_port: 4500
+ zmq_rpc_port: 4501
interfaces:
- port: 0
pci:
@@ -232,14 +339,78 @@ traffic_generator:
pci:
switch_port:
intf_speed:
+ platform:
+ master_thread_id:
+ latency_thread_id:
+ dual_if:
+ - socket:
+ threads:
+
+# Use 'true' to force restart of local TRex server before next run
+# The local TRex server will be restarted even if the restart property is false when the generator configuration changes between runs
+restart: false
# Simpler override for the TRex core count and mbuf multiplier factor
# if empty defaults to the one specified in generator_profile.cores
cores:
+# Simpler override for the interface speed
+# if empty, the current generator_profile.intf_speed parameter applies
+# if value = 'auto' the auto-detection is forced
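+# Examples (illustrative):
+# intf_speed: 10Gbps
+# intf_speed: auto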
+intf_speed:
+
+# 'cores' and 'intf_speed' parameters can be overridden themselves
+# by respective options --cores and --intf-speed on the command-line.
+
+# By default, the real ports line rate is detected and used as
+# the reference for computing the theoretical maximum traffic load (100%).
+# Note that specifying 'intf_speed' makes it possible to artificially lower this
+# reference while not modifying the actual transmission bit rate.
+
+# The values of the following parameters are ignored on entry;
+# they are defined here only so that they appear in the reported configuration.
+# They will reflect the values active at run time (after overriding or detection).
+cores_used:
+intf_speed_used:
+intf_speed_detected:
+
+# A cache size value is passed to the TRex field engine (FE) at packet generation.
+# Can be overridden by --cache-size
+# More information for TRex performance:
+# https://trex-tgn.cisco.com/trex/doc/trex_stateless.html#_tutorial_field_engine_significantly_improve_performance
+# If cache_size = 0 (or empty): no cache will be used by TRex (default)
+# If cache_size < 0: cache_size will be set to flow count value
+cache_size: 0
+# The cache size is actually limited by the number of 64B mbufs configured in the TRex platform configuration (see TRex manual 6.2.2. Memory section configuration).
+# Note that the resulting value is finally capped to 10000, whatever the requested size is (a design limitation).
+
+# Specification of the TRex behaviour dealing with the i40e network card driver issue: Trex-528
+# see https://trex-tgn.cisco.com/youtrack/issue/trex-528
+# This issue states that if other ports on the same card
+# are in kernel mode, they could impair traffic counting.
+# Can be overridden by --i40e-mixed
+# Values can be:
+# ignore - don't consider the case (default)
+# exit - should the case arise, exit (TRex default behaviour)
+# unbind - unbind kernel bound ports (the former NFVbench behaviour)
+# The 'ignore' option should be OK once the issue has been fixed in the driver.
+# The 'unbind' option should not be used: there is no way to know how the other ports are currently being used.
+i40e_mixed:
+
+# TRex will use one 64B mbuf per pre-built cached packet. Assuming one pre-built cached packet per flow, this means that for a very large number of flows, the configured number of mbuf_64 will need to be set accordingly.
+mbuf_64:
+
# mbuffer ratio to use for TRex (see TRex documentation for more details)
mbuf_factor: 0.2
+# A switch to disable hdrh
+# hdrh is enabled by default and requires TRex v2.58 or higher
+disable_hdrh: false
+
+# List of latency percentiles values returned using hdrh
+# elements should be int or float between 0.0 and 100.0
+lat_percentiles: [25, 75, 99]
+
# -----------------------------------------------------------------------------
# These variables are not likely to be changed
@@ -255,7 +426,7 @@ generic_poll_sec: 2
# name of the loop VM
loop_vm_name: 'nfvbench-loop-vm'
-# Default names, subnets and CIDRs for PVP/PVVP networks (openstack only)
+# Default names, subnets and CIDRs for PVP/PVVP networks (OpenStack only)
#
# If a network with given name already exists it will be reused.
# - PVP only uses left and right
@@ -287,7 +458,7 @@ loop_vm_name: 'nfvbench-loop-vm'
# segmentation_id: 2001
# physical_network: phys_sriov1
#
-# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN):
+# For multi-chaining and non shared network mode (VLAN, SRIOV, VxLAN, MPLS):
# - the segmentation_id field if provided must be a list of values (as many as chains)
# - segmentation_id auto-indexing:
# the segmentation_id field can also be a single value that represents the base value from which
@@ -297,23 +468,42 @@ loop_vm_name: 'nfvbench-loop-vm'
# - the physical_network can be a single name (all VFs to be allocated on same physnet)
# or a list of physnet names to use different PFs
#
-# Example of 2-chain configuration:
-# internal_networks:
-# left:
-# segmentation_id: [2000, 2001]
-# physical_network: phys_sriov0
-# right:
-# segmentation_id: [2010, 2011]
-# physical_network: phys_sriov1
+# Example of 2-chain VLAN configuration:
+# internal_networks:
+# left:
+# segmentation_id: [2000, 2001]
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: [2010, 2011]
+# physical_network: phys_sriov1
+# Equivalent to (using auto-indexing):
+# internal_networks:
+# left:
+# segmentation_id: 2000
+# physical_network: phys_sriov0
+# right:
+# segmentation_id: 2010
+# physical_network: phys_sriov1
#
-# Equivalent to (using auto-indexing):
-# internal_networks:
-# left:
-# segmentation_id: 2000
-# physical_network: phys_sriov0
-# right:
-# segmentation_id: 2010
-# physical_network: phys_sriov1
+# - mpls_transport_labels is used only when MPLS encapsulation is enabled (mpls: true)
+# this parameter doesn't support auto-indexing because this is not a typical scenario
+# a list of values in the range 256-1048575 is expected, one value per chain
+#
+# In the configuration example below, 'segmentation_id' contains the inner MPLS label for each chain
+# and 'mpls_transport_labels' contains the outer transport MPLS label for each chain.
+# Example of 2-chain MPLS configuration:
+# internal_networks:
+# left:
+# network_type: mpls
+# segmentation_id: [2000, 2001]
+# mpls_transport_labels: [10000, 10000]
+# physical_network: phys_sriov0
+# right:
+# network_type: mpls
+# segmentation_id: [2010, 2011]
+# mpls_transport_labels: [11000, 11000]
+# physical_network: phys_sriov1
+
internal_networks:
left:
@@ -323,6 +513,7 @@ internal_networks:
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
right:
name: 'nfvbench-rnet'
subnet: 'nfvbench-rsubnet'
@@ -330,6 +521,7 @@ internal_networks:
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
middle:
name: 'nfvbench-mnet'
subnet: 'nfvbench-msubnet'
@@ -337,51 +529,201 @@ internal_networks:
network_type: 'vlan'
segmentation_id:
physical_network:
+ mpls_transport_labels:
+
+# IDLE INTERFACES: PVP, PVVP and non shared net only.
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If service_chain_shared_net is false, additional virtual interfaces can be
+# added at VM creation time; these interfaces will not carry any traffic and
+# can be used to test the impact of idle interfaces on the overall performance.
+# All these idle interfaces will use normal ports (not direct).
+# Number of idle interfaces per VM (none by default)
+idle_interfaces_per_vm: 0
+
+# A new network is created for each idle interface.
+# If service_chain_shared_net is true, the options below will be ignored
+# and no idle interfaces will be added.
+idle_networks:
+  # Prefix for all idle networks; the final name will append the chain ID and idle index
+ # e.g. "nfvbench-idle-net.0.4" chain 0 idle index 4
+ name: 'nfvbench-idle-net'
+ # Subnet name to use for all idle subnetworks
+ subnet: 'nfvbench-idle-subnet'
+ # CIDR to use for all idle networks (value should not matter)
+ cidr: '192.169.1.0/24'
+ # Type of network associated to the idle virtual interfaces (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the idle virtual interfaces
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ # Note that NFVbench will use as many consecutive segmentation IDs as needed.
+ # For example, for 4 PVP chains and 8 idle
+ # interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
+ # starting from the value provided.
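+  # Example (illustrative): with segmentation_id: 5000, 4 PVP chains and 8 idle
+  # interfaces per VM, the 32 consecutive VNIs 5000..5031 would be used.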
+ segmentation_id:
+ # physnet name to use for all idle interfaces
+ physical_network:
+
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface can be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface.
+use_management_port: false
+
+# If a network with given name already exists it will be reused.
+# Otherwise a new network is created for management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+ name: 'nfvbench-management-net'
+ # Subnet name to use for management subnetwork
+ subnet: 'nfvbench-management-subnet'
+ # CIDR to use for management network
+ cidr: '192.168.0.0/24'
+ gateway: '192.168.0.254'
+ # Type of network associated to the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+  # physnet name to use for the management interface
+ physical_network:
+
+# Floating IP for management interface
+# If use_floating_ip is true, floating IP will be set on management interface port
+# One floating IP per loop VM will be used (floating IPs are often limited;
+# use them in limited contexts, mainly for debugging). If there are 10 PVP chains, this will require 10
+# floating IPs. If there are 10 PVVP chains, it will require 20 floating IPs.
+use_floating_ip: false
+
+# If a network with given name already exists it will be reused.
+# Set same name as management_network if you want to use a floating IP from this network
+# Otherwise set name, subnet and CIDR information from your floating IP pool network
+# Floating network used to set floating IP on management port.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+ name: 'nfvbench-floating-net'
+ # Subnet name to use for floating subnetwork
+ subnet: 'nfvbench-floating-subnet'
+ # CIDR to use for floating network
+ cidr: '192.168.0.0/24'
+ # Type of network associated to the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+  # physnet name to use for the floating network
+ physical_network:
# In the scenario of PVVP + SRIOV, there is choice of how the traffic will be
# handled in the middle network. The default (false) will use vswitch, while
# SRIOV can be used by toggling below setting.
use_sriov_middle_net: false
-# EXT chain only. Prefix names of edge networks which will be used to send traffic via traffic generator.
+# EXT chain only. Prefix names of edge networks or list of edge network names
+# used to send traffic via the traffic generator.
#
# If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name.
#
# If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks.
-# An index will be appended to each network name to form the final name:
+# left and right can take either a string prefix or a list of arbitrary network names
+# If a string prefix is passed, an index will be appended to each network name to form the final name.
+# Example:
+# external_networks:
+# left: 'ext-lnet'
+# right: 'ext-rnet'
# ext-lnet0 ext-rnet0 for chain #0
# ext-lnet1 ext-rnet1 for chain #1
# etc...
+# If a list of strings is passed, each string in the list must be the name of the network used for the
+# chain indexed by the entry position in the list.
+# The list must have at least as many entries as there are chains
+# Example:
+# external_networks:
+# left: ['ext-lnet', 'ext-lnet2']
+# right: ['ext-rnet', 'ext-rnet2']
+#
external_networks:
- left: 'ext-lnet'
- right: 'ext-rnet'
+ left:
+ right:
+
+# PVP with L3 router in the packet path only.
+# Only use when l3_router option is True (see l3_router)
+# Prefix names of the edge networks which will be used to send traffic via the traffic generator.
+# If a network with given name already exists it will be reused.
+# Otherwise a new edge network will be created with that name, subnet and CIDR.
+#
+# gateway can be set in case of L3 traffic with edge networks - refer to edge_networks
+#
+# segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id
+# will be assigned by Neutron.
+# Must be unique for each network
+# physical_network can be set to pick a specific physical network - by default (empty) the
+# default physical network will be picked
+#
+edge_networks:
+ left:
+ name: 'nfvbench-net2'
+ router_name: 'router_left'
+ subnet: 'nfvbench-subnet2'
+ cidr: '192.168.3.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
+ right:
+ name: 'nfvbench-net3'
+ router_name: 'router_right'
+ subnet: 'nfvbench-subnet3'
+ cidr: '192.168.4.0/24'
+ gateway:
+ network_type:
+ segmentation_id:
+ physical_network:
# Use 'true' to enable VxLAN encapsulation support for the packets sent by the traffic generator
# When this option is enabled, the internal networks 'network_type' parameter value should be 'vxlan'
+# VxLAN and MPLS encapsulations are mutually exclusive: if 'vxlan' is true then 'mpls' should be false,
+# and vice versa
vxlan: false
-
+# Use 'true' to enable MPLS encapsulation support for the packets sent by the traffic generator
+# When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'
+# MPLS and VxLAN encapsulations are mutually exclusive: if 'mpls' is 'true' then 'vxlan' should be set to 'false',
+# and vice versa. no_flow_stats, no_latency_stats and no_latency_streams should be set to 'true' because these
+# features are not supported at the moment (when supported in the future, they will require
+# special NIC hardware). Only a 2-label stack is supported at the moment, where one label is transport and the
+# other is VPN; for more details please refer to 'mpls_transport_labels' and 'segmentation_id' in the networks configuration
+mpls: false
# Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator
# Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is
# needed for example if VLAN tagging is enabled on switch (access mode) or if you want to hook
# directly to a NIC).
# By default is set to true (which is the nominal use case with TOR and trunk mode to Trex ports)
-# If VxLAN is enabled, this option should be set to false (vlan tagging for encapsulated packets
+# If VxLAN or MPLS are enabled, this option should be set to false (vlan tagging for encapsulated packets
# is not supported). Use the vtep_vlan option to enable vlan tagging for the VxLAN overlay network.
vlan_tagging: true
-# Used only in the case of EXT chain and no openstack to specify the VLAN IDs to use.
-# This property is ignored when OpenStakc is used or in the case of l2-loopback.
+# Used to specify the VLAN IDs, only in the case of an EXT chain with no OpenStack or without admin access.
+# This property is ignored when OpenStack is used or when 'vlan_tagging' is disabled.
# If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using Neutron API.
# If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values
# If networks are not shared across chains (service_chain_shared_net=false), the list should have
# 2 list of vlan IDs
-# In the special case of l2-loopback the list should have the same VLAN id for all chains
# Examples:
# [1998, 1999] left network uses vlan 1998 right network uses vlan 1999
# [[1,2],[3,4]] chain 0 left vlan 1, right vlan 2 - chain 1 left vlan 3 right vlan 4
-# [1010, 1010] same VLAN id with l2-loopback enabled
-#
+# [1010, 1010] same vlan ID on both sides, for a typical l2-loopback test (*)
+# The VLAN lists may be oversized compared to the actual service chain count
+# (the lowest indexes are used), but an exception is raised if they are too short.
vlans: []
+# (*) actually there is no restriction: left/right IDs may differ
+# for some exotic purpose - see also the l2_loopback parameter.
# ARP is used to discover the MAC address of VNFs that run L3 routing.
# Used only with EXT chain.
@@ -390,6 +732,11 @@ vlans: []
# (see mac_addrs_left and mac_addrs_right)
no_arp: false
+# The loop VM (VPP forwarder) can use ARP to discover the next-hop MAC address.
+# False (default): do not send ARP but use statically configured device MACs instead (TRex gratuitous ARPs are not interpreted by VPP)
+# True: ARP requests are sent to find out next-hop MAC addresses (for instance an SDN-GW)
+loop_vm_arp: false
+
# Traffic Profiles
# You can add here more profiles as needed
# `l2frame_size` can be specified as any non-zero integer value to represent the size in bytes
@@ -416,6 +763,19 @@ traffic:
# Can be overridden by --no-traffic
no_traffic: false
+# Use an L3 router in the packet path. This option, if set, will create or reuse an OpenStack Neutron
+# router (PVP, PVVP) or reuse an existing L3 router (EXT) to route traffic to the destination VM.
+# Can be overridden by --l3-router
+l3_router: false
+
+# If l3_router is true, depending on the SUT ARP stale time configuration,
+# gratuitous ARP (GARP) from the TG port to the router is needed to keep traffic up.
+# Default value: 1 packet per second
+# This value needs to be lower than the SUT ARP stale time to avoid GARP packet drops
+# under high traffic load.
+periodic_gratuitous_arp: false
+gratuitous_arp_pps: 1
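+# Example (illustrative): send 2 GARP packets per second:
+# periodic_gratuitous_arp: true
+# gratuitous_arp_pps: 2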
+
# Test configuration
# The rate in pps for traffic going in the reverse direction in case of unidirectional flow. Defaults to 1.
@@ -482,6 +842,26 @@ debug: false
# Defaults to disabled
log_file:
+# One can specify a user ID for changing ownership of output log/json files
+# - empty: depends on whether the file already exists
+#   . yes: the file is replaced, its owner is unchanged
+#   . no: the file is created with root as owner
+# - 0: this is the root user ID
+# - other: will correspond (or not) to an existing user/group on the host
+# (the current user ID can be obtained with the command 'id -u')
+# Can be overridden by --user-id
+# Note also that the default value below can be overridden by a USER_ID env variable;
+# if nfvbench is run in a container, this information can be passed at its creation.
+# The overall precedence rule is: 'default_config (this) < env < config < command_line'
+user_id:
+
+# The group ID is defined similarly.
+# Can be overridden by --group-id
+# The default may be set through the GROUP_ID env variable.
+# Caveat: a user and a group with the same name may have different numerical IDs
+# (the current group ID can be obtained with the command 'id -g')
+group_id:
+
# When enabled, all results and/or logs will be sent to fluentd servers at the requested IPs and ports
# A list of one or more fluentd servers identified by their IPs and port numbers should be given.
# For each recipient it is possible to enable both sending logs and performance
@@ -517,10 +897,57 @@ factory_class: 'BasicFactory'
# Can be overridden by --user-label
user_label:
+# Custom information to be passed to results post-processing;
+# it will be included as is in the json report 'config' branch.
+# Useful for documenting or automating further treatments.
+# The value is any yaml object (=> open usage) - example:
+# |user_info:
+# | status: explore
+# | description:
+# | generator: VM
+# | attachment: direct
+# | target: lab-pf
+# | switch: qfx3500
+# Keys may be merged/overridden using the --user-info command line option
+# (the command-line parameter value is expressed as a json object string)
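+# Example of a command-line merge (illustrative, reusing the keys shown above):
+# --user-info '{"status": "explore", "switch": "qfx3500"}'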
+user_info:
-# THESE FIELDS SHOULD BE USED VERY RARELY
+
+# THESE FIELDS SHOULD BE USED VERY RARELY OR ON PURPOSE
# Skip vswitch configuration and retrieving of stats
# Can be overridden by --no-vswitch-access
# Should be left to the default value (false)
no_vswitch_access: false
+
+# Enable service mode for traffic capture from the TRex console (for debugging purposes)
+# Can be overridden by --service-mode
+# Should be left to the default value (false)
+service_mode: false
+
+# Disable extra flow stats (on high load traffic)
+# Can be overridden by --no-flow-stats
+# Should be left to the default value (false)
+no_flow_stats: false
+
+# Disable flow stats for latency traffic
+# Can be overridden by --no-latency-stats
+# Should be left to the default value (false)
+no_latency_stats: false
+
+# Disable latency measurements (no streams)
+# Can be overridden by --no-latency-streams
+# Should be left to the default value (false)
+no_latency_streams: false
+
+# Skip "end to end" connectivity check on traffic setup
+# Can be overridden by --no-e2e-check
+# Should be left to the default value (false)
+# This flag is usable for traffic generation only
+no_e2e_check: false
+
+# General purpose register (debugging flags)
+# Can be overridden by --debug-mask
+# Designed for development needs
+# The hexadecimal notation (0x...) is accepted.
+debug_mask: 0x00000000