# # NFVbench default configuration file # # This configuration file is ALWAYS loaded by NFVbench and should never be modified by users. # To specify your own property values, always define them in a separate config file # and pass that file to the script using -c or --config # Property values in that config file will override the default values in the current file # --- # IMPORTANT CUSTOMIZATION NOTES # There are roughly 2 types of NFVbench config based on the OpenStack encaps used: # - VLAN (OVS, OVS-DPDK, ML2/VPP) # Many of the fields to customize are relevant to only 1 of the 2 encaps # These will be clearly labeled "VxLAN only" or "VLAN only" # Fields that are not applicable will not be used by NFVbench and can be left empty # # All fields are applicable to all encaps/traffic generators unless explicitly marked otherwise. # Fields that can be over-ridden at the command line are marked with the corresponding # option, e.g. "--interval" # The OpenStack openrc file to use - must be a valid full pathname. If running # in a container, this path must be valid in the container. # # The only case where this field can be empty is when measuring a system that does not run # OpenStack or when OpenStack APIs are not accessible or OpenStack APis use is not # desirable. In that case the EXT service chain must be used. openrc_file: # Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd'] vm_forwarder: testpmd # By default (empty) NFVbench will try to locate a VM image file # from the package root directory named "nfvbench-.qcow2" and # upload that file. The image name will be "nfvbench-" # This can be overridden by specifying here a pathname of a file # that follows the same naming convention. # In most cases, this field should be left empty as the packaging should # include the proper VM image file vm_image_file: # Name of the flavor to use for the loopback VMs # # If the provided name is an exact match to a flavor name known by OpenStack # (as shown from 'nova flavor-list'), that flavor will be reused. # Otherwise, a new flavor will be created with attributes listed below. flavor_type: 'nfvbench.medium' # Custom flavor attributes flavor: # Number of vCPUs for the flavor vcpus: 2 # Memory for the flavor in MB ram: 4096 # Size of local disk in GB disk: 0 # metadata are supported and can be added if needed, optional # note that if your openstack does not have NUMA optimization # (cpu pinning and huge pages) # you must comment out extra_specs completely otherwise # loopback VM creation will fail extra_specs: "hw:cpu_policy": dedicated "hw:mem_page_size": large # Name of the availability zone to use for the test VMs # Must be one of the zones listed by 'nova availability-zone-list' # availability_zone: 'nova' availability_zone: # To force placement on a given hypervisor, set the name here # (if multiple names are provided, the first will be used) # Leave empty to let openstack pick the hypervisor compute_nodes: # Type of service chain to run, possible options are PVP, PVVP and EXT # PVP - port to VM to port # PVVP - port to VM to VM to port # EXT - external chain used only for running traffic and checking traffic generator counters, # all other parts of chain must be configured manually # Can be overriden by --service-chain service_chain: 'PVP' # Total number of service chains, every chain has own traffic stream # Can be overriden by --service-chain-count service_chain_count: 1 # Specifies if all chains share the same right/left/middle networks service_chain_shared_net: false # Total number of traffic flows for all chains and directions generated by the traffic generator. # Minimum is '2 * service_chain_count', it is automatically adjusted if too small # value was configured. Must be even. # Every flow has packets with different IPs in headers # Can be overriden by --flow-count flow_count: 10000 # set to true if service chains should use SRIOV # This requires SRIOV to be available on compute nodes sriov: false # Perform port to port loopback (direct or through switch) # Should be used with EXT service chain and no ARP (no_arp: true) # When enabled, the vlans property must contain the same VLAN id for all chains. # Can be overriden by --l2-loopback l2_loopback: false # Resources created by NFVbench will not be removed # Can be overriden by --no-cleanup no_cleanup: false # Configuration for traffic generator traffic_generator: # Name of the traffic generator, only for informational purposes host_name: 'nfvbench_tg' # this is the default traffic generator profile to use # the name must be defined under generator_profile # you can override the traffic generator to use using the # -g or --traffic-gen option at the command line default_profile: trex-local # IP addresses for L3 traffic. # This section describes the addresses to use to fill in the UDP packets sent by the # traffic generator. If you VNFs are L2 forwarders, these fields below do not need to change. # If your VNFs are L3 routers, the fields below must match the static routes in your VNFs # so that UDP packets can be routed back to the peer port of the traffic generator. # All of the IPs are used as base for IP sequence computed based on chain or flow count. # (sim-devices-left)---(tg-gateway-left)---(vnf-left)- ... # -(vnf-right)---(tg-gateway-right)---(sim-devices-right) # # `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count # these are used for addressing virtual devices simulated by the traffic generator # and be a different subnet than tg_gateway_ip_addrs and gateway_ip_addrs # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1. ip_addrs: ['10.0.0.0/8', '20.0.0.0/8'] ip_addrs_step: 0.0.0.1 # `tg_gateway_ip_addrs` base IP for traffic generator ports in the left and right networks to the VNFs # chain count consecutive IP addresses spaced by tg_gateway_ip_addrs_step will be used # `tg_gateway_ip_addrs__step`: step for generating traffic generator gateway sequences. default is 0.0.0.1 tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100'] tg_gateway_ip_addrs_step: 0.0.0.1 # `gateway_ip_addrs`: base IPs of VNF router gateways (left and right), quantity used depends on chain count # must correspond to the public IP on the left and right networks # for each left-most and right-most VNF of every chain. # must be the same subnet but not same IP as tg_gateway_ip_addrs. # chain count consecutive IP addresses spaced by gateway_ip_addrs_step will be used # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1 gateway_ip_addrs: ['1.1.0.2', '2.2.0.2'] gateway_ip_addrs_step: 0.0.0.1 # `udp_src_port`: the source port for sending UDP traffic, default is picked by TRex (53) # `udp_dst_port`: the destination port for sending UDP traffic, default is picked by TRex (53) udp_src_port: udp_dst_port: # L2 ADDRESSING OF UDP PACKETS # Lists of dest MAC addresses to use on each traffic generator port (one dest MAC per chain) # Leave empty for PVP, PVVP, EXT with ARP # Only used when `service_chain` is EXT and `no_arp` is true. # - If both lists are empty the far end MAC of the traffic generator will be used for left and right # (this is typicaly used to loop back on the first hop switch or using a loopback cable) # - The length of each list must match the number of chains being used! # - The index of each list must correspond to the chain index to ensure proper pairing. # - Below is an example of using two chains: # - mac_addrs_left: ['00:00:00:00:01:00', '00:00:00:00:02:00'] # - mac_addrs_right: ['00:00:00:00:01:01', '00:00:00:00:02:01'] # UDP packets sent on port 0 will use dest MAC '00:00:00:00:01:00' for chain #0 and # dest MAC '00:00:00:00:02:00' for chain #1 # UDP packets sent on port 1 will use dest MAC '00:00:00:00:01:01' for chain #0 and # dest MAC '00:00:00:00:02:01' for chain #1 # It is expected that the looping device (L2 forwarder) will rewrite the src and dst MAC # of the looping UDP packet so that it can reach back to the peer port of the traffic # generator. # mac_addrs_left: mac_addrs_right: # Traffic Generator Profiles # In case you have multiple testbeds or traffic generators, # you can define one traffic generator profile per testbed/traffic generator. # In most cases you only need to fill in the pci address for the 2 ports used by the # traffic generator and leave all other fields unchanged # # Generator profiles are listed in the following format: # `name`: Traffic generator profile name (use a unique name, no space or special character) # DFo not change this field # `tool`: Traffic generator tool to be used (currently supported is `TRex`). # Do not change this field # `ip`: IP address of the traffic generator. # The default loopback address is used when the traffic generator runs on the same host # as NFVbench. # `cores`: Specify the number of cores for running the TRex traffic generator. # ONLY applies to trex-local. # `software_mode`: Advice TRex to use software mode which provides the best compability. But # note that TRex will not use any hardware acceleration technology under # software mode, therefore the performance of TRex will be significantly # lower. ONLY applies to trex-local. # Recommended to leave the default value (false) # `interfaces`: Configuration of traffic generator interfaces. # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.) # `interfaces.switch_port`: Leave empty (deprecated) # `interfaces.pci`: The PCI address of the intel NIC interface associated to this port # This field is required and cannot be empty # Use lspci to list the PCI address of all devices # Example of value: "0000:5e:00.0" # `intf_speed`: The speed of the interfaces used by the traffic generator (per direction). # Empty value (default) to use the speed discovered by the traffic generator. # Recommended to leave this field empty. # Do not use unless you want to override the speed discovered by the # traffic generator. Expected format: 10Gbps # generator_profile: - name: trex-local tool: TRex ip: 127.0.0.1 cores: 3 software_mode: false interfaces: - port: 0 pci: switch_port: - port: 1 pci: switch_port: intf_speed: # ----------------------------------------------------------------------------- # These variables are not likely to be changed # Number of seconds to wait for VMs to pass traffic in both directions check_traffic_time_sec: 200 # General retry count generic_retry_count: 100 # General poll period generic_poll_sec: 2 # name of the loop VM loop_vm_name: 'nfvbench-loop-vm' # Default names, subnets and CIDRs for PVP/PVVP networks # If a network with given name already exists it will be reused. # - PVP only uses left and right # - PVVP uses left, middle and right # - for EXT chains, this structure is not relevant - refer to external_networks # Otherwise a new internal network will be created with that name, subnet and CIDR. # # segmentation_id can be set to enforce a specific VLAN id - by default (empty) the VLAN id # will be assigned by Neutron. # Must be unique for each network # physical_network can be set to pick a specific phsyical network - by default (empty) the # default physical network will be picked # In the case of SR-IOV, both physical_network and segmentation ID must be provided # For example to setup PVP using 2 different SR-IOV ports, you must put the appropriate physnet # names under left.physical_network and right.physical_network. # Example of override configuration to force PVP to run on 2 SRIOV ports (phys_sriov0 and phys_sriov1) # using VLAN ID 2000 and 2001: # internal_networks: # left: # segmentation_id: 2000 # physical_network: phys_sriov0 # right: # segmentation_id: 2001 # physical_network: phys_sriov1 internal_networks: left: name: 'nfvbench-lnet' subnet: 'nfvbench-lsubnet' cidr: '192.168.1.0/24' network_type: 'vlan' segmentation_id: physical_network: right: name: 'nfvbench-rnet' subnet: 'nfvbench-rsubnet' cidr: '192.168.2.0/24' network_type: 'vlan' segmentation_id: physical_network: middle: name: 'nfvbench-mnet' subnet: 'nfvbench-msubnet' cidr: '192.168.3.0/24' network_type: 'vlan' segmentation_id: physical_network: # In the scenario of PVVP + SRIOV, there is choice of how the traffic will be # handled in the middle network. The default (false) will use vswitch, while # SRIOV can be used by toggling below setting. use_sriov_middle_net: false # EXT chain only. Prefix names of edge networks which will be used to send traffic via traffic generator. # # If service_chain_shared_net is true, the left and right networks must pre-exist and match exactly by name. # # If service_chain_shared_net is false, each chain must have its own pre-existing left and right networks. # An index will be appended to each network name to form the final name: # ext-lnet0 ext-rnet0 for chain #0 # ext-lnet1 ext-rnet1 for chain #1 # etc... external_networks: left: 'ext-lnet' right: 'ext-rnet' # Use 'true' to enable VLAN tagging of packets generated and sent by the traffic generator # Leave empty or set to false if you do not want the traffic generator to insert the VLAN tag (this is # needed for example if VLAN tagging is enabled on switch (access mode) or if you want to hook # directly to a NIC). # By default is set to true (which is the nominal use case with TOR and trunk mode to Trex ports) vlan_tagging: true # Used only in the case of EXT chain and no openstack to specify the VLAN IDs to use. # This property is ignored when OpenStakc is used or in the case of l2-loopback. # If OpenStack is used leave the list empty, VLAN IDs are retrieved from OpenStack networks using Neutron API. # If networks are shared across all chains (service_chain_shared_net=true), the list should have exactly 2 values # If networks are not shared across chains (service_chain_shared_net=false), the list should have # 2 list of vlan IDs # In the special case of l2-loopback the list should have the same VLAN id for all chains # Examples: # [1998, 1999] left network uses vlan 1998 right network uses vlan 1999 # [[1,2],[3,4]] chain 0 left vlan 1, right vlan 2 - chain 1 left vlan 3 right vlan 4 # [1010, 1010] same VLAN id with l2-loopback enabled # vlans: [] # ARP is used to discover the MAC address of VNFs that run L3 routing. # Used only with EXT chain. # False (default): ARP requests are sent to find out dest MAC addresses. # True: do not send ARP but use provisioned dest macs instead # (see mac_addrs_left and mac_addrs_right) no_arp: false # Traffic Profiles # You can add here more profiles as needed # `l2frame_size` can be specified in any none zero integer value to represent the size in bytes # of the L2 frame, or "IMIX" to represent the standard 3-packet size mixed sequence (IMIX1). traffic_profile: - name: traffic_profile_64B l2frame_size: ['64'] - name: traffic_profile_IMIX l2frame_size: ['IMIX'] - name: traffic_profile_1518B l2frame_size: ['1518'] - name: traffic_profile_3sizes l2frame_size: ['64', 'IMIX', '1518'] # Traffic Configuration # bidirectional: to have traffic generated from both direction, set bidirectional to true # profile: must be one of the profiles defined in traffic_profile # The traffic profile can be overriden with the options --frame-size and --uni-dir traffic: bidirectional: true profile: traffic_profile_64B # Check config and connectivity only - do not generate traffic # Can be overriden by --no-traffic no_traffic: false # Test configuration # The rate pps for traffic going in reverse direction in case of unidirectional flow. Default to 1. unidir_reverse_traffic_pps: 1 # The rate specifies if NFVbench should determine the NDR/PDR # or if NFVbench should just generate traffic at a given fixed rate # for a given duration (called "single run" mode) # Supported rate format: # NDR/PDR test: `ndr`, `pdr`, `ndr_pdr` (default) # Or for single run mode: # Packet per second: pps (e.g. `50pps`) # Bits per second: bps, kbps, Mbps, etc (e.g. `1Gbps`, `1000bps`) # Load percentage: % (e.g. `50%`) # Can be overridden by --rate rate: ndr_pdr # Default run duration (single run at given rate only) # Can be overridden by --duration duration_sec: 60 # Interval between intermediate reports when interval reporting is enabled # Can be overridden by --interval interval_sec: 10 # Default pause between iterations of a binary search (NDR/PDR) pause_sec: 2 # NDR / PDR configuration measurement: # Drop rates represent the ratio of dropped packet to the total number of packets sent. # Values provided here are percentages. A value of 0.01 means that at most 0.01% of all # packets sent are dropped (or 1 packet every 10,000 packets sent) # No Drop Rate in percentage; Default to 0.001% NDR: 0.001 # Partial Drop Rate in percentage; NDR should always be less than PDR PDR: 0.1 # The accuracy of NDR and PDR as a percnetage of line rate; The exact NDR # or PDR should be within `load_epsilon` line rate % from the one calculated. # For example, with a value 0.1, and a line rate of 10Gbps, the accuracy # of NDR and PDR will be within 0.1% Of 10Gbps or 10Mbps. # The lower the value the more iterations and the longer it will take to find the NDR/PDR. # In practice, due to the precision of the traffic generator it is not recommended to # set it to lower than 0.1 load_epsilon: 0.1 # Location where to store results in a JSON format. Must be container specific path. # Can be overriden by --json json: # Location where to store results in the NFVbench standard JSON format: # ---.json # Example: PVP-1-10-64-IMIX.json # Must be container specific path. # Can be overriden by --std-json std_json: # Prints debug messages (verbose mode) # Can be overriden by --debug debug: false # Set to a valid path name if logging to file is to be enabled # Defaults to disabled log_file: # When enabled, all results and/or logs will be sent to a fluentd servers at the requested IPs and ports # A list of one or more fluentd servers identified by their IPs and port numbers should be given. # For each recipient it is possible to enable both sending logs and performance # results, or enable either logs or performance results. For enabling logs or results logging_tag or # result_tag should be set. fluentd: # by default (logging_tag is empty) nfvbench log messages are not sent to fluentd # to enable logging to fluents, specify a valid fluentd tag name to be used for the # log records - logging_tag: # by default (result_tag is empty) nfvbench results are not sent to fluentd # to enable sending nfvbench results to fluentd, specify a valid fluentd tag name # to be used for the results records, which is different than logging_tag result_tag: # IP address of the server, defaults to loopback ip: 127.0.0.1 # port # to use, by default, use the default fluentd forward port port: 24224 # by default (logging_tag is empty) nfvbench log messages are not sent to fluentd # to enable logging to fluents, specify a valid fluentd tag name to be used for the # log records # Module and class name of factory which will be used to provide classes dynamically for other components. factory_module: 'nfvbench.factory' factory_class: 'BasicFactory' # Custom label added for every perf record generated during this run. # Can be overriden by --user-label user_label: # THESE FIELDS SHOULD BE USED VERY RARELY # Skip vswitch configuration and retrieving of stats # Can be overriden by --no-vswitch-access # Should be left to the default value (false) no_vswitch_access: false