#
# NFVbench default configuration file
#
# This configuration file is ALWAYS loaded by NFVbench and should never be modified by users.
# To specify your own property values, always define them in a separate config file
# and pass that file to the script using -c or --config
# Property values in that config file will override the default values in the current file
#
---
# IMPORTANT CUSTOMIZATION NOTES
# There are roughly 2 types of NFVbench config based on the OpenStack encaps used:
# - VLAN (OVS, OVS-DPDK, ML2/VPP)
# - VxLAN
# Many of the fields to customize are relevant to only 1 of the 2 encaps
# These will be clearly labeled "VxLAN only" or "VLAN only"
# Fields that are not applicable will not be used by NFVbench and can be left empty
#
# All fields are applicable to all encaps/traffic generators unless explicitly marked otherwise.
# Fields that can be overridden at the command line are marked with the corresponding
# option, e.g. "--interval"

# Name of the image to use for launching the loopback VMs. This name must be
# the exact same name used in OpenStack (as shown from 'nova image-list')
# Can be overridden by --image or -i
image_name: 'nfvbenchvm'

# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
vm_forwarder: testpmd

# NFVbench can automatically upload a VM image if the image named by
# image_name is missing, for that you need to specify a file location where
# the image can be retrieved
#
# To upload the image as a file, download it to preferred location
# and prepend it with file:// like in this example:
# file://
# NFVbench (the image must have the same name as defined in image_name above).
vm_image_file:

# Name of the flavor to use for the loopback VMs
#
# If the provided name is an exact match to a flavor name known by OpenStack
# (as shown from 'nova flavor-list'), that flavor will be reused.
# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'

# Custom flavor attributes
flavor:
  # Number of vCPUs for the flavor
  vcpus: 2
  # Memory for the flavor in MB
  ram: 8192
  # Size of local disk in GB
  disk: 0
  # metadata are supported and can be added if needed, optional
  # note that if your openstack does not have NUMA optimization
  # (cpu pinning and huge pages)
  # you must comment out extra_specs completely otherwise
  # loopback VM creation will fail
  extra_specs:
    "hw:cpu_policy": dedicated
    "hw:mem_page_size": large

# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# If the selected zone contains only 1 compute node and PVVP inter-node flow is selected,
# application will use intra-node PVVP flow.
# List of compute nodes can be specified, must be in given availability zone if not empty
# availability_zone: 'nova'
availability_zone:
compute_nodes:

# Credentials for SSH connection to TOR switches.
tor:
  # Leave type empty or switch list empty to skip TOR switches configuration.
  # Preferably use 'no_tor_access' to achieve the same behavior.
  # (skipping TOR config will require the user to pre-stitch the traffic generator interfaces
  # to the service chain under test, needed only if configured in access mode)
  type:
  # Switches are only needed if type is not empty.
  # You can configure 0, 1 or 2 switches
  # no switch: in this case NFVbench will not attempt to ssh to the switch
  #           and stitching of traffic must be done externally
  # 1 switch: this assumes that both traffic generator interfaces are wired to the same switch
  # 2 switches: this is the recommended setting with redundant switches, in this case each
  #           traffic generator interface must be wired to a different switch
  switches:
    - host:
      username:
      password:
      port:

# Skip TOR switch configuration and retrieving of stats
# Can be overridden by --no-tor-access
no_tor_access: false

# Skip vswitch configuration and retrieving of stats
# Can be overridden by --no-vswitch-access
no_vswitch_access: false

# Type of service chain to run, possible options are PVP, PVVP and EXT
# PVP - port to VM to port
# PVVP - port to VM to VM to port
# EXT - external chain used only for running traffic and checking traffic generator counters,
#       all other parts of chain must be configured manually
# Can be overridden by --service-chain
service_chain: 'PVP'

# Total number of service chains, every chain has own traffic stream
# Can be overridden by --service-chain-count
service_chain_count: 1

# Total number of traffic flows for all chains and directions generated by the traffic generator.
# Minimum is '2 * service_chain_count', it is automatically adjusted if too small
# value was configured. Must be even.
# Every flow has packets with different IPs in headers
# Can be overridden by --flow-count
flow_count: 2

# Used by PVVP chain to spawn VMs on different compute nodes
# Can be overridden by --inter-node
inter_node: false

# set to true if service chains should use SRIOV
# This requires SRIOV to be available on compute nodes
sriov: false

# Skip interfaces config on EXT service chain
# Can be overridden by --no-int-config
no_int_config: false

# Resources created by NFVbench will not be removed
# Can be overridden by --no-cleanup
no_cleanup: false

# Configuration for traffic generator
traffic_generator:
  # Name of the traffic generator, only for informational purposes
  host_name: 'nfvbench_tg'

  # this is the default traffic generator profile to use
  # the name must be defined under generator_profile
  # you can override the traffic generator to use using the
  # -g or --traffic-gen option at the command line
  default_profile: trex-local

  # IP addresses for L3 traffic.
  # All of the IPs are used as base for IP sequence computed based on chain or flow count.
  #
  # `ip_addrs`: base IPs used as src and dst in packet header, quantity depends on flow count
  # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
  # `tg_gateway_ip_addrs`: base IPs for traffic generator ports, quantity depends on chain count
  # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
  # `gateway_ip_addrs`: base IPs of router gateways on both networks, quantity depends on chain count
  # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
  ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
  ip_addrs_step: 0.0.0.1
  tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
  tg_gateway_ip_addrs_step: 0.0.0.1
  gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
  gateway_ip_addrs_step: 0.0.0.1

  # Traffic Generator Profiles
  # In case you have multiple testbeds or traffic generators,
  # you can define one traffic generator profile per testbed/traffic generator.
  #
  # Generator profiles are listed in the following format:
  # `name`: Traffic generator profile name (use a unique name, no space or special character)
  # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
  # `ip`: IP address of the traffic generator.
  # `cores`: Specify the number of cores for TRex traffic generator. ONLY applies to trex-local.
  # `interfaces`: Configuration of traffic generator interfaces.
  # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
  # `interfaces.switch_port`: Leave empty (reserved for advanced use cases)
  # `interfaces.pci`: The PCI address of the intel NIC interface associated to this port
  # `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
  generator_profile:
    - name: trex-local
      tool: TRex
      ip: 127.0.0.1
      cores: 3
      interfaces:
        - port: 0
          switch_port:
          pci:
        - port: 1
          switch_port:
          pci:
      intf_speed: 10Gbps

# -----------------------------------------------------------------------------
# These variables are not likely to be changed

# The openrc file
openrc_file:

# General retry count
generic_retry_count: 100

# General poll period
generic_poll_sec: 2

# name of the loop VM
loop_vm_name: 'nfvbench-loop-vm'

# Default names, subnets and CIDRs for internal networks used by the script.
# If a network with given name already exists it will be reused.
# Otherwise a new internal network will be created with that name, subnet and CIDR.
internal_networks:
  # Required only when segmentation_id specified
  physical_network:
  left:
    name: 'nfvbench-net0'
    subnet: 'nfvbench-subnet0'
    cidr: '192.168.1.0/24'
    network_type: 'vlan'
    segmentation_id:
  right:
    name: 'nfvbench-net1'
    subnet: 'nfvbench-subnet1'
    cidr: '192.168.2.0/24'
    network_type: 'vlan'
    segmentation_id:
  middle:
    name: 'nfvbench-net2'
    subnet: 'nfvbench-subnet2'
    cidr: '192.168.3.0/24'
    network_type: 'vlan'
    segmentation_id:

# EXT chain only. Names of edge networks which will be used to send traffic via traffic generator.
external_networks:
  left: 'nfvbench-net0'
  right: 'nfvbench-net1'

# Use 'true' to enable VLAN tagging of packets coming from traffic generator
# Leave empty if VLAN tagging is enabled on switch or if you want to hook directly to a NIC
# Else by default is set to true (which is the nominal use case with TOR and trunk mode to Trex)
vlan_tagging: true

# Specify only when you want to override VLAN IDs used for tagging with own values (exactly 2).
# Default behavior of VLAN tagging is to retrieve VLAN IDs from OpenStack networks provided above.
# In case of VxLAN this setting is ignored and only vtep_vlan from traffic generator profile is used.
# Example: [1998, 1999]
vlans: []

# Used only with EXT chain. MAC addresses of traffic generator ports are used as destination
# if 'no_arp' is set to 'true'. Otherwise ARP requests are sent to find out destination MAC addresses.
no_arp: false

# Traffic Profiles
# You can add here more profiles as needed
# `l2frame_size` can be specified in any non-zero integer value to represent the size in bytes
# of the L2 frame, or "IMIX" to represent the standard 3-packet size mixed sequence (IMIX1).
traffic_profile:
  - name: traffic_profile_64B
    l2frame_size: ['64']
  - name: traffic_profile_IMIX
    l2frame_size: ['IMIX']
  - name: traffic_profile_1518B
    l2frame_size: ['1518']
  - name: traffic_profile_3sizes
    l2frame_size: ['64', 'IMIX', '1518']

# Traffic Configuration
# bidirectional: to have traffic generated from both direction, set bidirectional to true
# profile: must be one of the profiles defined in traffic_profile
# The traffic profile can be overridden with the options --frame-size and --uni-dir
traffic:
  bidirectional: true
  profile: traffic_profile_64B

# Check config and connectivity only - do not generate traffic
# Can be overridden by --no-traffic
no_traffic: false

# Do not reset tx/rx counters prior to running
# Can be overridden by --no-reset
no_reset: false

# Test configuration

# The rate pps for traffic going in reverse direction in case of unidirectional flow. Default to 1.
unidir_reverse_traffic_pps: 1

# The rate specifies if NFVbench should determine the NDR/PDR
# or if NFVbench should just generate traffic at a given fixed rate
# for a given duration (called "single run" mode)
# Supported rate format:
# NDR/PDR test: `ndr`, `pdr`, `ndr_pdr` (default)
# Or for single run mode:
# Packet per second: pps (e.g. `50pps`)
# Bits per second: bps, kbps, Mbps, etc (e.g. `1Gbps`, `1000bps`)
# Load percentage: % (e.g. `50%`)
# Can be overridden by --rate
rate: ndr_pdr

# Default run duration (single run at given rate only)
# Can be overridden by --duration
duration_sec: 60

# Interval between intermediate reports when interval reporting is enabled
# Can be overridden by --interval
interval_sec: 10

# NDR / PDR configuration
measurement:
  # Drop rates represent the ratio of dropped packet to the total number of packets sent.
  # Values provided here are percentages. A value of 0.01 means that at most 0.01% of all
  # packets sent are dropped (or 1 packet every 10,000 packets sent)

  # No Drop Rate in percentage; Default to 0.001%
  NDR: 0.001
  # Partial Drop Rate in percentage; NDR should always be less than PDR
  PDR: 0.1
  # The accuracy of NDR and PDR load percentiles; The actual load percentile that match NDR
  # or PDR should be within `load_epsilon` difference than the one calculated.
  load_epsilon: 0.1

# Location where to store results in a JSON format. Must be container specific path.
# Can be overridden by --json
json:

# Location where to store results in the NFVbench standard JSON format:
# <service-chain-type>-<service-chain-count>-<flow-count>-<packet-sizes>.json
# Example: PVP-1-10-64-IMIX.json
# Must be container specific path.
# Can be overridden by --std-json
std_json:

# Prints debug messages (verbose mode)
# Can be overridden by --debug
debug: false

# Module and class name of factory which will be used to provide classes dynamically for other components.
factory_module: 'nfvbench.factory'
factory_class: 'BasicFactory'