diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
new file mode 100644
@@ -0,0 +1,337 @@
+# NFVbench default configuration file
+# This configuration file is ALWAYS loaded by NFVbench and should never be modified by users.
+# To specify your own property values, always define them in a separate config file
+# and pass that file to the script using -c or --config <file>
+# Property values in that config file will override the default values in the current file
+# IMPORTANT CUSTOMIZATION NOTES
+# There are roughly 2 types of NFVbench config based on the OpenStack encaps used:
+# - VLAN (OVS, OVS-DPDK, ML2/VPP)
+# - VxLAN (VTS)
+# Many of the fields to customize are relevant to only 1 of the 2 encaps
+# These will be clearly labeled "VxLAN only" or "VLAN only"
+# Fields that are not applicable will not be used by NFVbench and can be left empty
+# All fields are applicable to all encaps/traffic generators unless explicitly marked otherwise.
+# Fields that can be overridden at the command line are marked with the corresponding
+# option, e.g. "--interval"
+# Name of the image to use for launching the loopback VMs. This name must be
+# the exact same name used in OpenStack (as shown from 'nova image-list')
+# Can be overridden by --image or -i
+# Forwarder to use in nfvbenchvm image. Available options: ['vpp', 'testpmd']
+# NFVbench can automatically upload a VM image if the image named by
+# image_name is missing, for that you need to specify a file location where
+# the image can be retrieved
+# To upload the image as a file, download it to preferred location
+# and prepend it with file:// like in this example:
+# file://<location of the image>
+# NFVbench (the image must have the same name as defined in image_name above).
+# Name of the flavor to use for the loopback VMs
+# If the provided name is an exact match to a flavor name known by OpenStack
+# (as shown from 'nova flavor-list'), that flavor will be reused.
+# Otherwise, a new flavor will be created with attributes listed below.
+# Custom flavor attributes
+ # Number of vCPUs for the flavor
+ vcpus: 2
+ # Memory for the flavor in MB
+ ram: 8192
+ # Size of local disk in GB
+ disk: 0
+ # metadata are supported and can be added if needed, optional
+ # note that if your openstack does not have NUMA optimization
+ # (cpu pinning and huge pages)
+ # you must comment out extra_specs completely otherwise
+ # loopback VM creation will fail
+ "hw:cpu_policy": dedicated
+ "hw:mem_page_size": large
+# Name of the availability zone to use for the test VMs
+# Must be one of the zones listed by 'nova availability-zone-list'
+# If the selected zone contains only 1 compute node and PVVP inter-node flow is selected,
+# application will use intra-node PVVP flow.
+# List of compute nodes can be specified, must be in given availability zone if not empty
+# Credentials for SSH connection to TOR switches.
+ # Leave type empty or switch list empty to skip TOR switches configuration.
+ # Preferably use 'no_tor_access' to achieve the same behavior.
+ # (skipping TOR config will require the user to pre-stitch the traffic generator interfaces
+ # to the service chain under test, needed only if configured in access mode)
+ # Switches are only needed if type is not empty.
+ # You can configure 0, 1 or 2 switches
+ # no switch: in this case NFVbench will not attempt to ssh to the switch
+ # and stitching of traffic must be done externally
+ # 1 switch: this assumes that both traffic generator interfaces are wired to the same switch
+# 2 switches: this is the recommended setting with redundant switches, in this case each
+ # traffic generator interface must be wired to a different switch
+ - host:
+# Skip TOR switch configuration and retrieving of stats
+# Can be overridden by --no-tor-access
+# Skip vswitch configuration and retrieving of stats
+# Can be overridden by --no-vswitch-access
+# Type of service chain to run, possible options are PVP, PVVP and EXT
+# PVP - port to VM to port
+# PVVP - port to VM to VM to port
+# EXT - external chain used only for running traffic and checking traffic generator counters,
+# all other parts of chain must be configured manually
+# Can be overridden by --service-chain
+# Total number of service chains, every chain has own traffic stream
+# Can be overridden by --service-chain-count
+# Total number of traffic flows for all chains and directions generated by the traffic generator.
+# Minimum is '2 * service_chain_count', it is automatically adjusted if too small
+# value was configured. Must be even.
+# Every flow has packets with different IPs in headers
+# Can be overridden by --flow-count
+# Used by PVVP chain to spawn VMs on different compute nodes
+# Can be overridden by --inter-node
+# set to true if service chains should use SRIOV
+# This requires SRIOV to be available on compute nodes
+# Skip interfaces config on EXT service chain
+# Can be overridden by --no-int-config
+# Resources created by NFVbench will not be removed
+# Can be overridden by --no-cleanup
+# Configuration for traffic generator
+ # Name of the traffic generator, only for informational purposes
+ host_name: 'nfvbench_tg'
+ # this is the default traffic generator profile to use
+ # the name must be defined under generator_profile
+ # you can override the traffic generator to use using the
+ # -g or --traffic-gen option at the command line
+ default_profile: trex-local
+ # IP addresses for L3 traffic.
+ # All of the IPs are used as base for IP sequence computed based on chain or flow count.
+ # `ip_addrs` base IPs used as src and dst in packet header, quantity depends on flow count
+ # `ip_addrs_step`: step for generating IP sequence. Use "random" for random patterns, default is 0.0.0.1.
+ # `tg_gateway_ip_addrs` base IPs for traffic generator ports, quantity depends on chain count
+ # `tg_gateway_ip_addrs_step`: step for generating traffic generator gateway sequences. default is 0.0.0.1
+ # `gateway_ip_addrs`: base IPs of router gateways on both networks, quantity depends on chain count
+ # `gateway_ip_addrs_step`: step for generating router gateway sequences. default is 0.0.0.1
+ ip_addrs: ['10.0.0.0/8', '20.0.0.0/8']
+ ip_addrs_step: 0.0.0.1
+ tg_gateway_ip_addrs: ['1.1.0.100', '2.2.0.100']
+ tg_gateway_ip_addrs_step: 0.0.0.1
+ gateway_ip_addrs: ['1.1.0.2', '2.2.0.2']
+ gateway_ip_addrs_step: 0.0.0.1
+ # Traffic Generator Profiles
+ # In case you have multiple testbeds or traffic generators,
+ # you can define one traffic generator profile per testbed/traffic generator.
+ # Generator profiles are listed in the following format:
+ # `name`: Traffic generator profile name (use a unique name, no space or special character)
+ # `tool`: Traffic generator tool to be used (currently supported is `TRex`).
+ # `ip`: IP address of the traffic generator.
+ # `cores`: Specify the number of cores for TRex traffic generator. ONLY applies to trex-local.
+ # `interfaces`: Configuration of traffic generator interfaces.
+ # `interfaces.port`: The port of the traffic generator to be used (leave as 0 and 1 resp.)
+ # `interfaces.switch_port`: Leave empty (reserved for advanced use cases)
+ # `interfaces.pci`: The PCI address of the intel NIC interface associated to this port
+ # `intf_speed`: The speed of the interfaces used by the traffic generator (per direction).
+ - name: trex-local
+ tool: TRex
+ ip: 127.0.0.1
+ cores: 3
+ - port: 0
+ - port: 1
+ intf_speed: 10Gbps
+# These variables are not likely to be changed
+# The openrc file
+# General retry count
+# General poll period
+# name of the loop VM
+# Default names, subnets and CIDRs for internal networks used by the script.
+# If a network with given name already exists it will be reused.
+# Otherwise a new internal network will be created with that name, subnet and CIDR.
+ # Required only when segmentation_id specified
+ name: 'nfvbench-net0'
+ subnet: 'nfvbench-subnet0'
+ cidr: '192.168.1.0/24'
+ network_type: 'vlan'
+ name: 'nfvbench-net1'
+ subnet: 'nfvbench-subnet1'
+ cidr: '192.168.2.0/24'
+ network_type: 'vlan'
+ name: 'nfvbench-net2'
+ subnet: 'nfvbench-subnet2'
+ cidr: '192.168.3.0/24'
+ network_type: 'vlan'
+# EXT chain only. Names of edge networks which will be used to send traffic via traffic generator.
+ left: 'nfvbench-net0'
+ right: 'nfvbench-net1'
+# Use 'true' to enable VLAN tagging of packets coming from traffic generator
+# Leave empty if VLAN tagging is enabled on switch or if you want to hook directly to a NIC
+# Otherwise it is set to true by default (which is the nominal use case with TOR and trunk mode to Trex)
+# Specify only when you want to override VLAN IDs used for tagging with own values (exactly 2).
+# Default behavior of VLAN tagging is to retrieve VLAN IDs from OpenStack networks provided above.
+# In case of VxLAN this setting is ignored and only vtep_vlan from traffic generator profile is used.
+# Example: [1998, 1999]
+# Used only with EXT chain. MAC addresses of traffic generator ports are used as destination
+# if 'no_arp' is set to 'true'. Otherwise ARP requests are sent to find out destination MAC addresses.
+# Traffic Profiles
+# You can add here more profiles as needed
+# `l2frame_size` can be specified in any none zero integer value to represent the size in bytes
+# of the L2 frame, or "IMIX" to represent the standard 3-packet size mixed sequence (IMIX1).
+ - name: traffic_profile_64B
+ l2frame_size: ['64']
+ - name: traffic_profile_IMIX
+ l2frame_size: ['IMIX']
+ - name: traffic_profile_1518B
+ l2frame_size: ['1518']
+ - name: traffic_profile_3sizes
+ l2frame_size: ['64', 'IMIX', '1518']
+# Traffic Configuration
+# bidirectional: to have traffic generated from both direction, set bidirectional to true
+# profile: must be one of the profiles defined in traffic_profile
+# The traffic profile can be overridden with the options --frame-size and --uni-dir
+ bidirectional: true
+ profile: traffic_profile_64B
+# Check config and connectivity only - do not generate traffic
+# Can be overridden by --no-traffic
+# Do not reset tx/rx counters prior to running
+# Can be overridden by --no-reset
+# Test configuration
+# The rate pps for traffic going in reverse direction in case of unidirectional flow. Default to 1.
+# The rate specifies if NFVbench should determine the NDR/PDR
+# or if NFVbench should just generate traffic at a given fixed rate
+# for a given duration (called "single run" mode)
+# Supported rate format:
+# NDR/PDR test: `ndr`, `pdr`, `ndr_pdr` (default)
+# Or for single run mode:
+# Packet per second: pps (e.g. `50pps`)
+# Bits per second: bps, kbps, Mbps, etc (e.g. `1Gbps`, `1000bps`)
+# Load percentage: % (e.g. `50%`)
+# Can be overridden by --rate
+# Default run duration (single run at given rate only)
+# Can be overridden by --duration
+# Interval between intermediate reports when interval reporting is enabled
+# Can be overridden by --interval
+# NDR / PDR configuration
+ # Drop rates represent the ratio of dropped packet to the total number of packets sent.
+ # Values provided here are percentages. A value of 0.01 means that at most 0.01% of all
+ # packets sent are dropped (or 1 packet every 10,000 packets sent)
+ # No Drop Rate in percentage; Default to 0.001%
+ NDR: 0.001
+ # Partial Drop Rate in percentage; NDR should always be less than PDR
+ PDR: 0.1
+ # The accuracy of NDR and PDR load percentiles; The actual load percentile that match NDR
+ # or PDR should be within `load_epsilon` difference than the one calculated.
+ load_epsilon: 0.1
+# Location where to store results in a JSON format. Must be container specific path.
+# Can be overridden by --json
+# Location where to store results in the NFVbench standard JSON format:
+# Example: PVP-1-10-64-IMIX.json
+# Must be container specific path.
+# Can be overridden by --std-json
+# Prints debug messages (verbose mode)
+# Can be overridden by --debug
+# Module and class name of factory which will be used to provide classes dynamically for other components.
+factory_class: 'BasicFactory' \ No newline at end of file