Diffstat (limited to 'nfvbenchvm/dib/elements/nfvbenchvm/static')
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf | 1
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf | 1
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/openstack/clouds.yaml | 1
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh | 1
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local | 110
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator | 122
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm | 298
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service | 12
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh | 258
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf | 25
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh | 51
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf | 4
12 files changed, 773 insertions(+), 111 deletions(-)
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf
new file mode 100644
index 0000000..f32633f
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf
@@ -0,0 +1 @@
+options vfio enable_unsafe_noiommu_mode=1
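Note: enable_unsafe_noiommu_mode=1 is what allows DPDK to bind ports with vfio-pci inside a guest that has no (virtual) IOMMU; the kernel flags such bindings as unsafe because the device can then DMA anywhere. A minimal sketch, assuming the vfio module is loaded, to verify the option took effect:

cat /sys/module/vfio/parameters/enable_unsafe_noiommu_mode   # expected: Y
dmesg | grep -i noiommu || true                              # bindings in this mode are logged as unsafe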
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf
new file mode 100644
index 0000000..7ce4214
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf
@@ -0,0 +1 @@
+vfio-pci
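This modules-load.d entry makes systemd-modules-load pull in vfio-pci at boot, which in turn loads vfio with the option above. A manual equivalent on a running VM (sketch):

modprobe vfio-pci    # loads vfio-pci and its vfio dependency with the vfio.conf option
lsmod | grep vfio    # confirm vfio and vfio-pci are present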
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/openstack/clouds.yaml b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/openstack/clouds.yaml
new file mode 100644
index 0000000..cb1130f
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/openstack/clouds.yaml
@@ -0,0 +1 @@
+# clouds.yaml file
\ No newline at end of file
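This file is only a placeholder: configure-nfvbench.sh and rc.local.generator run openstack --os-cloud $CLOUD_DETAIL against it to look up port MACs. A hypothetical sketch of the clouds.yaml an operator would inject (cloud name, endpoint and credentials are placeholders, not part of this change):

cat > /etc/openstack/clouds.yaml <<'EOF'
clouds:
  mycloud:                             # hypothetical cloud name
    auth:
      auth_url: http://192.168.0.1:5000/v3
      username: admin
      password: secret
      project_name: admin
      user_domain_name: Default
      project_domain_name: Default
    region_name: RegionOne
EOF
# CLOUD_DETAIL=mycloud in /etc/nfvbenchvm.conf would then select this entry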
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh
new file mode 100644
index 0000000..a9bf588
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh
@@ -0,0 +1 @@
+#!/bin/bash
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
deleted file mode 100644
index caf3142..0000000
--- a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-touch /var/lock/subsys/local
-
-# Waiting for cloud-init to generate $TESTPMD_CONF, retry 60 seconds
-NFVBENCH_CONF=/etc/nfvbenchvm.conf
-retry=30
-until [ $retry -eq 0 ]; do
- if [ -f $NFVBENCH_CONF ]; then break; fi
- retry=$[$retry-1]
- sleep 2
-done
-if [ ! -f $NFVBENCH_CONF ]; then
- exit 0
-fi
-
-# Parse and obtain all configurations
-echo "Generating configurations for forwarder..."
-eval $(cat $NFVBENCH_CONF)
-touch /nfvbench_configured.flag
-NICS=`lspci -D | grep Ethernet | cut -d' ' -f1 | xargs`
-PCI_ADDRESS_1=`echo $NICS | awk '{ print $1 }'`
-PCI_ADDRESS_2=`echo $NICS | awk '{ print $2 }'`
-CPU_CORES=`grep -c ^processor /proc/cpuinfo`
-CPU_MASKS=0x`echo "obase=16; 2 ^ $CPU_CORES - 1" | bc`
-WORKER_CORES=`expr $CPU_CORES - 1`
-
-# CPU isolation optimizations
-echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
-echo 1 > /sys/devices/virtual/workqueue/cpumask
-echo 1 > /proc/irq/default_smp_affinity
-for irq in `ls /proc/irq/`; do
- echo 1 > /proc/irq/$irq/smp_affinity
-done
-tuna -c $(seq -s, 1 1 $WORKER_CORES) --isolate
-
-# Sometimes the interfaces on the loopback VM will use different drivers, e.g.
-# one from vswitch which is virtio based, one is from SRIOV VF. In this case,
-# we have to make sure the forwarder uses them in the right order, which is
-# especially important if the VM is in a PVVP chain.
-SWAP_FLAG=0
-if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then
- NET_PATH=/sys/class/net
- EXP_INTF_1=$(for f in $(ls $NET_PATH/); do if grep -q "$INTF_MAC1" $NET_PATH/$f/address; then echo $f; break; fi; done)
- EXP_PCI_ADDRESS_1=$(ethtool -i $EXP_INTF_1 | grep "bus-info" | awk -F' ' '{ print $2 }')
- EXP_INTF_2=$(for f in $(ls $NET_PATH/); do if grep -q "$INTF_MAC2" $NET_PATH/$f/address; then echo $f; break; fi; done)
- EXP_PCI_ADDRESS_2=$(ethtool -i $EXP_INTF_2 | grep "bus-info" | awk -F' ' '{ print $2 }')
- if [ "$PCI_ADDRESS_1" == "$EXP_PCI_ADDRESS_2" ] && [ "$PCI_ADDRESS_2" == "$EXP_PCI_ADDRESS_1" ]; then
- # Interfaces are not coming in the expected order:
- # (1) Swap the traffic generator MAC in the case of testpmd;
- # (2) Swap the interface configs in the case of VPP;
- SWAP_FLAG=1
- fi
-fi
-
-# Configure the forwarder
-if [ -z "`lsmod | grep igb_uio`" ]; then
- modprobe uio
- insmod /dpdk/igb_uio.ko
-fi
-if [ "$FORWARDER" == "testpmd" ]; then
- echo "Configuring testpmd..."
- if [ $SWAP_FLAG -eq 1 ]; then
- TEMP=$TG_MAC1; TG_MAC1=$TG_MAC2; TG_MAC2=$TEMP
- fi
- # Binding ports to DPDK
- /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
- /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
- screen -dmSL testpmd /dpdk/testpmd \
- -c $CPU_MASKS \
- -n 4 \
- -- \
- --burst=32 \
- --txd=256 \
- --rxd=1024 \
- --eth-peer=0,$TG_MAC1 \
- --eth-peer=1,$TG_MAC2 \
- --forward-mode=mac \
- --nb-cores=$WORKER_CORES \
- --max-pkt-len=9000 \
- --cmdline-file=/dpdk/testpmd_cmd.txt
-else
- echo "Configuring vpp..."
- cp /vpp/startup.conf /etc/vpp/startup.conf
- cp /vpp/vm.conf /etc/vpp/vm.conf
-
- sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
- sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
- sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
- service vpp start
- sleep 10
-
- INTFS=`vppctl show int | grep Ethernet | xargs`
- INTF_1=`echo $INTFS | awk '{ print $1 }'`
- INTF_2=`echo $INTFS | awk '{ print $4 }'`
- if [ $SWAP_FLAG -eq 1 ]; then
- TEMP=$INTF_1; INTF_1=$INTF_2; INTF_2=$TEMP
- fi
- sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
- sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
- sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
- sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
- sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
- sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
- sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
- sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
- sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
- sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
- service vpp restart
-fi
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator
new file mode 100644
index 0000000..9ac23a3
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+touch /var/lock/subsys/local
+
+# Wait for cloud-init to generate $NFVBENCH_CONF, retrying for up to 60 seconds
+NFVBENCH_CONF=/etc/nfvbenchvm.conf
+retry=30
+until [ $retry -eq 0 ]; do
+ if [ -f $NFVBENCH_CONF ]; then break; fi
+ retry=$((retry-1))
+ sleep 2
+done
+if [ ! -f $NFVBENCH_CONF ]; then
+ exit 0
+fi
+
+# Parse and obtain all configurations
+echo "Generating configurations for NFVbench and TRex..."
+eval $(cat $NFVBENCH_CONF)
+touch /nfvbench_configured.flag
+
+# Add DNS entry
+if [ $DNS_SERVERS ]; then
+ IFS="," read -a dns <<< $DNS_SERVERS
+ for d in "${dns[@]}"; do
+ echo "nameserver $d" >> /etc/resolv.conf
+ done
+fi
+
+# CPU isolation optimizations
+echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
+echo 1 > /sys/devices/virtual/workqueue/cpumask
+echo 1 > /proc/irq/default_smp_affinity
+for irq in `ls /proc/irq/`; do
+ if [ -f /proc/irq/$irq/smp_affinity ]; then
+ echo 1 > /proc/irq/$irq/smp_affinity
+ fi
+done
+
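The value written to these cpumask and smp_affinity files is a hexadecimal CPU bitmask, so echo 1 pins writeback workqueues and all movable IRQs to core 0 and keeps the worker cores quiet. A sketch of the mask arithmetic (the two-core mask is an illustration, not part of this change):

printf '%x\n' $(( (1 << 2) - 1 ))         # mask for cores 0..1 -> 3
echo 3 > /proc/irq/default_smp_affinity   # would keep IRQs on cores 0 and 1 instead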
+NET_PATH=/sys/class/net
+
+get_eth_port() {
+ # device mapping for CentOS Linux 7:
+ # lspci:
+ # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # /sys/class/net:
+ # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+ # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
+
+ mac=$1
+ for f in $(ls $NET_PATH/); do
+ if grep -q "$mac" $NET_PATH/$f/address; then
+ eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8)
+ # some virtual interfaces match on MAC but are not backed by a PCI device
+ if [ "$eth_port" -a "$eth_port" != "N/A" ]; then
+ # Found matching interface
+ logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac"
+ break
+ else
+ eth_port=""
+ fi
+ fi;
+ done
+ if [ -z "$eth_port" ]; then
+ echo "ERROR: Cannot find eth port for MAC $mac" >&2
+ logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac"
+ return 1
+ fi
+ echo $eth_port
+ return 0
+}
+
+# Set VM MANAGEMENT port up and running
+if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then
+ if [ $INTF_MAC_MGMT ]; then
+ ETH_PORT=$(get_eth_port $INTF_MAC_MGMT)
+ elif [ "$CLOUD_DETAIL" ] && [ "$PORT_MGMT_NAME" ]; then
+ INTF_MAC_MGMT=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $PORT_MGMT_NAME | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ ETH_PORT=$(get_eth_port $INTF_MAC_MGMT)
+ else
+ ETH_PORT=""
+ fi
+ if [ -z "$ETH_PORT" ]; then
+ echo "ERROR: Cannot find eth port for management port" >&2
+ logger "NFVBENCHVM ERROR: Cannot find eth port for management port"
+ exit 1
+ fi
+
+ # By default, configure the MTU of the management interface to the
+ # conservative value of 1500: this reduces the risk of ending up with an
+ # unmanageable VM in some setups.
+ #
+ # To set the MTU to a different value, configure the INTF_MGMT_MTU variable
+ # in /etc/nfvbenchvm.conf. If INTF_MGMT_MTU is set to the special value
+ # "auto", the MTU will not be configured and it will keep the value set by
+ # the hypervisor ("legacy" nfvbenchvm behavior). If INTF_MGMT_MTU is unset,
+ # the MTU will be set to 1500. In other cases, the MTU will be set to the
+ # value of INTF_MGMT_MTU.
+ #
+ if [[ -z "$INTF_MGMT_MTU" ]]; then
+ ip link set $ETH_PORT mtu 1500
+ elif [[ "$INTF_MGMT_MTU" != "auto" ]]; then
+ ip link set $ETH_PORT mtu $INTF_MGMT_MTU
+ fi
+
+ ip addr add $INTF_MGMT_CIDR dev $ETH_PORT
+ ip link set $ETH_PORT up
+ ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT
+else
+ echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF"
+fi
+
+/nfvbench/configure-nfvbench.sh
+
+if [ $ACTION ]; then
+ /nfvbench/start-nfvbench.sh $ACTION
+else
+ /nfvbench/start-nfvbench.sh
+fi
+
+exit 0
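The script evals /etc/nfvbenchvm.conf as plain shell assignments, so cloud-init must write it in key=value form. A hypothetical example for a generator VM (the variable names are the ones consumed above, the values are placeholders):

# /etc/nfvbenchvm.conf (generator VM, example values only)
INTF_MAC_MGMT='fa:16:3e:00:00:01'
INTF_MGMT_CIDR='192.168.0.10/24'
INTF_MGMT_IP_GW='192.168.0.1'
INTF_MGMT_MTU=1500              # or 'auto' to keep the hypervisor-provided MTU
DNS_SERVERS='8.8.8.8,8.8.4.4'
ACTION='e2e'                    # optional: e2e | loopback; unset -> default config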
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm
new file mode 100644
index 0000000..181ff2a
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm
@@ -0,0 +1,298 @@
+#!/bin/bash
+
+touch /var/lock/subsys/local
+
+# Wait for cloud-init to generate $NFVBENCH_CONF, retrying for up to 60 seconds
+NFVBENCH_CONF=/etc/nfvbenchvm.conf
+retry=30
+until [ $retry -eq 0 ]; do
+ if [ -f $NFVBENCH_CONF ]; then break; fi
+ retry=$((retry-1))
+ sleep 2
+done
+if [ ! -f $NFVBENCH_CONF ]; then
+ exit 0
+fi
+
+# Parse and obtain all configurations
+echo "Generating configurations for forwarder..."
+eval $(cat $NFVBENCH_CONF)
+touch /nfvbench_configured.flag
+
+# We assume there are at least 2 cores available for the VM
+CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
+
+# We need at least 1 admin core.
+if [ $CPU_CORES -le 2 ]; then
+ ADMIN_CORES=1
+else
+ # If the number of cores is even, we reserve 2 cores for admin (the second
+ # one staying idle) so that the number of workers is either 1 (if CPU_CORES
+ # is 2) or always even
+ if (( $CPU_CORES % 2 )); then
+ ADMIN_CORES=1
+ else
+ ADMIN_CORES=2
+ fi
+fi
+# 2 vcpus: AW (core 0: Admin, core 1: Worker)
+# 3 vcpus: AWW (core 0: Admin, core 1,2: Worker)
+# 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused)
+# 5 vcpus: AWWWW
+# 6 vcpus: AWWWWU
+WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES)
+# worker cores are all cores except the admin core (core 0) and the possibly unused core
+# AW -> 1
+# AWW -> 1,2
+# AWWU -> 1,2
+WORKER_CORE_LIST=$(seq -s, 1 $WORKER_CORES)
+# always use all cores
+CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)
+
+logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)"
+
+# CPU isolation optimizations
+echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
+echo 1 > /sys/devices/virtual/workqueue/cpumask
+echo 1 > /proc/irq/default_smp_affinity
+for irq in `ls /proc/irq/`; do
+ if [ -f /proc/irq/$irq/smp_affinity ]; then
+ echo 1 > /proc/irq/$irq/smp_affinity
+ fi
+done
+
+# Isolate all cores that are reserved for workers
+tuna -c $WORKER_CORE_LIST --isolate
+
+NET_PATH=/sys/class/net
+
+get_pci_address() {
+ # device mapping for CentOS Linux 7:
+ # lspci:
+ # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # /sys/class/net:
+ # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+ # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
+
+ mac=$1
+ for f in $(ls $NET_PATH/); do
+ if grep -q "$mac" $NET_PATH/$f/address; then
+ pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
+ # some virtual interfaces match on MAC and do not have a PCI address
+ if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then
+ # Found matching interface
+ logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac"
+ break
+ else
+ pci_addr=""
+ fi
+ fi;
+ done
+ if [ -z "$pci_addr" ]; then
+ echo "ERROR: Cannot find pci address for MAC $mac" >&2
+ logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
+ return 1
+ fi
+ echo $pci_addr
+ return 0
+}
+
+get_eth_port() {
+ # device mapping for CentOS Linux 7:
+ # lspci:
+ # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # /sys/class/net:
+ # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+ # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
+
+ mac=$1
+ for f in $(ls $NET_PATH/); do
+ if grep -q "$mac" $NET_PATH/$f/address; then
+ eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8)
+ # some virtual interfaces match on MAC but are not backed by a PCI device
+ if [ "$eth_port" -a "$eth_port" != "N/A" ]; then
+ # Found matching interface
+ logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac"
+ break
+ else
+ eth_port=""
+ fi
+ fi;
+ done
+ if [ -z "$eth_port" ]; then
+ echo "ERROR: Cannot find eth port for MAC $mac" >&2
+ logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac"
+ return 1
+ fi
+ echo $eth_port
+ return 0
+}
+
+# Set VM MANAGEMENT port up and running
+if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then
+ if [ $INTF_MAC_MGMT ]; then
+ ETH_PORT=$(get_eth_port $INTF_MAC_MGMT)
+ else
+ ETH_PORT="eth0"
+ fi
+
+ # By default, configure the MTU of the management interface to the
+ # conservative value of 1500: this reduces the risk of ending up with an
+ # unmanageable VM in some setups.
+ #
+ # To set the MTU to a different value, configure the INTF_MGMT_MTU variable
+ # in /etc/nfvbenchvm.conf. If INTF_MGMT_MTU is set to the special value
+ # "auto", the MTU will not be configured and it will keep the value set by
+ # the hypervisor ("legacy" nfvbenchvm behavior). If INTF_MGMT_MTU is unset,
+ # the MTU will be set to 1500. In other cases, the MTU will be set to the
+ # value of INTF_MGMT_MTU.
+ #
+ if [[ -z "$INTF_MGMT_MTU" ]]; then
+ ip link set $ETH_PORT mtu 1500
+ elif [[ "$INTF_MGMT_MTU" != "auto" ]]; then
+ ip link set $ETH_PORT mtu $INTF_MGMT_MTU
+ fi
+
+ ip addr add $INTF_MGMT_CIDR dev $ETH_PORT
+ ip link set $ETH_PORT up
+ ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT
+else
+ echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF"
+fi
+
+# Dynamically set the interface MAC values if the VM is spawned without using
+# NFVbench and the management interface is used on eth0
+if [ -z "$INTF_MAC1" ] && [ -z "$INTF_MAC2" ]; then
+ INTF_MAC1=$(ip l show eth1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ INTF_MAC2=$(ip l show eth2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+fi
+
+
+# Sometimes the interfaces on the loopback VM will use different drivers, e.g.
+# one from the vswitch (virtio-based) and one from an SR-IOV VF. In this case,
+# we have to make sure the forwarder uses them in the right order, which is
+# especially important if the VM is in a PVVP chain.
+if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then
+ PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1)
+ PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2)
+else
+ echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
+ logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF"
+fi
+
+wait_vpp_service() {
+ # Wait for at most wait_max=$1 seconds until VPP service is ready. Exit
+ # with code 1 if timeout is reached.
+ #
+ # Because the VPP systemd unit has Type=simple, systemctl will report the
+ # service as active as soon as it is forked. This does not mean that
+ # the service is ready: it actually takes some time before vppctl can
+ # successfully connect to the VPP client socket /run/vpp/cli.sock.
+ local wait_max=$1
+
+ local wait_time=0
+ while ! vppctl show int; do
+ if [[ $wait_time -ge $wait_max ]]; then
+ # Log error to both system log and standard error output
+ logger -s "NFVBENCHVM ERROR: VPP service still not ready after $wait_max seconds." \
+ "Exiting $(basename $0)."
+ exit 1
+ fi
+ sleep 1
+ wait_time=$(( wait_time + 1 ))
+ done
+}
+
+if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
+ logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
+ logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
+ # activate the uio_pci_generic driver
+ modprobe uio_pci_generic
+ # Configure the forwarder
+ if [ "$FORWARDER" == "testpmd" ]; then
+ echo "Configuring testpmd..."
+ mkdir /dpdk
+ echo "set promisc all off" > /dpdk/testpmd_cmd.txt
+ # Binding ports to DPDK VFIO or UIO
+ dpdk-devbind -b vfio-pci $PCI_ADDRESS_1 || dpdk-devbind -b uio_pci_generic $PCI_ADDRESS_1
+ dpdk-devbind -b vfio-pci $PCI_ADDRESS_2 || dpdk-devbind -b uio_pci_generic $PCI_ADDRESS_2
+ screen -dmSL testpmd testpmd \
+ -c $CORE_MASK \
+ -n 4 \
+ -- \
+ --nb-ports=2 \
+ --burst=32 \
+ --txd=256 \
+ --rxd=1024 \
+ --eth-peer=0,$TG_MAC1 \
+ --eth-peer=1,$TG_MAC2 \
+ --forward-mode=mac \
+ --nb-cores=$WORKER_CORES \
+ --txq=$VIF_MQ_SIZE \
+ --rxq=$VIF_MQ_SIZE \
+ --max-pkt-len=9000 \
+ --cmdline-file=/dpdk/testpmd_cmd.txt
+ echo "testpmd running in screen 'testpmd'"
+ logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
+ elif [ "$FORWARDER" == "vpp" ]; then
+ echo "Configuring vpp..."
+ cp /vpp/startup.conf /etc/vpp/startup.conf
+ cp /vpp/vm.conf /etc/vpp/vm.conf
+
+ sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
+ sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
+ sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
+ sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf
+ sed -i "s/{{NUM_MBUFS}}/${NUM_MBUFS}/g" /etc/vpp/startup.conf
+ systemctl start vpp
+ # Wait until VPP service is ready for at most 30 seconds
+ wait_vpp_service 30
+
+ VPPCTL_OUTPUT=$(vppctl show int)
+ INTFS=$(echo "$VPPCTL_OUTPUT" | grep Ethernet | xargs)
+ INTF_1=$(echo $INTFS | awk '{ print $1 }')
+ INTF_2=$(echo $INTFS | awk '{ print $4 }')
+ if [[ -z "$INTF_1" ]] || [[ -z "$INTF_2" ]]; then
+ # Log error to both system log and standard error output
+ logger -s "NFVBENCHVM DEBUG: \"vppctl show int\" output:"
+ logger -s "NFVBENCHVM DEBUG: $VPPCTL_OUTPUT"
+ logger -s "NFVBENCHVM ERROR: vppctl does not show the two Ethernet interfaces we expect." \
+ "Exiting $(basename $0)."
+ exit 1
+ fi
+ if [ -z "${TG_MAC1}" ]; then
+ # vm.conf does not support lines commented with #, so we need to
+ # remove the line that would set the static ARP entry.
+ sed -i "/{{TG_MAC1}}/d" /etc/vpp/vm.conf
+ else
+ sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
+ fi
+ if [ -z "${TG_MAC2}" ]; then
+ sed -i "/{{TG_MAC2}}/d" /etc/vpp/vm.conf
+ else
+ sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
+ fi
+ sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
+ sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
+ sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
+ sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
+ sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
+ sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
+ sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
+ sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
+ systemctl restart vpp
+ logger "NFVBENCHVM: vpp service restarted"
+ else
+ echo "ERROR: Unknown forwarder value. Accepted values: testpmd or vpp"
+ exit 1
+ fi
+else
+ echo "ERROR: Cannot find PCI Address from MAC"
+ echo "$INTF_MAC1: $PCI_ADDRESS_1"
+ echo "$INTF_MAC2: $PCI_ADDRESS_2"
+ logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
+fi
+
+exit 0
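As on the generator image, /etc/nfvbenchvm.conf is evaled as shell assignments. A hypothetical example for a loop VM (placeholder values; VPP mode additionally consumes VNF_GATEWAY1_CIDR/VNF_GATEWAY2_CIDR, TG_NET1/TG_NET2 and TG_GATEWAY1_IP/TG_GATEWAY2_IP):

# /etc/nfvbenchvm.conf (loop VM, example values only)
FORWARDER='testpmd'             # or 'vpp'
INTF_MAC1='fa:16:3e:00:00:02'
INTF_MAC2='fa:16:3e:00:00:03'
TG_MAC1='00:10:94:00:0a:00'
TG_MAC2='00:10:94:00:0b:00'
VIF_MQ_SIZE=2
NUM_MBUFS=16384
INTF_MGMT_CIDR='192.168.0.11/24'
INTF_MGMT_IP_GW='192.168.0.1'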
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service
new file mode 100644
index 0000000..e952070
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=nfvbench service
+After=network.target
+
+[Service]
+Type=forking
+User=root
+RemainAfterExit=yes
+ExecStart=/bin/bash -a -c "source /etc/profile.d/nfvbench.sh && /usr/bin/screen -dmSL nfvbench /usr/local/bin/nfvbench -c /etc/nfvbench/nfvbench.conf --server"
+
+[Install]
+WantedBy=multi-user.target
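Because screen daemonizes itself, Type=forking plus RemainAfterExit lets systemd consider the unit started once the detached session exists. A usage sketch (the REST port below is an assumption, not set by this unit):

systemctl daemon-reload
systemctl enable --now nfvbench
screen -r nfvbench                   # attach to the server console, Ctrl-a d to detach
curl http://127.0.0.1:7555/status    # 7555 assumed as the default --server REST port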
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh
new file mode 100644
index 0000000..3bf1d8d
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh
@@ -0,0 +1,258 @@
+#!/bin/bash
+
+set -e
+
+NFVBENCH_CONF=/etc/nfvbenchvm.conf
+E2E_CFG=/etc/nfvbench/e2e.cfg
+LOOPBACK_CFG=/etc/nfvbench/loopback.cfg
+NFVBENCH_CFG=/etc/nfvbench/nfvbench.cfg
+
+# Parse and obtain all configurations
+eval $(cat $NFVBENCH_CONF)
+
+# We assume there are at least 2 cores available for the VM
+CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
+
+# We need at least 2 admin cores (one for the master thread, one for the latency thread).
+if [ $CPU_CORES -le 3 ]; then
+ ADMIN_CORES=2
+else
+ # If the number of cores is even, we reserve 3 cores for admin (the third
+ # one staying idle) so that the number of workers is either 1 (if CPU_CORES
+ # is 4) or always even
+ if (( $CPU_CORES % 2 )); then
+ ADMIN_CORES=2
+ else
+ ADMIN_CORES=3
+ fi
+fi
+# 3 vcpus: AAW (core 0: master, core 1: latency, core 2: worker)
+# 4 vcpus: AAWU (core 0: master, core 1: latency, core 2: worker, core 3: unused)
+# 5 vcpus: AAWWW
+# 6 vcpus: AAWWWU
+WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES)
+# worker cores are all cores except the admin cores (cores 0 and 1) and the possibly unused core
+# AAW -> 2
+# AAWU -> 2
+# AAWWW -> 2,3,4
+WORKER_CORE_LIST=$(seq -s, 2 $((1 + WORKER_CORES)))
+# always use all cores
+CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)
+
+logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)"
+
+# Isolate all cores that are reserved for workers
+tuna -c $WORKER_CORE_LIST --isolate
+
+NET_PATH=/sys/class/net
+
+get_pci_address() {
+ # device mapping for CentOS Linux 7:
+ # lspci:
+ # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # /sys/class/net:
+ # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+ # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
+
+ mac=$1
+ for f in $(ls $NET_PATH/); do
+ if grep -q "$mac" $NET_PATH/$f/address; then
+ pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
+ # some virtual interfaces match on MAC and do not have a PCI address
+ if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then
+ # Found matching interface
+ logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac"
+ break
+ else
+ pci_addr=""
+ fi
+ fi;
+ done
+ if [ -z "$pci_addr" ]; then
+ echo "ERROR: Cannot find pci address for MAC $mac" >&2
+ logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac"
+ return 1
+ fi
+ echo $pci_addr
+ return 0
+}
+
+get_interfaces_mac_values(){
+ # Dynamically set the interface MAC values if the VM is spawned with
+ # SR-IOV PF ports and the OpenStack API is accessible
+ if [ -z "$LOOPBACK_INTF_MAC1" ] && [ -z "$LOOPBACK_INTF_MAC2" ]; then
+ if [ "$CLOUD_DETAIL" ] && [ "$LOOPBACK_PORT_NAME1" ] && [ "$LOOPBACK_PORT_NAME2" ]; then
+ LOOPBACK_INTF_MAC1=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $LOOPBACK_PORT_NAME1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ LOOPBACK_INTF_MAC2=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $LOOPBACK_PORT_NAME2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ fi
+ fi
+ if [ -z "$E2E_INTF_MAC1" ] && [ -z "$E2E_INTF_MAC2" ]; then
+ if [ "$CLOUD_DETAIL" ] && [ "$E2E_PORT_NAME1" ] && [ "$E2E_PORT_NAME2" ]; then
+ E2E_INTF_MAC1=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $E2E_PORT_NAME1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ E2E_INTF_MAC2=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $E2E_PORT_NAME2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ fi
+ fi
+ if [ -z "$INTF_MAC1" ] && [ -z "$INTF_MAC2" ]; then
+ if [ "$CLOUD_DETAIL" ] && [ "$PORT_NAME1" ] && [ "$PORT_NAME2" ]; then
+ INTF_MAC1=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $PORT_NAME1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ INTF_MAC2=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $PORT_NAME2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ fi
+ fi
+}
+
+get_interfaces_pci_address(){
+ # Sometimes the interfaces on the generator VM will use different physical networks. In this case,
+ # we have to make sure the generator uses them in the right order.
+ if [ $LOOPBACK_INTF_MAC1 ] && [ $LOOPBACK_INTF_MAC2 ]; then
+ LOOPBACK_PCI_ADDRESS_1=$(get_pci_address $LOOPBACK_INTF_MAC1)
+ LOOPBACK_PCI_ADDRESS_2=$(get_pci_address $LOOPBACK_INTF_MAC2)
+
+ echo LOOPBACK_PCI_ADDRESS_1=$LOOPBACK_PCI_ADDRESS_1 >> $NFVBENCH_CONF
+ echo LOOPBACK_PCI_ADDRESS_2=$LOOPBACK_PCI_ADDRESS_2 >> $NFVBENCH_CONF
+ fi
+ if [ $E2E_INTF_MAC1 ] && [ $E2E_INTF_MAC2 ]; then
+ E2E_PCI_ADDRESS_1=$(get_pci_address $E2E_INTF_MAC1)
+ E2E_PCI_ADDRESS_2=$(get_pci_address $E2E_INTF_MAC2)
+
+ echo E2E_PCI_ADDRESS_1=$E2E_PCI_ADDRESS_1 >> $NFVBENCH_CONF
+ echo E2E_PCI_ADDRESS_2=$E2E_PCI_ADDRESS_2 >> $NFVBENCH_CONF
+ fi
+ if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then
+ PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1)
+ PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2)
+
+ echo PCI_ADDRESS_1=$PCI_ADDRESS_1 >> $NFVBENCH_CONF
+ echo PCI_ADDRESS_2=$PCI_ADDRESS_2 >> $NFVBENCH_CONF
+ fi
+}
+
+bind_interfaces(){
+ if [ $LOOPBACK_PCI_ADDRESS_1 ]; then
+ dpdk-devbind -b vfio-pci $LOOPBACK_PCI_ADDRESS_1
+ fi
+ if [ $LOOPBACK_PCI_ADDRESS_2 ]; then
+ dpdk-devbind -b vfio-pci $LOOPBACK_PCI_ADDRESS_2
+ fi
+ if [ $E2E_PCI_ADDRESS_1 ]; then
+ dpdk-devbind -b vfio-pci $E2E_PCI_ADDRESS_1
+ fi
+ if [ $E2E_PCI_ADDRESS_2 ]; then
+ dpdk-devbind -b vfio-pci $E2E_PCI_ADDRESS_2
+ fi
+ if [ $PCI_ADDRESS_1 ]; then
+ dpdk-devbind -b vfio-pci $PCI_ADDRESS_1
+ fi
+ if [ $PCI_ADDRESS_2 ]; then
+ dpdk-devbind -b vfio-pci $PCI_ADDRESS_2
+ fi
+}
+
+configure_loopback_mode(){
+ if [ $LOOPBACK_PCI_ADDRESS_1 ] && [ $LOOPBACK_PCI_ADDRESS_2 ]; then
+ logger "NFVBENCHVM: loopback - Using pci $LOOPBACK_PCI_ADDRESS_1 ($LOOPBACK_INTF_MAC1)"
+ logger "NFVBENCHVM: loopback - Using pci $LOOPBACK_PCI_ADDRESS_2 ($LOOPBACK_INTF_MAC2)"
+
+ echo "Configuring nfvbench and TRex for loopback mode..."
+ # source the env script so the screen session inherits the environment and nfvbench does not fail
+ source /etc/profile.d/nfvbench.sh
+ sed -i "s/{{PCI_ADDRESS_1}}/$LOOPBACK_PCI_ADDRESS_1/g" /etc/nfvbench/loopback.cfg
+ sed -i "s/{{PCI_ADDRESS_2}}/$LOOPBACK_PCI_ADDRESS_2/g" /etc/nfvbench/loopback.cfg
+ sed -i "s/{{CORES}}/$WORKER_CORES/g" /etc/nfvbench/loopback.cfg
+ CORE_THREADS=$(seq -s, 2 $((2+$WORKER_CORES)))
+ sed -i "s/{{CORE_THREADS}}/$CORE_THREADS/g" /etc/nfvbench/loopback.cfg
+ else
+ echo "ERROR: Cannot find PCI Address from MAC"
+ echo "$LOOPBACK_INTF_MAC1: $LOOPBACK_PCI_ADDRESS_1"
+ echo "$LOOPBACK_INTF_MAC2: $LOOPBACK_PCI_ADDRESS_2"
+ logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC (loopback mode)"
+ fi
+
+}
+
+configure_e2e_mode(){
+ if [ $E2E_PCI_ADDRESS_1 ] && [ $E2E_PCI_ADDRESS_2 ]; then
+ logger "NFVBENCHVM: e2e - Using pci $E2E_PCI_ADDRESS_1 ($E2E_INTF_MAC1)"
+ logger "NFVBENCHVM: e2e - Using pci $E2E_PCI_ADDRESS_2 ($E2E_INTF_MAC2)"
+
+ echo "Configuring nfvbench and TRex for e2e mode..."
+ # source the env script so the screen session inherits the environment and nfvbench does not fail
+ source /etc/profile.d/nfvbench.sh
+ sed -i "s/{{PCI_ADDRESS_1}}/$E2E_PCI_ADDRESS_1/g" /etc/nfvbench/e2e.cfg
+ sed -i "s/{{PCI_ADDRESS_2}}/$E2E_PCI_ADDRESS_2/g" /etc/nfvbench/e2e.cfg
+ sed -i "s/{{CORES}}/$WORKER_CORES/g" /etc/nfvbench/e2e.cfg
+ CORE_THREADS=$(seq -s, 2 $((2+$WORKER_CORES)))
+ sed -i "s/{{CORE_THREADS}}/$CORE_THREADS/g" /etc/nfvbench/e2e.cfg
+ else
+ echo "ERROR: Cannot find PCI Address from MAC"
+ echo "$E2E_INTF_MAC1: $E2E_PCI_ADDRESS_1"
+ echo "$E2E_INTF_MAC2: $E2E_PCI_ADDRESS_2"
+ logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC (e2e mode)"
+ fi
+}
+
+configure_nfvbench(){
+ if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
+ logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
+ logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
+
+ echo "Configuring nfvbench and TRex..."
+ # source the env script so the screen session inherits the environment and nfvbench does not fail
+ source /etc/profile.d/nfvbench.sh
+
+ if [ $DEFAULT ]; then
+ cp /nfvbench/nfvbench.conf /etc/nfvbench/nfvbench.cfg
+ fi
+ sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/nfvbench/nfvbench.cfg
+ sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/nfvbench/nfvbench.cfg
+ sed -i "s/{{CORES}}/$WORKER_CORES/g" /etc/nfvbench/nfvbench.cfg
+ CORE_THREADS=$(seq -s, 2 $((2+$WORKER_CORES)))
+ sed -i "s/{{CORE_THREADS}}/$CORE_THREADS/g" /etc/nfvbench/nfvbench.cfg
+
+ else
+ echo "ERROR: Cannot find PCI Address from MAC"
+ echo "$INTF_MAC1: $PCI_ADDRESS_1"
+ echo "$INTF_MAC2: $PCI_ADDRESS_2"
+ logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
+ fi
+}
+
+# Check whether config files are provided by config drive (CLI command) or by the
+# Ansible script, and configure NFVbench according to these files
+if [ -f $E2E_CFG ]; then
+ if [ -z $E2E_PCI_ADDRESS_1 ] && [ -z $E2E_PCI_ADDRESS_2 ]; then
+ get_interfaces_mac_values
+ get_interfaces_pci_address
+ bind_interfaces
+ fi
+ configure_e2e_mode
+fi
+if [ -f $LOOPBACK_CFG ]; then
+ if [ -z $LOOPBACK_PCI_ADDRESS_1 ] && [ -z $LOOPBACK_PCI_ADDRESS_2 ]; then
+ get_interfaces_mac_values
+ get_interfaces_pci_address
+ bind_interfaces
+ fi
+ configure_loopback_mode
+fi
+# If nfvbench.cfg is provided by config drive (CLI command) or the Ansible script,
+# configure NFVbench using it; otherwise, if no config file exists, render the default template
+if [ -f $NFVBENCH_CFG ]; then
+ if [ -z $PCI_ADDRESS_1 ] && [ -z $PCI_ADDRESS_2 ]; then
+ get_interfaces_mac_values
+ get_interfaces_pci_address
+ bind_interfaces
+ fi
+ configure_nfvbench
+elif [ ! -f $E2E_CFG ] && [ ! -f $LOOPBACK_CFG ]; then
+ if [ -z $PCI_ADDRESS_1 ] && [ -z $PCI_ADDRESS_2 ]; then
+ get_interfaces_mac_values
+ get_interfaces_pci_address
+ bind_interfaces
+ fi
+ DEFAULT=true
+ configure_nfvbench
+fi
+
+exit 0
\ No newline at end of file
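The resulting precedence: an injected e2e.cfg and/or loopback.cfg is rendered first; an injected nfvbench.cfg is rendered as provided; if no file was injected at all, the bundled template is copied (DEFAULT=true) and rendered. A manual re-run sketch (normally rc.local.generator drives this at boot):

/nfvbench/configure-nfvbench.sh      # renders whichever cfg files are present
/nfvbench/start-nfvbench.sh e2e      # or: loopback, or no argument for nfvbench.cfg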
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf
new file mode 100644
index 0000000..c1ca23e
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf
@@ -0,0 +1,25 @@
+traffic_generator:
+  generator_profile:
+    - name: trex-local
+      tool: TRex
+      ip: 127.0.0.1
+      zmq_pub_port: 4500
+      zmq_rpc_port: 4501
+      software_mode: false
+
+      cores: {{CORES}}
+      platform:
+        master_thread_id: '0'
+        latency_thread_id: '1'
+        dual_if:
+          - socket: 0
+            threads: [{{CORE_THREADS}}]
+
+      interfaces:
+        - port: 0
+          pci: "{{PCI_ADDRESS_1}}"
+          switch:
+        - port: 1
+          pci: "{{PCI_ADDRESS_2}}"
+          switch:
+      intf_speed:
\ No newline at end of file
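configure-nfvbench.sh copies this template to /etc/nfvbench/nfvbench.cfg and renders it with sed. A sketch with placeholder values (actual PCI addresses and core numbers depend on the flavor and the attached ports):

sed -i "s/{{PCI_ADDRESS_1}}/0000:00:04.0/g" /etc/nfvbench/nfvbench.cfg
sed -i "s/{{PCI_ADDRESS_2}}/0000:00:05.0/g" /etc/nfvbench/nfvbench.cfg
sed -i "s/{{CORES}}/2/g" /etc/nfvbench/nfvbench.cfg
sed -i "s/{{CORE_THREADS}}/2,3/g" /etc/nfvbench/nfvbench.cfg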
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh
new file mode 100644
index 0000000..1f6fa28
--- /dev/null
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+
+restart_nfvbench_service(){
+ service nfvbench restart
+ echo "NFVbench running in screen 'nfvbench'"
+ logger "NFVBENCHVM: NFVbench running in screen 'nfvbench'"
+}
+
+start_nfvbench(){
+ ln -sfn /etc/nfvbench/nfvbench.cfg /etc/nfvbench/nfvbench.conf
+ restart_nfvbench_service
+}
+
+start_nfvbench_e2e_mode(){
+ ln -sfn /etc/nfvbench/e2e.cfg /etc/nfvbench/nfvbench.conf
+ restart_nfvbench_service
+}
+
+start_nfvbench_loopback_mode(){
+ ln -sfn /etc/nfvbench/loopback.cfg /etc/nfvbench/nfvbench.conf
+ restart_nfvbench_service
+}
+
+usage() {
+ echo "Usage: $0 action"
+ echo "action (optional):"
+ echo "e2e start NFVbench with E2E config file"
+ echo "loopback start NFVbench with loopback config file"
+ echo ""
+ echo "If no action is given NFVbench will start with default config file"
+ exit 1
+}
+
+# ----------------------------------------------------------------------------
+# Parse command line options and configure the script
+# ----------------------------------------------------------------------------
+if [ "$#" -lt 1 ]; then
+ start_nfvbench
+ exit 0
+else
+ if [ $1 = "e2e" ]; then
+ start_nfvbench_e2e_mode
+ exit 0
+ elif [ $1 = "loopback" ]; then
+ start_nfvbench_loopback_mode
+ exit 0
+ else
+ usage
+ fi
+fi
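Usage sketch for this helper; each action just repoints the /etc/nfvbench/nfvbench.conf symlink that the systemd unit reads, then restarts the service:

/nfvbench/start-nfvbench.sh            # default: /etc/nfvbench/nfvbench.cfg
/nfvbench/start-nfvbench.sh e2e        # switch to /etc/nfvbench/e2e.cfg
/nfvbench/start-nfvbench.sh loopback   # switch to /etc/nfvbench/loopback.cfg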
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
index e3f1486..874f6cb 100644
--- a/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
@@ -14,11 +14,13 @@ dpdk {
   dev default {
     num-rx-desc 1024
     num-tx-desc 1024
+    num-rx-queues {{VIF_MQ_SIZE}}
   }
   socket-mem 1024
   dev {{PCI_ADDRESS_1}}
   dev {{PCI_ADDRESS_2}}
-  uio-driver igb_uio
+  uio-driver uio_pci_generic
+  num-mbufs {{NUM_MBUFS}}
 }
 api-segment {
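For reference, rc.local.loopvm renders the two placeholders added above together with the PCI and core placeholders; a sketch with example values:

sed -i "s/{{PCI_ADDRESS_1}}/0000:00:04.0/g" /etc/vpp/startup.conf
sed -i "s/{{PCI_ADDRESS_2}}/0000:00:05.0/g" /etc/vpp/startup.conf
sed -i "s/{{WORKER_CORES}}/2/g" /etc/vpp/startup.conf
sed -i "s/{{VIF_MQ_SIZE}}/2/g" /etc/vpp/startup.conf
sed -i "s/{{NUM_MBUFS}}/16384/g" /etc/vpp/startup.conf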