#!/bin/bash

touch /var/lock/subsys/local

# Wait up to 60 seconds (30 retries x 2s) for cloud-init to generate $NFVBENCH_CONF
NFVBENCH_CONF=/etc/nfvbenchvm.conf
retry=30
until [ $retry -eq 0 ]; do
    if [ -f $NFVBENCH_CONF ]; then
        break
    fi
    retry=$((retry - 1))
    sleep 2
done
if [ ! -f $NFVBENCH_CONF ]; then
    exit 0
fi

# Parse and obtain all configurations (the file contains KEY=value pairs)
echo "Generating configurations for forwarder..."
eval $(cat $NFVBENCH_CONF)
touch /nfvbench_configured.flag

CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
# Hex bitmask covering all cores, e.g. 4 cores -> 2^4 - 1 = 15 -> 0xF
CPU_MASKS=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)
WORKER_CORES=$(expr $CPU_CORES - 1)

# CPU isolation optimizations: pin writeback workqueues and all IRQs to
# core 0 (mask 0x1), then isolate cores 1..WORKER_CORES for the forwarder
echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
echo 1 > /sys/devices/virtual/workqueue/cpumask
echo 1 > /proc/irq/default_smp_affinity
for irq in $(ls /proc/irq/); do
    if [ -f /proc/irq/$irq/smp_affinity ]; then
        echo 1 > /proc/irq/$irq/smp_affinity
    fi
done
tuna -c $(seq -s, 1 1 $WORKER_CORES) --isolate

NET_PATH=/sys/class/net

get_pci_address() {
    # device mapping for CentOS Linux 7:
    # lspci:
    #   00:03.0 Ethernet controller: Red Hat, Inc. Virtio network device
    #   00:04.0 Ethernet controller: Red Hat, Inc. Virtio network device
    # /sys/class/net:
    #   /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
    #   /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
    mac=$1
    for f in $(ls $NET_PATH/); do
        if grep -q "$mac" $NET_PATH/$f/address; then
            pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
            # some virtual interfaces match on MAC and do not have a PCI address
            if [ -n "$pci_addr" ] && [ "$pci_addr" != "N/A" ]; then
                break
            else
                pci_addr=""
            fi
        fi
    done
    if [ -z "$pci_addr" ]; then
        echo "ERROR: Cannot find PCI address for MAC $mac" >&2
        logger "NFVBENCHVM ERROR: Cannot find PCI address for MAC $mac"
        return 1
    fi
    echo $pci_addr
    return 0
}

# Sometimes the interfaces on the loopback VM will use different drivers, e.g.
# one from the vswitch (virtio based) and one from an SR-IOV VF. In this case,
# we have to make sure the forwarder uses them in the right order, which is
# especially important if the VM is in a PVVP chain.
if [ -n "$INTF_MAC1" ] && [ -n "$INTF_MAC2" ]; then
    PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1)
    PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2)
else
    echo "ERROR: VM MAC addresses missing in $NFVBENCH_CONF"
    logger "NFVBENCHVM ERROR: VM MAC addresses missing in $NFVBENCH_CONF"
fi

if [ -n "$PCI_ADDRESS_1" ] && [ -n "$PCI_ADDRESS_2" ]; then
    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)"
    logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)"
    # Configure the forwarder: load the igb_uio kernel module if not present
    if [ -z "$(lsmod | grep igb_uio)" ]; then
        modprobe uio
        insmod /dpdk/igb_uio.ko
    fi
    if [ "$FORWARDER" == "testpmd" ]; then
        echo "Configuring testpmd..."
        # Binding ports to DPDK
        /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
        /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
        # Run testpmd in MAC forwarding mode in a detached screen session,
        # rewriting destination MACs to the traffic generator ports
        screen -dmSL testpmd /dpdk/testpmd \
            -c $CPU_MASKS \
            -n 4 \
            -- \
                --burst=32 \
                --txd=256 \
                --rxd=1024 \
                --eth-peer=0,$TG_MAC1 \
                --eth-peer=1,$TG_MAC2 \
                --forward-mode=mac \
                --nb-cores=$WORKER_CORES \
                --max-pkt-len=9000 \
                --cmdline-file=/dpdk/testpmd_cmd.txt
        echo "testpmd running in screen 'testpmd'"
        logger "NFVBENCHVM: testpmd running in screen 'testpmd'"
    else
        echo "Configuring vpp..."
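        # The image ships /vpp/startup.conf and /vpp/vm.conf as templates with
        # {{PLACEHOLDER}} tokens that the sed calls below render in place.
        # Illustrative startup.conf fragment (assumed layout, not necessarily
        # the exact file shipped in the image):
        #   cpu  { main-core 0  corelist-workers 1-{{WORKER_CORES}} }
        #   dpdk { dev {{PCI_ADDRESS_1}}  dev {{PCI_ADDRESS_2}} }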
        cp /vpp/startup.conf /etc/vpp/startup.conf
        cp /vpp/vm.conf /etc/vpp/vm.conf
        sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
        sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
        sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
        service vpp start
        sleep 10
        # "vppctl show int" prints one line per interface (name, index, state);
        # after xargs flattens the output, fields 1 and 4 are the two names
        INTFS=$(vppctl show int | grep Ethernet | xargs)
        INTF_1=$(echo $INTFS | awk '{ print $1 }')
        INTF_2=$(echo $INTFS | awk '{ print $4 }')
        # ${VAR//\//\/} escapes each "/" as "\/" so slashes in interface names
        # and CIDRs do not break the sed expressions
        sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf
        sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf
        # Restart so vpp picks up the rendered vm.conf
        service vpp restart
        logger "NFVBENCHVM: vpp service restarted"
    fi
else
    echo "ERROR: Cannot find PCI address from MAC"
    echo "$INTF_MAC1: $PCI_ADDRESS_1"
    echo "$INTF_MAC2: $PCI_ADDRESS_2"
    logger "NFVBENCHVM ERROR: Cannot find PCI address from MAC"
fi
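
# For reference, an illustrative /etc/nfvbenchvm.conf as cloud-init might
# generate it (the variable names are the ones consumed above; all values
# are made-up examples):
#   FORWARDER=testpmd
#   INTF_MAC1=FA:16:3E:A0:00:01
#   INTF_MAC2=FA:16:3E:A0:00:02
#   TG_MAC1=00:10:94:00:0A:00
#   TG_MAC2=00:11:94:00:0A:00
#   VNF_GATEWAY1_CIDR=192.168.1.2/24
#   VNF_GATEWAY2_CIDR=192.168.2.2/24
#   TG_NET1=10.0.0.0/8
#   TG_NET2=20.0.0.0/8
#   TG_GATEWAY1_IP=192.168.1.1
#   TG_GATEWAY2_IP=192.168.2.1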