author    Luc Provoost <luc.provoost@intel.com>    2019-11-20 11:56:52 +0100
committer Luc Provoost <luc.provoost@intel.com>    2019-11-26 17:36:30 +0100
commit    30c2cccc1cc31ed0552a5e342e44a37bec550680 (patch)
tree      05087b72d3b8a6d1a26042c4051bed088239a1a1 /VNFs/DPPD-PROX/helper-scripts/rapid
parent    21c3b7f054e34055654a9ca94058deefb51c25e6 (diff)
rapid November release combining changes
- Renamed monitorswap.test to corestats.test. This is a more generic test case that can monitor the core statistics of all PROX instances.
- New portstats.test, which can monitor the port statistics of all PROX instances.
- Removed the tasks parameter from the [DEFAULT] section of the *.test files. runrapid.py now scans the PROX configuration files to automatically detect the PROX tasks that are being used.
- There is now an option to add the PushGateway parameter in the [DEFAULT] section of some *.test files. This pushes the measurements to the gateway, so they can be used by tools like Grafana. The logging of the data in a csv file has also been improved. Results can now be found on the screen, in the RUN*.log files, in the *.csv files, and are pushed to the PushGateway.
- In the *.test files, replaced the swapcores, irqcores and impaircores parameters by the cores parameter, since they are not treated differently by the code. For a generator, we still use the gencores and latcores parameters since we need to treat them differently in runrapid.py.
- devbind.sh is removed from the repository since this file is now created by runrapid.py.
- Renamed the rapidVMs.vms file to rapid.vms.
- The image is now built without ssh access for root, using only the centos user. runrapid.py now uses the centos user for ssh sessions. PROX tools are started using sudo.
- Using new DPDK, mb_lib and PROX versions as defined in deploycentostools.sh.
- Installing the missing yum-utils package in the image.
- Added the flowsize parameter to the impairtest.
- Added a list of flowsizes to the fixed_rate testing (l3framerate.test).
- The irqtest can now loop through multiple Test Machines instead of only checking the irq of one. Note that all Test Machines should have the same cores parameter.
- Changes in prox_ctrl.py to support setting the speed of a generator on a combination of multiple cores and tasks. Also added the multi_port_stats function to support the new portstats.test.
- Updated version to 19.11.21.
- Fixed some powers of 2 in the flow table.
Change-Id: Ia14543bf3195a2b1bd4184b455da6940f1777fc9
Signed-off-by: Luc Provoost <luc.provoost@intel.com>
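
As a reference for the PushGateway change above: runrapid.py pushes each measurement with a plain HTTP POST (see the requests.post calls in the runrapid.py hunks below). The following is a minimal sketch of that push, assuming a Prometheus PushGateway at the example address used in the *.test files (http://192.168.36.1:9091) and illustrative metric values:

    import requests

    # Illustrative values only; in runrapid.py these come from the [DEFAULT]
    # section of the *.test file and from the measured results.
    push_gateway = 'http://192.168.36.1:9091'   # PushGateway parameter
    test_name = 'basicrapid'                    # used as the job label
    env = 'rapid.env'                           # used as the instance label

    url = push_gateway + '/metrics/job/' + test_name + '/instance/' + env
    # The body is one "<metric> <value>" pair per line, each newline terminated.
    data = 'Flows {}\nPacketSize {}\nMpps {}\nAvgLatency {}\n'.format(512, 64, 5.2, 30)
    headers = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
    response = requests.post(url=url, data=data, headers=headers)
    response.raise_for_status()  # any 2xx status means the PushGateway accepted the metrics

Prometheus scrapes the PushGateway, after which tools such as Grafana can chart the results alongside the RUN*.log and *.csv output.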
Diffstat (limited to 'VNFs/DPPD-PROX/helper-scripts/rapid')
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/README                             7
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/bare.test                          5
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test                   11
-rwxr-xr-x   VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh         2
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/corestats.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/monitorswap.test)   9
-rwxr-xr-x   VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py                    13
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh              33
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg                         2
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/impair.test                        6
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg                            2
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/irq.test                          10
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test                   4
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg                         2
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test                    6
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test                   9
-rw-r--r-- [-rwxr-xr-x]   VNFs/DPPD-PROX/helper-scripts/rapid/portstats.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh)   28
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py                      28
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/rapid.vms (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/rapidVMs.vms)   0
-rwxr-xr-x   VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py                      597
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test                         7
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg                         2
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg                         2
-rw-r--r--   VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg                           4
23 files changed, 473 insertions, 316 deletions
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/README b/VNFs/DPPD-PROX/helper-scripts/rapid/README
index 75ba30e3..b198c519 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/README
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/README
@@ -20,6 +20,8 @@ rapid (Rapid Automated Performance Indication for Dataplane)
rapid is a set of files offering an easy way to do a sanity check of the
dataplane performance of an OpenStack or container environment.
+Most of the information below is now available on wiki.opnfv.org/display/SAM/Rapid+scripting
+
In case of OpenStack, copy the files in a directory on a machine that can run the OpenStack CLI
commands and that can reach the networks to connect to the VMs.
@@ -159,12 +161,13 @@ dp_ip = 10.10.10.15
dp_mac = fa:16:3e:69:f3:e7
[ssh]
-key = prox
+key = prox.pem
+user = centos
[Varia]
vim = OpenStack
stack = rapid
-vms = rapidVMs.vms
+vms = rapid.vms
image = rapidVM
image_file = rapidVM.qcow2
dataplane_network = dataplane-network
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/bare.test b/VNFs/DPPD-PROX/helper-scripts/rapid/bare.test
index c3f4965f..7991f3b1 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/bare.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/bare.test
@@ -20,7 +20,6 @@ number_of_tests = 2
total_number_of_test_machines = 2
prox_socket = true
prox_launch_exit = true
-tasks=[0]
[TestM1]
name = Generator
@@ -32,7 +31,7 @@ latcores = [3]
[TestM2]
name = Swap
config_file = l2swap.cfg
-swapcores = [1]
+cores = [1]
[BinarySearchParams]
drop_rate_threshold = 0
@@ -52,5 +51,5 @@ warmuptime=2
test=flowsizetest
packetsizes=[64,128]
# the number of flows in the list need to be powers of 2, max 2^20
-# # Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65535, 131072, 262144, 524280, 1048576
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
flows=[512,1]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test
index 0a751d8c..1174f773 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test
@@ -20,7 +20,7 @@ number_of_tests = 2
total_number_of_test_machines = 2
prox_socket = true
prox_launch_exit = true
-tasks=[0]
+#PushGateway=http://192.168.36.1:9091
[TestM1]
name = Generator
@@ -32,7 +32,7 @@ latcores = [3]
[TestM2]
name = Swap
config_file = swap.cfg
-swapcores = [1]
+cores = [1]
[BinarySearchParams]
drop_rate_threshold = 0.1
@@ -50,8 +50,7 @@ warmuptime=2
[test2]
test=flowsizetest
-packetsizes=[64,128]
+packetsizes=[64,128,1024]
# the number of flows in the list need to be powers of 2, max 2^20
-# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65535, 131072, 262144, 524280, 1048576
-flows=[512,1]
-
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[1,512,1048576]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
index 7d66bd39..ad297b44 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
@@ -27,7 +27,7 @@ then
isolated_cores=1-$MAXCOREID*)
echo "Isolated CPU(s) OK, no reboot: $line">>$logfile
modprobe uio
- insmod /root/dpdk/build/kmod/igb_uio.ko
+ insmod /home/centos/dpdk/build/kmod/igb_uio.ko
exit 0
;;
isolated_cores=*)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/monitorswap.test b/VNFs/DPPD-PROX/helper-scripts/rapid/corestats.test
index 76da2347..026a2036 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/monitorswap.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/corestats.test
@@ -15,17 +15,16 @@
##
[DEFAULT]
-name = MonitorSwap
+name = CoreStatistics
number_of_tests = 1
total_number_of_test_machines = 1
prox_socket = true
-prox_launch_exit = false
-tasks=[0]
+prox_launch_exit = true
[TestM1]
name = Swap
config_file = swap.cfg
-swapcores = [1]
+cores = [1]
[test1]
-test=measureswap
+test=corestats
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
index fc5e97b4..ccb500aa 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
@@ -28,12 +28,11 @@ import re
import logging
from logging.handlers import RotatingFileHandler
from logging import handlers
-from prox_ctrl import prox_ctrl
import ConfigParser
-version="19.6.30"
+version="19.11.21"
stack = "rapid" #Default string for stack. This is not an OpenStack Heat stack, just a group of VMs
-vms = "rapidVMs.vms" #Default string for vms file
+vms = "rapid.vms" #Default string for vms file
key = "prox" # default name for key
image = "rapidVM" # default name for the image
image_file = "rapidVM.qcow2"
@@ -206,6 +205,8 @@ log.debug("Checking dataplane network: " + dataplane_network)
if dataplane_network in Networks:
# If the dataplane already exists, we are assuming that this network is already created before with the proper configuration, hence we do not check if the subnet is created etc...
log.info("Dataplane network (" + dataplane_network + ") already active")
+ subnet = "n/a: was already existing"
+ subnet_cidr = "n/a, was already existing"
else:
log.info('Creating dataplane network ...')
cmd = 'openstack network create '+dataplane_network+' -f value -c status'
@@ -220,6 +221,8 @@ else:
Subnets = subprocess.check_output(cmd , shell=True).decode().strip()
if subnet in Subnets:
log.info("Subnet (" +subnet+ ") already exists")
+ subnet = "n/a, was already existing"
+ subnet_cidr = "n/a, was already existing"
else:
log.info('Creating subnet ...')
cmd = 'openstack subnet create --network ' + dataplane_network + ' --subnet-range ' + subnet_cidr +' --gateway none ' + subnet+' -f value -c name'
@@ -241,6 +244,7 @@ log.debug(cmd)
Images = subprocess.check_output(cmd , shell=True).decode().strip()
if image in Images:
log.info("Image (" + image + ") already available")
+ image_file="Don't know, was already existing"
else:
log.info('Creating image ...')
cmd = 'openstack image create -f value -c status --disk-format qcow2 --container-format bare --public --file ./'+image_file+ ' ' +image
@@ -384,7 +388,8 @@ for vm in range(1, int(total_number_of_VMs)+1):
log.info('%s: (admin IP: %s), (dataplane IP: %s), (dataplane MAC: %s)' % (ServerName[vm-1],vmAdminIP,vmDPIP,vmDPmac))
config.add_section('ssh')
-config.set('ssh', 'key', key)
+config.set('ssh', 'key', key+'.pem')
+config.set('ssh', 'user', 'centos')
config.add_section('Varia')
config.set('Varia', 'VIM', 'OpenStack')
config.set('Varia', 'stack', stack)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
index 2695735c..ddef6448 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
@@ -15,19 +15,22 @@
## limitations under the License.
##
-BUILD_DIR="/opt/openstackrapid"
-COPY_DIR="/home/centos" # Directory where the packer tool has copied some files (e.g. check_prox_system_setup.sh)
-DPDK_VERSION="18.08"
-PROX_COMMIT="c8e9e6bb696363a397b2e718eb4d3e5f38a8ef22"
+BUILD_DIR="/opt/rapid"
+WORK_DIR="/home/centos" # Directory where the packer tool has copied some files (e.g. check_prox_system_setup.sh)
+ # Runtime scripts are assuming ${WORK_DIR} as the directory for PROX. Check the rundir variable in runrapid.py. Should be the same!
+ # This variable is defined in 4 different places and should have the same value: centos.json, deploycentos.sh, check_prox_system_setup.sh and runrapid.py
+DPDK_VERSION="19.05"
+PROX_COMMIT="f456ab65"
+MULTI_BUFFER_LIB_VER="0.52"
export RTE_SDK="${BUILD_DIR}/dpdk-${DPDK_VERSION}"
export RTE_TARGET="x86_64-native-linuxapp-gcc"
function os_pkgs_install()
{
+ sudo yum install -y deltarpm yum-utils
# NASM repository for AESNI MB library
sudo yum-config-manager --add-repo http://www.nasm.us/nasm.repo
- sudo yum install -y deltarpm
sudo yum update -y
sudo yum install -y git wget gcc unzip libpcap-devel ncurses-devel \
libedit-devel lua-devel kernel-devel iperf3 pciutils \
@@ -36,9 +39,6 @@ function os_pkgs_install()
function os_cfg()
{
- # Enabling root ssh access
- sudo sed -i '/disable_root: 1/c\disable_root: 0' /etc/cloud/cloud.cfg
-
# huge pages to be used by DPDK
sudo sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
@@ -59,9 +59,9 @@ function os_cfg()
# Install the check_tuned_params service to make sure that the grub cmd line has the right cpus in isolcpu. The actual number of cpu's
# assigned to this VM depends on the flavor used. We don't know at this time what that will be.
- sudo chmod +x ${COPY_DIR}/check_prox_system_setup.sh
- sudo cp -r ${COPY_DIR}/check_prox_system_setup.sh /usr/local/libexec/
- sudo cp -r ${COPY_DIR}/check-prox-system-setup.service /etc/systemd/system/
+ sudo chmod +x ${WORK_DIR}/check_prox_system_setup.sh
+ sudo cp -r ${WORK_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+ sudo cp -r ${WORK_DIR}/check-prox-system-setup.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable check-prox-system-setup.service
@@ -70,12 +70,12 @@ function os_cfg()
function mblib_install()
{
- export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-0.50"
+ export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
# Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
pushd ${BUILD_DIR} > /dev/null 2>&1
- wget https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip
- unzip v0.50.zip
+ wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+ unzip v${MULTI_BUFFER_LIB_VER}.zip
pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
make -j`getconf _NPROCESSORS_ONLN`
sudo make install
@@ -95,8 +95,7 @@ function dpdk_install()
tar -xf ./dpdk-${DPDK_VERSION}.tar.xz
popd > /dev/null 2>&1
- # Runtime scripts are assuming /root as the directory for PROX
- sudo ln -s ${RTE_SDK} /root/dpdk
+ sudo ln -s ${RTE_SDK} ${WORK_DIR}/dpdk
pushd ${RTE_SDK} > /dev/null 2>&1
make config T=${RTE_TARGET}
@@ -129,7 +128,7 @@ function prox_install()
git checkout ${PROX_COMMIT}
popd > /dev/null 2>&1
prox_compile
- sudo ln -s ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX /root/prox
+ sudo ln -s ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX ${WORK_DIR}/prox
popd > /dev/null 2>&1
}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg
index 16b6ac99..8e86c8e1 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg
@@ -38,7 +38,7 @@ name=${name}
[core 0]
mode=master
-[core $impaircores]
+[core $cores]
name=impair
task=0
mode=impair
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.test b/VNFs/DPPD-PROX/helper-scripts/rapid/impair.test
index 806762a1..ee926ae4 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/impair.test
@@ -20,7 +20,6 @@ number_of_tests = 2
total_number_of_test_machines = 3
prox_socket = true
prox_launch_exit = true
-tasks=[0]
[TestM1]
name = Generator
@@ -33,12 +32,12 @@ latcores = [3]
[TestM2]
name = ImpairGW
config_file = impair.cfg
-impaircores = [1]
+cores = [1]
[TestM3]
name = Swap
config_file = swap.cfg
-swapcores = [1]
+cores = [1]
[BinarySearchParams]
drop_rate_threshold = 0.1
@@ -57,3 +56,4 @@ warmuptime=2
[test2]
test=impairtest
packetsize=64
+flowsize=64
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg
index 38e4b966..8c1331e9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg
@@ -36,7 +36,7 @@ name=${name}
[core 0]
mode=master
-[core $irqcores]
+[core $cores]
name=irq
task=0
mode=irq
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.test b/VNFs/DPPD-PROX/helper-scripts/rapid/irq.test
index 4dbb0cc6..5ba0df87 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/irq.test
@@ -17,17 +17,19 @@
[DEFAULT]
name = BasicSwapTesting
number_of_tests = 1
-total_number_of_test_machines = 1
+total_number_of_test_machines = 2
prox_socket = true
prox_launch_exit = true
-tasks=[0]
[TestM1]
name = InterruptTesting
config_file = irq.cfg
-irqcores = [1,2,3]
+cores = [1,2,3]
+[TestM2]
+name = InterruptTesting
+config_file = irq.cfg
+cores = [1,2,3]
[test1]
test=irqtest
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test b/VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test
index 51710fe9..4fb0baef 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test
@@ -20,7 +20,6 @@ number_of_tests = 1
total_number_of_test_machines = 2
prox_socket = true
prox_launch_exit = true
-tasks=[0]
[TestM1]
name = Generator
@@ -33,9 +32,10 @@ startspeed = 10
[TestM2]
name = Swap
config_file = l2swap.cfg
-swapcores = [1]
+cores = [1]
[test1]
test=fixed_rate
packetsizes=[256]
+flows=[64]
speed=10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg
index 366d8ac2..2c8ce091 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg
@@ -37,7 +37,7 @@ name=${name}
[core 0]
mode=master
-[core $swapcores]
+[core $cores]
name=swap
task=0
mode=swap
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test b/VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test
index 95b2d492..106b4c6d 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test
@@ -20,7 +20,6 @@ number_of_tests = 2
total_number_of_test_machines = 2
prox_socket = true
prox_launch_exit = true
-tasks=[0]
[TestM1]
name = Generator
@@ -32,7 +31,7 @@ latcores = [3]
[TestM2]
name = Swap
config_file = l2swap.cfg
-swapcores = [1]
+cores = [1]
[BinarySearchParams]
drop_rate_threshold = 0
@@ -52,6 +51,5 @@ warmuptime=2
test=flowsizetest
packetsizes=[64]
# the number of flows in the list need to be powers of 2, max 2^20
-# # Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65535, 131072, 262144, 524280, 1048576
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
flows=[512,1]
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test b/VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test
index 2095da4c..28214654 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test
@@ -20,7 +20,7 @@ number_of_tests = 1
total_number_of_test_machines = 2
prox_socket = true
prox_launch_exit = true
-tasks=[0]
+#PushGateway=http://192.168.36.1:9091
[TestM1]
name = Generator
@@ -32,9 +32,12 @@ latcores = [3]
[TestM2]
name = Swap
config_file = swap.cfg
-swapcores = [1]
+cores = [1]
[test1]
test=fixed_rate
-packetsizes=[64]
+packetsizes=[64,128]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[1,64]
speed=10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/portstats.test
index f9fc03ce..c5a18849 100755..100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/portstats.test
@@ -1,7 +1,5 @@
-#!/bin/bash
-
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2010-2019 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -16,12 +14,18 @@
## limitations under the License.
##
-link="$(ip -o link | grep MACADDRESS |cut -d":" -f 2)"
-if [ -n "$link" ];
-then
- echo Need to bind
- /root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $(/root/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)
-else
- echo Assuming port is already bound to DPDK
-fi
-exit 0
+[DEFAULT]
+name = PortStats
+number_of_tests = 1
+total_number_of_test_machines = 1
+prox_socket = true
+prox_launch_exit = true
+
+[TestM1]
+name = Swap
+config_file = swap.cfg
+cores = [1]
+ports = [0]
+
+[test1]
+test=portstats
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
index 5d5fb181..4c764eae 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
@@ -172,13 +172,10 @@ class prox_sock(object):
def stop(self, cores):
self._send('stop %s' % ','.join(map(str, cores)))
- def speed(self, speed, cores, tasks=None):
- if tasks is None:
- tasks = [ 0 ] * len(cores)
- elif len(tasks) != len(cores):
- raise ValueError('cores and tasks must have the same len')
- for (core, task) in zip(cores, tasks):
- self._send('speed %s %s %s' % (core, task, speed))
+ def speed(self, speed, cores, tasks=[0]):
+ for core in cores:
+ for task in tasks:
+ self._send('speed %s %s %s' % (core, task, speed))
def reset_stats(self):
self._send('reset stats')
@@ -240,6 +237,23 @@ class prox_sock(object):
hz = int(stats[7])
return rx, rx_non_dp, tx, tx_non_dp, drop, tx_fail, tsc, hz
+ def multi_port_stats(self, ports=[0]):
+ rx = tx = port_id = tsc = no_mbufs = errors = 0
+ self._send('multi port stats %s' % (','.join(map(str, ports))))
+ result = self._recv().split(';')
+ if result[0].startswith('error'):
+ log.critical("multi port stats error: unexpected invalid syntax (potential incompatibility between scripts and PROX)")
+ raise Exception("multi port stats error")
+ for statistics in result:
+ stats = statistics.split(',')
+ port_id = int(stats[0])
+ rx += int(stats[1])
+ tx += int(stats[2])
+ no_mbufs += int(stats[3])
+ errors += int(stats[4])
+ tsc = int(stats[5])
+ return rx, tx, no_mbufs, errors, tsc
+
def set_random(self, cores, task, offset, mask, length):
self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, mask, length))
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapidVMs.vms b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.vms
index 6032f68b..6032f68b 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapidVMs.vms
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.vms
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
index d0ee68a3..e95d650b 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
@@ -33,8 +33,9 @@ import ConfigParser
import ast
import atexit
import csv
+import requests
-version="19.6.30"
+version="19.11.21"
env = "rapid.env" #Default string for environment
test_file = "basicrapid.test" #Default string for test
machine_map_file = "machine.map" #Default string for machine map file
@@ -42,6 +43,7 @@ loglevel="DEBUG" # sets log level for writing to file
screenloglevel="INFO" # sets log level for writing to screen
runtime=10 # time in seconds for 1 test run
configonly = False # IF True, the system will upload all the necessary config fiels to the VMs, but not start PROX and the actual testing
+rundir = "/home/centos" # Directory where to find the tools in the machines running PROX
def usage():
print("usage: runrapid [--version] [-v]")
@@ -106,7 +108,7 @@ for opt, arg in opts:
print ("Using '"+env+"' as name for the environment")
print ("Using '"+test_file+"' for test case definition")
print ("Using '"+machine_map_file+"' for machine mapping")
-print ("Runtime: "+ runtime)
+print ("Runtime: "+ str(runtime))
class bcolors:
HEADER = '\033[95m'
@@ -212,7 +214,7 @@ def run_iteration(gensock,sutsock):
sleep_time = 2
# Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
time.sleep(sleep_time)
- abs_old_rx, abs_old_non_dp_rx, abs_old_tx, abs_old_non_dp_tx, abs_old_drop, abs_old_tx_fail, abs_old_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,tasks)
+ abs_old_rx, abs_old_non_dp_rx, abs_old_tx, abs_old_non_dp_tx, abs_old_drop, abs_old_tx_fail, abs_old_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,gentasks)
abs_old_rx = abs_old_rx - abs_old_non_dp_rx
abs_old_tx = abs_old_tx - abs_old_non_dp_tx
gensock.start(gencores)
@@ -221,7 +223,7 @@ def run_iteration(gensock,sutsock):
old_sut_rx, old_sut_non_dp_rx, old_sut_tx, old_sut_non_dp_tx, old_sut_drop, old_sut_tx_fail, old_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
old_sut_rx = old_sut_rx - old_sut_non_dp_rx
old_sut_tx = old_sut_tx - old_sut_non_dp_tx
- old_rx, old_non_dp_rx, old_tx, old_non_dp_tx, old_drop, old_tx_fail, old_tsc, tsc_hz = gensock.core_stats(genstatcores,tasks)
+ old_rx, old_non_dp_rx, old_tx, old_non_dp_tx, old_drop, old_tx_fail, old_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
old_rx = old_rx - old_non_dp_rx
old_tx = old_tx - old_non_dp_tx
# Measure latency statistics per second
@@ -243,7 +245,7 @@ def run_iteration(gensock,sutsock):
lat_avg = lat_avg / n_loops
used_avg = used_avg / n_loops
# Get statistics after some execution time
- new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = gensock.core_stats(genstatcores,tasks)
+ new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
new_rx = new_rx - new_non_dp_rx
new_tx = new_tx - new_non_dp_tx
if sutsock!='none':
@@ -253,7 +255,7 @@ def run_iteration(gensock,sutsock):
#Stop generating
gensock.stop(gencores)
time.sleep(sleep_time)
- abs_new_rx, abs_new_non_dp_rx, abs_new_tx, abs_new_non_dp_tx, abs_new_drop, abs_new_tx_fail, abs_new_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,tasks)
+ abs_new_rx, abs_new_non_dp_rx, abs_new_tx, abs_new_non_dp_tx, abs_new_drop, abs_new_tx_fail, abs_new_tsc, abs_tsc_hz = gensock.core_stats(genstatcores,gentasks)
abs_new_rx = abs_new_rx - abs_new_non_dp_rx
abs_new_tx = abs_new_tx - abs_new_non_dp_tx
drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
@@ -296,6 +298,9 @@ def get_speed(packet_speed,size):
def run_flow_size_test(gensock,sutsock):
+ fieldnames = ['Flows','PacketSize','Gbps','Mpps','AvgLatency','MaxLatency','PacketsDropped','PacketDropRate']
+ writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
+ writer.writeheader()
gensock.start(latcores)
for size in packet_size_list:
size = size-4
@@ -325,7 +330,7 @@ def run_flow_size_test(gensock,sutsock):
print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
sys.stdout.flush()
# Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed / len(gencores), gencores)
+ gensock.speed(speed / len(gencores) / len (gentasks), gencores, gentasks)
time.sleep(1)
# Get statistics now that the generation is stable and initial ARP messages are dealt with
pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx_fail, abs_tx, lat_min, lat_used = run_iteration(gensock,sutsock)
@@ -352,6 +357,7 @@ def run_flow_size_test(gensock,sutsock):
speed_prefix = bcolors.ENDC
gen_warning = ''
endspeed = speed
+ endspeed_prefix = speed_prefix
endpps_req_tx = pps_req_tx
endpps_tx = pps_tx
endpps_sut_tx_str = pps_sut_tx_str
@@ -390,215 +396,304 @@ def run_flow_size_test(gensock,sutsock):
log.debug('|step{:>3}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% '+speed_prefix +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | ' + '{:>9.3f}'.format(pps_tx) +' Mpps | '+ bcolors.ENDC + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+bcolors.OKBLUE + '{:>4.1f}'.format(get_speed(pps_rx,size)) + 'Gb/s{:>9.3f}'.format(pps_rx)+' Mpps'+bcolors.ENDC+' | '+lat_avg_prefix+ '{:>9.0f}'.format(lat_avg)+' us | '+lat_max_prefix+ '{:>9.0f}'.format(lat_max)+' us | '+ abs_drop_rate_prefix + '{:>14d}'.format(abs_dropped)+drop_rate_prefix+ ' |''{:>9.2f}'.format(drop_rate)+bcolors.ENDC+ success_message +lat_warning + gen_warning)
speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
if endpps_sut_tx_str != 'NO_RESULTS':
- log.info('|{:>7}'.format(str(flow_number))+" | " + '{:>5.1f}'.format(endspeed) + '% ' + speed_prefix + '{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+ ' Mpps | '+ bcolors.ENDC + '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+bcolors.OKBLUE + '{:>4.1f}'.format(get_speed(pps_rx,size)) + 'Gb/s{:>9.3f}'.format(endpps_rx)+' Mpps'+bcolors.ENDC+' | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+ '{:>9.0f}'.format(endlat_max)+' us | '+ '{:>14d}'.format(endabs_dropped)+ ' |'+'{:>9.2f}'.format(enddrop_rate)+ '% |')
+ log.info('|{:>7}'.format(str(flow_number))+" | " + '{:>5.1f}'.format(endspeed) + '% ' + endspeed_prefix + '{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+ ' Mpps | '+ '{:>9.3f}'.format(endpps_tx) + ' Mpps | ' + bcolors.ENDC + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+bcolors.OKBLUE + '{:>4.1f}'.format(get_speed(endpps_rx,size)) + 'Gb/s{:>9.3f}'.format(endpps_rx)+' Mpps'+bcolors.ENDC+' | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+ '{:>9.0f}'.format(endlat_max)+' us | '+ '{:>14d}'.format(endabs_dropped)+ ' |'+'{:>9.2f}'.format(enddrop_rate)+ '% |')
if endwarning:
log.info (endwarning)
log.info("+--------+--------------------+----------------+----------------+----------------+------------------------+----------------+----------------+----------------+------------+")
- writer.writerow({'flow':flow_number,'size':(size+4),'endspeed':endspeed,'endspeedpps':get_pps(endspeed,size),'endpps_req_tx':endpps_req_tx,'endpps_tx':endpps_tx,'endpps_sut_tx_str':endpps_sut_tx_str,'endpps_rx':endpps_rx,'endlat_avg':endlat_avg,'endlat_max':endlat_max,'endabs_dropped':endabs_dropped,'enddrop_rate':enddrop_rate})
+ writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'Gbps':get_speed(endpps_rx,size),'Mpps':endpps_rx,'AvgLatency':endlat_avg,'MaxLatency':endlat_max,'PacketsDropped':endabs_dropped,'PacketDropRate':enddrop_rate})
+ if PushGateway:
+ URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
+ DATA = 'Flows {}\nPacketSize {}\nGbps {}\nMpps {}\nAvgLatency {}\nMaxLatency {}\nPacketsDropped {}\nPacketDropRate {}\n'.format(flow_number,size+4,get_speed(endpps_rx,size),endpps_rx,endlat_avg,endlat_max,endabs_dropped,enddrop_rate)
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
+ response = requests.post(url=URL, data=DATA,headers=HEADERS)
else:
log.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
gensock.stop(latcores)
def run_fixed_rate(gensock,sutsock):
- log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| UDP, 1 flow, different packet sizes |")
- log.info("+-----+------------------+-------------+-------------+-------------+-------------+-------------+-------------+-----------+-----------+---------+------------+")
- log.info("|Pktsz| Speed requested | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core| Avg. Latency| Max. Latency| Sent | Received | Lost | Total Lost |")
- log.info("+-----+------------------+-------------+-------------+-------------+-------------+-------------+-------------+-----------+-----------+---------+------------+")
- sleep_time = 3
+ fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Sent','Received','Lost','LostTotal']
+ writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
+ writer.writeheader()
gensock.start(latcores)
+ sleep_time=3
for size in packet_size_list:
- # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
- time.sleep(sleep_time)
size = size-4
- gensock.reset_stats()
- if sutsock!='none':
- sutsock.reset_stats()
gensock.set_size(gencores,0,size) # This is setting the frame size
gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
- # This will only work when using sending UDP packets. For different protocls and ehternet types, we would need a differnt calculation
- pps_sut_tx_str = 'NO_RESULTS'
- speed = STARTSPEED
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed / len(gencores), gencores)
- duration = float(runtime)
- first = 1
- tot_drop = 0
- if sutsock!='none':
- old_sut_rx, old_sut_non_dp_rx, old_sut_tx, old_sut_non_dp_tx, old_sut_drop, old_sut_tx_fail, old_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
- old_sut_rx = old_sut_rx - old_sut_non_dp_rx
- old_sut_tx = old_sut_tx - old_sut_non_dp_tx
- old_rx, old_non_dp_rx, old_tx, old_non_dp_tx, old_drop, old_tx_fail, old_tsc, tsc_hz = gensock.core_stats(genstatcores,tasks)
- old_rx = old_rx - old_non_dp_rx
- old_tx = old_tx - old_non_dp_tx
- gensock.start(gencores)
- while (duration > 0):
- time.sleep(0.5)
- lat_min, lat_max, lat_avg, lat_used = gensock.lat_stats(latcores)
- if lat_used < 0.95:
- lat_warning = bcolors.FAIL + ' Potential latency accuracy problem: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
- else:
- lat_warning = ''
- # Get statistics after some execution time
- new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = gensock.core_stats(genstatcores,tasks)
- new_rx = new_rx - new_non_dp_rx
- new_tx = new_tx - new_non_dp_tx
- if sutsock!='none':
- new_sut_rx, new_sut_non_dp_rx, new_sut_tx, new_sut_non_dp_tx, new_sut_drop, new_sut_tx_fail, new_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
- new_sut_rx = new_sut_rx - new_sut_non_dp_rx
- new_sut_tx = new_sut_tx - new_sut_non_dp_tx
- drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
- rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
- tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
- tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
- if tsc == 0 :
- continue
+ # This will only work when using sending UDP packets. For different protocols and ehternet types, we would need a different calculation
+ log.info("+--------------------------------------------------------------------------------------------------------------------------------------------------------------+")
+ log.info("| UDP, "+ '{:>5}'.format(size+4) +" bytes, different number of flows by randomizing SRC & DST UDP port |")
+ log.info("+--------+------------------+-------------+-------------+-------------+-------------+-------------+-------------+-----------+-----------+---------+------------+")
+ log.info("| Flows | Speed requested | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core| Avg. Latency| Max. Latency| Sent | Received | Lost | Total Lost |")
+ log.info("+--------+------------------+-------------+-------------+-------------+-------------+-------------+-------------+-----------+-----------+---------+------------+")
+ for flow_number in flow_size_list:
+ time.sleep(sleep_time)
+ gensock.reset_stats()
if sutsock!='none':
- sut_rx = new_sut_rx - old_sut_rx
- sut_tx = new_sut_tx - old_sut_tx
- sut_tsc = new_sut_tsc - old_sut_tsc
- if sut_tsc == 0 :
- continue
- duration = duration - 1
- old_drop = new_drop
- old_rx = new_rx
- old_tx = new_tx
- old_tsc = new_tsc
- pps_req_tx = (tx+drop-rx)*tsc_hz*1.0/(tsc*1000000)
- pps_tx = tx*tsc_hz*1.0/(tsc*1000000)
- pps_rx = rx*tsc_hz*1.0/(tsc*1000000)
+ sutsock.reset_stats()
+ source_port,destination_port = flows[flow_number]
+ gensock.set_random(gencores,0,34,source_port,2)
+ gensock.set_random(gencores,0,36,destination_port,2)
+ endpps_sut_tx_str = 'NO_RESULTS'
+ speed = STARTSPEED
+ # Start generating packets at requested speed (in % of a 10Gb/s link)
+ gensock.speed(speed / len(gencores) / len (gentasks), gencores, gentasks)
+ duration = float(runtime)
+ first = 1
+ tot_drop = 0
if sutsock!='none':
- old_sut_tx = new_sut_tx
- old_sut_rx = new_sut_rx
- old_sut_tsc= new_sut_tsc
- pps_sut_tx = sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000)
- pps_sut_tx_str = '{:>7.3f}'.format(pps_sut_tx)
- else:
- pps_sut_tx = 0
- pps_sut_tx_str = 'NO MEAS.'
- if (tx == 0):
- log.critical("TX = 0. Test interrupted since no packet has been sent.")
- raise Exception("TX = 0")
- tot_drop = tot_drop + tx - rx
-
- if pps_sut_tx_str != 'NO_RESULTS':
- # First second mpps are not valid as there is no alignement between time the generator is started and per seconds stats
- if (first):
- log.info('|{:>4}'.format(size+4)+" |" + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+' |' +' |' +' |'+ ' |'+ '{:>8.0f}'.format(lat_avg)+' us |'+'{:>8.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | '+ '{:>9.0f}'.format(rx) + ' | '+ '{:>7.0f}'.format(tx-rx) + ' | '+'{:>7.0f}'.format(tot_drop) +' |'+lat_warning)
- else:
- log.info('|{:>4}'.format(size+4)+" |" + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+ '{:>7.3f}'.format(pps_req_tx)+' Mpps |'+ '{:>7.3f}'.format(pps_tx) +' Mpps |' + '{:>7}'.format(pps_sut_tx_str) +' Mpps |'+ '{:>7.3f}'.format(pps_rx)+' Mpps |'+ '{:>8.0f}'.format(lat_avg)+' us |'+'{:>8.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | '+ '{:>9.0f}'.format(rx) + ' | '+ '{:>7.0f}'.format(tx-rx) + ' | '+ '{:>7.0f}'.format(tot_drop) +' |'+lat_warning)
- else:
- log.debug('|{:>7}'.format(str(size))+" | Speed 0 or close to 0")
- first = 0
- if (duration <= 0):
- #Stop generating
- gensock.stop(gencores)
- time.sleep(sleep_time)
+ old_sut_rx, old_sut_non_dp_rx, old_sut_tx, old_sut_non_dp_tx, old_sut_drop, old_sut_tx_fail, old_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
+ old_sut_rx = old_sut_rx - old_sut_non_dp_rx
+ old_sut_tx = old_sut_tx - old_sut_non_dp_tx
+ old_rx, old_non_dp_rx, old_tx, old_non_dp_tx, old_drop, old_tx_fail, old_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
+ old_rx = old_rx - old_non_dp_rx
+ old_tx = old_tx - old_non_dp_tx
+ gensock.start(gencores)
+ while (duration > 0):
+ time.sleep(0.5)
lat_min, lat_max, lat_avg, lat_used = gensock.lat_stats(latcores)
if lat_used < 0.95:
lat_warning = bcolors.FAIL + ' Potential latency accuracy problem: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
else:
lat_warning = ''
# Get statistics after some execution time
- new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = gensock.core_stats(genstatcores,tasks)
+ new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
new_rx = new_rx - new_non_dp_rx
new_tx = new_tx - new_non_dp_tx
if sutsock!='none':
new_sut_rx, new_sut_non_dp_rx, new_sut_tx, new_sut_non_dp_tx, new_sut_drop, new_sut_tx_fail, new_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
new_sut_rx = new_sut_rx - new_sut_non_dp_rx
new_sut_tx = new_sut_tx - new_sut_non_dp_tx
- drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
- rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
- tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
- tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
- tot_drop = tot_drop + tx - rx
+ drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
+ rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
+ tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
+ tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
+ if tsc == 0 :
+ continue
if sutsock!='none':
sut_rx = new_sut_rx - old_sut_rx
sut_tx = new_sut_tx - old_sut_tx
sut_tsc = new_sut_tsc - old_sut_tsc
+ if sut_tsc == 0 :
+ continue
+ duration = duration - 1
+ old_drop = new_drop
+ old_rx = new_rx
+ old_tx = new_tx
+ old_tsc = new_tsc
+ pps_req_tx = (tx+drop-rx)*tsc_hz*1.0/(tsc*1000000)
+ pps_tx = tx*tsc_hz*1.0/(tsc*1000000)
+ pps_rx = rx*tsc_hz*1.0/(tsc*1000000)
+ if sutsock!='none':
+ old_sut_tx = new_sut_tx
+ old_sut_rx = new_sut_rx
+ old_sut_tsc= new_sut_tsc
+ pps_sut_tx = sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000)
+ pps_sut_tx_str = '{:>7.3f}'.format(pps_sut_tx)
+ else:
+ pps_sut_tx = 0
+ pps_sut_tx_str = 'NO MEAS.'
+ if (tx == 0):
+ log.critical("TX = 0. Test interrupted since no packet has been sent.")
+ raise Exception("TX = 0")
+ tot_drop = tot_drop + tx - rx
+
if pps_sut_tx_str != 'NO_RESULTS':
- log.info('|{:>4}'.format(size+4)+" |" + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+' |' +' |' +' |'+ ' |'+ '{:>8.0f}'.format(lat_avg)+' us |'+'{:>8.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | '+ '{:>9.0f}'.format(rx) + ' | '+ '{:>7.0f}'.format(tx-rx) + ' | '+ '{:>7.0f}'.format(tot_drop) +' |'+lat_warning)
- log.info("+-----+------------------+-------------+-------------+-------------+-------------+-------------+-------------+-----------+-----------+---------+------------+")
+ # First second mpps are not valid as there is no alignement between time the generator is started and per seconds stats
+ if (first):
+ log.info('|{:>7}'.format(flow_number)+" |" + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+' |' +' |' +' |'+ ' |'+ '{:>8.0f}'.format(lat_avg)+' us |'+'{:>8.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | '+ '{:>9.0f}'.format(rx) + ' | '+ '{:>7.0f}'.format(tx-rx) + ' | '+'{:>7.0f}'.format(tot_drop) +' |'+lat_warning)
+ else:
+ log.info('|{:>7}'.format(flow_number)+" |" + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+ '{:>7.3f}'.format(pps_req_tx)+' Mpps |'+ '{:>7.3f}'.format(pps_tx) +' Mpps |' + '{:>7}'.format(pps_sut_tx_str) +' Mpps |'+ '{:>7.3f}'.format(pps_rx)+' Mpps |'+ '{:>8.0f}'.format(lat_avg)+' us |'+'{:>8.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | '+ '{:>9.0f}'.format(rx) + ' | '+ '{:>7.0f}'.format(tx-rx) + ' | '+ '{:>7.0f}'.format(tot_drop) +' |'+lat_warning)
+ writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Sent':tx,'Received':rx,'Lost':(tx-rx),'LostTotal':tot_drop})
+ if PushGateway:
+ URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
+ DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(flow_number,size+4,get_pps(speed,size),pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,tx,rx,(tx-rx),tot_drop)
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
+ response = requests.post(url=URL, data=DATA,headers=HEADERS)
+ else:
+ log.debug('|{:>7} | Speed 0 or close to 0'.format(str(size)))
+ first = 0
+ if (duration <= 0):
+ #Stop generating
+ gensock.stop(gencores)
+ time.sleep(sleep_time)
+ lat_min, lat_max, lat_avg, lat_used = gensock.lat_stats(latcores)
+ if lat_used < 0.95:
+ lat_warning = bcolors.FAIL + ' Potential latency accuracy problem: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
+ else:
+ lat_warning = ''
+ # Get statistics after some execution time
+ new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = gensock.core_stats(genstatcores,gentasks)
+ new_rx = new_rx - new_non_dp_rx
+ new_tx = new_tx - new_non_dp_tx
+ if sutsock!='none':
+ new_sut_rx, new_sut_non_dp_rx, new_sut_tx, new_sut_non_dp_tx, new_sut_drop, new_sut_tx_fail, new_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores,tasks)
+ new_sut_rx = new_sut_rx - new_sut_non_dp_rx
+ new_sut_tx = new_sut_tx - new_sut_non_dp_tx
+ drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
+ rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
+ tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
+ tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
+ tot_drop = tot_drop + tx - rx
+ if sutsock!='none':
+ sut_rx = new_sut_rx - old_sut_rx
+ sut_tx = new_sut_tx - old_sut_tx
+ sut_tsc = new_sut_tsc - old_sut_tsc
+ if pps_sut_tx_str != 'NO_RESULTS':
+ log.info('|{:>7}'.format(flow_number)+" |" + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps|'+' |' +' |' +' |'+ ' |'+ '{:>8.0f}'.format(lat_avg)+' us |'+'{:>8.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | '+ '{:>9.0f}'.format(rx) + ' | '+ '{:>7.0f}'.format(tx-rx) + ' | '+ '{:>7.0f}'.format(tot_drop) +' |'+lat_warning)
+ log.info("+--------+------------------+-------------+-------------+-------------+-------------+-------------+-------------+-----------+-----------+---------+------------+")
gensock.stop(latcores)
-def run_measure_swap(sutsock):
- log.info("+------------------------------------------------------------------------------------------------------+")
- log.info("| Measuring packets on SWAP system |")
- log.info("+-----------+------------+------------+------------+------------+------------+------------+------------+")
- log.info("| Time | RX | TX | non DP RX | non DP TX | TX - RX | nonDP TX-RX| DROP TOT |")
- log.info("+-----------+------------+------------+------------+------------+------------+------------+------------+")
- sutsock.reset_stats()
+def run_core_stats(socks):
+ fieldnames = ['PROXID','Time','Received','Sent','NonDPReceived','NonDPSent','Delta','NonDPDelta','Dropped']
+ writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
+ writer.writeheader()
+ log.info("+------------------------------------------------------------------------------------------------------------------+")
+ log.info("| Measuring core statistics on 1 or more PROX instances |")
+ log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ log.info("| PROX ID | Time | RX | TX | non DP RX | non DP TX | TX - RX | nonDP TX-RX| DROP TOT |")
+ log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ for sock in socks:
+ sock.reset_stats()
+ duration = float(runtime)
+ tot_drop = []
+ old_rx = []; old_non_dp_rx = []; old_tx = []; old_non_dp_tx = []; old_drop = []; old_tx_fail = []; old_tsc = []
+ new_rx = []; new_non_dp_rx = []; new_tx = []; new_non_dp_tx = []; new_drop = []; new_tx_fail = []; new_tsc = []
+ sockets_to_go = len (socks)
+ for i,sock in enumerate(socks,start=0):
+ tot_drop.append(0)
+ old_rx.append(0); old_non_dp_rx.append(0); old_tx.append(0); old_non_dp_tx.append(0); old_drop.append(0); old_tx_fail.append(0); old_tsc.append(0)
+ old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[-1], old_drop[-1], old_tx_fail[-1], old_tsc[-1], tsc_hz = sock.core_stats(cores[i],tasks)
+ new_rx.append(0); new_non_dp_rx.append(0); new_tx.append(0); new_non_dp_tx.append(0); new_drop.append(0); new_tx_fail.append(0); new_tsc.append(0)
+ while (duration > 0):
+ time.sleep(0.5)
+ # Get statistics after some execution time
+ for i,sock in enumerate(socks,start=0):
+ new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[i], new_drop[i], new_tx_fail[i], new_tsc[i], tsc_hz = sock.core_stats(cores[i],tasks)
+ drop = new_drop[i]-old_drop[i]
+ rx = new_rx[i] - old_rx[i]
+ tx = new_tx[i] - old_tx[i]
+ non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
+ non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
+ tsc = new_tsc[i] - old_tsc[i]
+ if tsc == 0 :
+ continue
+ sockets_to_go -= 1
+ old_drop[i] = new_drop[i]
+ old_rx[i] = new_rx[i]
+ old_tx[i] = new_tx[i]
+ old_non_dp_rx[i] = new_non_dp_rx[i]
+ old_non_dp_tx[i] = new_non_dp_tx[i]
+ old_tsc[i] = new_tsc[i]
+ tot_drop[i] = tot_drop[i] + tx - rx
+ log.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
+ writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NonDPReceived':non_dp_rx,'NonDPSent':non_dp_tx,'Delta':tx-rx,'NonDPDelta':non_dp_tx-non_dp_rx,'Dropped':tot_drop[i]})
+ if PushGateway:
+ URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env + str(i)
+ DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNonDPReceived {}\nNonDPSent {}\nDelta {}\nNonDPDelta {}\nDropped {}\n'.format(i,duration,rx,tx,non_dp_rx,non_dp_tx,tx-rx,non_dp_tx-non_dp_rx,tot_drop[i])
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
+ response = requests.post(url=URL, data=DATA,headers=HEADERS)
+ if sockets_to_go == 0:
+ duration = duration - 1
+ sockets_to_go = len (socks)
+ log.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+
+def run_port_stats(socks):
+ fieldnames = ['PROXID','Time','Received','Sent','NoMbufs','iErrMiss']
+ writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
+ writer.writeheader()
+ log.info("+---------------------------------------------------------------------------+")
+ log.info("| Measuring port statistics on 1 or more PROX instances |")
+ log.info("+-----------+-----------+------------+------------+------------+------------+")
+ log.info("| PROX ID | Time | RX | TX | no MBUFS | ierr&imiss |")
+ log.info("+-----------+-----------+------------+------------+------------+------------+")
+ for sock in socks:
+ sock.reset_stats()
duration = float(runtime)
- first = 1
- tot_drop = 0
- old_rx, old_non_dp_rx, old_tx, old_non_dp_tx, old_drop, old_tx_fail, old_tsc, tsc_hz = sutsock.core_stats(sutstatcores,tasks)
+ old_rx = []; old_tx = []; old_no_mbufs = []; old_errors = []; old_tsc = []
+ new_rx = []; new_tx = []; new_no_mbufs = []; new_errors = []; new_tsc = []
+ sockets_to_go = len (socks)
+ for i,sock in enumerate(socks,start=0):
+ old_rx.append(0); old_tx.append(0); old_no_mbufs.append(0); old_errors.append(0); old_tsc.append(0)
+ old_rx[-1], old_tx[-1], old_no_mbufs[-1], old_errors[-1], old_tsc[-1] = sock.multi_port_stats(ports[i])
+ new_rx.append(0); new_tx.append(0); new_no_mbufs.append(0); new_errors.append(0); new_tsc.append(0)
while (duration > 0):
time.sleep(0.5)
# Get statistics after some execution time
- new_rx, new_non_dp_rx, new_tx, new_non_dp_tx, new_drop, new_tx_fail, new_tsc, tsc_hz = sutsock.core_stats(sutstatcores,tasks)
- drop = new_drop-old_drop
- rx = new_rx - old_rx
- tx = new_tx - old_tx
- non_dp_rx = new_non_dp_rx - old_non_dp_rx
- non_dp_tx = new_non_dp_tx - old_non_dp_tx
- tsc = new_tsc - old_tsc
- if tsc == 0 :
- continue
- duration = duration - 1
- old_drop = new_drop
- old_rx = new_rx
- old_tx = new_tx
- old_non_dp_rx = new_non_dp_rx
- old_non_dp_tx = new_non_dp_tx
- old_tsc = new_tsc
- tot_drop = tot_drop + tx - rx
-
- log.info('|{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop) +' |')
- log.info("+------------------------------------------------------------------------------------------------------+")
-
-
-def run_irqtest(sock):
+ for i,sock in enumerate(socks,start=0):
+ new_rx[i], new_tx[i], new_no_mbufs[i], new_errors[i], new_tsc[i] = sock.multi_port_stats(ports[i])
+ rx = new_rx[i] - old_rx[i]
+ tx = new_tx[i] - old_tx[i]
+ no_mbufs = new_no_mbufs[i] - old_no_mbufs[i]
+ errors = new_errors[i] - old_errors[i]
+ tsc = new_tsc[i] - old_tsc[i]
+ if tsc == 0 :
+ continue
+ sockets_to_go -= 1
+ old_rx[i] = new_rx[i]
+ old_tx[i] = new_tx[i]
+ old_no_mbufs[i] = new_no_mbufs[i]
+ old_errors[i] = new_errors[i]
+ old_tsc[i] = new_tsc[i]
+ log.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
+ writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NoMbufs':no_mbufs,'iErrMiss':errors})
+ if PushGateway:
+ URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env + str(i)
+ DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNoMbufs {}\niErrMiss {}\n'.format(i,duration,rx,tx,no_mbufs,errors)
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
+ response = requests.post(url=URL, data=DATA,headers=HEADERS)
+ if sockets_to_go == 0:
+ duration = duration - 1
+ sockets_to_go = len (socks)
+ log.info("+-----------+-----------+------------+------------+------------+------------+")
+
+def run_irqtest(socks):
log.info("+----------------------------------------------------------------------------------------------------------------------------")
log.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic ")
log.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and ")
log.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was ")
log.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout ")
- log.info("| the duration of the test. This is to avoid rounding errors in the case of 0.0 ")
+ log.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 ")
log.info("+----------------------------------------------------------------------------------------------------------------------------")
sys.stdout.flush()
- buckets=sock.show_irq_buckets(1)
- print('Measurement ongoing ... ',end='\r')
- sock.stop(irqcores)
- old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(irqcores)+1)]
- irq = [[0 for x in range(len(buckets)+1)] for y in range(len(irqcores)+1)]
- irq[0][0] = 'bucket us'
- for j,bucket in enumerate(buckets,start=1):
- irq[0][j] = '<'+ bucket
- irq[0][-1] = '>'+ buckets [-2]
- sock.start(irqcores)
- time.sleep(2)
- for j,bucket in enumerate(buckets,start=1):
- for i,irqcore in enumerate(irqcores,start=1):
- old_irq[i][j] = sock.irq_stats(irqcore,j-1)
- time.sleep(float(runtime))
- sock.stop(irqcores)
- for i,irqcore in enumerate(irqcores,start=1):
- irq[i][0]='core %s '%irqcore
+ for sock_index,sock in enumerate(socks,start=0):
+ buckets=socks[sock_index].show_irq_buckets(1)
+ print('Measurement ongoing ... ',end='\r')
+ socks[sock_index].stop(cores[sock_index])
+ old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(cores[sock_index])+1)]
+ irq = [[0 for x in range(len(buckets)+1)] for y in range(len(cores[sock_index])+1)]
+ irq[0][0] = 'bucket us'
for j,bucket in enumerate(buckets,start=1):
- diff = sock.irq_stats(irqcore,j-1) - old_irq[i][j]
- if diff == 0:
- irq[i][j] = '0'
- else:
- irq[i][j] = str(round(diff/float(runtime), 2))
- for row in irq:
- log.info(''.join(['{:>12}'.format(item) for item in row]))
+ irq[0][j] = '<'+ bucket
+ irq[0][-1] = '>'+ buckets [-2]
+ socks[sock_index].start(cores[sock_index])
+ time.sleep(2)
+ for j,bucket in enumerate(buckets,start=1):
+ for i,irqcore in enumerate(cores[sock_index],start=1):
+ old_irq[i][j] = socks[sock_index].irq_stats(irqcore,j-1)
+ time.sleep(float(runtime))
+ socks[sock_index].stop(cores[sock_index])
+ for i,irqcore in enumerate(cores[sock_index],start=1):
+ irq[i][0]='core %s '%irqcore
+ for j,bucket in enumerate(buckets,start=1):
+ diff = socks[sock_index].irq_stats(irqcore,j-1) - old_irq[i][j]
+ if diff == 0:
+ irq[i][j] = '0'
+ else:
+ irq[i][j] = str(round(diff/float(runtime), 2))
+ log.info('Results for PROX instance %s'%sock_index)
+ for row in irq:
+ log.info(''.join(['{:>12}'.format(item) for item in row]))
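
The per-bucket numbers reported above are simply (end counter - start counter) / runtime, i.e. interrupts per second for each duration bucket. A small worked example with made-up counters (illustrative only):

runtime = 10.0                                  # hypothetical test duration in seconds
before = {'<1': 120,  '<5': 3, '>50': 0}        # hypothetical bucket counters at the start
after  = {'<1': 1120, '<5': 4, '>50': 0}        # the same counters after the run
rates = {b: round((after[b] - before[b]) / runtime, 2) for b in before}
print(rates)                                    # e.g. {'<1': 100.0, '<5': 0.1, '>50': 0.0}
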
def run_impairtest(gensock,sutsock):
+ fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Dropped','DropRate']
+ writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
+ writer.writeheader()
size=PACKETSIZE-4
log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| Generator is sending UDP (1 flow) packets ("+ '{:>5}'.format(size+4) +" bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test |")
+ log.info("| Generator is sending UDP ("+'{:>5}'.format(FLOWSIZE)+" flow) packets ("+ '{:>5}'.format(size+4) +" bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test |")
log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
log.info("| Test | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
@@ -607,9 +702,12 @@ def run_impairtest(gensock,sutsock):
gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
# This will only work when sending UDP packets. For different protocols and ethernet types, we would need a different calculation
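
The two offsets above follow from the standard Ethernet/IPv4/UDP layout; a worked example, assuming a hypothetical 64-byte frame:

PACKETSIZE = 64              # hypothetical frame size on the wire, including the 4-byte FCS
size = PACKETSIZE - 4        # frame size programmed into the generator (the NIC appends the FCS)
ip_total_length = size - 14  # 46: strip the 14-byte Ethernet header -> IPv4 "total length" field
udp_length = size - 34       # 26: additionally strip the 20-byte IPv4 header -> UDP "length" field
# Offset 16 = 14 (Ethernet header) + 2 (position of "total length" inside the IPv4 header)
# Offset 38 = 14 (Ethernet) + 20 (IPv4) + 4 (position of "length" inside the UDP header)
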
+ source_port,destination_port = flows[FLOWSIZE]
+ gensock.set_random(gencores,0,34,source_port,2)
+ gensock.set_random(gencores,0,36,destination_port,2)
gensock.start(latcores)
speed = STARTSPEED
- gensock.speed(speed / len(gencores), gencores)
+ gensock.speed(speed / len(gencores) / len(gentasks), gencores, gentasks)
while True:
attempts += 1
print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
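
With the change above (and the matching one in run_warmuptest() below), the requested aggregate speed is split evenly over every generator core/task combination. A quick worked example with hypothetical values:

speed = 10.0               # hypothetical aggregate rate, in percent of line rate
gencores = [1, 2]          # hypothetical generator cores
gentasks = {'0', '1'}      # generator tasks detected in the PROX cfg
per_core_task = speed / len(gencores) / len(gentasks)
print(per_core_task)       # 2.5 -> 4 core/task pairs x 2.5% = 10% aggregate
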
@@ -623,14 +721,19 @@ def run_impairtest(gensock,sutsock):
else:
lat_warning = ''
log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% |'+lat_warning)
- writer.writerow({'flow':'1','size':(size+4),'endspeed':speed,'endspeedpps':get_pps(speed,size),'endpps_req_tx':pps_req_tx,'endpps_tx':pps_tx,'endpps_sut_tx_str':pps_sut_tx_str,'endpps_rx':pps_rx,'endlat_avg':lat_avg,'endlat_max':lat_max,'endabs_dropped':abs_dropped,'enddrop_rate':drop_rate})
+ writer.writerow({'Flows':FLOWSIZE,'PacketSize':(size+4),'RequestedPPS':get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx_str,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Dropped':abs_dropped,'DropRate':drop_rate})
+ if PushGateway:
+ URL = PushGateway + '/metrics/job/' + TestName + '/instance/' + env
+ DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nDropped {}\nDropRate {}\n'.format(FLOWSIZE,size+4,get_pps(speed,size),pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max,abs_dropped,drop_rate)
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
+ response = requests.post(url=URL, data=DATA,headers=HEADERS)
def run_warmuptest(gensock):
# Running at low speed to make sure the ARP messages can get through.
# If we don't, the ARP messages could be dropped by an overloaded switch and the test would not give proper results
# Note however that if the test steps run for a very long time, the ARP entries would expire in the switch.
# PROX will send a new ARP request every second so chances are very low that they will all fail to get through
- gensock.speed(WARMUPSPEED / len(gencores), gencores)
+ gensock.speed(WARMUPSPEED / len(gencores) /len (gentasks), gencores, gentasks)
size=PACKETSIZE-4
gensock.set_size(gencores,0,size) # This is setting the frame size
gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
@@ -675,10 +778,10 @@ flows={\
8192: ['1000000000XXXXXX','100000000XXXXXXX'],\
16384: ['100000000XXXXXXX','100000000XXXXXXX'],\
32768: ['100000000XXXXXXX','10000000XXXXXXXX'],\
-65535: ['10000000XXXXXXXX','10000000XXXXXXXX'],\
+65536: ['10000000XXXXXXXX','10000000XXXXXXXX'],\
131072: ['10000000XXXXXXXX','1000000XXXXXXXXX'],\
262144: ['1000000XXXXXXXXX','1000000XXXXXXXXX'],\
-524280: ['1000000XXXXXXXXX','100000XXXXXXXXXX'],\
+524288: ['1000000XXXXXXXXX','100000XXXXXXXXXX'],\
1048576:['100000XXXXXXXXXX','100000XXXXXXXXXX'],}
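
Each entry maps a flow count to a pair of source/destination port templates for gensock.set_random(), where every 'X' appears to be a randomized bit; the flow count is therefore 2 to the power of the total number of 'X' characters in the two masks, which is why the keys must be exact powers of two (hence the 65536 and 524288 corrections above). A small sanity check over the table (illustrative; assumes every entry follows the same X-bit convention):

for n_flows, (src_mask, dst_mask) in sorted(flows.items()):
    random_bits = src_mask.count('X') + dst_mask.count('X')
    assert n_flows == 2 ** random_bits, (n_flows, random_bits)
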
clients =[]
socks =[]
@@ -693,17 +796,27 @@ prox_launch_exit =[]
auto_start =[]
mach_type =[]
sock_type =[]
+cores = []
+ports = []
+tasks = {}
data_file = 'RUN{}.{}.csv'.format(env,test_file)
data_csv_file = open(data_file,'w')
testconfig = ConfigParser.RawConfigParser()
testconfig.read(test_file)
required_number_of_test_machines = testconfig.get('DEFAULT', 'total_number_of_test_machines')
+TestName = testconfig.get('DEFAULT', 'name')
+if testconfig.has_option('DEFAULT', 'PushGateway'):
+ PushGateway = testconfig.get('DEFAULT', 'PushGateway')
+ log.info('Measurements will be pushed to %s'%PushGateway)
+else:
+ PushGateway = None
config = ConfigParser.RawConfigParser()
config.read(env)
machine_map = ConfigParser.RawConfigParser()
machine_map.read(machine_map_file)
key = config.get('ssh', 'key')
+user = config.get('ssh', 'user')
total_number_of_machines = config.get('rapid', 'total_number_of_machines')
if int(required_number_of_test_machines) > int(total_number_of_machines):
log.exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
@@ -722,14 +835,30 @@ for vm in range(1, int(required_number_of_test_machines)+1):
if prox_socket[vm-1]:
prox_launch_exit.append(testconfig.getboolean('TestM%d'%vm, 'prox_launch_exit'))
config_file.append(testconfig.get('TestM%d'%vm, 'config_file'))
+ # Looking for all task definitions in the PROX cfg files. Constructing the set of all tasks used
+ textfile = open (config_file[-1], 'r')
+ filetext = textfile.read()
+ textfile.close()
+ tasks_for_this_cfg = set(re.findall("task\s*=\s*(\d+)",filetext))
with open('{}_{}_parameters{}.lua'.format(env,test_file,vm), "w") as f:
f.write('name="%s"\n'% testconfig.get('TestM%d'%vm, 'name'))
f.write('local_ip="%s"\n'% vmDPIP[machine_index[vm-1]])
f.write('local_hex_ip="%s"\n'% hexDPIP[machine_index[vm-1]])
- if re.match('(l2){0,1}gen(_bare){0,1}\.cfg',config_file[-1]):
+ if testconfig.has_option('TestM%d'%vm, 'cores'):
+ cores.append(ast.literal_eval(testconfig.get('TestM%d'%vm, 'cores')))
+ f.write('cores="%s"\n'% ','.join(map(str, cores[-1])))
+ else:
+ cores.append(None)
+ if testconfig.has_option('TestM%d'%vm, 'ports'):
+ ports.append(ast.literal_eval(testconfig.get('TestM%d'%vm, 'ports')))
+ f.write('ports="%s"\n'% ','.join(map(str, ports[-1])))
+ else:
+ ports.append(None)
+ if re.match('(l2){0,1}gen(_bare){0,1}.*\.cfg',config_file[-1]):
gencores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'gencores'))
latcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'latcores'))
genstatcores = gencores + latcores
+ gentasks = tasks_for_this_cfg
auto_start.append(False)
mach_type.append('gen')
f.write('gencores="%s"\n'% ','.join(map(str, gencores)))
@@ -738,10 +867,11 @@ for vm in range(1, int(required_number_of_test_machines)+1):
f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
- elif re.match('(l2){0,1}gen_gw\.cfg',config_file[-1]):
+ elif re.match('(l2){0,1}gen_gw.*\.cfg',config_file[-1]):
gencores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'gencores'))
latcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'latcores'))
genstatcores = gencores + latcores
+ gentasks = tasks_for_this_cfg
auto_start.append(False)
mach_type.append('gen')
f.write('gencores="%s"\n'% ','.join(map(str, gencores)))
@@ -753,34 +883,27 @@ for vm in range(1, int(required_number_of_test_machines)+1):
f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
- elif re.match('(l2){0,1}swap.*\.cfg',config_file[-1]):
- sutstatcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'swapcores'))
+ elif re.match('(l2){0,1}swap.*\.cfg',config_file[-1]):
+ sutstatcores = cores[-1]
auto_start.append(True)
mach_type.append('sut')
- f.write('swapcores="%s"\n'% ','.join(map(str, sutstatcores)))
- elif config_file[-1] == 'secgw1.cfg':
+ elif re.match('secgw1.*\.cfg',config_file[-1]):
auto_start.append(True)
mach_type.append('none')
- f.write('secgwcores="%s"\n'% ','.join(map(str, ast.literal_eval(testconfig.get('TestM%d'%vm, 'secgwcores')))))
destVMindex = int(testconfig.get('TestM%d'%vm, 'dest_vm'))-1
f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
- elif config_file[-1] == 'secgw2.cfg':
- sutstatcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'secgwcores'))
+ elif re.match('secgw2.*\.cfg',config_file[-1]):
+ sutstatcores = cores[-1]
auto_start.append(True)
mach_type.append('sut')
- f.write('secgwcores="%s"\n'% ','.join(map(str, sutstatcores)))
- elif config_file[-1] == 'impair.cfg':
+ else:
auto_start.append(True)
mach_type.append('none')
- f.write('impaircores="%s"\n'% ','.join(map(str, ast.literal_eval(testconfig.get('TestM%d'%vm, 'impaircores')))))
- elif config_file[-1] == 'irq.cfg':
- irqcores = ast.literal_eval(testconfig.get('TestM%d'%vm, 'irqcores'))
- auto_start.append(False)
- mach_type.append('irq')
- f.write('irqcores="%s"\n'% ','.join(map(str, irqcores)))
f.close
+ tasks = tasks_for_this_cfg.union(tasks)
+log.debug("Tasks detected in all PROX config files %r"%tasks)
#####################################################################################
def exit_handler():
log.debug ('exit cleanup')
@@ -796,35 +919,48 @@ atexit.register(exit_handler)
for vm in range(0, int(required_number_of_test_machines)):
if prox_socket[vm]:
- clients.append(prox_ctrl(vmAdminIP[machine_index[vm]], key+'.pem','root'))
+ clients.append(prox_ctrl(vmAdminIP[machine_index[vm]], key,user))
connect_client(clients[-1])
# Creating script to bind the right network interface to the poll mode driver
devbindfile = '{}_{}_devbindvm{}.sh'.format(env,test_file, vm+1)
- with open("devbind.sh") as f:
- newText=f.read().replace('MACADDRESS', vmDPmac[machine_index[vm]])
- with open(devbindfile, "w") as f:
- f.write(newText)
+ with open(devbindfile, "w") as f:
+ newText= 'link="$(ip -o link | grep '+vmDPmac[machine_index[vm]]+' |cut -d":" -f 2)"\n'
+ f.write(newText)
+ newText= 'if [ -n "$link" ];\n'
+ f.write(newText)
+ newText= 'then\n'
+ f.write(newText)
+ newText= ' echo Need to bind\n'
+ f.write(newText)
+ newText= ' sudo ' + rundir + '/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $('+rundir+'/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)\n'
+ f.write(newText)
+ newText= 'else\n'
+ f.write(newText)
+ newText= ' echo Assuming port is already bound to DPDK\n'
+ f.write(newText)
+ newText= 'fi\n'
+ f.write(newText)
+ newText= 'exit 0\n'
+ f.write(newText)
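
The devbind script is emitted through repeated newText/f.write pairs; an equivalent and somewhat more compact way to produce the same file would be a single template (illustrative refactor only, not what the patch does):

DEVBIND_TEMPLATE = (
    'link="$(ip -o link | grep {mac} |cut -d":" -f 2)"\n'
    'if [ -n "$link" ];\n'
    'then\n'
    ' echo Need to bind\n'
    ' sudo {rundir}/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio '
    '$({rundir}/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)\n'
    'else\n'
    ' echo Assuming port is already bound to DPDK\n'
    'fi\n'
    'exit 0\n')

with open(devbindfile, "w") as f:
    f.write(DEVBIND_TEMPLATE.format(mac=vmDPmac[machine_index[vm]], rundir=rundir))
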
st = os.stat(devbindfile)
os.chmod(devbindfile, st.st_mode | stat.S_IEXEC)
- clients[-1].scp_put('./%s'%devbindfile, '/root/devbind.sh')
- cmd = '/root/devbind.sh'
+ clients[-1].scp_put('./%s'%devbindfile, rundir+'/devbind.sh')
+ cmd = 'sudo ' + rundir+ '/devbind.sh'
clients[-1].run_cmd(cmd)
log.debug("devbind.sh running on VM%d"%(vm+1))
- clients[-1].scp_put('./%s'%config_file[vm], '/root/%s'%config_file[vm])
- clients[-1].scp_put('./{}_{}_parameters{}.lua'.format(env,test_file, vm+1), '/root/parameters.lua')
+ clients[-1].scp_put('./%s'%config_file[vm], rundir+'/%s'%config_file[vm])
+ clients[-1].scp_put('./{}_{}_parameters{}.lua'.format(env,test_file, vm+1), rundir + '/parameters.lua')
if not configonly:
if prox_launch_exit[vm]:
log.debug("Starting PROX on VM%d"%(vm+1))
if auto_start[vm]:
- cmd = '/root/prox/build/prox -t -o cli -f /root/%s'%config_file[vm]
+ cmd = 'sudo ' +rundir + '/prox/build/prox -t -o cli -f ' + rundir + '/%s'%config_file[vm]
else:
- cmd = '/root/prox/build/prox -e -t -o cli -f /root/%s'%config_file[vm]
+ cmd = 'sudo ' +rundir + '/prox/build/prox -e -t -o cli -f ' + rundir + '/%s'%config_file[vm]
clients[-1].fork_cmd(cmd, 'PROX Testing on TestM%d'%(vm+1))
socks_control.append(prox_launch_exit[vm])
socks.append(connect_socket(clients[-1]))
sock_type.append(mach_type[vm])
-socks.append('none')
-socks_control.append(False)
def get_BinarySearchParams() :
global DROP_RATE_TRESHOLD
@@ -846,38 +982,35 @@ if configonly:
####################################################
gensock_index = sock_type.index('gen') if 'gen' in sock_type else -1
sutsock_index = sock_type.index('sut') if 'sut' in sock_type else -1
-irqsock_index = sock_type.index('irq') if 'irq' in sock_type else -1
number_of_tests = testconfig.get('DEFAULT', 'number_of_tests')
-with data_csv_file:
- fieldnames = ['flow','size','endspeed','endspeedpps','endpps_req_tx','endpps_tx','endpps_sut_tx_str','endpps_rx','endlat_avg','endlat_max','endabs_dropped','enddrop_rate']
- writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- writer.writeheader()
- for test_nr in range(1, int(number_of_tests)+1):
- test=testconfig.get('test%d'%test_nr,'test')
- tasks= ast.literal_eval(testconfig.get('test%d'%test_nr, 'tasks'))
- log.info(test)
- if test == 'flowsizetest':
- get_BinarySearchParams()
- packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
- flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
- run_flow_size_test(socks[gensock_index],socks[sutsock_index])
- elif test == 'fixed_rate':
- packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
- STARTSPEED = float(testconfig.get('test%d'%test_nr, 'speed'))
- run_fixed_rate(socks[gensock_index],socks[sutsock_index])
- elif test == 'measureswap':
- #packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
- run_measure_swap(socks[sutsock_index])
- elif test == 'impairtest':
- get_BinarySearchParams()
- PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
- run_impairtest(socks[gensock_index],socks[sutsock_index])
- elif test == 'irqtest':
- run_irqtest(socks[irqsock_index])
- elif test == 'warmuptest':
- PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
- FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
- WARMUPSPEED = int(testconfig.get('test%d'%test_nr, 'warmupspeed'))
- WARMUPTIME = int(testconfig.get('test%d'%test_nr, 'warmuptime'))
- run_warmuptest(socks[gensock_index])
+for test_nr in range(1, int(number_of_tests)+1):
+ test=testconfig.get('test%d'%test_nr,'test')
+ log.info(test)
+ if test == 'flowsizetest':
+ get_BinarySearchParams()
+ packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
+ flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
+ run_flow_size_test(socks[gensock_index],socks[sutsock_index])
+ elif test == 'fixed_rate':
+ packet_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'packetsizes'))
+ flow_size_list = ast.literal_eval(testconfig.get('test%d'%test_nr, 'flows'))
+ STARTSPEED = float(testconfig.get('test%d'%test_nr, 'speed'))
+ run_fixed_rate(socks[gensock_index],socks[sutsock_index])
+ elif test == 'corestats':
+ run_core_stats(socks)
+ elif test == 'portstats':
+ run_port_stats(socks)
+ elif test == 'impairtest':
+ get_BinarySearchParams()
+ PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
+ FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
+ run_impairtest(socks[gensock_index],socks[sutsock_index])
+ elif test == 'irqtest':
+ run_irqtest(socks)
+ elif test == 'warmuptest':
+ PACKETSIZE = int(testconfig.get('test%d'%test_nr, 'packetsize'))
+ FLOWSIZE = int(testconfig.get('test%d'%test_nr, 'flowsize'))
+ WARMUPSPEED = int(testconfig.get('test%d'%test_nr, 'warmupspeed'))
+ WARMUPTIME = int(testconfig.get('test%d'%test_nr, 'warmuptime'))
+ run_warmuptest(socks[gensock_index])
####################################################
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test
index d3693f29..f4b4a8d7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test
@@ -20,7 +20,6 @@ number_of_tests = 2
total_number_of_test_machines = 3
prox_socket = true
prox_launch_exit = true
-tasks=[0]
[TestM1]
name = Generator
@@ -34,12 +33,12 @@ latcores = [3]
name = GW1
config_file = secgw1.cfg
dest_vm = 3
-secgwcores = [1]
+cores = [1]
[TestM3]
name = GW2
config_file = secgw2.cfg
-secgwcores = [1]
+cores = [1]
[BinarySearchParams]
drop_rate_threshold = 0.1
@@ -59,5 +58,5 @@ warmuptime=2
test=flowsizetest
packetsizes=[64]
# each number of flows in the list needs to be a power of 2, max 2^20
-# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65535, 131072, 262144, 524280, 1048576
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
flows=[512]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg
index 301a138e..0ffafb1b 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg
@@ -43,7 +43,7 @@ name=${name}
[core 0]
mode=master
-[core $secgwcores]
+[core $cores]
name=esp_enc
task=0
mode=esp_enc
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg
index 9c970e83..dacbf08b 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg
@@ -42,7 +42,7 @@ name=${name}
[core 0]
mode=master
-[core $secgwcores]
+[core $cores]
name=esp_dec
task=0
mode=esp_dec
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg
index 04b37c3d..aeaa9db8 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg
@@ -27,7 +27,7 @@ mac=hardware
vlan=yes
[defaults]
-mempool size=8K
+mempool size=2K
[global]
name=${name}
@@ -35,7 +35,7 @@ name=${name}
[core 0]
mode=master
-[core $swapcores]
+[core $cores]
name=swap
task=0
mode=swap