summaryrefslogtreecommitdiffstats
path: root/VNFs/DPPD-PROX/helper-scripts
diff options
context:
space:
mode:
Diffstat (limited to 'VNFs/DPPD-PROX/helper-scripts')
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/README128
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py412
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test56
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py248
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms31
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py574
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test59
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile119
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/README183
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s94
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/centos.json52
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service12
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh78
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/config_file8
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg81
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg47
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg)40
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg)39
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg78
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg)27
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.cfg)15
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2gen.cfg)31
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg59
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2swap.cfg)19
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg)13
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg)14
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg10
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg)21
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg50
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg47
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py64
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py53
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh305
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh12
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh97
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml105
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua77
-rw-r--r--[-rwxr-xr-x]VNFs/DPPD-PROX/helper-scripts/rapid/machine.map (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/devbind.sh)29
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml168
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml10
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml33
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile42
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build101
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c70
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py293
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml94
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml82
-rw-r--r--[-rwxr-xr-x]VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh)22
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py93
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py90
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py36
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py326
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py181
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore23
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml26
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml36
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml8
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py108
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py106
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py236
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py264
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py140
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py259
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py193
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py83
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key49
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub1
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py164
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py441
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py52
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py56
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py199
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg16
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/setup.py9
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh31
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py177
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/start.sh43
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/README194
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test54
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test61
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test51
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test65
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test73
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test63
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test31
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test70
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test)57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test64
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test65
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test37
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2zeroloss.test)43
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test60
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test)53
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test32
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test60
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile28
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml13
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml54
103 files changed, 7460 insertions, 1699 deletions
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README
deleted file mode 100644
index 2dac5b69..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README
+++ /dev/null
@@ -1,128 +0,0 @@
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-rapid (Rapid Automated Performance Indication for Dataplane)
-************************************************************
-
-rapid is a set of files offering an easy way to do a sanity check of the
-dataplane performance of an OpenStack environment.
-
-Copy the files in a directory on a machine that can run the OpenStack CLI
-commands and that can reach the OpenStack public network. Also create a qcow2
-image in the same directory with the following characteristics:
-* Name of the qcow2 file should be: rapidVM.qcow2
- This default name can be overruled on the rapid command line (--image_file)
-* Should have DPDK and PROX installed. PROX should be in /root/prox/ directory
-* Image should have cloud-init installed
-* /mnt/huge should exist to support a command that is executed at startup of the VM: 'mount -t hugetlbfs nodev /mnt/huge'
-* Compile prox with 'make crc=soft'. This is a workaround for some cases where the crc calculation offload is not working as expected.
-* Compile dpdk to support AESN-NI Multi Buffer Crypto Poll Mode Driver: http://dpdk.org/doc/guides/cryptodevs/aesni_mb.html
-
-Source the openrc file of the OpenStack environment so that the OpenStack CLI
-commands can be run:
- # source openrc
-Now you can run the createrapid.py file. Use help for more info on the usage:
- # ./createrapid.py --help
-
-createrapid.py will use the OpenStack CLI to create the flavor, key-pair, network, image,
-servers, ...
-It will create a <STACK>.env file containing all info that will be used by runrapid.py
-to actually run the tests. Logging can be found in the CREATE<STACK>.log file
-You can use floating IP addresses by specifying the floating IP network
---floating_network NETWORK
-or directly connect throught the INTERNAL_NETWORK by using the following parameter:
---floating_network NO
-
-Now you can run the runrapid.py file. Use help for more info on the usage:
- # ./runrapid.py --help
-The script will connect to all machines that have been instantiated and it will launch
-PROX in all machines. This will be done through the admin IP assigned to the machines.
-Once that is done it will connect to the PROX tcp socket and start sending
-commands to run the actual test.
-It will print test results on the screen while running.
-The actual test that is running is described in <TEST>.test.
-
-Notes about prox_user_data.sh script:
-- The script contains commands that will be executed using cloud-init at
- startup of the VMs.
-- The script also assumes some specific DPDK directory and tools which might
- change over different DPDK release. This release has been tested with DPDK-17.02.
-- huge pages are allocated for DPDK on node 0 (hard-coded) in the VM.
-
-Note on using SRIOV ports:
-Before running createrapid, make sure the network, subnet and ports are already created
-This can be done as follows (change the parameters to your needs):
-openstack network create --share --external --provider-network-type flat --provider-physical-network physnet2 fast-network
-openstack subnet create --network fast-network --subnet-range 20.20.20.0/24 --gateway none fast-subnet
-openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port1
-openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port2
-openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port3
-Make sure to use the network and subnet in the createrapid parameters list. Port1, Port2 and Port3
-are being used in the *.env file.
-
-Note when doing tests using the gateway functionality on OVS:
-When a GW VM is sending packets on behalf of another VM (e.g. the generator), we need to make sure the OVS
-will allow those packets to go through. Therefore you need to the IP address of the generator in the
-"allowed address pairs" of the GW VM.
-
-Note when doing tests using encryption on OVS:
-Your OVS configuration might block encrypted packets. To allow packets to go through,
-you can disable port_security. You can do this by using the following commands
-neutron port-update xxxxxx --no-security-groups
-neutron port-update xxxxxx --port_security_enabled=False
-
-An example of the env file generated by createrapid.py can be found below.
-Note that this file can be created manually in case the stack is created in a
-different way (not using the createrapid.py). This can be useful in case you are
-not using OpenStack as a VIM or when using special configurations that cannot be
-achieved using createrapid.py. Only the [Mx] sections are used as
-input for runrapid.py.
-[DEFAULT]
-admin_ip = none
-
-[M1]
-admin_ip = 192.168.4.130
-dp_ip = 10.10.10.6
-dp_mac = fa:16:3e:3c:1e:12
-
-[M2]
-admin_ip = 192.168.4.140
-dp_ip = 10.10.10.9
-dp_mac = fa:16:3e:2a:00:5d
-
-[M3]
-admin_ip = 192.168.4.138
-dp_ip = 10.10.10.11
-dp_mac = fa:16:3e:ae:fa:86
-
-[OpenStack]
-stack = rapid
-yaml = 3VMrapid.yaml
-key = prox
-flavor = prox_flavor
-image = rapidVM
-image_file = rapidVM.qcow2
-dataplane_network = dataplane-network
-subnet = dpdk-subnet
-subnet_cidr = 10.10.10.0/24
-internal_network = admin_internal_net
-floating_network = admin_floating_net
-
-[rapid]
-loglevel = DEBUG
-version = 17.10.25
-total_number_of_vms = 3
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py
deleted file mode 100755
index ffba5013..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/usr/bin/python
-
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-from __future__ import print_function
-
-import os
-import stat
-import sys
-import time
-import subprocess
-import getopt
-import re
-import logging
-from logging.handlers import RotatingFileHandler
-from logging import handlers
-from prox_ctrl import prox_ctrl
-import ConfigParser
-
-version="18.3.27"
-stack = "rapid" #Default string for stack. This is not an OpenStack Heat stack, just a group of VMs
-vms = "rapidVMs" #Default string for vms file
-key = "prox" # default name for kay
-image = "rapidVM" # default name for the image
-image_file = "rapidVM.qcow2"
-dataplane_network = "dataplane-network" # default name for the dataplane network
-subnet = "dpdk-subnet" #subnet for dataplane
-subnet_cidr="10.10.10.0/24" # cidr for dataplane
-internal_network="admin_internal_net"
-floating_network="admin_floating_net"
-loglevel="DEBUG" # sets log level for writing to file
-runtime=10 # time in seconds for 1 test run
-
-def usage():
- print("usage: createrapid [--version] [-v]")
- print(" [--stack STACK_NAME]")
- print(" [--vms VMS_FILE]")
- print(" [--key KEY_NAME]")
- print(" [--image IMAGE_NAME]")
- print(" [--image_file IMAGE_FILE]")
- print(" [--dataplane_network DP_NETWORK]")
- print(" [--subnet DP_SUBNET]")
- print(" [--subnet_cidr SUBNET_CIDR]")
- print(" [--internal_network ADMIN_NETWORK]")
- print(" [--floating_network ADMIN_NETWORK]")
- print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
- print(" [-h] [--help]")
- print("")
- print("Command-line interface to createrapid")
- print("")
- print("optional arguments:")
- print(" -v, --version Show program's version number and exit")
- print(" --stack STACK_NAME Specify a name for the stack. Default is %s."%stack)
- print(" --vms VMS_FILE Specify the vms file to be used. Default is %s.vms."%vms)
- print(" --key KEY_NAME Specify the key to be used. Default is %s."%key)
- print(" --image IMAGE_NAME Specify the image to be used. Default is %s."%image)
- print(" --image_file IMAGE_FILE Specify the image qcow2 file to be used. Default is %s."%image_file)
- print(" --dataplane_network NETWORK Specify the network name to be used for the dataplane. Default is %s."%dataplane_network)
- print(" --subnet DP_SUBNET Specify the subnet name to be used for the dataplane. Default is %s."%subnet)
- print(" --subnet_cidr SUBNET_CIDR Specify the subnet CIDR to be used for the dataplane. Default is %s."%subnet_cidr)
- print(" --internal_network NETWORK Specify the network name to be used for the control plane. Default is %s."%internal_network)
- print(" --floating_network NETWORK Specify the external floating ip network name. Default is %s. NO if no floating ip used."%floating_network)
- print(" --log Specify logging level for log file output, screen output level is hard coded")
- print(" -h, --help Show help message and exit.")
- print("")
-
-try:
- opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "vms=","stack=","key=","image=","image_file=","dataplane_network=","subnet=","subnet_cidr=","internal_network=","floating_network=","log="])
-except getopt.GetoptError as err:
- print("===========================================")
- print(str(err))
- print("===========================================")
- usage()
- sys.exit(2)
-if args:
- usage()
- sys.exit(2)
-for opt, arg in opts:
- if opt in ("-h", "--help"):
- usage()
- sys.exit()
- if opt in ("-v", "--version"):
- print("Rapid Automated Performance Indication for Dataplane "+version)
- sys.exit()
- if opt in ("--stack"):
- stack = arg
- print ("Using '"+stack+"' as name for the stack")
- elif opt in ("--vms"):
- vms = arg
- print ("Using Virtual Machines Description: "+vms)
- elif opt in ("--key"):
- key = arg
- print ("Using key: "+key)
- elif opt in ("--image"):
- image = arg
- print ("Using image: "+image)
- elif opt in ("--image_file"):
- image_file = arg
- print ("Using qcow2 file: "+image_file)
- elif opt in ("--dataplane_network"):
- dataplane_network = arg
- print ("Using dataplane network: "+ dataplane_network)
- elif opt in ("--subnet"):
- subnet = arg
- print ("Using dataplane subnet: "+ subnet)
- elif opt in ("--subnet_cidr"):
- subnet_cidr = arg
- print ("Using dataplane subnet: "+ subnet_cidr)
- elif opt in ("--internal_network"):
- internal_network = arg
- print ("Using control plane network: "+ internal_network)
- elif opt in ("--floating_network"):
- floating_network = arg
- print ("Using floating ip network: "+ floating_network)
- elif opt in ("--log"):
- loglevel = arg
- print ("Log level: "+ loglevel)
-
-
-# create formatters
-screen_formatter = logging.Formatter("%(message)s")
-file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-
-# get a top-level logger,
-# set its log level,
-# BUT PREVENT IT from propagating messages to the root logger
-#
-log = logging.getLogger()
-numeric_level = getattr(logging, loglevel.upper(), None)
-if not isinstance(numeric_level, int):
- raise ValueError('Invalid log level: %s' % loglevel)
-log.setLevel(numeric_level)
-log.propagate = 0
-
-# create a console handler
-# and set its log level to the command-line option
-#
-console_handler = logging.StreamHandler(sys.stdout)
-console_handler.setLevel(logging.INFO)
-console_handler.setFormatter(screen_formatter)
-
-# create a file handler
-# and set its log level to DEBUG
-#
-log_file = 'CREATE' +stack +'.log'
-file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
-#file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
-file_handler.setLevel(numeric_level)
-file_handler.setFormatter(file_formatter)
-
-# add handlers to the logger
-#
-log.addHandler(file_handler)
-log.addHandler(console_handler)
-
-# Check if log exists and should therefore be rolled
-needRoll = os.path.isfile(log_file)
-
-
-# This is a stale log, so roll it
-if needRoll:
- # Add timestamp
- log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
-
- # Roll over on application start
- log.handlers[0].doRollover()
-
-# Add timestamp
-log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
-
-log.debug("createrapid.py version: "+version)
-# Checking if the control network already exists, if not, stop the script
-log.debug("Checking control plane network: "+internal_network)
-cmd = 'openstack network show '+internal_network
-log.debug (cmd)
-cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
-NetworkExist = subprocess.check_output(cmd , shell=True).strip()
-if NetworkExist == 'ACTIVE':
- log.info("Control plane network ("+internal_network+") already active")
-else:
- log.exception("Control plane network " + internal_network + " not existing")
- raise Exception("Control plane network " + internal_network + " not existing")
-
-# Checking if the floating ip network already exists, if not, stop the script
-if floating_network <>'NO':
- log.debug("Checking floating ip network: "+floating_network)
- cmd = 'openstack network show '+floating_network
- log.debug (cmd)
- cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
- NetworkExist = subprocess.check_output(cmd , shell=True).strip()
- if NetworkExist == 'ACTIVE':
- log.info("Floating ip network ("+floating_network+") already active")
- else:
- log.exception("Floating ip network " + floating_network + " not existing")
- raise Exception("Floating ip network " + floating_network + " not existing")
-
-# Checking if the image already exists, if not create it
-log.debug("Checking image: "+image)
-cmd = 'openstack image show '+image
-log.debug(cmd)
-cmd = cmd +' |grep "status " | tr -s " " | cut -d" " -f 4'
-ImageExist = subprocess.check_output(cmd , shell=True).strip()
-if ImageExist == 'active':
- log.info("Image ("+image+") already available")
-else:
- log.info('Creating image ...')
- cmd = 'openstack image create --disk-format qcow2 --container-format bare --public --file ./'+image_file+ ' ' +image
- log.debug(cmd)
- cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
- ImageExist = subprocess.check_output(cmd , shell=True).strip()
- if ImageExist == 'active':
- log.info('Image created and active')
- cmd = 'openstack image set --property hw_vif_multiqueue_enabled="true" ' +image
-# subprocess.check_call(cmd , shell=True)
- else :
- log.exception("Failed to create image")
- raise Exception("Failed to create image")
-
-# Checking if the key already exists, if not create it
-log.debug("Checking key: "+key)
-cmd = 'openstack keypair show '+key
-log.debug (cmd)
-cmd = cmd + ' |grep "name " | tr -s " " | cut -d" " -f 4'
-KeyExist = subprocess.check_output(cmd , shell=True).strip()
-if KeyExist == key:
- log.info("Key ("+key+") already installed")
-else:
- log.info('Creating key ...')
- cmd = 'openstack keypair create '+ key + '>' +key+'.pem'
- log.debug(cmd)
- subprocess.check_call(cmd , shell=True)
- cmd = 'chmod 600 ' +key+'.pem'
- subprocess.check_call(cmd , shell=True)
- cmd = 'openstack keypair show '+key
- log.debug(cmd)
- cmd = cmd + ' |grep "name " | tr -s " " | cut -d" " -f 4'
- KeyExist = subprocess.check_output(cmd , shell=True).strip()
- if KeyExist == key:
- log.info("Key created")
- else :
- log.exception("Failed to create key: " + key)
- raise Exception("Failed to create key: " + key)
-
-
-# Checking if the dataplane network already exists, if not create it
-log.debug("Checking dataplane network: "+dataplane_network)
-cmd = 'openstack network show '+dataplane_network
-log.debug (cmd)
-cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
-NetworkExist = subprocess.check_output(cmd , shell=True).strip()
-if NetworkExist == 'ACTIVE':
- log.info("Dataplane network ("+dataplane_network+") already active")
-else:
- log.info('Creating dataplane network ...')
- cmd = 'openstack network create '+dataplane_network
- log.debug(cmd)
- cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
- NetworkExist = subprocess.check_output(cmd , shell=True).strip()
- if NetworkExist == 'ACTIVE':
- log.info("Dataplane network created")
- else :
- log.exception("Failed to create dataplane network: " + dataplane_network)
- raise Exception("Failed to create dataplane network: " + dataplane_network)
-
-# Checking if the dataplane subnet already exists, if not create it
-log.debug("Checking subnet: "+subnet)
-cmd = 'openstack subnet show '+ subnet
-log.debug (cmd)
-cmd = cmd +' |grep "name " | tr -s " " | cut -d"|" -f 3'
-SubnetExist = subprocess.check_output(cmd , shell=True).strip()
-if SubnetExist == subnet:
- log.info("Subnet (" +subnet+ ") already exists")
-else:
- log.info('Creating subnet ...')
- cmd = 'openstack subnet create --network ' + dataplane_network + ' --subnet-range ' + subnet_cidr +' --gateway none ' + subnet
- log.debug(cmd)
- cmd = cmd + ' |grep "name " | tr -s " " | cut -d"|" -f 3'
- SubnetExist = subprocess.check_output(cmd , shell=True).strip()
- if SubnetExist == subnet:
- log.info("Subnet created")
- else :
- log.exception("Failed to create subnet: " + subnet)
- raise Exception("Failed to create subnet: " + subnet)
-
-
-config = ConfigParser.RawConfigParser()
-vmconfig = ConfigParser.RawConfigParser()
-vmconfig.read(vms+'.vms')
-total_number_of_VMs = vmconfig.get('DEFAULT', 'total_number_of_vms')
-for vm in range(1, int(total_number_of_VMs)+1):
- flavor_info = vmconfig.get('VM%d'%vm, 'flavor_info')
- flavor_meta_data = vmconfig.get('VM%d'%vm, 'flavor_meta_data')
- boot_info = vmconfig.get('VM%d'%vm, 'boot_info')
- SRIOV_port = vmconfig.get('VM%d'%vm, 'SRIOV_port')
- server_name = '%s-VM%d'%(stack,vm)
- flavor_name = '%s-VM%d-flavor'%(stack,vm)
- log.debug("Checking server: "+server_name)
- cmd = 'openstack server show '+server_name
- log.debug (cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- ServerExist = subprocess.check_output(cmd , shell=True).strip()
- if ServerExist == server_name:
- log.info("Server ("+server_name+") already active")
- else:
- # Checking if the flavor already exists, if not create it
- log.debug("Checking flavor: "+flavor_name)
- cmd = 'openstack flavor show '+flavor_name
- log.debug (cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- FlavorExist = subprocess.check_output(cmd , shell=True).strip()
- if FlavorExist == flavor_name:
- log.info("Flavor ("+flavor_name+") already installed")
- else:
- log.info('Creating flavor ...')
- cmd = 'openstack flavor create %s %s'%(flavor_name,flavor_info)
- log.debug(cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- FlavorExist = subprocess.check_output(cmd , shell=True).strip()
- if FlavorExist == flavor_name:
- cmd = 'openstack flavor set %s %s'%(flavor_name, flavor_meta_data)
- log.debug(cmd)
- subprocess.check_call(cmd , shell=True)
- log.info("Flavor created")
- else :
- log.exception("Failed to create flavor: " + flavor_name)
- raise Exception("Failed to create flavor: " + flavor_name)
- if SRIOV_port == 'NO':
- nic_info = '--nic net-id=%s --nic net-id=%s'%(internal_network,dataplane_network)
- else:
- nic_info = '--nic net-id=%s'%(internal_network)
- for port in SRIOV_port.split(','):
- nic_info = nic_info + ' --nic port-id=%s'%(port)
- if vm==int(total_number_of_VMs):
- # For the last server, we want to wait for the server creation to complete, so the next operations will succeeed (e.g. IP allocation)
- # Note that this waiting is not bullet proof. Imagine, we loop through all the VMs, and the last VM was already running, while the previous
- # VMs still needed to be created. Or the previous server creations take much longer than the last one.
- # In that case, we might be to fast when we query for the IP & MAC addresses.
- wait = ' --wait '
- else:
- wait = ' '
- log.info("Creating server...")
- cmd = 'openstack server create --flavor %s --key-name %s --image %s %s %s%s%s'%(flavor_name,key,image,nic_info,boot_info,wait,server_name)
- log.debug(cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- ServerExist = subprocess.check_output(cmd , shell=True).strip()
- if floating_network <> 'NO':
- log.info('Creating floating IP ...')
- cmd = 'openstack floating ip create ' + floating_network
- log.debug(cmd)
- cmd = cmd + ' |grep "floating_ip_address " | tr -s " " | cut -d"|" -f 3'
- vmAdminIP = subprocess.check_output(cmd , shell=True).strip()
- log.info('Associating floating IP ...')
- cmd = 'openstack server add floating ip %s %s'%(server_name,vmAdminIP)
- log.debug(cmd)
- output = subprocess.check_output(cmd , shell=True).strip()
- print (output)
-for vm in range(1, int(total_number_of_VMs)+1):
- server_name = '%s-VM%d'%(stack,vm)
- cmd = 'openstack server show %s'%(server_name)
- log.debug(cmd)
- output = subprocess.check_output(cmd , shell=True).strip()
- searchString = '.*%s.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)' %(dataplane_network)
- matchObj = re.search(searchString, output, re.DOTALL)
- vmDPIP = matchObj.group(1)
- searchString = '.*%s=([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+),*\s*([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)*' %(internal_network)
- matchObj = re.search(searchString, output, re.DOTALL)
- vmAdminIP = matchObj.group(2)
- if vmAdminIP == None:
- vmAdminIP = matchObj.group(1)
- cmd = 'openstack port list |grep %s | tr -s " " | cut -d"|" -f 4'%(vmDPIP)
- log.debug(cmd)
- vmDPmac = subprocess.check_output(cmd , shell=True).strip()
- config.add_section('M%d'%vm)
- config.set('M%d'%vm, 'name', server_name)
- config.set('M%d'%vm, 'admin_ip', vmAdminIP)
- config.set('M%d'%vm, 'dp_ip', vmDPIP)
- config.set('M%d'%vm, 'dp_mac', vmDPmac)
- log.info('%s: (admin IP: %s), (dataplane IP: %s), (dataplane MAC: %s)' % (server_name,vmAdminIP,vmDPIP,vmDPmac))
-
-config.add_section('OpenStack')
-config.set('OpenStack', 'stack', stack)
-config.set('OpenStack', 'VMs', vms)
-config.set('OpenStack', 'key', key)
-config.set('OpenStack', 'image', image)
-config.set('OpenStack', 'image_file', image_file)
-config.set('OpenStack', 'dataplane_network', dataplane_network)
-config.set('OpenStack', 'subnet', subnet)
-config.set('OpenStack', 'subnet_cidr', subnet_cidr)
-config.set('OpenStack', 'internal_network', internal_network)
-config.set('OpenStack', 'floating_network', floating_network)
-config.add_section('rapid')
-config.set('rapid', 'loglevel', loglevel)
-config.set('rapid', 'version', version)
-config.set('rapid', 'total_number_of_machines', total_number_of_VMs)
-config.set('DEFAULT', 'admin_ip', 'none')
-# Writing the environment file
-with open(stack+'.env', 'wb') as envfile:
- config.write(envfile)
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test
deleted file mode 100644
index 3ad014d5..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test
+++ /dev/null
@@ -1,56 +0,0 @@
-##
-## Copyright (c) 2010-2018 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-[DEFAULT]
-name = BasicSwapTesting
-number_of_tests = 2
-total_number_of_test_machines = 2
-init_code= not_used
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 1
-accuracy = 0.01
-
-[TestM1]
-name = InterruptTesting
-machine_index = 1
-config_file = irq.cfg
-group1cores = [1,2,3]
-
-[TestM2]
-name = InterruptTesting
-machine_index = 2
-config_file = irq.cfg
-group1cores = [1,2,3]
-
-[TestM3]
-name = InterruptTesting
-machine_index = 3
-config_file = irq.cfg
-group1cores = [1,2,3]
-
-
-[test1]
-cmd=run_irqtest(sock[0])
-[test2]
-cmd=run_irqtest(sock[1])
-[test3]
-cmd=run_irqtest(sock[2])
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
deleted file mode 100644
index 059cbf71..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
+++ /dev/null
@@ -1,248 +0,0 @@
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-from __future__ import print_function
-
-import os
-import subprocess
-import socket
-
-class prox_ctrl(object):
- def __init__(self, ip, key=None, user=None):
- self._ip = ip
- self._key = key
- self._user = user
- self._children = []
- self._proxsock = []
-
- def ip(self):
- return self._ip
-
- def connect(self):
- """Simply try to run 'true' over ssh on remote system.
- On failure, raise RuntimeWarning exception when possibly worth
- retrying, and raise RuntimeError exception otherwise.
- """
- return self.run_cmd('true', True)
-
- def close(self):
- """Must be called before program termination."""
- for prox in self._proxsock:
- prox.quit()
- children = len(self._children)
- if children == 0:
- return
- if children > 1:
- print('Waiting for %d child processes to complete ...' % children)
- for child in self._children:
- ret = os.waitpid(child[0], os.WNOHANG)
- if ret[0] == 0:
- print("Waiting for child process '%s' to complete ..." % child[1])
- ret = os.waitpid(child[0], 0)
- rc = ret[1]
- if os.WIFEXITED(rc):
- if os.WEXITSTATUS(rc) == 0:
- print("Child process '%s' completed successfully" % child[1])
- else:
- print("Child process '%s' returned exit status %d" % (
- child[1], os.WEXITSTATUS(rc)))
- elif os.WIFSIGNALED(rc):
- print("Child process '%s' exited on signal %d" % (
- child[1], os.WTERMSIG(rc)))
- else:
- print("Wait status for child process '%s' is 0x%04x" % (
- child[1], rc))
-
- def run_cmd(self, command, _connect=False):
- """Execute command over ssh on remote system.
- Wait for remote command completion.
- Return command output (combined stdout and stderr).
- _connect argument is reserved for connect() method.
- """
- cmd = self._build_ssh(command)
- try:
- return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- if _connect and ex.returncode == 255:
- raise RuntimeWarning(ex.output.strip())
- raise RuntimeError('ssh returned exit status %d:\n%s'
- % (ex.returncode, ex.output.strip()))
-
- def fork_cmd(self, command, name=None):
- """Execute command over ssh on remote system, in a child process.
- Do not wait for remote command completion.
- Return child process id.
- """
- if name is None:
- name = command
- cmd = self._build_ssh(command)
- pid = os.fork()
- if (pid != 0):
- # In the parent process
- self._children.append((pid, name))
- return pid
- # In the child process: use os._exit to terminate
- try:
- # Actually ignore output on success, but capture stderr on failure
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- raise RuntimeError("Child process '%s' failed:\n"
- 'ssh returned exit status %d:\n%s'
- % (name, ex.returncode, ex.output.strip()))
- os._exit(0)
-
- def prox_sock(self, port=8474):
- """Connect to the PROX instance on remote system.
- Return a prox_sock object on success, None on failure.
- """
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- sock.connect((self._ip, port))
- prox = prox_sock(sock)
- self._proxsock.append(prox)
- return prox
- except:
- return None
-
- def scp_put(self, src, dst):
- """Copy src file from local system to dst on remote system."""
- cmd = [ 'scp',
- '-B',
- '-oStrictHostKeyChecking=no',
- '-oUserKnownHostsFile=/dev/null',
- '-oLogLevel=ERROR' ]
- if self._key is not None:
- cmd.extend(['-i', self._key])
- cmd.append(src)
- remote = ''
- if self._user is not None:
- remote += self._user + '@'
- remote += self._ip + ':' + dst
- cmd.append(remote)
- try:
- # Actually ignore output on success, but capture stderr on failure
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- raise RuntimeError('scp returned exit status %d:\n%s'
- % (ex.returncode, ex.output.strip()))
-
- def _build_ssh(self, command):
- cmd = [ 'ssh',
- '-oBatchMode=yes',
- '-oStrictHostKeyChecking=no',
- '-oUserKnownHostsFile=/dev/null',
- '-oLogLevel=ERROR' ]
- if self._key is not None:
- cmd.extend(['-i', self._key])
- remote = ''
- if self._user is not None:
- remote += self._user + '@'
- remote += self._ip
- cmd.append(remote)
- cmd.append(command)
- return cmd
-
-class prox_sock(object):
- def __init__(self, sock):
- self._sock = sock
- self._rcvd = b''
-
- def quit(self):
- if self._sock is not None:
- self._send('quit')
- self._sock.close()
- self._sock = None
-
- def start(self, cores):
- self._send('start %s' % ','.join(map(str, cores)))
-
- def stop(self, cores):
- self._send('stop %s' % ','.join(map(str, cores)))
-
- def speed(self, speed, cores, tasks=None):
- if tasks is None:
- tasks = [ 0 ] * len(cores)
- elif len(tasks) != len(cores):
- raise ValueError('cores and tasks must have the same len')
- for (core, task) in zip(cores, tasks):
- self._send('speed %s %s %s' % (core, task, speed))
-
- def reset_stats(self):
- self._send('reset stats')
-
- def lat_stats(self, cores, task=0):
- min_lat = 999999999
- max_lat = avg_lat = 0
- self._send('lat stats %s %s' % (','.join(map(str, cores)), task))
- for core in cores:
- stats = self._recv().split(',')
- min_lat = min(int(stats[0]),min_lat)
- max_lat = max(int(stats[1]),max_lat)
- avg_lat += int(stats[2])
- avg_lat = avg_lat/len(cores)
- return min_lat, max_lat, avg_lat
-
- def irq_stats(self, core, bucket, task=0):
- self._send('stats task.core(%s).task(%s).irq(%s)' % (core, task, bucket))
- stats = self._recv().split(',')
- return int(stats[0])
-
- def show_irq_buckets(self, core, task=0):
- rx = tx = drop = tsc = hz = 0
- self._send('show irq buckets %s %s' % (core,task))
- buckets = self._recv().split(';')
- buckets = buckets[:-1]
- return buckets
-
- def core_stats(self, cores, task=0):
- rx = tx = drop = tsc = hz = 0
- self._send('core stats %s %s' % (','.join(map(str, cores)), task))
- for core in cores:
- stats = self._recv().split(',')
- rx += int(stats[0])
- tx += int(stats[1])
- drop += int(stats[2])
- tsc = int(stats[3])
- hz = int(stats[4])
- return rx, tx, drop, tsc, hz
-
- def set_random(self, cores, task, offset, mask, length):
- self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, mask, length))
-
- def set_size(self, cores, task, pkt_size):
- self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task, pkt_size))
-
- def set_value(self, cores, task, offset, value, length):
- self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, value, length))
-
- def _send(self, cmd):
- """Append LF and send command to the PROX instance."""
- if self._sock is None:
- raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd)
- self._sock.sendall(cmd.encode() + b'\n')
-
- def _recv(self):
- """Receive response from PROX instance, and return it with LF removed."""
- if self._sock is None:
- raise RuntimeError("PROX socket closed, cannot receive anymore")
- pos = self._rcvd.find(b'\n')
- while pos == -1:
- self._rcvd += self._sock.recv(256)
- pos = self._rcvd.find(b'\n')
- rsp = self._rcvd[:pos]
- self._rcvd = self._rcvd[pos+1:]
- return rsp.decode()
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms
deleted file mode 100644
index cf7b2c8d..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms
+++ /dev/null
@@ -1,31 +0,0 @@
-##
-## Copyright (c) 2010-2018 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-
-[DEFAULT]
-total_number_of_vms=3
-flavor_info=--ram 4096 --disk 20 --vcpus 4
-;flavor_meta_data=--property hw:mem_page_size=large --property hw:cpu_policy=dedicated --property hw:cpu_thread_policy=isolate --property hw:numa_nodes=1 --property hw:numa_cpus.0=0,1,2,3 --property hw:numa_mempolicy=strict --property hw:numa_mem.0=4096
-flavor_meta_data=--property hw:mem_page_size=large --property hw:cpu_policy=dedicated --property hw:cpu_thread_policy=isolate
-boot_info=--availability-zone nova --user-data prox_user_data.sh --security-group default
-SRIOV_port=NO
-
-[VM1]
-
-[VM2]
-
-[VM3]
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py
deleted file mode 100755
index 0f523cc0..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py
+++ /dev/null
@@ -1,574 +0,0 @@
-#!/usr/bin/python
-
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-from __future__ import print_function
-
-import os
-import stat
-import sys
-import time
-import subprocess
-import getopt
-import re
-import logging
-from logging.handlers import RotatingFileHandler
-from logging import handlers
-from prox_ctrl import prox_ctrl
-import ConfigParser
-import ast
-
-version="18.3.27"
-env = "rapid" #Default string for environment
-test = "basicrapid" #Default string for test
-loglevel="DEBUG" # sets log level for writing to file
-runtime=10 # time in seconds for 1 test run
-configonly = False # IF True, the system will upload all the necessary config fiels to the VMs, but not start PROX and the actual testing
-
-def usage():
- print("usage: runrapid [--version] [-v]")
- print(" [--env ENVIRONMENT_NAME]")
- print(" [--test TEST_NAME]")
- print(" [--runtime TIME_FOR_TEST]")
- print(" [--configonly False|True]")
- print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
- print(" [-h] [--help]")
- print("")
- print("Command-line interface to runrapid")
- print("")
- print("optional arguments:")
- print(" -v, --version Show program's version number and exit")
- print(" --env ENVIRONMENT_NAME Parameters will be read from ENVIRONMENT_NAME.env Default is %s."%env)
- print(" --test TEST_NAME Test cases will be read from TEST_NAME.test Default is %s."%test)
- print(" --runtime Specify time in seconds for 1 test run")
- print(" --configonly If True, only upload all config files to the VMs, do not run the tests. Default is %s."%configonly)
- print(" --log Specify logging level for log file output, screen output level is hard coded")
- print(" -h, --help Show help message and exit.")
- print("")
-
-try:
- opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "env=", "test=","runtime=","configonly=","log="])
-except getopt.GetoptError as err:
- print("===========================================")
- print(str(err))
- print("===========================================")
- usage()
- sys.exit(2)
-if args:
- usage()
- sys.exit(2)
-for opt, arg in opts:
- if opt in ("-h", "--help"):
- usage()
- sys.exit()
- if opt in ("-v", "--version"):
- print("Rapid Automated Performance Indication for Dataplane "+version)
- sys.exit()
- if opt in ("--env"):
- env = arg
- print ("Using '"+env+"' as name for the environment")
- if opt in ("--test"):
- test = arg
- print ("Using '"+test+".test' for test case definition")
- if opt in ("--runtime"):
- runtime = arg
- print ("Runtime: "+ runtime)
- if opt in ("--configonly"):
- configonly = arg
- print ("configonly: "+ configonly)
- if opt in ("--log"):
- loglevel = arg
- print ("Log level: "+ loglevel)
-
-
-# create formatters
-screen_formatter = logging.Formatter("%(message)s")
-file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-
-# get a top-level logger,
-# set its log level,
-# BUT PREVENT IT from propagating messages to the root logger
-#
-log = logging.getLogger()
-numeric_level = getattr(logging, loglevel.upper(), None)
-if not isinstance(numeric_level, int):
- raise ValueError('Invalid log level: %s' % loglevel)
-log.setLevel(numeric_level)
-log.propagate = 0
-
-# create a console handler
-# and set its log level to the command-line option
-#
-console_handler = logging.StreamHandler(sys.stdout)
-console_handler.setLevel(logging.INFO)
-console_handler.setFormatter(screen_formatter)
-
-# create a file handler
-# and set its log level to DEBUG
-#
-log_file = 'RUN' +env+'.'+test+'.log'
-file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
-#file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
-file_handler.setLevel(numeric_level)
-file_handler.setFormatter(file_formatter)
-
-# add handlers to the logger
-#
-log.addHandler(file_handler)
-log.addHandler(console_handler)
-
-# Check if log exists and should therefore be rolled
-needRoll = os.path.isfile(log_file)
-
-
-# This is a stale log, so roll it
-if needRoll:
- # Add timestamp
- log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
-
- # Roll over on application start
- log.handlers[0].doRollover()
-
-# Add timestamp
-log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
-
-log.debug("runrapid.py version: "+version)
-#========================================================================
-def connect_socket(client):
- attempts = 1
- log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
- sock = None
- while True:
- sock = client.prox_sock()
- if sock is not None:
- break
- attempts += 1
- if attempts > 20:
- log.exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
- raise Exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
- time.sleep(2)
- log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
- log.info("Connected to PROX on %s" % client.ip())
- return sock
-
-def connect_client(client):
- attempts = 1
- log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
- while True:
- try:
- client.connect()
- break
- except RuntimeWarning, ex:
- attempts += 1
- if attempts > 20:
- log.exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
- raise Exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
- time.sleep(2)
- log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
- log.debug("Connected to VM on %s" % client.ip())
-
-def run_iteration(gensock,sutsock):
- sleep_time = 2
- # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
- time.sleep(sleep_time)
- abs_old_rx, abs_old_tx, abs_old_drop, abs_old_tsc, abs_tsc_hz = gensock.core_stats(genstatcores)
- gensock.start(gencores)
- time.sleep(sleep_time)
- if sutsock!='none':
- old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores)
- old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(genstatcores)
- time.sleep(float(runtime))
- lat_min, lat_max, lat_avg = gensock.lat_stats(latcores)
- # Get statistics after some execution time
- new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(genstatcores)
- if sutsock!='none':
- new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores)
- #Stop generating
- gensock.stop(gencores)
- time.sleep(sleep_time)
- abs_new_rx, abs_new_tx, abs_new_drop, abs_new_tsc, abs_tsc_hz = gensock.core_stats(genstatcores)
- drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
- rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
- tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
- abs_dropped = (abs_new_tx - abs_old_tx) - (abs_new_rx - abs_old_rx)
- tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
- pps_req_tx = (tx+drop-rx)*tsc_hz*1.0/(tsc*1000000)
- pps_tx = tx*tsc_hz*1.0/(tsc*1000000)
- pps_rx = rx*tsc_hz*1.0/(tsc*1000000)
- if sutsock!='none':
- sut_rx = new_sut_rx - old_sut_rx
- sut_tx = new_sut_tx - old_sut_tx
- sut_tsc = new_sut_tsc - old_sut_tsc
- pps_sut_tx = sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000)
- pps_sut_tx_str = '{:>9.3f}'.format(pps_sut_tx)
- else:
- pps_sut_tx = 0
- pps_sut_tx_str = 'NO MEAS.'
- if (tx == 0):
- log.critical("TX = 0. Test interrupted since no packet has been sent.")
- raise Exception("TX = 0")
- return(pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max,abs_dropped,(abs_new_tx - abs_old_tx))
-
-def new_speed(speed,minspeed,maxspeed,success):
- # Following calculates the ratio for the new speed to be applied
- # On the Y axis, we will find the ratio, a number between 0 and 1
- # On the x axis, we find the % of dropped packets, a number between 0 and 100
- # 2 lines are drawn and we take the minumun of these lines to calculate the ratio
- # One line goes through (0,y0) and (p,q)
- # The second line goes through (p,q) and (100,y100)
-# y0=0.99
-# y100=0.1
-# p=1
-# q=.99
-# ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100))
-# return (int(speed*ratio*100)+0.5)/100.0
- if success:
- minspeed = speed
- else:
- maxspeed = speed
- newspeed = (maxspeed+minspeed)/2.0
- return (newspeed,minspeed,maxspeed)
-
-def get_pps(speed,size):
- return (speed * 100.0 / (8*(size+24)))
-
-def run_speedtest(gensock,sutsock):
- log.info("+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| Generator is sending UDP (1 flow) packets (64 bytes) to SUT. SUT sends packets back |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- log.info("| Test | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio | Result |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- maxspeed = speed = 100
- minspeed = 0
- size=60
- attempts = 0
- endpps_sut_tx_str = 'NO_RESULTS'
- gensock.set_size(gencores,0,size) # This is setting the frame size
- gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
- gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
- # This will only work when using sending UDP packets. For different protocls and ehternet types, we would need a differnt calculation
- while (maxspeed-minspeed > ACCURACY):
- attempts += 1
- print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
- sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed, gencores)
- time.sleep(1)
- # Get statistics now that the generation is stable and NO ARP messages any more
- pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx = run_iteration(gensock,sutsock)
- drop_rate = 100.0*abs_dropped/abs_tx
- if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001 and ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)):
- log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% | SUCCESS |')
- endspeed = speed
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx_str = pps_sut_tx_str
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- success = True
- else:
- log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% | FAILED |')
- success = False
- speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
- if endpps_sut_tx_str <> 'NO_RESULTS':
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- log.info('|{:>7}'.format('END')+" | " + '{:>5.1f}'.format(endspeed) + '% ' +'{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(endpps_rx)+' Mpps | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+ '{:>9.0f}'.format(endlat_max)+' us | '+'{:>14d}'.format(endabs_dropped)+ ' |''{:>9.2f}'.format(enddrop_rate)+ '% | SUCCESS |')
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- else:
- log.info('| Speed 0 or close to 0')
-
-def run_flowtest(gensock,sutsock):
- log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| UDP, 64 bytes, different number of flows by randomizing SRC & DST UDP port |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- log.info("| Flows | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- size=60
- # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable.
- flows={128:['1000000000000XXX','100000000000XXXX'],1024:['10000000000XXXXX','10000000000XXXXX'],8192:['1000000000XXXXXX','100000000XXXXXXX'],65535:['10000000XXXXXXXX','10000000XXXXXXXX'],524280:['1000000XXXXXXXXX','100000XXXXXXXXXX']}
-# flows={524280:['1000000XXXXXXXXX','100000XXXXXXXXXX']}
- gensock.set_size(gencores,0,size) # This is setting the frame size
- gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
- gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
- # This will only work when using sending UDP packets. For different protocls and ehternet types, we would need a differnt calculation
- for flow_number in sorted(flows.iterkeys()):
- #speed = 100 Commented out: Not starting from 100% since we are trying more flows, so speed will not be higher than the speed achieved in previous loop
- gensock.reset_stats()
- if sutsock!='none':
- sutsock.reset_stats()
- source_port,destination_port = flows[flow_number]
- gensock.set_random(gencores,0,34,source_port,2)
- gensock.set_random(gencores,0,36,destination_port,2)
- endpps_sut_tx_str = 'NO_RESULTS'
- maxspeed = speed = 100
- minspeed = 0
- while (maxspeed-minspeed > ACCURACY):
- print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
- sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed, gencores)
- time.sleep(1)
- # Get statistics now that the generation is stable and NO ARP messages any more
- pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx = run_iteration(gensock,sutsock)
- drop_rate = 100.0*abs_dropped/abs_tx
- if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001 and ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)):
- endspeed = speed
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx_str = pps_sut_tx_str
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- success = True
- else:
- success = False
- speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
- if endpps_sut_tx_str <> 'NO_RESULTS':
- log.info('|{:>7}'.format(str(flow_number))+" | " + '{:>5.1f}'.format(endspeed) + '% ' +'{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(endpps_rx)+' Mpps | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+ '{:>9.0f}'.format(endlat_max)+' us | '+ '{:>14d}'.format(endabs_dropped)+ ' |'+'{:>9.2f}'.format(enddrop_rate)+ '% |')
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- else:
- log.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
-
-def run_sizetest(gensock,sutsock):
- log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| UDP, 1 flow, different packet sizes |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- log.info("| Pktsize| Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- # PROX will use different packet sizes as defined in sizes[]
-# sizes=[1496,1020,508,252,124,60]
- sizes=[1020,508,252,124,60]
- for size in sizes:
- #speed = 100 Commented out: Not starting from 100% since we are trying smaller packets, so speed will not be higher than the speed achieved in previous loop
- gensock.reset_stats()
- if sutsock!='none':
- sutsock.reset_stats()
- gensock.set_size(gencores,0,size) # This is setting the frame size
- gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
- gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
- # This will only work when using sending UDP packets. For different protocls and ehternet types, we would need a differnt calculation
- endpps_sut_tx_str = 'NO_RESULTS'
- maxspeed = speed = 100
- minspeed = 0
- while (maxspeed-minspeed > ACCURACY):
- print(str(size+4)+' bytes: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
- sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed, gencores)
- # Get statistics now that the generation is stable and NO ARP messages any more
- pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx = run_iteration(gensock,sutsock)
- drop_rate = 100.0*abs_dropped/abs_tx
- if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001 and ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)):
- endspeed = speed
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx_str = pps_sut_tx_str
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- success = True
- else:
- success = False
- speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
- if endpps_sut_tx_str <> 'NO_RESULTS':
- log.info('|{:>7}'.format(size+4)+" | " + '{:>5.1f}'.format(endspeed) + '% ' +'{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(endpps_rx)+' Mpps | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+'{:>9.0f}'.format(endlat_max)+' us | '+ '{:>14d}'.format(endabs_dropped)+ ' |'+'{:>9.2f}'.format(enddrop_rate)+ '% |')
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- else:
- log.debug('|{:>7}'.format(str(size))+" | Speed 0 or close to 0")
-
-
-def run_irqtest(sock):
- log.info("+----------------------------------------------------------------------------------------------------------------------------")
- log.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic ")
- log.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and ")
- log.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was ")
- log.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout ")
- log.info("| the duration of the test. This is to avoid rounding errors in the case of 0.0 ")
- log.info("+----------------------------------------------------------------------------------------------------------------------------")
- sys.stdout.flush()
- buckets=sock.show_irq_buckets(1)
- print('Measurement ongoing ... ',end='\r')
- sock.stop(irqcores)
- old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(irqcores)+1)]
- irq = [[0 for x in range(len(buckets)+1)] for y in range(len(irqcores)+1)]
- irq[0][0] = 'bucket us'
- for j,bucket in enumerate(buckets,start=1):
- irq[0][j] = '<'+ bucket
- irq[0][-1] = '>'+ buckets [-2]
- for j,bucket in enumerate(buckets,start=1):
- for i,irqcore in enumerate(irqcores,start=1):
- old_irq[i][j] = sock.irq_stats(irqcore,j-1)
- sock.start(irqcores)
- time.sleep(float(runtime))
- sock.stop(irqcores)
- for i,irqcore in enumerate(irqcores,start=1):
- irq[i][0]='core %s '%irqcore
- for j,bucket in enumerate(buckets,start=1):
- diff = sock.irq_stats(irqcore,j-1) - old_irq[i][j]
- if diff == 0:
- irq[i][j] = '0'
- else:
- irq[i][j] = diff/float(runtime)
- log.info('\n'.join([''.join(['{:>12}'.format(item) for item in row]) for row in irq]))
-
-
-def init_test():
-# Running at low speed to make sure the ARP messages can get through.
-# If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
-# Note however that if we were to run the test steps for a very long time, the ARP entries would expire in the switch.
-# PROX will send a new ARP request every second so chances are very low that they will all fail to get through
- sock[0].speed(0.01, gencores)
- sock[0].start(genstatcores)
- time.sleep(2)
- sock[0].stop(gencores)
-
-global sutstatcores
-global genstatcores
-global latcores
-global gencores
-global irqcores
-global DROP_RATE_TRESHOLD
-global ACCURACY
-vmDPIP =[]
-vmAdminIP =[]
-vmDPmac =[]
-hexDPIP =[]
-config_file =[]
-script_control =[]
-
-testconfig = ConfigParser.RawConfigParser()
-testconfig.read(test+'.test')
-required_number_of_test_machines = testconfig.get('DEFAULT', 'total_number_of_test_machines')
-DROP_RATE_TRESHOLD = float(testconfig.get('DEFAULT', 'drop_rate_treshold'))
-ACCURACY = float(testconfig.get('DEFAULT', 'accuracy'))
-config = ConfigParser.RawConfigParser()
-config.read(env+'.env')
-key = config.get('OpenStack', 'key')
-total_number_of_machines = config.get('rapid', 'total_number_of_machines')
-if int(required_number_of_test_machines) > int(total_number_of_machines):
- log.exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
- raise Exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
-for vm in range(1, int(total_number_of_machines)+1):
- vmAdminIP.append(config.get('M%d'%vm, 'admin_ip'))
- vmDPmac.append(config.get('M%d'%vm, 'dp_mac'))
- vmDPIP.append(config.get('M%d'%vm, 'dp_ip'))
- ip = vmDPIP[-1].split('.')
- hexDPIP.append(hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2))
-machine_index = []
-for vm in range(1, int(required_number_of_test_machines)+1):
- machine_index.append(int(testconfig.get('TestM%d'%vm, 'machine_index'))-1)
-for vm in range(1, int(required_number_of_test_machines)+1):
- config_file.append(testconfig.get('TestM%d'%vm, 'config_file'))
- script_control.append(testconfig.get('TestM%d'%vm, 'script_control'))
- group1cores=testconfig.get('TestM%d'%vm, 'group1cores')
- if group1cores <> 'not_used':
- group1cores=ast.literal_eval(group1cores)
- group2cores=testconfig.get('TestM%d'%vm, 'group2cores')
- if group2cores <> 'not_used':
- group2cores=ast.literal_eval(group2cores)
- group3cores=testconfig.get('TestM%d'%vm, 'group3cores')
- if group3cores <> 'not_used':
- group3cores=ast.literal_eval(group3cores)
- with open("parameters%d.lua"%vm, "w") as f:
- f.write('name="%s"\n'% testconfig.get('TestM%d'%vm, 'name'))
- f.write('local_ip="%s"\n'% vmDPIP[machine_index[vm-1]])
- f.write('local_hex_ip="%s"\n'% hexDPIP[machine_index[vm-1]])
- gwVM = testconfig.get('TestM%d'%vm, 'gw_vm')
- if gwVM <> 'not_used':
- gwVMindex = int(gwVM)-1
- f.write('gw_ip="%s"\n'% vmDPIP[machine_index[gwVMindex]])
- f.write('gw_hex_ip="%s"\n'% hexDPIP[machine_index[gwVMindex]])
- destVM = testconfig.get('TestM%d'%vm, 'dest_vm')
- if destVM <> 'not_used':
- destVMindex = int(destVM)-1
- f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
- f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
- f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
- if group1cores <> 'not_used':
- f.write('group1="%s"\n'% ','.join(map(str, group1cores)))
- if group2cores <> 'not_used':
- f.write('group2="%s"\n'% ','.join(map(str, group2cores)))
- if group3cores <> 'not_used':
- f.write('group3="%s"\n'% ','.join(map(str, group3cores)))
- if re.match('(l2){0,1}gen.*\.cfg',config_file[-1]):
- gencores = group1cores
- latcores = group2cores
- genstatcores = group3cores
- elif config_file[-1] == 'gen_gw.cfg':
- gencores = group1cores
- latcores = group2cores
- genstatcores = group3cores
- elif re.match('(l2){0,1}swap.*\.cfg',config_file[-1]):
- sutstatcores = group1cores
- elif config_file[-1] == 'secgw2.cfg':
- sutstatcores = group1cores
- elif config_file[-1] == 'irq.cfg':
- irqcores = group1cores
- f.close
-#####################################################################################
-client =[]
-sock =[]
-
-for vm in range(0, int(required_number_of_test_machines)):
- client.append(prox_ctrl(vmAdminIP[machine_index[vm]], key+'.pem','root'))
- connect_client(client[-1])
-# Creating script to bind the right network interface to the poll mode driver
- devbindfile = "devbindvm%d.sh"%(vm+1)
- with open("devbind.sh") as f:
- newText=f.read().replace('MACADDRESS', vmDPmac[machine_index[vm]])
- with open(devbindfile, "w") as f:
- f.write(newText)
- st = os.stat(devbindfile)
- os.chmod(devbindfile, st.st_mode | stat.S_IEXEC)
- client[-1].scp_put('./%s'%devbindfile, '/root/devbind.sh')
- cmd = '/root/devbind.sh'
- client[-1].run_cmd(cmd)
- log.debug("devbind.sh running on VM%d"%(vm+1))
- client[-1].scp_put('./%s'%config_file[vm], '/root/%s'%config_file[vm])
- client[-1].scp_put('./parameters%d.lua'%(vm+1), '/root/parameters.lua')
- log.debug("Starting PROX on VM%d"%(vm+1))
- if script_control[vm] == 'true':
- cmd = '/root/prox/build/prox -e -t -o cli -f /root/%s'%config_file[vm]
- else:
- cmd = '/root/prox/build/prox -t -o cli -f /root/%s'%config_file[vm]
- if configonly == False:
- client[-1].fork_cmd(cmd, 'PROX Testing on TestM%d'%(vm+1))
- sock.append(connect_socket(client[-1]))
-if configonly:
- sys.exit()
-init_code = testconfig.get('DEFAULT', 'init_code')
-if init_code <> 'not_used':
- eval(init_code)
-####################################################
-# Run test cases
-# Best to run the flow test at the end since otherwise the tests coming after that might be influenced by the large number of entries in the switch flow tables
-####################################################
-number_of_tests = testconfig.get('DEFAULT', 'number_of_tests')
-for vm in range(1, int(number_of_tests)+1):
- cmd=testconfig.get('test%d'%vm,'cmd')
- eval(cmd)
-####################################################
-for vm in range(0, int(required_number_of_test_machines)):
- sock[vm].quit()
- client[vm].close()
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test
deleted file mode 100644
index 1ac171a6..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test
+++ /dev/null
@@ -1,59 +0,0 @@
-##
-## Copyright (c) 2010-2018 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-[DEFAULT]
-name = GWTesting
-number_of_tests = 1
-total_number_of_test_machines = 3
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 0.01
-accuracy = 0.01
-
-[TestM1]
-name = Generator
-machine_index = 1
-config_file = gen_gw.cfg
-dest_vm = 3
-gw_vm = 2
-script_control = true
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
-
-[TestM2]
-name = GW1
-machine_index = 2
-config_file = secgw1.cfg
-dest_vm = 3
-group1cores = [1]
-
-[TestM3]
-name = GW2
-machine_index = 3
-config_file = secgw2.cfg
-group1cores = [1]
-
-[test1]
-cmd=run_speedtest(sock[0],sock[2])
-
-[test2]
-cmd=run_sizetest(sock[0],sock[2])
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile b/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
new file mode 100644
index 00000000..fef0fcaf
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
@@ -0,0 +1,119 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+##################################################
+# Build all components in separate builder image #
+##################################################
+
+FROM ubuntu:20.04 as builder
+
+ARG DPDK_VERSION=22.07
+ENV DPDK_VERSION=${DPDK_VERSION}
+
+ARG BUILD_DIR="/opt/rapid"
+ENV BUILD_DIR=${BUILD_DIR}
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install Dependencies
+RUN apt update && apt -y install git wget gcc unzip libpcap-dev libncurses5-dev \
+ libedit-dev liblua5.3-dev linux-headers-generic iperf3 pciutils \
+ libnuma-dev vim tuna wireshark make driverctl openssh-server sudo \
+ meson python3-pyelftools pkg-config
+
+WORKDIR ${BUILD_DIR}
+
+# Install DPDK
+RUN wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz \
+ && tar -xf ./dpdk-${DPDK_VERSION}.tar.xz \
+ && cd dpdk-${DPDK_VERSION} \
+ && meson build -Dlibdir=lib/x86_64-linux-gnu -Denable_driver_sdk=true \
+ && ninja -C build install
+
+WORKDIR ${BUILD_DIR}
+
+# Install Prox
+RUN git clone https://gerrit.opnfv.org/gerrit/samplevnf \
+ && cd samplevnf/VNFs/DPPD-PROX \
+ && COMMIT_ID=$(git rev-parse HEAD) \
+ && echo "${COMMIT_ID}" > ${BUILD_DIR}/commit_id \
+ && meson build \
+ && ninja -C build \
+ && cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/prox ${BUILD_DIR}/prox
+
+# Build and copy port info app
+WORKDIR ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/port_info
+RUN meson build \
+ && ninja -C build \
+ && cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/build/port_info_app ${BUILD_DIR}/port_info_app
+
+RUN ldconfig && pkg-config --modversion libdpdk > ${BUILD_DIR}/dpdk_version
+# Create Minimal Install
+RUN ldd ${BUILD_DIR}/prox | awk '$2 ~ /=>/ {print $3}' >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/commit_id" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/dpdk_version" >> ${BUILD_DIR}/list_of_install_components \
+ && find /usr/local/lib/x86_64-linux-gnu -not -path '*/\.*' >> ${BUILD_DIR}/list_of_install_components \
+ && tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
+
+#############################
+# Create slim runtime image #
+#############################
+FROM ubuntu:20.04
+
+ARG BUILD_DIR="/opt/rapid"
+ENV BUILD_DIR=${BUILD_DIR}
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install Runtime Dependencies
+RUN apt update -y
+# Install required dynamically linked libraries + required packages
+RUN apt -y install sudo openssh-server libatomic1
+
+COPY --from=builder ${BUILD_DIR}/install_components.tgz ${BUILD_DIR}/install_components.tgz
+
+WORKDIR /
+RUN tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+RUN ldconfig
+RUN rm ${BUILD_DIR}/install_components.tgz
+
+# Expose SSH and PROX ports
+EXPOSE 22 8474
+
+RUN useradd -rm -d /home/rapid -s /bin/bash -g root -G sudo -u 1000 rapid \
+ && chmod 777 ${BUILD_DIR} \
+ && echo 'rapid:rapid' | chpasswd \
+ && mkdir /home/rapid/.ssh
+
+# Copy SSH keys
+COPY ./rapid_rsa_key.pub /home/rapid/.ssh/authorized_keys
+COPY ./rapid_rsa_key.pub /root/.ssh/authorized_keys
+
+RUN chown rapid:root /home/rapid/.ssh/authorized_keys \
+ && chmod 600 /home/rapid/.ssh/authorized_keys \
+ && chown root:root /root/.ssh/authorized_keys \
+ && chmod 600 /root/.ssh/authorized_keys
+
+#RUN apt-get clean && apt autoremove --purge
+RUN apt-get autoremove -y && apt-get clean all && rm -rf /var/cache/apt
+
+# Copy startup script
+COPY ./start.sh /start.sh
+RUN chmod +x /start.sh
+
+ENTRYPOINT ["/start.sh"]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/README b/VNFs/DPPD-PROX/helper-scripts/rapid/README
new file mode 100644
index 00000000..198b6db1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/README
@@ -0,0 +1,183 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+rapid (Rapid Automated Performance Indication for Dataplane)
+************************************************************
+
+rapid is a set of files offering an easy way to do a sanity check of the
+dataplane performance of an OpenStack or container environment.
+
+Most of the information below is now available on wiki.opnfv.org/display/SAM/Rapid+scripting
+
+In case of OpenStack, copy the files in a directory on a machine that can run the OpenStack CLI
+commands and that can reach the networks to connect to the VMs.
+
+You will need an image that has the PROX tool installed.
+A good way to do this is to use the packer tool to build an image for a target of your choice.
+You can also build this image manually by executing all the commands described in the deploycentostools.sh.
+The default name of the qcow2 file is rapidVM.qcow2
+
+When using the packer tool, the first step is to upload an
+existing CentOS cloud image from the internet into OpenStack.
+Check out: https://cloud.centos.org/centos/7/images/
+You should now create proper clouds.yaml file so Packer can connect to your OpenStack.
+Sample clouds.yaml could look like this:
+
+client:
+ force_ipv4: true
+clouds:
+ overcloud:
+ verify: False
+ interface: "public"
+ auth:
+ username: "admin"
+ password: "your_password"
+ project_name: "admin"
+ tenant_name: "admin"
+ auth_url: "https://192.168.1.1:5000/v3"
+ user_domain_name: "Default"
+ domain_name: "Default"
+ identity_api_version: "3"
+
+Packer could be run from docker image, you will need to create following alias:
+
+alias packer='docker run -it --env OS_CLOUD=$OS_CLOUD -v "$PWD":/root/project -w /root/project hashicorp/packer:light $@'
+and make sure the OS_CLOUD variable is set to the correct cloud: in the clouds.yaml example above, you would first
+export OS_CLOUD=overcloud
+
+There are 2 files: centos.json and deploycentostools.sh, allowing you to create
+an image automatically. Run
+ # packer build centos.json
+Edit centos.json to reflect the settings of your environment: The following fields need to be populated
+with the values of your system:
+ - "source_image_name": Needs to be the name of the Centos cloud image
+ - "flavor": Needs to be the ID or name of the flavor existing in your OpenStack environment that will be used
+ to start the VM in which we will install all tools
+ - "network_discovery_cidrs": Should contain the CIDR of the network you want to use e.g. "10.6.6.0/24"
+ - "floating_ip_network": ID or name of the floating ip network in case floating ip are being used
+ - "security_groups": ID or name of the security group being used
+
+Refer to Packer docs for more details:
+https://www.packer.io/docs/builders/openstack.html
+
+Note that this procedure is not only installing the necessary tools to run PROX,
+but also does some system optimizations (tuned). Check deploycentostools.sh for more details.
+
+Now you need to create a stack, that will deploy the PROX VMs using the PROX
+image built in the previous step. The stack needs to have an output section
+with the following outputs:
+outputs:
+ number_of_servers:
+ value:
+ - <NUMBER_OF_SERVERS> # A list of <NUMBER_OF_SERVERS>
+ server_name:
+ value:
+ - - <SERVER_NAME> # A list containing a list of <SERVER_NAME>
+ data_plane_ips:
+ value:
+ - - <DATA_PLANE_IPS> # A list containing a list of <DATA_PLANE_IPS>
+ data_plane_macs:
+ value:
+ - - <DATA_PLANE_MACS> # A list containing a list of <DATA_PLANE_MACS>
+ mngmt_ips:
+ value:
+ - - <MNGMT_IP> # A list containing a list of <MNGMT_IP>
+where
+ * <NUMBER_OF_SERVERS> is an int
+ * <SERVER_NAME> is a string
+ * <DATA_PLANE_IPS> is a list of strings
+ * <DATA_PLANE_MACS> is a list of strings
+ * <MNGMT_IP> is a string
+
+createrapid.py will take the input from config_file, to create an ssh keypair
+and stack (if not already existing). The tool will use the yaml files as
+specified in the config_file and create a <STACK>.env file, containing
+input used for runrapid.py.
+
+Now you can run the runrapid.py file. Use help for more info on the usage:
+ # ./runrapid.py --help
+The script will connect to all machines that have been instantiated and it will launch
+PROX in all machines. This will be done through the admin IP assigned to the machines.
+Once that is done it will connect to the PROX tcp socket and start sending
+commands to run the actual test.
+Make sure the security groups allow for tcp access (ssh & prox port).
+It will print test results on the screen while running.
+The actual test that is running is described in <TEST>.test.
+
+Notes about prox_user_data.sh script:
+- The script contains commands that will be executed using cloud-init at
+ startup of the VMs.
+- huge pages are allocated for DPDK on node 0 (hard-coded) in the VM.
+
+Note on using SRIOV ports:
+Before running createrapid, make sure the network, subnet and ports are already created
+This can be done as follows (change the parameters to your needs):
+openstack network create --share --external --provider-network-type flat --provider-physical-network physnet2 fast-network
+openstack subnet create --network fast-network --subnet-range 20.20.20.0/24 --gateway none fast-subnet
+openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port1
+openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port2
+openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port3
+
+Note when doing tests using the gateway functionality on OVS:
+When a GW VM is sending packets on behalf of another VM (e.g. the generator), we need to make sure the OVS
+will allow those packets to go through. Therefore you need to add the IP address of the generator to the
+"allowed address pairs" of the GW VM.
+
+Note when doing tests using encryption on OVS:
+Your OVS configuration might block encrypted packets. To allow packets to go through,
+you can disable port_security. You can do this by using the following commands
+neutron port-update xxxxxx --no-security-groups
+neutron port-update xxxxxx --port_security_enabled=False
+
+An example of the env file generated by createrapid.py can be found below.
+Note that this file can be created manually in case the stack is created in a
+different way than what is described in this text. This can be useful in case
+you are not using OpenStack as a VIM or when using special configurations that
+cannot be achieved using createrapid.py. Fields needed for runrapid are:
+* all info in the [Mx] sections
+* the key information in the [ssh] section
+* the total_number_of_vms information in the [rapid] section
+
+[rapid]
+loglevel = DEBUG
+version = 19.6.30
+total_number_of_machines = 3
+
+[M1]
+name = rapid-VM1
+admin_ip = 10.25.1.109
+dp_ip1 = 10.10.10.4
+dp_mac1 = fa:16:3e:25:be:25
+
+[M2]
+name = rapid-VM2
+admin_ip = 10.25.1.110
+dp_ip1 = 10.10.10.7
+dp_mac1 = fa:16:3e:72:bf:e8
+
+[M3]
+name = rapid-VM3
+admin_ip = 10.25.1.125
+dp_ip1 = 10.10.10.15
+dp_mac1 = fa:16:3e:69:f3:e7
+
+[ssh]
+key = prox.pem
+user = centos
+
+[Varia]
+vim = OpenStack
+stack = rapid
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s b/VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s
new file mode 100644
index 00000000..e1abbe75
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s
@@ -0,0 +1,94 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+###############################################################################
+# REQUIREMENTS #
+###############################################################################
+1. Working Kubernetes cluster. It can be set up using Intel Container Bare
+Metal Reference Architecture https://github.com/intel/container-experience-kits
+
+2. 1024x 2M hugepages must be configured on the nodes
+
+3. SRIOV Network Device Plugin for Kubernetes installed
+https://github.com/intel/sriov-network-device-plugin.
+
+4. SRIOV VFs configured and rebound to the vfio-pci module
+As an example, a pool of SRIOV VFs (rebound to the vfio-pci driver) is named
+intel.com/intel_sriov_vfio.
+
+Network attachment definition is named as
+k8s.v1.cni.cncf.io/networks: intel-sriov-vfio.
+
+5. PROX image created and pushed to the local registry or distributed and
+loaded on all of the testing nodes.
+
+###############################################################################
+# PROX IMAGE BUILD #
+###############################################################################
+Run
+# dockerimage.sh build
+to build PROX image.
+
+After the successful build prox.tar will be created and can be used to load
+image on the k8s nodes or it can be pushed to the local repository using
+# dockerimage.sh push
+
+###############################################################################
+# TESTING #
+###############################################################################
+1. Edit rapidpods file and set the right name (nodeSelector_hostname) for the
+nodes on which you want to execute test PODs.
+
+# kubectl get nodes -o wide
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+k8s-master1 Ready master 7d13h v1.13.5 10.10.0.10 <none> CentOS Linux 7 (Core) 3.10.0-1062.4.1.el7.x86_64 docker://18.6.2
+k8s-node1 Ready node 7d13h v1.13.5 10.10.0.12 <none> CentOS Linux 7 (Core) 3.10.0-1062.4.1.el7.x86_64 docker://18.6.2
+k8s-node2 Ready node 7d13h v1.13.5 10.10.0.13 <none> CentOS Linux 7 (Core) 3.10.0-1062.4.1.el7.x86_64 docker://18.6.2
+
+Set the right IP addresses (dp_ip) to use by the PODs for the Dataplane network.
+
+2. Edit pod-rapid.yaml file and set correct
+ - image name (image: localhost:5000/prox:latest)
+ - network attachment definition in metadata->annotation section
+ (k8s.v1.cni.cncf.io/networks: intel-sriov-vfio)
+ - SRIOV VFs resources attached to the vfio-pci driver
+ (intel.com/intel_sriov_vfio: '1')
+
+3. Copy SSH private key in the rapid_rsa_key file
+
+4. Run createrapidk8s.py to create test PODs according to the configuration from
+rapid.pods file.
+
+# ./createrapidk8s.py
+
+Check for rapid PODs. They should be up and running.
+
+# kubectl get pods -o wide
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod-rapid-1 1/1 Running 0 18h 10.244.2.87 k8s-node1 <none> <none>
+pod-rapid-2 1/1 Running 0 18h 10.244.1.40 k8s-node2 <none> <none>
+pod-rapid-3 1/1 Running 0 18h 10.244.1.39 k8s-node2 <none> <none>
+
+5. Run test case.
+
+# ./runrapid.py --test basicrapid.test
+
+###############################################################################
+# NOTES #
+###############################################################################
+If layer 2 tests are planned to be executed, MAC addresses must be
+preconfigured for the SRIOV VFs to avoid issues with randomly generated MACs
+each time the PROX starts.
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/centos.json b/VNFs/DPPD-PROX/helper-scripts/rapid/centos.json
new file mode 100644
index 00000000..51784c0e
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/centos.json
@@ -0,0 +1,52 @@
+{
+"_Copyright": "Copyright (c) 2010-2020 Intel Corporation",
+"_License": "SPDX-License-Identifier: Apache-2.0",
+"builders": [
+ {
+"type": "openstack",
+"ssh_username": "centos",
+"image_name": "rapidVM",
+"source_image_name": "CentOS",
+"flavor": "packer_flavor",
+"network_discovery_cidrs":"10.6.6.0/24",
+"floating_ip_network": "admin_floating_net",
+"security_groups": "prox_security_group",
+"ssh_timeout":"1000s",
+"ssh_pty":"true"
+ }
+],
+"provisioners": [
+ {
+ "type": "shell",
+ "inline": [
+ "sudo mkdir -p /opt/rapid",
+ "sudo chmod 0777 /opt/rapid" ]
+ },
+ {
+ "type": "file",
+ "source": "./check_prox_system_setup.sh",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "file",
+ "source": "./check-prox-system-setup.service",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "file",
+ "source": "./sharkproxlog.sh",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "file",
+ "source": "./deploycentostools.sh",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "chmod a+x /opt/rapid/deploycentostools.sh",
+ "/opt/rapid/deploycentostools.sh -u deploy" ]
+ }
+]
+}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service b/VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service
new file mode 100644
index 00000000..f52055e7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Check PROX system setup (isolated_cores, vfio)
+DefaultDependencies=no
+After=multi-user.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/libexec/check_prox_system_setup.sh
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
new file mode 100755
index 00000000..3cf1113d
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+## This script should run after booting: see check-prox-system-setup.service
+
+NCPUS="$(lscpu | egrep '^CPU\(s\):' | awk '{ print $2 }')"
+MAXCOREID="$((NCPUS-1))"
+
+tuned_config="/etc/tuned/realtime-virtual-guest-variables.conf"
+log_file="/opt/rapid/prox_system_setup.log"
+system_ready="/opt/rapid/system_ready_for_rapid"
+tuned_done="/opt/rapid/tuned_done"
+after_boot_file="/opt/rapid/after_boot.sh"
+
+tuned_and_reboot () {
+ echo "Applying tuned profile">>$log_file
+ tuned-adm profile realtime-virtual-guest
+ touch "$tuned_done"
+ echo "Rebooting...">>$log_file
+ reboot
+ exit 0
+}
+
+if [ -f "$tuned_config" ]
+then
+ while read -r line
+ do
+ case $line in
+ isolated_cores=1-$MAXCOREID*)
+ if test ! -f "$tuned_done"; then
+ tuned_and_reboot
+ fi
+ if test -f "$after_boot_file"; then
+ echo "Executing: $after_boot_file">>$log_file
+ ("$after_boot_file")
+ fi
+ echo "Isolated CPU(s) OK, no reboot: $line">>$log_file
+ ## rapid scripts will wait for the system_ready file to exist
+ ## Only then, they will be able to connect to the PROX instance
+ ## and start the testing
+ touch "$system_ready"
+ ## On some systems, we still need to use the igb_uio driver.
+ ## Example: good performance on AWS with the ENA interface.
+ ## Make sure that you change devbind.sh to use the preferred
+ ## driver. vfio is the default.
+ modprobe uio
+ insmod /opt/rapid/dpdk/build/kmod/igb_uio.ko wc_activate=1
+ exit 0
+ ;;
+ isolated_cores=*)
+ echo "Isolated CPU(s) NOK: $line">>$log_file
+ sed -i "/^isolated_cores=.*/c\isolated_cores=1-$MAXCOREID" $tuned_config
+ tuned_and_reboot
+ ;;
+ *)
+ echo "$line"
+ ;;
+ esac
+ done < "$tuned_config"
+ echo "isolated_cores=1-$MAXCOREID" >> $tuned_config
+ echo "No Isolated CPU(s) defined in config, line added: isolated_cores=1-$MAXCOREID">>$log_file
+ tuned_and_reboot
+else
+ echo "$tuned_config not found.">>$log_file
+fi
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/config_file b/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
new file mode 100644
index 00000000..b5aeb3a9
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
@@ -0,0 +1,8 @@
+[OpenStack]
+cloud_name = openstackL6
+stack_name = rapid
+heat_template= openstack-rapid.yaml
+heat_param = params_rapid.yaml
+user = centos
+dataplane_subnet_mask = 24
+;push_gateway = http://192.168.36.61:9091/metrics/job/
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg
new file mode 100644
index 00000000..75267f35
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg
@@ -0,0 +1,81 @@
+;;
+;; Copyright (c) 2021 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+public_start_ip = string.match(dest_ip1,"%d+\.%d+\.%d+\.")..2
+public_stop_ip = string.match(dest_ip1,"%d+\.%d+\.%d+\.")..20
+cgnat_table = {}
+cgnat_table.dynamic = {
+ {public_ip_range_start = ip(public_start_ip),public_ip_range_stop = ip(public_stop_ip), public_port = val_range(10,20000)},
+}
+lpm4 = {}
+lpm4.next_hops = {
+ {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x212},
+}
+lpm4.routes = {};
+lpm4.routes[1] = {
+ cidr = {ip = ip(0), depth = 1},
+ next_hop_id = 0,
+}
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=internal_tap
+local ipv4=${local_ip1}
+
+[port 1]
+name=if1
+mac=hardware
+vlan=yes
+vdev=external_tap
+local ipv4=${local_ip2}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=nat
+task=0
+mode=cgnat
+sub mode=l3
+private=yes
+nat table=cgnat_table
+route table=lpm4
+rx port=if0
+tx ports from routing table=if1
+
+task=1
+mode=cgnat
+sub mode=l3
+private=no
+nat table=cgnat_table
+route table=lpm4
+rx port=if1
+tx ports from routing table=if0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg
new file mode 100644
index 00000000..31728daf
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg
@@ -0,0 +1,47 @@
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=esp_tap
+local ipv4=$local_ip1
+
+[defaults]
+mempool size=64K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=enc
+task=0
+mode=esp_enc
+sub mode=l3
+remote ipv4=$dest_ip1
+rx port=if0
+tx cores=$altcores task=0
+drop=yes
+
+
+[core $altcores]
+name=dec
+task=0
+mode=esp_dec
+sub mode=l3
+remote ipv4=$dest_ip1
+rx ring=yes
+tx port=if0
+drop=yes
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg
index a87ce758..8d3f8581 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2020 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,49 +14,56 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=gen_tap
+local ipv4=${local_ip1}
+
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
+heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $gencores]
name=p0
task=0
mode=gen
sub mode=l3
tx port=p0
-bps=1250000000
-pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 1a 55 7b
+bps=1250000
+pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-;gateway ipv4=${gw_ip}
-local ipv4=${local_ip}
min bulk size=$mbs
max bulk size=16
-drop=no
+drop=yes
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+;arp update time=1
-[core ${group2}]
+[core $latcores]
name=lat
task=0
mode=lat
@@ -65,6 +72,9 @@ rx port=p0
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+accuracy limit nsec=1000000
+latency bucket size=${bucket_size_exp}
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg
index 7feaa7fd..8a477e5f 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2020 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,49 +14,56 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=gen_tap
+local ipv4=${local_ip1}
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
+heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $gencores]
name=p0
task=0
mode=gen
sub mode=l3
tx port=p0
-bps=1250000000
-pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 1a 55 7b
+bps=1250000
+pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-gateway ipv4=${gw_ip}
-local ipv4=${local_ip}
+gateway ipv4=${gw_ip1}
min bulk size=$mbs
max bulk size=16
-drop=no
+drop=yes
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+;arp update time=1
-[core ${group2}]
+[core $latcores]
name=lat
task=0
mode=lat
@@ -65,5 +72,7 @@ rx port=p0
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+latency bucket size=${bucket_size_exp}
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg
new file mode 100644
index 00000000..32fadbc7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg
@@ -0,0 +1,78 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+heartbeat timeout=${heartbeat}
+
+[core $mcore]
+mode=master
+
+[core $gencores]
+name=gen
+task=0
+mode=gen
+sub mode=ndp
+tx port=p0
+bps=1000
+pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 86 dd 60 00 00 00 00 1a 11 40 ${local_hex_ip1} ${dest_hex_ip1} 13 88 13 88 00 1a 55 7b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+global ipv6=${local_ip1}
+min bulk size=$mbs
+max bulk size=16
+drop=yes
+lat pos=62
+packet id pos=66
+signature pos=72
+signature=0x98765432
+accuracy pos=76
+pkt size=80
+
+
+
+[core $latcores]
+name=lat
+task=0
+mode=lat
+sub mode=ndp
+rx port=p0
+lat pos=62
+accuracy pos=76
+packet id pos=66
+signature=0x98765432
+signature pos=72
+accuracy limit nsec=1000000
+latency bucket size=${bucket_size_exp}
+global ipv6=${local_ip1}
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg
index e8b3801d..3eaf80e7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,34 +14,39 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if0
mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=impair_tap
+local ipv4=${local_ip1}
[defaults]
-mempool size=2K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=impair
task=0
mode=impair
sub mode=l3
rx port=if0
tx port=if0
-delay us=10
-probability=100
-local ipv4=${local_ip}
-
+delay us=1000
+proba delay=50
+proba no drop=100
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg
index 3ae539c5..0f26e6eb 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg
@@ -14,29 +14,30 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
-[lua]
-dofile("parameters.lua")
-
-[port 0]
+[;port 0]
name=p0
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=irq
task=0
mode=irq
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2gen.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg
index 9e7bf90e..3af0ac99 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2gen.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,48 +14,50 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $gencores]
name=p0
task=0
mode=gen
tx port=p0
bps=1250000000
-pkt inline=${dest_hex_mac} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 1a 55 7b
+pkt inline=${dest_hex_mac1} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-;gateway ipv4=${gw_ip}
-local ipv4=${local_ip}
min bulk size=$mbs
max bulk size=16
-drop=no
+drop=yes
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
-[core ${group2}]
+[core $latcores]
name=lat
task=0
mode=lat
@@ -63,5 +65,6 @@ rx port=p0
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+latency bucket size=${bucket_size_exp}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg
new file mode 100644
index 00000000..dc988969
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg
@@ -0,0 +1,59 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $gencores]
+name=p0
+task=0
+mode=gen
+tx port=p0
+bps=1250000000
+pkt inline=${dest_hex_mac1} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
+pkt size=60
+min bulk size=$mbs
+max bulk size=64
+drop=yes
+
+[core $latcores]
+name=drop
+task=0
+mode=none
+rx port=p0
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg
index c02556d9..0ce3a1a3 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,31 +14,34 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if0
mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
[defaults]
-mempool size=2K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=swap
task=0
mode=swap
rx port=if0
tx port=if0
drop=no
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg
new file mode 100644
index 00000000..9ffd6e8f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg
@@ -0,0 +1,57 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=public_tap
+local ipv4=${local_ip1}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=PublicServer
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx cores=${self}t1
+drop=no
+
+task=1
+mode=mirror
+sub mode=l3
+multiplier=2
+mirror size=300
+rx ring=yes
+tx port=if0
+drop=no
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg
index 30abb8f7..d941e5eb 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg
@@ -18,12 +18,13 @@
; This is sample ESP config.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if
@@ -40,15 +41,15 @@ mempool size=16K
start time=20
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=esp_enc
task=0
mode=esp_enc
sub mode=l3
-local ipv4=${local_ip}
+local ipv4=${local_ip1}
remote ipv4=${dest_ip}
rx port=if
tx port=if
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg
index a361e875..9aedc85d 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg
@@ -17,13 +17,15 @@
;;
; This is sample ESP config.
;;
-[eal options]
--n=4 ; force number of memory channels
-no-output=no ; disable DPDK debug output
[lua]
dofile("parameters.lua")
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
[port 0]
name=if
mac=hardware
@@ -39,15 +41,15 @@ mempool size=16K
start time=20
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=esp_dec
task=0
mode=esp_dec
sub mode=l3
-local ipv4=${local_ip}
+local ipv4=${local_ip1}
rx port=if
tx port=if
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg
new file mode 100644
index 00000000..f5ff5447
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = rapidxt
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ rapidxt = rapidxt:RapidXt
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg
index 4229c207..f66322a9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,33 +14,36 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if0
mac=hardware
+vlan=yes
+vdev=swap_tap
+local ipv4=${local_ip1}
[defaults]
-mempool size=2K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=swap
task=0
mode=swap
sub mode=l3
rx port=if0
tx port=if0
-local ipv4=${local_ip}
drop=no
-
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg
new file mode 100644
index 00000000..abadfa64
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg
@@ -0,0 +1,50 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=swap_tap
+local ipv4=${local_ip1}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=swap
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx port=if0
+gateway ipv4=${gw_ip1}
+drop=no
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg
new file mode 100644
index 00000000..61c8a594
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg
@@ -0,0 +1,47 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=swap
+task=0
+mode=swap
+sub mode=ndp
+rx port=if0
+tx port=if0
+global ipv6=${local_ip1}
+drop=no
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
new file mode 100755
index 00000000..af1da307
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+from rapid_log import RapidLog
+from stackdeployment import StackDeployment
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
+
+class RapidStackManager(object):
+ @staticmethod
+ def parse_config(rapid_stack_params):
+ config = configparser.RawConfigParser()
+ config.read('config_file')
+ section = 'OpenStack'
+ options = config.options(section)
+ for option in options:
+ rapid_stack_params[option] = config.get(section, option)
+ if 'dataplane_subnet_mask' not in rapid_stack_params.keys():
+ rapid_stack_params['dataplane_subnet_mask'] = 24
+ return (rapid_stack_params)
+
+ @staticmethod
+ def deploy_stack(rapid_stack_params):
+ cloud_name = rapid_stack_params['cloud_name']
+ stack_name = rapid_stack_params['stack_name']
+ heat_template = rapid_stack_params['heat_template']
+ heat_param = rapid_stack_params['heat_param']
+ user = rapid_stack_params['user']
+ dataplane_subnet_mask = rapid_stack_params['dataplane_subnet_mask']
+ deployment = StackDeployment(cloud_name)
+ deployment.deploy(stack_name, heat_template, heat_param)
+ deployment.generate_env_file(user, dataplane_subnet_mask)
+
+def main():
+ rapid_stack_params = {}
+ RapidStackManager.parse_config(rapid_stack_params)
+ log_file = 'CREATE{}.log'.format(rapid_stack_params['stack_name'])
+ RapidLog.log_init(log_file, 'DEBUG', 'INFO', '2021.03.15')
+ #cloud_name = 'openstackL6'
+ #stack_name = 'rapid'
+ #heat_template = 'openstack-rapid.yaml'
+ #heat_param = 'params_rapid.yaml'
+ #user = 'centos'
+ RapidStackManager.deploy_stack(rapid_stack_params)
+
+if __name__ == "__main__":
+ main()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
new file mode 100755
index 00000000..c4667f1f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import argparse
+from rapid_k8s_deployment import K8sDeployment
+
+# Config file name for deployment creation
+CREATE_CONFIG_FILE_NAME = "rapid.pods"
+
+# Config file name for runrapid script
+RUN_CONFIG_FILE_NAME = "rapid.env"
+
+def main():
+ # Parse command line arguments
+ argparser = argparse.ArgumentParser()
+ argparser.add_argument("-c", "--clean", action = "store_true",
+ help = "Terminate pod-rapid-* PODs. "
+ "Clean up cluster before or after the testing.")
+ args = argparser.parse_args()
+
+ # Create a new deployment
+ deployment = K8sDeployment()
+
+ # Load config file with test environment description
+ deployment.load_create_config(CREATE_CONFIG_FILE_NAME)
+
+ if args.clean:
+ deployment.delete_pods()
+ return
+
+ # Create PODs for test
+ deployment.create_pods()
+
+ # Save config file for runrapid script
+ deployment.save_runtime_config(RUN_CONFIG_FILE_NAME)
+
+if __name__ == "__main__":
+ main()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
new file mode 100644
index 00000000..a0fe7cb2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
@@ -0,0 +1,305 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# Directory for package build
+BUILD_DIR="/opt/rapid"
+DPDK_VERSION="20.05"
+MULTI_BUFFER_LIB_VER="0.52"
+export RTE_SDK="${BUILD_DIR}/dpdk-${DPDK_VERSION}"
+export RTE_TARGET="x86_64-native-linuxapp-gcc"
+
+# By default, do not update OS
+OS_UPDATE="n"
+# By default, assuming that we are in the VM
+K8S_ENV="n"
+
+# If already running from root, no need for sudo
+SUDO=""
+[ $(id -u) -ne 0 ] && SUDO="sudo"
+
+function os_pkgs_install()
+{
+ ${SUDO} yum install -y deltarpm yum-utils
+
+ # NASM repository for AESNI MB library
+ #${SUDO} yum-config-manager --add-repo http://www.nasm.us/nasm.repo
+
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+ ${SUDO} yum install -y git wget gcc unzip libpcap-devel ncurses-devel \
+ libedit-devel lua-devel kernel-devel iperf3 pciutils \
+ numactl-devel vim tuna openssl-devel wireshark \
+ make driverctl
+
+ ${SUDO} wget --no-check-certificate \
+ https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+ ${SUDO} rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
+}
+
+function k8s_os_pkgs_runtime_install()
+{
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+
+ # Install required dynamically linked libraries + required packages
+ ${SUDO} yum install -y numactl-libs libpcap openssh openssh-server \
+ openssh-clients sudo
+
+    # Install additional packages for universal image
+ ${SUDO} yum install -y epel-release python3 kubernetes-client
+ ${SUDO} yum install -y python3-paramiko python3-future
+ ${SUDO} python3 -m pip install --upgrade pip
+ ${SUDO} pip3 install scp kubernetes
+}
+
+function os_cfg()
+{
+ # huge pages to be used by DPDK
+ ${SUDO} sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
+
+ ${SUDO} sh -c '(echo "options vfio enable_unsafe_noiommu_mode=1") > /etc/modprobe.d/vfio.conf'
+ ${SUDO} sh -c '(echo "vfio") > /etc/modules-load.d/vfio.conf'
+    ${SUDO} sh -c '(echo "vfio-pci") >> /etc/modules-load.d/vfio.conf'
+ # Enabling tuned with the realtime-virtual-guest profile
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm
+ # Install with --nodeps. The latest CentOS cloud images come with a tuned version higher than 2.8. These 2 packages however
+ # do not depend on v2.8 and also work with tuned 2.9. Need to be careful in the future
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+    # Although we do not know how many cores the VM will have when being deployed for real testing, we already put a number for the
+ # isolated CPUs so we can start the realtime-virtual-guest profile. If we don't, that command will fail.
+ # When the VM will be instantiated, the check_kernel_params service will check for the real number of cores available to this VM
+ # and update the realtime-virtual-guest-variables.conf accordingly.
+ echo "isolated_cores=1-3" | ${SUDO} tee -a /etc/tuned/realtime-virtual-guest-variables.conf
+ ${SUDO} tuned-adm profile realtime-virtual-guest
+
+    # Install the check-prox-system-setup service to make sure that the grub cmd line has the right cpus in isolcpu. The actual number of cpu's
+ # assigned to this VM depends on the flavor used. We don't know at this time what that will be.
+ ${SUDO} chmod +x ${BUILD_DIR}/check_prox_system_setup.sh
+ ${SUDO} mv ${BUILD_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+ ${SUDO} mv ${BUILD_DIR}/check-prox-system-setup.service /etc/systemd/system/
+ ${SUDO} systemctl daemon-reload
+ ${SUDO} systemctl enable check-prox-system-setup.service
+ popd > /dev/null 2>&1
+}
+
+function k8s_os_cfg()
+{
+ [ ! -f /etc/ssh/ssh_host_rsa_key ] && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ecdsa_key ] && ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ed25519_key ] && ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
+
+ [ ! -d /var/run/sshd ] && mkdir -p /var/run/sshd
+
+ USER_NAME="centos"
+ USER_PWD="centos"
+
+ useradd -m -d /home/${USER_NAME} -s /bin/bash -U ${USER_NAME}
+ echo "${USER_NAME}:${USER_PWD}" | chpasswd
+ usermod -aG wheel ${USER_NAME}
+
+ echo "%wheel ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/wheelnopass
+}
+
+function mblib_install()
+{
+ export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
+
+ # Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+ unzip v${MULTI_BUFFER_LIB_VER}.zip
+ pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} make install
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function dpdk_install()
+{
+ # Build DPDK for the latest kernel installed
+ LATEST_KERNEL_INSTALLED=`ls -v1 /lib/modules/ | tail -1`
+ export RTE_KERNELDIR="/lib/modules/${LATEST_KERNEL_INSTALLED}/build"
+
+ # Get and compile DPDK
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz
+ tar -xf ./dpdk-${DPDK_VERSION}.tar.xz
+ popd > /dev/null 2>&1
+
+ ${SUDO} ln -s ${RTE_SDK} ${BUILD_DIR}/dpdk
+
+ pushd ${RTE_SDK} > /dev/null 2>&1
+ make config T=${RTE_TARGET}
+ # Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
+ # Uncomment the sed command to enable the driver compilation
+    #${SUDO} sed -i '/CONFIG_RTE_EAL_IGB_UIO=n/c\CONFIG_RTE_EAL_IGB_UIO=y' ${RTE_SDK}/build/.config
+
+ # For Kubernetes environment we use host vfio module
+ if [ "${K8S_ENV}" == "y" ]; then
+ sed -i 's/CONFIG_RTE_EAL_IGB_UIO=y/CONFIG_RTE_EAL_IGB_UIO=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_LIBRTE_KNI=y/CONFIG_RTE_LIBRTE_KNI=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_KNI_KMOD=y/CONFIG_RTE_KNI_KMOD=n/g' ${RTE_SDK}/build/.config
+ fi
+
+ # Compile with MB library
+ sed -i '/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/c\CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y' ${RTE_SDK}/build/.config
+ make -j`getconf _NPROCESSORS_ONLN`
+ ln -s ${RTE_SDK}/build ${RTE_SDK}/${RTE_TARGET}
+ popd > /dev/null 2>&1
+}
+
+function prox_compile()
+{
+ # Compile PROX
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX
+ COMMIT_ID=$(git rev-parse HEAD)
+ echo "${COMMIT_ID}" > ${BUILD_DIR}/commit_id
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+ popd > /dev/null 2>&1
+}
+
+function prox_install()
+{
+ # Clone PROX
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ git clone https://git.opnfv.org/samplevnf
+ cp -R ./samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid ./src
+ popd > /dev/null 2>&1
+ prox_compile
+
+ # Clean build folder
+ rm -rf ${BUILD_DIR}/samplevnf
+}
+
+function port_info_build()
+{
+ [ ! -d ${BUILD_DIR}/port_info ] && echo "Skipping port_info compilation..." && return
+
+ pushd ${BUILD_DIR}/port_info > /dev/null 2>&1
+ make
+ ${SUDO} cp ${BUILD_DIR}/port_info/build/app/port_info_app ${BUILD_DIR}/port_info_app
+ popd > /dev/null 2>&1
+}
+
+function create_minimal_install()
+{
+ ldd ${BUILD_DIR}/prox | awk '{ if ($(NF-1) != "=>") print $(NF-1) }' >> ${BUILD_DIR}/list_of_install_components
+
+ echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/commit_id" >> ${BUILD_DIR}/list_of_install_components
+
+ tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
+}
+
+function cleanup()
+{
+ ${SUDO} yum autoremove -y
+ ${SUDO} yum clean all
+ ${SUDO} rm -rf /var/cache/yum
+}
+
+function k8s_runtime_image()
+{
+ k8s_os_pkgs_runtime_install
+ k8s_os_cfg
+ cleanup
+
+ pushd / > /dev/null 2>&1
+ tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+ popd > /dev/null 2>&1
+
+ ldconfig
+
+ rm -rf ${BUILD_DIR}/install_components.tgz
+}
+
+function print_usage()
+{
+ echo "Usage: ${0} [OPTIONS] [COMMAND]"
+ echo "Options:"
+ echo " -u, --update Full OS update"
+ echo " -k, --kubernetes Build for Kubernetes environment"
+ echo "Commands:"
+ echo " deploy Run through all deployment steps"
+ echo " compile PROX compile only"
+ echo " runtime_image Apply runtime configuration only"
+}
+
+COMMAND=""
+# Parse options and command
+for opt in "$@"; do
+ case ${opt} in
+ -u|--update)
+ echo 'Full OS update will be done!'
+ OS_UPDATE="y"
+ ;;
+ -k|--kubernetes)
+ echo "Kubernetes environment is set!"
+ K8S_ENV="y"
+ ;;
+ compile)
+ COMMAND="compile"
+ ;;
+ runtime_image)
+ COMMAND="runtime_image"
+ ;;
+ deploy)
+ COMMAND="deploy"
+ ;;
+ *)
+ echo "Unknown option/command ${opt}"
+ print_usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ "${COMMAND}" == "compile" ]; then
+ echo "PROX compile only..."
+ prox_compile
+elif [ "${COMMAND}" == "runtime_image" ]; then
+    echo "Runtime image installation and configuration..."
+ k8s_runtime_image
+elif [ "${COMMAND}" == "deploy" ]; then
+ [ ! -d ${BUILD_DIR} ] && ${SUDO} mkdir -p ${BUILD_DIR}
+ ${SUDO} chmod 0777 ${BUILD_DIR}
+
+ os_pkgs_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ k8s_os_cfg
+ else
+ os_cfg
+ fi
+
+ mblib_install
+ dpdk_install
+ prox_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ port_info_build
+ create_minimal_install
+ fi
+
+ cleanup
+else
+ print_usage
+fi
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
new file mode 100755
index 00000000..0bde3cc2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
@@ -0,0 +1,12 @@
+link="$(sudo ip -o link | grep MACADDRESS |cut -d":" -f 2)"
+if [ -n "$link" ];
+then
+ echo Need to bind
+ # Uncomment one of the following lines, depending on which driver
+ # you want to use: vfio-pci or igb_uio
+ #sudo /opt/rapid/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $(sudo /opt/rapid/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)
+ sudo driverctl set-override $(sudo ethtool -i $link |grep bus-info | cut -d" " -f 2) vfio-pci
+else
+ echo Assuming port is already bound to DPDK poll mode driver
+fi
+exit 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
new file mode 100755
index 00000000..e2266e58
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+PROX_DEPLOY_DIR="."
+PROX_IMAGE_NAME="rapid"
+RSA_KEY_FILE_NAME="rapid_rsa_key"
+
+DOCKERFILE="Dockerfile"
+DOCKER_REGISTRY="localhost:5000"
+
+USE_DOCKER_CACHE="n"
+
+IMAGE_BUILD_LOG="dockerimage-build.log"
+
+function create_ssh_key()
+{
+ if [ -f ./${RSA_KEY_FILE_NAME} ]; then
+ read -p "RSA key already exist! Do you want to remove it (yYnN)?" -n 1 -r
+
+ if [ "${REPLY}" == "y" ] || [ "${REPLY}" == "Y" ]; then
+ echo "Removing existing key..."
+ sleep 3
+
+ [ -f "./${RSA_KEY_FILE_NAME}" ] && rm -rf ./${RSA_KEY_FILE_NAME}
+ [ -f "./${RSA_KEY_FILE_NAME}.pub" ] && rm -rf ./${RSA_KEY_FILE_NAME}.pub
+ else
+ echo "Using existing key..."
+ return
+ fi
+ fi
+
+ echo "Generating new RSA key..."
+ ssh-keygen -t rsa -b 4096 -N "" -f ./${RSA_KEY_FILE_NAME}
+}
+
+function build_prox_image()
+{
+ if [ "${USE_DOCKER_CACHE}" == "y" ]; then
+ echo "Building image using cache..."
+ docker build --rm -t ${PROX_IMAGE_NAME}:latest -f ${DOCKERFILE} ${PROX_DEPLOY_DIR} 2>&1 | tee ./${IMAGE_BUILD_LOG}
+ else
+ echo "Building image without cache..."
+ docker build --no-cache --rm -t ${PROX_IMAGE_NAME}:latest -f ${DOCKERFILE} ${PROX_DEPLOY_DIR} 2>&1 | tee ./${IMAGE_BUILD_LOG}
+ fi
+}
+
+function save_prox_image()
+{
+ echo "Saving image ${PROX_IMAGE_NAME}:latest to ./${PROX_IMAGE_NAME}.tar"
+ docker save -o ./${PROX_IMAGE_NAME}.tar ${PROX_IMAGE_NAME}:latest
+}
+
+function load_prox_image()
+{
+ echo "Loading image ./${PROX_IMAGE_NAME}.tar"
+ docker load -i ./${PROX_IMAGE_NAME}.tar
+}
+
+function push_prox_image()
+{
+ docker tag ${PROX_IMAGE_NAME}:latest ${DOCKER_REGISTRY}/${PROX_IMAGE_NAME}
+ docker push ${DOCKER_REGISTRY}/${PROX_IMAGE_NAME}
+}
+
+function print_help()
+{
+ echo "${0}: [build|load|push]"
+ echo " build: build and save image ${PROX_IMAGE_NAME}:latest using ${DOCKERFILE}"
+ echo " load: load saved image from ${PROX_IMAGE_NAME}.tar file in the local registry"
+ echo " push: tag and push local ${PROX_IMAGE_NAME}:latest image in the ${DOCKER_REGISTRY}/${PROX_IMAGE_NAME} registry"
+}
+
+if [ "$1" == "build" ]; then
+ create_ssh_key
+ build_prox_image
+ save_prox_image
+elif [ "$1" == "load" ]; then
+ load_prox_image
+elif [ "$1" == "push" ]; then
+ push_prox_image
+else
+ print_help
+fi
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
new file mode 100644
index 00000000..8dcb09ba
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
@@ -0,0 +1,105 @@
+;Format: PushGateway
+;Format: Xtesting
+;URL:
+ part1: http://testresults.opnfv.org/test/api/v1/results
+;URL:
+ part1: http://192.168.36.61:9091/metrics/job/
+ part2: test
+ part3: /instance/
+ part4: environment_file
+;FlowsizeTest:
+ Flows: Flows
+ Size: Size
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+FlowSizeTest:
+ Environment: environment_file
+ Test: test
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ Latency (usec):
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Distribution:
+ bucket_size: bucket_size
+ buckets: buckets
+ Absolute Packet Count:
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Re-ordering:
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+IrqTest:
+ Environment: environment_file
+ Test: test
+ Buckets: buckets
+ Machine_data: machine_data
+ImpairTest:
+ Environment: environment_file
+ Test: test
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ Latency (usec):
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Distribution:
+ bucket_size: bucket_size
+ buckets: buckets
+ Absolute Packet Count:
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Re-ordering:
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+CoreStatsTest:
+ Environment: environment_file
+ Test: test
+ PROXID: PROXID
+ StepSize: StepSize
+ Received: Received
+ Sent: Sent
+ NonDPReceived: NonDPReceived
+ NonDPSent: NonDPSent
+ Dropped: Dropped
+PortStatsTest:
+ Environment: environment_file
+ Test: test
+ PROXID: PROXID
+ StepSize: StepSize
+ Received: Received
+ Sent: Sent
+ NoMbufs: NoMbufs
+ iErrMiss: iErrMiss
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua b/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
new file mode 100644
index 00000000..a5633409
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
@@ -0,0 +1,77 @@
+--
+-- Copyright (c) 2020 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+function convertIPToHex(ip)
+ local address_chunks = {}
+ if type(ip) ~= "string" then
+ print ("IP ADDRESS ERROR: ", ip)
+ return "IP ADDRESS ERROR"
+ end
+
+ local chunks = {ip:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)(/%d+)$")}
+ if #chunks == 5 then
+ for i,v in ipairs(chunks) do
+ if i < 5 then
+ if tonumber(v) > 255 then
+ print ("IPV4 ADDRESS ERROR: ", ip)
+ return "IPV4 ADDRESS ERROR"
+ end
+ address_chunks[#address_chunks + 1] = string.format ("%02x", v)
+ end
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV4: ", result)
+ return result
+ end
+
+ local chunks = {ip:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)$")}
+ if #chunks == 4 then
+ for i,v in ipairs(chunks) do
+ if tonumber(v) > 255 then
+ print ("IPV4 ADDRESS ERROR: ", ip)
+ return "IPV4 ADDRESS ERROR"
+ end
+ address_chunks[#address_chunks + 1] = string.format ("%02x", v)
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV4: ", result)
+ return result
+ end
+
+ delimiter = ":"
+ for match in (ip..delimiter):gmatch("(.-)"..delimiter) do
+ if match ~= "" then
+ number = tonumber(match, 16)
+ if number <= 65535 then
+ table.insert(address_chunks, string.format("%02x %02x",number/256,number % 256))
+ end
+ else
+ table.insert(address_chunks, "")
+ end
+ end
+ for i, chunk in ipairs(address_chunks) do
+ if chunk =="" then
+ table.remove(address_chunks, i)
+ for j = 1,(8-#address_chunks) do
+ table.insert(address_chunks, i, "00 00")
+ end
+ break
+ end
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV6: ", result)
+ return result
+end
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/devbind.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
index adc184e3..38bc5a7e 100755..100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/devbind.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
@@ -1,7 +1,5 @@
-#!/bin/bash
-
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2010-2019 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -15,12 +13,21 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+## This file contains the mapping for each test machine. The test machine will
+## be deployed on a machine defined in the *.env file, as defined by the
+## machine_index
+
+[DEFAULT]
+machine_index=0
+
+[TestM1]
+machine_index=1
+
+[TestM2]
+machine_index=2
+
+[TestM3]
+machine_index=3
-link="$(ip -o link | grep MACADDRESS |cut -d":" -f 2)"
-if [ -n "$link" ];
-then
- echo Need to bind
- /root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $(/root/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)
-else
- echo Assuming port is already bound to DPDK
-fi
+[TestM4]
+machine_index=4
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
new file mode 100644
index 00000000..1cc11e04
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
@@ -0,0 +1,168 @@
+heat_template_version: 2015-10-15
+
+description: >
+ Template for deploying n PROX instances. The template allows for deploying
+ multiple groups of PROX VMs. You can create a first group with certain
+ flavors, availability groups, etc... Another group can be created with
+ different characteristics.
+
+parameters:
+  public_net_name: {description: Public network to allocate (floating) IPs to VMs, type: string, default: admin_floating_net}
+ mgmt_net_name: {description: Name of PROX mgmt network to be created, type: string, default: admin_internal_net}
+ PROX_image: {description: Image name to use for PROX, type: string, default: rapidVM}
+ PROX_key: {description: DO NOT CHANGE THIS DEFAULT KEY NAME, type: string, default: rapid_rsa_key}
+ my_availability_zone: {description: availability_zone for Hosting VMs, type: string, default: nova}
+ security_group: {description: Security Group to use, type: string, default: prox_security_group}
+ PROXType1VM_count: {description: Total number of testVMs to create, type: number, default: 2}
+ PROXType2VM_count: {description: Total number of testVMs type 2 to create, type: number, default: 1}
+ PROXType3VM_count: {description: Total number of testVMs type 3 to create, type: number, default: 1}
+
+# The following parameters are not used, but are here in case you want to also
+# create the management and dataplane networks in this template
+ mgmt_net_cidr: {description: PROX mgmt network CIDR, type: string, default: 20.20.1.0/24}
+ mgmt_net_gw: {description: PROX mgmt network gateway address, type: string, default: 20.20.1.1}
+ mgmt_net_pool_start: {description: Start of mgmt network IP address allocation pool, type: string, default: 20.20.1.100}
+ mgmt_net_pool_end: {description: End of mgmt network IP address allocation pool, type: string, default: 20.20.1.200}
+ data_net_name: {description: Name of PROX private network to be created, type: string, default: dataplane-network}
+ data_net_cidr: {description: PROX private network CIDR,type: string, default: 30.30.1.0/24}
+ data_net_pool_start: {description: Start of private network IP address allocation pool, type: string, default: 30.30.1.100}
+ data_net_pool_end: {description: End of private network IP address allocation pool, type: string, default: 30.30.1.200}
+ data2_net_name: {description: Name of PROX private network 2 to be created, type: string, default: data2}
+ dns:
+ type: comma_delimited_list
+ label: DNS nameservers
+ description: Comma separated list of DNS nameservers for the management network.
+ default: '8.8.8.8'
+
+resources:
+ PROXType1VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType1VM_count }
+ resource_def:
+ type: rapid-openstack-server.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidVM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ PROXType2VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType2VM_count }
+ resource_def:
+ type: rapid-openstack-server-2ports.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidType2VM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data_net_name}
+ PROX_data2_net_id: {get_param: data2_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ PROXType3VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType3VM_count }
+ resource_def:
+ type: rapid-openstack-server.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidType3VM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data2_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ MyConfig:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ users:
+ - default
+ - name: rapid
+ groups: "users,root"
+ lock-passwd: false
+ passwd: 'test'
+ shell: "/bin/bash"
+ sudo: "ALL=(ALL) NOPASSWD:ALL"
+ ssh_pwauth: true
+ chpasswd:
+ list: |
+ rapid:rapid
+ expire: False
+ write_files:
+ - path: /opt/rapid/after_boot_do_not_run.sh
+ # - path: /opt/rapid/after_boot.sh
+ # after_boot.sh is ran by check_prox_system_setup.sh, if it exists
+ # This can be used to fix some issues, like in the example below
+ # Remove this section or rename the file, if you do not want to run
+ # this after booting
+ # The code below is just an example of what could be ran after boot
+ content: |
+ OLDIFS="${IFS}"
+ IFS=$'\n'
+ list="$(ip route | grep via | grep -v 'dev eth0')"
+ # Delete all routes using gateway on other interfaces than eth0
+ for item in ${list}
+ do /bin/bash -c "sudo ip route del ${item}"
+ done
+ # Make sure to replace the IP address with your gateway
+ /bin/bash -c "sudo ip route add default via 10.6.6.1 dev eth0"
+ /bin/bash -c "echo nameserver 8.8.8.8 > /etc/resolv.conf"
+ IFS="${OLDIFS}"
+ permissions: '0777'
+
+outputs:
+ number_of_servers:
+    description: List of numbers of PROX instances
+ value:
+ - {get_param: PROXType1VM_count}
+ - {get_param: PROXType2VM_count}
+ - {get_param: PROXType3VM_count}
+ server_name:
+ description: List of list of names of the PROX instances
+ value:
+ - {get_attr: [PROXType1VMs, name]}
+ - {get_attr: [PROXType2VMs, name]}
+ - {get_attr: [PROXType3VMs, name]}
+ mngmt_ips:
+ description: List of list of Management IPs of the VMs
+ value:
+ - {get_attr: [PROXType1VMs, mngmt_ip]}
+ - {get_attr: [PROXType2VMs, mngmt_ip]}
+ - {get_attr: [PROXType3VMs, mngmt_ip]}
+ data_plane_ips:
+ description: List of list of list of DataPlane IPs of the VMs
+ value:
+ - {get_attr: [PROXType1VMs, data_plane_ips]}
+ - {get_attr: [PROXType2VMs, data_plane_ips]}
+ - {get_attr: [PROXType3VMs, data_plane_ips]}
+ data_plane_macs:
+ description: List of list of list of DataPlane MACs of the VMs
+ value:
+ - {get_attr: [PROXType1VMs, data_plane_mac]}
+ - {get_attr: [PROXType2VMs, data_plane_mac]}
+ - {get_attr: [PROXType3VMs, data_plane_mac]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
new file mode 100644
index 00000000..fbef2f54
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
@@ -0,0 +1,10 @@
+parameters:
+ public_net_name: admin_floating_net
+ data_net_name: dataplane-network
+ PROX_image: rapidVM
+ PROX_key: rapid_rsa_key
+ my_availability_zone: nova
+ security_group: prox_security_group
+ PROXType1VM_count: 3
+ PROXType2VM_count: 0
+ PROXType3VM_count: 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
new file mode 100644
index 00000000..9e269f60
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-rapid-
+ annotations:
+ k8s.v1.cni.cncf.io/networks: intel-sriov-vfio
+spec:
+ containers:
+ - name: pod-rapid
+ image: opnfv/rapid:latest
+ imagePullPolicy: Always
+ securityContext:
+ capabilities:
+ add: ["IPC_LOCK", "NET_ADMIN"]
+ volumeMounts:
+ - mountPath: /dev/hugepages
+ name: hugepages
+ resources:
+ requests:
+ hugepages-2Mi: 1Gi
+ memory: 1Gi
+ cpu: 8
+ intel.com/intel_sriov_vfio: '1'
+ limits:
+ hugepages-2Mi: 1Gi
+ memory: 1Gi
+ cpu: 8
+ intel.com/intel_sriov_vfio: '1'
+ volumes:
+ - name: hugepages
+ emptyDir:
+ medium: HugePages
+ restartPolicy: Never
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile
new file mode 100644
index 00000000..f91cf156
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile
@@ -0,0 +1,42 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = port_info_app
+
+# all source are stored in SRCS-y
+SRCS-y := port_info.c
+
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+EXTRA_CFLAGS += -O3 -g -Wfatal-errors
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build
new file mode 100644
index 00000000..f2efd667
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build
@@ -0,0 +1,101 @@
+##
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+project('port-info', 'C',
+ version:
+ run_command(['git', 'describe',
+ '--abbrev=8', '--dirty', '--always']).stdout().strip(),
+ license: 'Apache',
+ default_options: ['buildtype=release', 'c_std=gnu99'],
+ meson_version: '>= 0.47'
+)
+
+cc = meson.get_compiler('c')
+
+# Configure options for prox
+# Grab the DPDK version here "manually" as it is not available in the dpdk_dep
+# object
+dpdk_version = run_command('pkg-config', '--modversion', 'libdpdk').stdout()
+
+
+cflags = [
+ '-DPROGRAM_NAME="port_info_app"',
+ '-fno-stack-protector',
+ '-DGRE_TP',
+ '-D_GNU_SOURCE'] # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+# Add configured cflags to arguments
+foreach arg: cflags
+ add_project_arguments(arg, language: 'c')
+endforeach
+
+# enable warning flags if they are supported by the compiler
+warning_flags = [
+ '-Wno-unused',
+ '-Wno-unused-parameter',
+ '-Wno-unused-result',
+ '-Wno-deprecated-declarations']
+
+foreach arg: warning_flags
+ if cc.has_argument(arg)
+ add_project_arguments(arg, language: 'c')
+ endif
+endforeach
+
+has_sym_args = [
+ [ 'HAVE_LIBEDIT_EL_RFUNC_T', 'histedit.h',
+ 'el_rfunc_t' ],
+]
+config = configuration_data()
+foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+endforeach
+configure_file(output : 'libedit_autoconf.h', configuration : config)
+
+# All other dependencies
+dpdk_dep = dependency('libdpdk', required: true)
+tinfo_dep = dependency('tinfo', required: false)
+threads_dep = dependency('threads', required: true)
+pcap_dep = dependency('pcap', required: true)
+libedit_dep = dependency('libedit', required: true)
+math_dep = cc.find_library('m', required : false)
+dl_dep = cc.find_library('dl', required : true)
+
+deps = [dpdk_dep,
+ tinfo_dep,
+ threads_dep,
+ pcap_dep,
+ libedit_dep,
+ math_dep,
+ dl_dep]
+
+# Explicitly add these to the dependency list
+deps += [cc.find_library('rte_bus_pci', required: true)]
+deps += [cc.find_library('rte_bus_vdev', required: true)]
+
+if dpdk_version.version_compare('<20.11.0')
+deps += [cc.find_library('rte_pmd_ring', required: true)]
+else
+deps += [cc.find_library('rte_net_ring', required: true)]
+endif
+
+sources = files(
+ 'port_info.c')
+
+executable('port_info_app',
+ sources,
+ c_args: cflags,
+ dependencies: deps,
+ install: true)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
new file mode 100644
index 00000000..917c0636
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
@@ -0,0 +1,70 @@
+/*
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_version.h>
+
+static const uint16_t rx_rings = 1, tx_rings = 1;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+static const struct rte_eth_conf port_conf = { .link_speeds = ETH_LINK_SPEED_AUTONEG };
+#else
+static const struct rte_eth_conf port_conf = { .link_speeds = RTE_ETH_LINK_SPEED_AUTONEG };
+#endif
+
+static inline int
+port_info(void)
+{
+ uint8_t port_id;
+ int ret_val;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ ret_val = rte_eth_dev_configure(port_id, rx_rings, tx_rings, &port_conf);
+ if (ret_val != 0)
+ return ret_val;
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
+ struct ether_addr addr;
+#else
+ struct rte_ether_addr addr;
+#endif
+ rte_eth_macaddr_get(port_id, &addr);
+ printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8
+ ":%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
+ (unsigned) port_id,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+ }
+
+ return 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+ /* Initialize the Environment Abstraction Layer (EAL). */
+ int ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+ argc -= ret;
+ argv += ret;
+
+ return port_info();
+}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
new file mode 100644
index 00000000..8754ebc4
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
@@ -0,0 +1,293 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from __future__ import print_function
+from __future__ import division
+
+from builtins import map
+from builtins import range
+from past.utils import old_div
+from builtins import object
+import os
+import time
+import subprocess
+import socket
+from rapid_log import RapidLog
+from rapid_sshclient import SSHClient
+
+class prox_ctrl(object):
+ def __init__(self, ip, key=None, user=None, password = None):
+ self._ip = ip
+ self._key = key
+ self._user = user
+ self._password = password
+ self._proxsock = []
+ self._sshclient = SSHClient(ip = ip, user = user, password = password,
+ rsa_private_key = key, timeout = None)
+
+ def ip(self):
+ return self._ip
+
+ def test_connection(self):
+ attempts = 1
+ RapidLog.debug("Trying to connect to machine \
+ on %s, attempt: %d" % (self._ip, attempts))
+ while True:
+ try:
+ if (self.run_cmd('test -e /opt/rapid/system_ready_for_rapid \
+ && echo exists')):
+ break
+ time.sleep(2)
+ except RuntimeWarning as ex:
+ RapidLog.debug("RuntimeWarning %d:\n%s"
+ % (ex.returncode, ex.output.strip()))
+ attempts += 1
+ if attempts > 20:
+ RapidLog.exception("Failed to connect to instance after %d\
+ attempts:\n%s" % (attempts, ex))
+ time.sleep(2)
+ RapidLog.debug("Trying to connect to machine \
+ on %s, attempt: %d" % (self._ip, attempts))
+ RapidLog.debug("Connected to machine on %s" % self._ip)
+
+ def connect_socket(self):
+ attempts = 1
+ RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
+ attempt: %d" % (self._ip, attempts))
+ sock = None
+ while True:
+ sock = self.prox_sock()
+ if sock is not None:
+ break
+ attempts += 1
+ if attempts > 20:
+ RapidLog.exception("Failed to connect to PROX on %s after %d \
+ attempts" % (self._ip, attempts))
+ time.sleep(2)
+ RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
+ attempt: %d" % (self._ip, attempts))
+ RapidLog.info("Connected to PROX on %s" % self._ip)
+ return sock
+
+ def close(self):
+ for sock in self._proxsock:
+ sock.quit()
+
+ def run_cmd(self, command):
+ self._sshclient.run_cmd(command)
+ return self._sshclient.get_output()
+
+ def prox_sock(self, port=8474):
+ """Connect to the PROX instance on remote system.
+ Return a prox_sock object on success, None on failure.
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect((self._ip, port))
+ prox = prox_sock(sock)
+ self._proxsock.append(prox)
+ return prox
+ except:
+ return None
+
+ def scp_put(self, src, dst):
+ self._sshclient.scp_put(src, dst)
+ RapidLog.info("Copying from {} to {}:{}".format(src, self._ip, dst))
+
+ def scp_get(self, src, dst):
+ self._sshclient.scp_get('/home/' + self._user + src, dst)
+ RapidLog.info("Copying from {}:/home/{}{} to {}".format(self._ip,
+ self._user, src, dst))
+
+class prox_sock(object):
+ def __init__(self, sock):
+ self._sock = sock
+ self._rcvd = b''
+
+ def __del__(self):
+ if self._sock is not None:
+ self._sock.close()
+ self._sock = None
+
+ def start(self, cores):
+ self._send('start %s' % ','.join(map(str, cores)))
+
+ def stop(self, cores):
+ self._send('stop %s' % ','.join(map(str, cores)))
+
+ def speed(self, speed, cores, tasks=[0]):
+ for core in cores:
+ for task in tasks:
+ self._send('speed %s %s %s' % (core, task, speed))
+
+ def reset_stats(self):
+ self._send('reset stats')
+
+ def lat_stats(self, cores, tasks=[0]):
+ result = {}
+ result['lat_min'] = 999999999
+ result['lat_max'] = result['lat_avg'] = 0
+ result['buckets'] = [0] * 128
+ result['mis_ordered'] = 0
+ result['extent'] = 0
+ result['duplicate'] = 0
+ number_tasks_returning_stats = 0
+ self._send('lat all stats %s %s' % (','.join(map(str, cores)),
+ ','.join(map(str, tasks))))
+ for core in cores:
+ for task in tasks:
+ stats = self._recv().split(',')
+ if 'is not measuring' in stats[0]:
+ continue
+ if stats[0].startswith('error'):
+ RapidLog.critical("lat stats error: unexpected reply from PROX\
+ (potential incompatibility between scripts and PROX)")
+ raise Exception("lat stats error")
+ number_tasks_returning_stats += 1
+ result['lat_min'] = min(int(stats[0]),result['lat_min'])
+ result['lat_max'] = max(int(stats[1]),result['lat_max'])
+ result['lat_avg'] += int(stats[2])
+ #min_since_begin = int(stats[3])
+ #max_since_begin = int(stats[4])
+ result['lat_tsc'] = int(stats[5])
+ # Taking the last tsc as the timestamp since
+ # PROX will return the same tsc for each
+ # core/task combination
+ result['lat_hz'] = int(stats[6])
+ #coreid = int(stats[7])
+ #taskid = int(stats[8])
+ result['mis_ordered'] += int(stats[9])
+ result['extent'] += int(stats[10])
+ result['duplicate'] += int(stats[11])
+ stats = self._recv().split(':')
+ if stats[0].startswith('error'):
+ RapidLog.critical("lat stats error: unexpected lat bucket \
+ reply (potential incompatibility between scripts \
+ and PROX)")
+ raise Exception("lat bucket reply error")
+ result['buckets'][0] = int(stats[1])
+ for i in range(1, 128):
+ stats = self._recv().split(':')
+ result['buckets'][i] += int(stats[1])
+ result['lat_avg'] = old_div(result['lat_avg'],
+ number_tasks_returning_stats)
+ self._send('stats latency(0).used')
+ used = float(self._recv())
+ self._send('stats latency(0).total')
+ total = float(self._recv())
+ result['lat_used'] = old_div(used,total)
+ return (result)
+
+ def irq_stats(self, core, bucket, task=0):
+ self._send('stats task.core(%s).task(%s).irq(%s)' %
+ (core, task, bucket))
+ stats = self._recv().split(',')
+ return int(stats[0])
+
+ def show_irq_buckets(self, core, task=0):
+ rx = tx = drop = tsc = hz = 0
+ self._send('show irq buckets %s %s' % (core,task))
+ buckets = self._recv().split(';')
+ buckets = buckets[:-1]
+ return buckets
+
+ def core_stats(self, cores, tasks=[0]):
+ rx = tx = drop = tsc = hz = rx_non_dp = tx_non_dp = tx_fail = 0
+ self._send('dp core stats %s %s' % (','.join(map(str, cores)),
+ ','.join(map(str, tasks))))
+ for core in cores:
+ for task in tasks:
+ stats = self._recv().split(',')
+ if stats[0].startswith('error'):
+ if stats[0].startswith('error: invalid syntax'):
+ RapidLog.critical("dp core stats error: unexpected \
+ invalid syntax (potential incompatibility \
+ between scripts and PROX)")
+ raise Exception("dp core stats error")
+ continue
+ rx += int(stats[0])
+ tx += int(stats[1])
+ rx_non_dp += int(stats[2])
+ tx_non_dp += int(stats[3])
+ drop += int(stats[4])
+ tx_fail += int(stats[5])
+ tsc = int(stats[6])
+ hz = int(stats[7])
+ return rx, rx_non_dp, tx, tx_non_dp, drop, tx_fail, tsc, hz
+
+ def multi_port_stats(self, ports=[0]):
+ rx = tx = port_id = tsc = no_mbufs = errors = 0
+ self._send('multi port stats %s' % (','.join(map(str, ports))))
+ result = self._recv().split(';')
+ if result[0].startswith('error'):
+ RapidLog.critical("multi port stats error: unexpected invalid \
+ syntax (potential incompatibility between scripts and \
+ PROX)")
+ raise Exception("multi port stats error")
+ for statistics in result:
+ stats = statistics.split(',')
+ port_id = int(stats[0])
+ rx += int(stats[1])
+ tx += int(stats[2])
+ no_mbufs += int(stats[3])
+ errors += int(stats[4])
+ tsc = int(stats[5])
+ return rx, tx, no_mbufs, errors, tsc
+
+ def set_random(self, cores, task, offset, mask, length):
+ self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)),
+ task, offset, mask, length))
+
+ def set_size(self, cores, task, pkt_size):
+ self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task,
+ pkt_size))
+
+ def set_imix(self, cores, task, imix):
+ self._send('imix %s %s %s' % (','.join(map(str, cores)), task,
+ ','.join(map(str,imix))))
+
+ def set_value(self, cores, task, offset, value, length):
+ self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)),
+ task, offset, value, length))
+
+ def quit_prox(self):
+ self._send('quit')
+
+ def _send(self, cmd):
+ """Append LF and send command to the PROX instance."""
+ if self._sock is None:
+ raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd)
+ try:
+ self._sock.sendall(cmd.encode() + b'\n')
+ except ConnectionResetError as e:
+ RapidLog.error('Pipe reset by Prox instance: traffic too high?')
+ raise
+
+ def _recv(self):
+ """Receive response from PROX instance, return it with LF removed."""
+ if self._sock is None:
+ raise RuntimeError("PROX socket closed, cannot receive anymore")
+ try:
+ pos = self._rcvd.find(b'\n')
+ while pos == -1:
+ self._rcvd += self._sock.recv(256)
+ pos = self._rcvd.find(b'\n')
+ rsp = self._rcvd[:pos]
+ self._rcvd = self._rcvd[pos+1:]
+ except ConnectionResetError as e:
+ RapidLog.error('Pipe reset by Prox instance: traffic too high?')
+ raise
+ return rsp.decode()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml b/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml
new file mode 100644
index 00000000..374b58cb
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools>=42",
+ "wheel"
+]
+build-backend = "setuptools.build_meta"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml
new file mode 100644
index 00000000..e1095fbd
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml
@@ -0,0 +1,94 @@
+heat_template_version: 2014-10-16
+
+description: single server resource with 2 dataplane ports used by resource groups.
+
+parameters:
+ PROX_public_net:
+ type: string
+ PROX_mgmt_net_id:
+ type: string
+ PROX_data_net_id:
+ type: string
+ PROX_data2_net_id:
+ type: string
+ PROX_server_name:
+ type: string
+ PROX_availability_zone:
+ type: string
+ PROX_security_group:
+ type: string
+ PROX_image:
+ type: string
+ PROX_key:
+ type: string
+ PROX_config:
+ type: string
+
+resources:
+ PROX_instance:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: PROX_server_name }
+ availability_zone : {get_param: PROX_availability_zone}
+ flavor: {get_resource: PROX_flavor}
+ image: {get_param: PROX_image}
+ key_name: {get_param: PROX_key}
+ networks:
+ - port: {get_resource: mgmt_port }
+ - port: {get_resource: data_port }
+ - port: {get_resource: data2_port }
+ user_data: {get_param: PROX_config}
+ user_data_format: RAW
+
+ PROX_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 4096
+ vcpus: 4
+ disk: 80
+ extra_specs: {"hw:mem_page_size": "large","hw:cpu_policy": "dedicated","hw:cpu_thread_policy":"isolate"}
+
+ mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_mgmt_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: PROX_public_net}
+ port_id: {get_resource: mgmt_port}
+
+ data_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ data2_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data2_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+outputs:
+ name:
+ description: Name of the PROX instance
+ value: {get_attr: [PROX_instance, name]}
+ mngmt_ip:
+ description: Management IP of the VM
+ value: {get_attr: [floating_ip, floating_ip_address ]}
+ data_plane_ips:
+ description: List of DataPlane IPs of the VM
+ value:
+ - {get_attr: [data_port, fixed_ips, 0, ip_address]}
+ - {get_attr: [data2_port, fixed_ips, 0, ip_address]}
+ data_plane_mac:
+ description: List of DataPlane MACs of the VM
+ value:
+ - {get_attr: [data_port, mac_address]}
+ - {get_attr: [data2_port, mac_address]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml
new file mode 100644
index 00000000..84311e25
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml
@@ -0,0 +1,82 @@
+heat_template_version: 2014-10-16
+
+description: single server resource used by resource groups.
+
+parameters:
+ PROX_public_net:
+ type: string
+ PROX_mgmt_net_id:
+ type: string
+ PROX_data_net_id:
+ type: string
+ PROX_server_name:
+ type: string
+ PROX_availability_zone:
+ type: string
+ PROX_security_group:
+ type: string
+ PROX_image:
+ type: string
+ PROX_key:
+ type: string
+ PROX_config:
+ type: string
+
+resources:
+ PROX_instance:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: PROX_server_name }
+ availability_zone : {get_param: PROX_availability_zone}
+ flavor: {get_resource: PROX_flavor}
+ image: {get_param: PROX_image}
+ key_name: {get_param: PROX_key}
+ networks:
+ - port: {get_resource: mgmt_port }
+ - port: {get_resource: data_port }
+ user_data: {get_param: PROX_config}
+ user_data_format: RAW
+
+ PROX_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 4096
+ vcpus: 4
+ disk: 80
+ extra_specs: {"hw:mem_page_size": "large","hw:cpu_policy": "dedicated","hw:cpu_thread_policy":"isolate"}
+
+ mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_mgmt_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: PROX_public_net}
+ port_id: {get_resource: mgmt_port}
+
+ data_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+outputs:
+ name:
+ description: Name of the PROX instance
+ value: {get_attr: [PROX_instance, name]}
+ mngmt_ip:
+ description: Management IP of the VM
+ value: {get_attr: [floating_ip, floating_ip_address ]}
+ data_plane_ips:
+ description: List of DataPlane IPs of the VM
+ value:
+ - {get_attr: [data_port, fixed_ips, 0, ip_address]}
+ data_plane_mac:
+ description: List of DataPlane MACs of the VM
+ value:
+ - {get_attr: [data_port, mac_address]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
index f211934a..cd54d507 100755..100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
@@ -1,7 +1,5 @@
-#!/bin/bash
-
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2019 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -16,8 +14,16 @@
## limitations under the License.
##
-echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
-mount -t hugetlbfs nodev /mnt/huge
-modprobe uio
-insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-iptables -F
+[DEFAULT]
+total_number_of_pods=2
+namespace=rapid-testing
+
+[POD1]
+nodeSelector_hostname=k8s-node1
+dp_ip=192.168.30.11
+dp_subnet=24
+
+[POD2]
+nodeSelector_hostname=k8s-node2
+dp_ip=192.168.30.12
+dp_subnet=24
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
new file mode 100644
index 00000000..d103deba
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import getopt
+import sys
+
+class RapidCli(object):
+ """
+ Class to deal with runrapid cli
+ """
+ @staticmethod
+ def usage(test_params):
+ print("usage: runrapid [--version] [-v]")
+ print(" [--env ENVIRONMENT_NAME]")
+ print(" [--test TEST_NAME]")
+ print(" [--map MACHINE_MAP_FILE]")
+ print(" [--runtime TIME_FOR_TEST]")
+ print(" [--configonly False|True]")
+ print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
+ print(" [-h] [--help]")
+ print("")
+ print("Command-line interface to runrapid")
+ print("")
+ print("optional arguments:")
+ print(" -v, --version Show program's version number and exit")
+ print(" --env ENVIRONMENT_NAME Parameters will be read from ENVIRONMENT_NAME. Default is %s."%test_params['environment_file'])
+ print(" --test TEST_NAME Test cases will be read from TEST_NAME. Default is %s."%test_params['test_file'])
+ print(" --map MACHINE_MAP_FILE Machine mapping will be read from MACHINE_MAP_FILE. Default is %s."%test_params['machine_map_file'])
+ print(" --map INDEX_LIST This parameter can also be a list of indices, e.g. [2,3]")
+ print(" --runtime Specify time in seconds for 1 test run")
+ print(" --configonly If this option is specified, only upload all config files to the VMs, do not run the tests")
+ print(" --log Specify logging level for log file output, default is DEBUG")
+ print(" --screenlog Specify logging level for screen output, default is INFO")
+ print(" -h, --help Show help message and exit.")
+ print("")
+
+ @staticmethod
+ def process_cli(test_params):
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "env=", "test=", "map=", "runtime=","configonly","log=","screenlog="])
+ except getopt.GetoptError as err:
+ print("===========================================")
+ print(str(err))
+ print("===========================================")
+ RapidCli.usage(test_params)
+ sys.exit(2)
+ if args:
+ RapidCli.usage(test_params)
+ sys.exit(2)
+ for opt, arg in opts:
+ if opt in ["-h", "--help"]:
+ RapidCli.usage(test_params)
+ sys.exit()
+ if opt in ["-v", "--version"]:
+ print("Rapid Automated Performance Indication for Dataplane "+test_params['version'])
+ sys.exit()
+ if opt in ["--env"]:
+ test_params['environment_file'] = arg
+ if opt in ["--test"]:
+ test_params['test_file'] = arg
+ if opt in ["--map"]:
+ test_params['machine_map_file'] = arg
+ if opt in ["--runtime"]:
+ test_params['runtime'] = int(arg)
+ if opt in ["--configonly"]:
+ test_params['configonly'] = True
+ print('No actual runs, only uploading configuration files')
+ if opt in ["--log"]:
+ test_params['loglevel'] = arg
+ print ("Log level: "+ test_params['loglevel'])
+ if opt in ["--screenlog"]:
+ test_params['screenloglevel'] = arg
+ print ("Screen Log level: "+ test_params['screenloglevel'])
+ print ("Using '"+test_params['environment_file']+"' as name for the environment")
+ print ("Using '"+test_params['test_file']+"' for test case definition")
+ print ("Using '"+test_params['machine_map_file']+"' for machine mapping")
+ print ("Runtime: "+ str(test_params['runtime']))
+ return(test_params)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
new file mode 100644
index 00000000..e6a7f517
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class CoreStatsTest(RapidTest):
+ """
+ Class to manage the corestatstesting
+ """
+ def __init__(self, test_param, runtime, testname, environment_file,
+ machines):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.machines = machines
+
+ def run(self):
+ result_details = {'Details': 'Nothing'}
+ RapidLog.info("+------------------------------------------------------------------------------------------------------------------+")
+ RapidLog.info("| Measuring core statistics on 1 or more PROX instances |")
+ RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ RapidLog.info("| PROX ID | Time | RX | TX | non DP RX | non DP TX | TX - RX | nonDP TX-RX| DROP TOT |")
+ RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ duration = self.test['runtime']
+ tot_drop = []
+ old_rx = []; old_non_dp_rx = []; old_tx = []; old_non_dp_tx = []; old_drop = []; old_tx_fail = []; old_tsc = []
+ new_rx = []; new_non_dp_rx = []; new_tx = []; new_non_dp_tx = []; new_drop = []; new_tx_fail = []; new_tsc = []
+ machines_to_go = len (self.machines)
+ for machine in self.machines:
+ machine.reset_stats()
+ tot_drop.append(0)
+ old_rx.append(0); old_non_dp_rx.append(0); old_tx.append(0); old_non_dp_tx.append(0); old_drop.append(0); old_tx_fail.append(0); old_tsc.append(0)
+ old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[-1], old_drop[-1], old_tx_fail[-1], old_tsc[-1], tsc_hz = machine.core_stats()
+ new_rx.append(0); new_non_dp_rx.append(0); new_tx.append(0); new_non_dp_tx.append(0); new_drop.append(0); new_tx_fail.append(0); new_tsc.append(0)
+ while (duration > 0):
+ time.sleep(0.5)
+ # Get statistics after some execution time
+ for i, machine in enumerate(self.machines, start=0):
+ new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[i], new_drop[i], new_tx_fail[i], new_tsc[i], tsc_hz = machine.core_stats()
+ drop = new_drop[i]-old_drop[i]
+ rx = new_rx[i] - old_rx[i]
+ tx = new_tx[i] - old_tx[i]
+ non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
+ non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
+ tsc = new_tsc[i] - old_tsc[i]
+ if tsc == 0 :
+ continue
+ machines_to_go -= 1
+ old_drop[i] = new_drop[i]
+ old_rx[i] = new_rx[i]
+ old_tx[i] = new_tx[i]
+ old_non_dp_rx[i] = new_non_dp_rx[i]
+ old_non_dp_tx[i] = new_non_dp_tx[i]
+ old_tsc[i] = new_tsc[i]
+ tot_drop[i] = tot_drop[i] + tx - rx
+ RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
+ result_details = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'PROXID': i,
+ 'StepSize': duration,
+ 'Received': rx,
+ 'Sent': tx,
+ 'NonDPReceived': non_dp_rx,
+ 'NonDPSent': non_dp_tx,
+ 'Dropped': tot_drop[i]}
+ result_details = self.post_data(result_details)
+ if machines_to_go == 0:
+ duration = duration - 1
+ machines_to_go = len (self.machines)
+ RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ return (True, result_details)
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
new file mode 100644
index 00000000..27d2430d
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+class RapidDefaults(object):
+ """
+ Class to define the test defaults
+ """
+ test_params = {
+ 'version' : '2023.01.16', # Please do NOT change, used for debugging
+ 'environment_file' : 'rapid.env', #Default string for environment
+ 'test_file' : 'tests/basicrapid.test', #Default string for test
+ 'machine_map_file' : 'machine.map', #Default string for machine map file
+ 'loglevel' : 'DEBUG', # sets log level for writing to file
+ 'screenloglevel' : 'INFO', # sets log level for writing to screen
+ 'runtime' : 10, # time in seconds for 1 test run
+ 'configonly' : False, # If True, the system will upload all the necessary config files to the VMs, but not start PROX and the actual testing
+ 'rundir' : '/opt/rapid', # Directory where to find the tools in the machines running PROX
+ 'resultsdir' : '.', # Directory where to store log files
+ 'sleep_time' : 2, # Sleep time between two loop iteration. Minimum is 2 seconds. Might be useful to let SUT clean caches
+ 'lat_percentile' : 0.99
+ }
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
new file mode 100644
index 00000000..ea42fc9a
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+import sys
+import time
+import copy
+from math import ceil
+from statistics import mean
+from past.utils import old_div
+from rapid_log import RapidLog
+from rapid_log import bcolors
+from rapid_test import RapidTest
+inf = float("inf")
+
+class FlowSizeTest(RapidTest):
+ """
+ Class to manage the flowsizetesting
+ """
+ def __init__(self, test_param, lat_percentile, runtime, testname,
+ environment_file, gen_machine, sut_machine, background_machines, sleep_time):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.gen_machine = gen_machine
+ self.sut_machine = sut_machine
+ self.background_machines = background_machines
+ self.test['lat_percentile'] = lat_percentile
+ self.test['sleep_time'] = sleep_time
+ if self.test['test'] == 'TST009test':
+ # This test implements some of the testing as defined in
+ # https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
+ self.test['TST009_n'] = int(ceil(old_div(
+ self.test['maxframespersecondallingress'],
+ self.test['stepsize'])))
+ self.test['TST009'] = True
+ self.test['TST009_L'] = 0
+ self.test['TST009_R'] = self.test['TST009_n'] - 1
+ self.test['TST009_S']= []
+ for m in range(0, self.test['TST009_n']):
+ self.test['TST009_S'].append((m+1) * self.test['stepsize'])
+ elif self.test['test'] == 'fixed_rate':
+ for key in['drop_rate_threshold','lat_avg_threshold',
+ 'lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']:
+ self.test[key] = inf
+
+ def new_speed(self, speed,size,success):
+ if self.test['test'] == 'fixed_rate':
+ return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (speed + self.test['step'])
+ elif 'TST009' in self.test.keys():
+ if success:
+ self.test['TST009_L'] = self.test['TST009_m'] + 1
+ else:
+ self.test['TST009_R'] = max(self.test['TST009_m'] - 1,
+ self.test['TST009_L'])
+ self.test['TST009_m'] = int (old_div((self.test['TST009_L'] +
+ self.test['TST009_R']),2))
+ return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
+ else:
+ if success:
+ self.test['minspeed'] = speed
+ else:
+ self.test['maxspeed'] = speed
+ return (old_div((self.test['minspeed'] + self.test['maxspeed']),2.0))
+
+ def get_start_speed_and_init(self, size):
+ if self.test['test'] == 'fixed_rate':
+ return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (self.test['startspeed'])
+ elif 'TST009' in self.test.keys():
+ self.test['TST009_L'] = 0
+ self.test['TST009_R'] = self.test['TST009_n'] - 1
+ self.test['TST009_m'] = int(old_div((self.test['TST009_L'] +
+ self.test['TST009_R']), 2))
+ return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
+ else:
+ self.test['minspeed'] = 0
+ self.test['maxspeed'] = self.test['startspeed']
+ return (self.test['startspeed'])
+
+ def resolution_achieved(self):
+ if self.test['test'] == 'fixed_rate':
+ return (True)
+ elif 'TST009' in self.test.keys():
+ return (self.test['TST009_L'] == self.test['TST009_R'])
+ else:
+ return ((self.test['maxspeed'] - self.test['minspeed']) <= self.test['accuracy'])
+
+ def warm_up(self):
+ # Running at low speed to make sure the ARP messages can get through.
+ # If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
+ # Note however that if we would run the test steps during a very long time, the ARP would expire in the switch.
+        # PROX will send a new ARP request every second so chances are very low that they will all fail to get through
+ imix = self.test['warmupimix']
+ FLOWSIZE = self.test['warmupflowsize']
+ WARMUPSPEED = self.test['warmupspeed']
+ WARMUPTIME = self.test['warmuptime']
+
+ if WARMUPTIME == 0:
+ RapidLog.info(("Not Warming up"))
+ return
+
+ RapidLog.info(("Warming up during {} seconds..., packet size = {},"
+ " flows = {}, speed = {}").format(WARMUPTIME, imix, FLOWSIZE,
+ WARMUPSPEED))
+ self.gen_machine.set_generator_speed(WARMUPSPEED)
+ self.set_background_speed(self.background_machines, WARMUPSPEED)
+ self.gen_machine.set_udp_packet_size(imix)
+ self.set_background_size(self.background_machines, imix)
+ if FLOWSIZE:
+ _ = self.gen_machine.set_flows(FLOWSIZE)
+ self.set_background_flows(self.background_machines, FLOWSIZE)
+ self.gen_machine.start()
+ self.start_background_traffic(self.background_machines)
+ time.sleep(WARMUPTIME)
+ self.stop_background_traffic(self.background_machines)
+ self.gen_machine.stop()
+
+ def run(self):
+ result_details = {'Details': 'Nothing'}
+ TestResult = 0
+ end_data = {}
+ iteration_prefix = {}
+ self.warm_up()
+ for imix in self.test['imixs']:
+ size = mean(imix)
+ self.gen_machine.set_udp_packet_size(imix)
+ if self.background_machines:
+ backgroundinfo = ('{}Running {} x background traffic not '
+ 'represented in the table{}').format(bcolors.FLASH,
+ len(self.background_machines),bcolors.ENDC)
+ else:
+ backgroundinfo = '{}{}'.format(bcolors.FLASH,bcolors.ENDC)
+ self.set_background_size(self.background_machines, imix)
+ RapidLog.info('+' + '-' * 200 + '+')
+ RapidLog.info(("| UDP, {:>5} bytes, different number of flows by "
+ "randomizing SRC & DST UDP port. {:128.128}|").
+ format(round(size), backgroundinfo))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 11 + '+' + '-' * 4 + '+')
+ RapidLog.info(('| Flows | Speed requested | Gen by core | Sent by'
+ ' NIC | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f}'
+ ' Pcentil| Max. Lat.| Sent | Received | Lost | Total'
+ ' Lost|L.Ratio|Mis-ordered|Time').format(self.test['lat_percentile']*100))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 11 + '+' + '-' * 4 + '+')
+ for flow_number in self.test['flows']:
+ attempts = 0
+ self.gen_machine.reset_stats()
+ if self.sut_machine:
+ self.sut_machine.reset_stats()
+ if flow_number != 0:
+ flow_number = self.gen_machine.set_flows(flow_number)
+ self.set_background_flows(self.background_machines, flow_number)
+ end_data['speed'] = None
+ speed = self.get_start_speed_and_init(size)
+ while True:
+ attempts += 1
+ endwarning = False
+ print('{} flows: Measurement ongoing at speed: {}%'.format(
+ str(flow_number), str(round(speed, 2))), end=' \r')
+ sys.stdout.flush()
+ iteration_data = self.run_iteration(
+ float(self.test['runtime']),flow_number,size,speed)
+ if iteration_data['r'] > 1:
+ retry_warning = '{} {:1} retries needed{}'.format(
+ bcolors.WARNING, iteration_data['r'],
+ bcolors.ENDC)
+ else:
+ retry_warning = ''
+ # Drop rate is expressed in percentage. lat_used is a ratio
+ # (0 to 1). The sum of these 2 should be 100%.
+ # If the sum is lower than 95, it means that more than 5%
+                    # of the latency measurements were dropped for accuracy
+ # reasons.
+ if (iteration_data['drop_rate'] +
+ iteration_data['lat_used'] * 100) < 95:
+ lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
+ '{}').format(bcolors.WARNING,
+ iteration_data['lat_used'] * 100,
+ bcolors.ENDC)
+ else:
+ lat_warning = ''
+ iteration_prefix = {'speed' : bcolors.ENDC,
+ 'lat_avg' : bcolors.ENDC,
+ 'lat_perc' : bcolors.ENDC,
+ 'lat_max' : bcolors.ENDC,
+ 'abs_drop_rate' : bcolors.ENDC,
+ 'mis_ordered' : bcolors.ENDC,
+ 'drop_rate' : bcolors.ENDC}
+ if self.test['test'] == 'fixed_rate':
+ end_data = copy.deepcopy(iteration_data)
+ end_prefix = copy.deepcopy(iteration_prefix)
+ if lat_warning or retry_warning:
+ endwarning = '| | {:177.177} |'.format(
+ retry_warning + lat_warning)
+ success = True
+ # TestResult = TestResult + iteration_data['pps_rx']
+ # fixed rate testing result is strange: we just report
+ # the pps received
+ # The following if statement is testing if we pass the
+ # success criteria of a certain drop rate, average latency
+ # and maximum latency below the threshold.
+ # The drop rate success can be achieved in 2 ways: either
+                        # the drop rate is below a threshold, or we want that no
+ # packet has been lost during the test.
+ # This can be specified by putting 0 in the .test file
+ elif ((self.get_pps(speed,size) - iteration_data['pps_tx']) / self.get_pps(speed,size)) \
+ < self.test['generator_threshold'] and \
+ ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or \
+ (iteration_data['abs_dropped']==self.test['drop_rate_threshold']==0)) and \
+ (iteration_data['lat_avg']< self.test['lat_avg_threshold']) and \
+ (iteration_data['lat_perc']< self.test['lat_perc_threshold']) and \
+ (iteration_data['lat_max'] < self.test['lat_max_threshold'] and \
+ iteration_data['mis_ordered'] <= self.test['mis_ordered_threshold']):
+ end_data = copy.deepcopy(iteration_data)
+ end_prefix = copy.deepcopy(iteration_prefix)
+ success = True
+ success_message=' SUCCESS'
+ if (old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))>0.01:
+ iteration_prefix['speed'] = bcolors.WARNING
+ if iteration_data['abs_tx_fail'] > 0:
+ gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), iteration_data['pps_tx'], iteration_data['abs_tx_fail']) + bcolors.ENDC
+ else:
+ gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), iteration_data['pps_tx']) + bcolors.ENDC
+ endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) + success_message +
+ retry_warning + lat_warning + gen_warning)
+ break
+ else:
+ iteration_prefix['speed'] = bcolors.ENDC
+ gen_warning = ''
+ if lat_warning or retry_warning:
+ endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) + success_message +
+ retry_warning + lat_warning + gen_warning)
+ else:
+ success_message=' FAILED'
+ if ((iteration_data['abs_dropped']>0) and (self.test['drop_rate_threshold'] ==0)):
+ iteration_prefix['abs_drop_rate'] = bcolors.FAIL
+ if (iteration_data['drop_rate'] <= self.test['drop_rate_threshold']):
+ iteration_prefix['drop_rate'] = bcolors.ENDC
+ else:
+ iteration_prefix['drop_rate'] = bcolors.FAIL
+ if (iteration_data['lat_avg']< self.test['lat_avg_threshold']):
+ iteration_prefix['lat_avg'] = bcolors.ENDC
+ else:
+ iteration_prefix['lat_avg'] = bcolors.FAIL
+ if (iteration_data['lat_perc']< self.test['lat_perc_threshold']):
+ iteration_prefix['lat_perc'] = bcolors.ENDC
+ else:
+ iteration_prefix['lat_perc'] = bcolors.FAIL
+ if (iteration_data['lat_max']< self.test['lat_max_threshold']):
+ iteration_prefix['lat_max'] = bcolors.ENDC
+ else:
+ iteration_prefix['lat_max'] = bcolors.FAIL
+ if ((old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))<0.001):
+ iteration_prefix['speed'] = bcolors.ENDC
+ else:
+ iteration_prefix['speed'] = bcolors.FAIL
+ if (iteration_data['mis_ordered']< self.test['mis_ordered_threshold']):
+ iteration_prefix['mis_ordered'] = bcolors.ENDC
+ else:
+ iteration_prefix['mis_ordered'] = bcolors.FAIL
+
+ success = False
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) +
+ success_message + retry_warning + lat_warning)
+ speed = self.new_speed(speed, size, success)
+ if self.test['test'] == 'increment_till_fail':
+ if not success:
+ break
+ elif self.resolution_achieved():
+ break
+ if end_data['speed'] is None:
+ end_data = iteration_data
+ end_prefix = iteration_prefix
+ RapidLog.info('|{:>7} | {:<177} |'.format("FAILED","Speed 0 or close to 0, data for last failed step below:"))
+ RapidLog.info(self.report_result(flow_number, size,
+ end_data, end_prefix))
+ if end_data['avg_bg_rate']:
+ tot_avg_rx_rate = end_data['pps_rx'] + (end_data['avg_bg_rate'] * len(self.background_machines))
+ endtotaltrafficrate = '| | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(RapidTest.get_speed(tot_avg_rx_rate,size) , tot_avg_rx_rate, ' '*84)
+ RapidLog.info (endtotaltrafficrate)
+ if endwarning:
+ RapidLog.info (endwarning)
+ if self.test['test'] != 'fixed_rate':
+ TestResult = TestResult + end_data['pps_rx']
+ end_data['test'] = self.test['testname']
+ end_data['environment_file'] = self.test['environment_file']
+ end_data['Flows'] = flow_number
+ end_data['Size'] = size
+ end_data['RequestedSpeed'] = RapidTest.get_pps(end_data['speed'] ,size)
+ result_details = self.post_data(end_data)
+ RapidLog.debug(result_details)
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ return (TestResult, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
new file mode 100644
index 00000000..e52b17db
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from rapid_log import RapidLog
+from rapid_machine import RapidMachine
+from math import ceil, log2
+
+
+class RandomPortBits(object):
+ """
+ Class to generate PROX bitmaps for random bit generation
+ in source & dst UPD ports to emulate mutiple flows
+ """
+ @staticmethod
+ def get_bitmap(flow_number):
+ number_of_random_bits = ceil(log2(flow_number))
+ if number_of_random_bits > 30:
+ raise Exception("Not able to support that many flows")
+            # throw exception since we need the first bit to be 1
+            # Otherwise, the randomization could result in all 0's
+ # and that might be an invalid UDP port and result in
+ # packets being discarded
+ src_number_of_random_bits = number_of_random_bits // 2
+ dst_number_of_random_bits = (number_of_random_bits -
+ src_number_of_random_bits)
+ src_port_bitmap = '1000000000000000'.replace ('0','X',
+ src_number_of_random_bits)
+ dst_port_bitmap = '1000000000000000'.replace ('0','X',
+ dst_number_of_random_bits)
+ return [src_port_bitmap, dst_port_bitmap, 1 << number_of_random_bits]
+
+class RapidGeneratorMachine(RapidMachine):
+ """
+ Class to deal with a generator PROX instance (VM, bare metal, container)
+ """
+ def __init__(self, key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly, ipv6):
+ mac_address_size = 6
+ ethertype_size = 2
+ FCS_size = 4
+ if ipv6:
+ ip_header_size = 40
+ self.ip_length_offset = 18
+ # In IPV6, the IP size is the size of the IP content
+ self.frame_size_minus_ip_size = (2 * mac_address_size +
+ ethertype_size + ip_header_size + FCS_size)
+ else:
+ ip_header_size = 20
+ self.ip_length_offset = 16
+ # In IPV4, the IP size is the size of the IP header + IP content
+ self.frame_size_minus_ip_size = (2 * mac_address_size +
+ ethertype_size + FCS_size)
+ self.frame_size_minus_udp_header_and_content = (2 * mac_address_size +
+ ethertype_size + ip_header_size + FCS_size )
+ udp_header_start_offset = (2 * mac_address_size + ethertype_size +
+ ip_header_size)
+ self.udp_source_port_offset = udp_header_start_offset
+ self.udp_dest_port_offset = udp_header_start_offset + 2
+ self.udp_length_offset = udp_header_start_offset + 4
+ self.ipv6 = ipv6
+ if 'bucket_size_exp' in machine_params.keys():
+ self.bucket_size_exp = machine_params['bucket_size_exp']
+ else:
+ self.bucket_size_exp = 11
+ super().__init__(key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly)
+
+ def get_cores(self):
+ return (self.machine_params['gencores'] +
+ self.machine_params['latcores'])
+
+ def remap_all_cpus(self):
+ """Convert relative cpu ids for different parameters (gencores, latcores)
+ """
+ super().remap_all_cpus()
+
+ if self.cpu_mapping is None:
+ return
+
+ if 'gencores' in self.machine_params.keys():
+ cpus_remapped = super().remap_cpus(self.machine_params['gencores'])
+ RapidLog.debug('{} ({}): gencores {} remapped to {}'.format(self.name, self.ip, self.machine_params['gencores'], cpus_remapped))
+ self.machine_params['gencores'] = cpus_remapped
+
+ if 'latcores' in self.machine_params.keys():
+ cpus_remapped = super().remap_cpus(self.machine_params['latcores'])
+ RapidLog.debug('{} ({}): latcores {} remapped to {}'.format(self.name, self.ip, self.machine_params['latcores'], cpus_remapped))
+ self.machine_params['latcores'] = cpus_remapped
+
+ def generate_lua(self):
+ appendix = 'gencores="%s"\n'% ','.join(map(str,
+ self.machine_params['gencores']))
+ appendix = appendix + 'latcores="%s"\n'% ','.join(map(str,
+ self.machine_params['latcores']))
+ appendix = (appendix +
+ 'bucket_size_exp="{}"\n'.format(self.bucket_size_exp))
+ if 'heartbeat' in self.machine_params.keys():
+ appendix = (appendix +
+ 'heartbeat="%s"\n'% self.machine_params['heartbeat'])
+ else:
+ appendix = appendix + 'heartbeat="60"\n'
+ super().generate_lua(appendix)
+
+ def start_prox(self):
+ # Start the generator with the -e option so that the cores don't
+ # start automatically
+ super().start_prox('-e')
+
+ def set_generator_speed(self, speed):
+ # The assumption is that we only use task 0 for generating
+ # We should check the gen.cfg file to make sure there is only task=0
+ speed_per_gen_core = speed / len(self.machine_params['gencores'])
+ self.socket.speed(speed_per_gen_core, self.machine_params['gencores'])
+
+ def set_udp_packet_size(self, imix_frame_sizes):
+ # We should check the gen.cfg to make sure we only send UDP packets
+ # If only 1 packet size, still using the 'old' way of setting the
+ # packet sizes in PROX. Otherwise, using the 'new' way which
+ # automatically sets IP and UDP sizes. We should switch to the new way
+ # eventually for all cases.
+ if len(imix_frame_sizes) == 1:
+ # Frame size = PROX pkt size + 4 bytes CRC
+ # The set_size function takes the PROX packet size as a parameter
+ self.socket.set_size(self.machine_params['gencores'], 0,
+ imix_frame_sizes[0] - 4)
+ # Writing length in the ip header
+ self.socket.set_value(self.machine_params['gencores'], 0,
+ self.ip_length_offset, imix_frame_sizes[0] -
+ self.frame_size_minus_ip_size, 2)
+ # Writing length in the udp header
+ self.socket.set_value(self.machine_params['gencores'], 0,
+ self.udp_length_offset, imix_frame_sizes[0] -
+ self.frame_size_minus_udp_header_and_content, 2)
+ else:
+ if self.ipv6:
+ RapidLog.critical('IMIX not supported for IPV6')
+ prox_sizes = [frame_size - 4 for frame_size in imix_frame_sizes]
+ self.socket.set_imix(self.machine_params['gencores'], 0,
+ prox_sizes)
+
+ def set_flows(self, number_of_flows):
+ source_port, destination_port, actualflows = RandomPortBits.get_bitmap(
+ number_of_flows)
+ self.socket.set_random(self.machine_params['gencores'],0,
+ self.udp_source_port_offset, source_port,2)
+ self.socket.set_random(self.machine_params['gencores'],0,
+ self.udp_dest_port_offset, destination_port,2)
+ return actualflows
+
+ def start_gen_cores(self):
+ self.socket.start(self.machine_params['gencores'])
+
+ def stop_gen_cores(self):
+ self.socket.stop(self.machine_params['gencores'])
+
+ def start_latency_cores(self):
+ self.socket.start(self.machine_params['latcores'])
+
+ def stop_latency_cores(self):
+ self.socket.stop(self.machine_params['latcores'])
+
+ def lat_stats(self):
+ # Checking all tasks in the cfg file. In this way, we can have more
+ # latency tasks on the same core
+ return (self.socket.lat_stats(self.machine_params['latcores'],
+ self.all_tasks_for_this_cfg))
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml
new file mode 100644
index 00000000..4d210409
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: rapid
+description: A Helm chart for deploying RAPID test scripts and environment
+type: application
+version: 0.0.1
+appVersion: "1.0.0"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml
new file mode 100644
index 00000000..74fc6297
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml
@@ -0,0 +1,26 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rapid-testing
+ namespace: {{ .Values.namespace }}
+ labels:
+ app: rapid-testing
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rapid-testing
+ template:
+ metadata:
+ labels:
+ app: rapid-testing
+ spec:
+ serviceAccountName: rapid-testing-sa
+ containers:
+ - name: rapid-mgmt
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml
new file mode 100644
index 00000000..7886ade3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.namespace }}
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rapid-testing-sa
+ namespace: {{ .Values.namespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: rapid-testing-cr
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "pods/status"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rapid-testing-crb
+subjects:
+- kind: ServiceAccount
+ name: rapid-testing-sa
+ namespace: {{ .Values.namespace }}
+roleRef:
+ kind: ClusterRole
+ name: rapid-testing-cr
+ apiGroup: rbac.authorization.k8s.io
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml
new file mode 100644
index 00000000..76b8037a
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml
@@ -0,0 +1,8 @@
+namespace: rapid-testing
+
+image:
+ repository: opnfv/rapid
+ tag: "latest"
+ pullPolicy: IfNotPresent
+
+nodeSelector: {}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
new file mode 100644
index 00000000..3945cd8e
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_log import bcolors
+from rapid_test import RapidTest
+from statistics import mean
+
+class ImpairTest(RapidTest):
+ """
+ Class to manage the impair testing
+ """
+ def __init__(self, test_param, lat_percentile, runtime, testname,
+ environment_file, gen_machine, sut_machine, background_machines):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.gen_machine = gen_machine
+ self.sut_machine = sut_machine
+ self.background_machines = background_machines
+ self.test['lat_percentile'] = lat_percentile
+
+ def run(self):
+ result_details = {'Details': 'Nothing'}
+ imix = self.test['imix']
+ size = mean (imix)
+ flow_number = self.test['flowsize']
+ attempts = self.test['steps']
+ self.gen_machine.set_udp_packet_size(imix)
+ flow_number = self.gen_machine.set_flows(flow_number)
+ self.gen_machine.start_latency_cores()
+ RapidLog.info('+' + '-' * 188 + '+')
+ RapidLog.info(("| Generator is sending UDP ({:>5} flow) packets ({:>5}"
+ " bytes) to SUT via GW dropping and delaying packets. SUT sends "
+ "packets back.{:>60}").format(flow_number,round(size),'|'))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ RapidLog.info(('| Test | Speed requested | Gen by core | Sent by NIC'
+ ' | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f} Pcentil'
+ '| Max. Lat.| Sent | Received | Lost | Total Lost|'
+ 'L.Ratio|Time|').format(self.test['lat_percentile']*100))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ speed = self.test['startspeed']
+ self.gen_machine.set_generator_speed(speed)
+ while attempts:
+ attempts -= 1
+ print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
+ sys.stdout.flush()
+ time.sleep(1)
+ # Get statistics now that the generation is stable and NO ARP messages any more
+ iteration_data = self.run_iteration(float(self.test['runtime']),flow_number,size,speed)
+ iteration_data['speed'] = speed
+ # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
+            # If the sum is lower than 95, it means that more than 5% of the latency measurements were dropped for accuracy reasons.
+ if (iteration_data['drop_rate'] +
+ iteration_data['lat_used'] * 100) < 95:
+ lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
+ '{}').format(bcolors.WARNING,
+ iteration_data['lat_used']*100, bcolors.ENDC)
+ else:
+ lat_warning = ''
+ iteration_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(attempts, size, iteration_data,
+ iteration_prefix))
+ iteration_data['test'] = self.test['testname']
+ iteration_data['environment_file'] = self.test['environment_file']
+ iteration_data['Flows'] = flow_number
+ iteration_data['Size'] = size
+ iteration_data['RequestedSpeed'] = RapidTest.get_pps(
+ iteration_data['speed'] ,size)
+ result_details = self.post_data(iteration_data)
+ RapidLog.debug(result_details)
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ self.gen_machine.stop_latency_cores()
+ return (True, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
new file mode 100644
index 00000000..de7e6ae3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from past.utils import old_div
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class IrqTest(RapidTest):
+ """
+ Class to manage the irq testing
+ """
+ def __init__(self, test_param, runtime, testname, environment_file,
+ machines):
+ # machines: list of PROX machine objects exposing socket,
+ # get_cores(), start(), stop() and name
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.machines = machines
+
+ def run(self):
+ """Measure per-core interrupt statistics on every PROX machine.
+
+ Per machine: read the irq bucket boundaries, start the PROX cores,
+ sample the per-core bucket counters, sleep self.test['runtime']
+ seconds, sample again and log the per-second rate for each bucket.
+
+ Returns a tuple (500000 - max_loop_duration, result_details) where
+ max_loop_duration is the largest bucket boundary (in us) that saw
+ at least one interrupt, and result_details is the dictionary
+ returned by self.post_data().
+ """
+ RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
+ RapidLog.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic |")
+ RapidLog.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and |")
+ RapidLog.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was |")
+ RapidLog.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout |")
+ RapidLog.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 |")
+ RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
+ sys.stdout.flush()
+ max_loop_duration = 0
+ machine_details = {}
+ for machine in self.machines:
+ buckets=machine.socket.show_irq_buckets(machine.get_cores()[0])
+ if max_loop_duration == 0:
+ # First time we go through the loop, we need to initialize
+ # result_details
+ result_details = {'test': self.test['testname'],
+ 'environment_file': self.test['environment_file'],
+ 'buckets': buckets}
+ print('Measurement ongoing ... ',end='\r')
+ machine.start() # PROX cores will be started within 0 to 1 seconds
+ # That is why we sleep a bit over 1 second to make sure all cores
+ # are started
+ time.sleep(1.2)
+ # Matrices indexed [core][bucket] with the counter samples taken
+ # before and after the measurement window
+ old_irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ column_names = []
+ for bucket in buckets:
+ column_names.append('<{}'.format(bucket))
+ # The last bucket is open-ended: label it as "greater than" the
+ # previous boundary
+ column_names[-1] = '>{}'.format(buckets[-2])
+ for j,bucket in enumerate(buckets):
+ for i,irqcore in enumerate(machine.get_cores()):
+ old_irq[i][j] = machine.socket.irq_stats(irqcore,j)
+ # Measurements in the loop above, are updated by PROX every second
+ # This means that taking the same measurement 0.5 second later
+ # might result in the same data or data from the next 1s window
+ time.sleep(float(self.test['runtime']))
+ row_names = []
+ for i,irqcore in enumerate(machine.get_cores()):
+ row_names.append(irqcore)
+ for j,bucket in enumerate(buckets):
+ diff = machine.socket.irq_stats(irqcore,j) - old_irq[i][j]
+ if diff == 0:
+ irq[i][j] = '0'
+ else:
+ # Values are stored as strings, ready for the aligned
+ # table printed below
+ irq[i][j] = str(round(old_div(diff,
+ float(self.test['runtime'])), 2))
+ if max_loop_duration < int(bucket):
+ max_loop_duration = int(bucket)
+ # Measurements in the loop above, are updated by PROX every second
+ # This means that taking the same measurement 0.5 second later
+ # might result in the same data or data from the next 1s window
+ # Conclusion: we don't know the exact window size.
+ # Real measurement windows might be wrong by 1 second
+ # This could be fixed in this script by checking this data every
+ # 0.5 seconds Not implemented since we can also run this test for
+ # a longer time and decrease the error. The absolute number of
+ # interrupts is not so important.
+ machine.stop()
+ core_details = {}
+ RapidLog.info('Results for PROX instance %s'%machine.name)
+ RapidLog.info('{:>12}'.format('bucket us') +
+ ''.join(['{:>12}'.format(item) for item in column_names]))
+ for j, row in enumerate(irq):
+ RapidLog.info('Core {:>7}'.format(row_names[j]) +
+ ''.join(['{:>12}'.format(item) for item in row]))
+ core_details['Core {}'.format(row_names[j])] = row
+ machine_details[machine.name] = core_details
+ result_details['machine_data'] = machine_details
+ result_details = self.post_data(result_details)
+ return (500000 - max_loop_duration, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py
new file mode 100644
index 00000000..1d1112f7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py
@@ -0,0 +1,236 @@
+##
+## Copyright (c) 2019-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+from kubernetes import client, config
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
+import logging
+from logging import handlers
+
+from rapid_k8s_pod import Pod
+
+class K8sDeployment:
+ """Deployment class to create containers for test execution in Kubernetes
+ environment.
+ """
+ LOG_FILE_NAME = "createrapidk8s.log"
+ SSH_PRIVATE_KEY = "./rapid_rsa_key"
+ SSH_USER = "rapid"
+
+ POD_YAML_TEMPLATE_FILE_NAME = "pod-rapid.yaml"
+
+ _log = None
+ _create_config = None
+ _runtime_config = None
+ _total_number_of_pods = 0
+ _namespace = "rapid-testing"
+ _pods = []
+
+ def __init__(self):
+ # Configure logger
+ self._log = logging.getLogger("k8srapid")
+ self._log.setLevel(logging.DEBUG)
+
+ console_formatter = logging.Formatter("%(message)s")
+ console_handler = logging.StreamHandler(sys.stdout)
+ console_handler.setLevel(logging.DEBUG)
+ console_handler.setFormatter(console_formatter)
+
+ file_formatter = logging.Formatter("%(asctime)s - "
+ "%(levelname)s - "
+ "%(message)s")
+ file_handler = logging.handlers.RotatingFileHandler(self.LOG_FILE_NAME,
+ backupCount=10)
+ file_handler.setLevel(logging.DEBUG)
+ file_handler.setFormatter(file_formatter)
+
+ self._log.addHandler(file_handler)
+ self._log.addHandler(console_handler)
+
+ # Initialize k8s plugin
+ try:
+ config.load_kube_config()
+ except:
+ config.load_incluster_config()
+
+ Pod.k8s_CoreV1Api = client.CoreV1Api()
+
+ def load_create_config(self, config_file_name):
+ """Read and parse configuration file for the test environment.
+ """
+ self._log.info("Loading configuration file %s", config_file_name)
+ self._create_config = configparser.RawConfigParser()
+ try:
+ self._create_config.read(config_file_name)
+ except Exception as e:
+ self._log.error("Failed to read config file!\n%s\n" % e)
+ return -1
+
+ # Now parse config file content
+ # Parse [DEFAULT] section
+ if self._create_config.has_option("DEFAULT", "total_number_of_pods"):
+ self._total_number_of_pods = self._create_config.getint(
+ "DEFAULT", "total_number_of_pods")
+ else:
+ self._log.error("No option total_number_of_pods in DEFAULT section")
+ return -1
+
+ self._log.debug("Total number of pods %d" % self._total_number_of_pods)
+
+ if self._create_config.has_option("DEFAULT", "namespace"):
+ self._namespace = self._create_config.get(
+ "DEFAULT", "namespace")
+ else:
+ self._log.error("No option namespace in DEFAULT section")
+ return -1
+
+ self._log.debug("Using namespace %s" % self._total_number_of_pods)
+
+ # Parse [PODx] sections
+ for i in range(1, int(self._total_number_of_pods) + 1):
+ # Search for POD name
+ if self._create_config.has_option("POD%d" % i,
+ "name"):
+ pod_name = self._create_config.get(
+ "POD%d" % i, "name")
+ else:
+ pod_name = "prox-pod-%d" % i
+
+ # Search for POD hostname
+ if self._create_config.has_option("POD%d" % i,
+ "nodeSelector_hostname"):
+ pod_nodeselector_hostname = self._create_config.get(
+ "POD%d" % i, "nodeSelector_hostname")
+ else:
+ pod_nodeselector_hostname = None
+
+ # Search for POD spec
+ if self._create_config.has_option("POD%d" % i,
+ "spec_file_name"):
+ pod_spec_file_name = self._create_config.get(
+ "POD%d" % i, "spec_file_name")
+ else:
+ pod_spec_file_name = K8sDeployment.POD_YAML_TEMPLATE_FILE_NAME
+
+ # Search for POD dataplane static IP
+ if self._create_config.has_option("POD%d" % i,
+ "dp_ip"):
+ pod_dp_ip = self._create_config.get(
+ "POD%d" % i, "dp_ip")
+ else:
+ pod_dp_ip = None
+
+ # Search for POD dataplane subnet
+ if self._create_config.has_option("POD%d" % i,
+ "dp_subnet"):
+ pod_dp_subnet = self._create_config.get(
+ "POD%d" % i, "dp_subnet")
+ else:
+ pod_dp_subnet = "24"
+
+ pod = Pod(pod_name, self._namespace)
+ pod.set_nodeselector(pod_nodeselector_hostname)
+ pod.set_spec_file_name(pod_spec_file_name)
+ pod.set_dp_ip(pod_dp_ip)
+ pod.set_dp_subnet(pod_dp_subnet)
+ pod.set_id(i)
+
+ # Add POD to the list of PODs which need to be created
+ self._pods.append(pod)
+
+ return 0
+
+ def create_pods(self):
+ """ Create test PODs and wait for them to start.
+ Collect information for tests to run.
+ """
+ self._log.info("Creating PODs...")
+
+ # Create PODs using template from yaml file
+ for pod in self._pods:
+ self._log.info("Creating POD %s...", pod.get_name())
+ pod.create_from_yaml()
+
+ # Wait for PODs to start
+ for pod in self._pods:
+ pod.wait_for_start()
+
+ # Collect information from started PODs for test execution
+ for pod in self._pods:
+ pod.set_ssh_credentials(K8sDeployment.SSH_USER, K8sDeployment.SSH_PRIVATE_KEY)
+ pod.get_sriov_dev_mac()
+ pod.get_qat_dev()
+
+ def save_runtime_config(self, config_file_name):
+ self._log.info("Saving config %s for runrapid script...",
+ config_file_name)
+ self._runtime_config = configparser.RawConfigParser()
+
+ # Section [DEFAULT]
+# self._runtime_config.set("DEFAULT",
+# "total_number_of_test_machines",
+# self._total_number_of_pods)
+
+ # Section [ssh]
+ self._runtime_config.add_section("ssh")
+ self._runtime_config.set("ssh",
+ "key",
+ K8sDeployment.SSH_PRIVATE_KEY)
+ self._runtime_config.set("ssh",
+ "user",
+ K8sDeployment.SSH_USER)
+
+ # Section [rapid]
+ self._runtime_config.add_section("rapid")
+ self._runtime_config.set("rapid",
+ "total_number_of_machines",
+ self._total_number_of_pods)
+
+ # Export information about each pod
+ # Sections [Mx]
+ for pod in self._pods:
+ self._runtime_config.add_section("M%d" % pod.get_id())
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "admin_ip", pod.get_admin_ip())
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "dp_mac1", pod.get_dp_mac())
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "dp_pci_dev", pod.get_dp_pci_dev())
+ if (pod.get_qat_pci_dev()):
+ for qat_index, qat_device in enumerate(pod.get_qat_pci_dev()):
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "qat_pci_dev%d" % qat_index, qat_device)
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "dp_ip1", pod.get_dp_ip() + "/" +
+ pod.get_dp_subnet())
+
+ # Section [Varia]
+ self._runtime_config.add_section("Varia")
+ self._runtime_config.set("Varia",
+ "vim",
+ "kubernetes")
+
+ # Write runtime config file
+ with open(config_file_name, "w") as file:
+ self._runtime_config.write(file)
+
+ def delete_pods(self):
+ for pod in self._pods:
+ pod.terminate()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py
new file mode 100644
index 00000000..beaedd69
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py
@@ -0,0 +1,264 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from os import path
+import time, yaml
+import logging
+from kubernetes import client, config
+
+from rapid_sshclient import SSHClient
+
+class Pod:
+ """Class which represents test pods.
+ For example with traffic gen, forward/swap applications, etc
+ """
+ k8s_CoreV1Api = None
+
+ _log = None
+
+ _name = "pod"
+ _namespace = "default"
+ _nodeSelector_hostname = None
+ _spec_filename = None
+ _last_status = None
+ _id = None
+ _admin_ip = None
+ _dp_ip = None
+ _dp_subnet = None
+
+ _ssh_client = None
+
+ _sriov_vf = None
+ _sriov_vf_mac = None
+
+ def __init__(self, name, namespace = "default", logger_name = "k8srapid"):
+ self._log = logging.getLogger(logger_name)
+
+ self._name = name
+ self._namespace = namespace
+ self._ssh_client = SSHClient(logger_name = logger_name)
+ self.qat_vf = []
+
+ def __del__(self):
+ """Destroy POD. Do a cleanup.
+ """
+ if self._ssh_client is not None:
+ self._ssh_client.disconnect()
+
+ def create_from_yaml(self):
+ """Load POD description from yaml file.
+ """
+ with open(path.join(path.dirname(__file__),
+ self._spec_filename)) as yaml_file:
+ self.body = yaml.safe_load(yaml_file)
+
+ self.body["metadata"]["name"] = self._name
+
+ if (self._nodeSelector_hostname is not None):
+ if ("nodeSelector" not in self.body["spec"]):
+ self.body["spec"]["nodeSelector"] = {}
+ self.body["spec"]["nodeSelector"]["kubernetes.io/hostname"] = \
+ self._nodeSelector_hostname
+ self._log.debug("Creating POD, body:\n%s" % self.body)
+
+ try:
+ self.k8s_CoreV1Api.create_namespaced_pod(body = self.body,
+ namespace = self._namespace)
+ except client.rest.ApiException as e:
+ self._log.error("Couldn't create POD %s!\n%s\n" % (self._name,
+ e))
+
+ def terminate(self):
+ """Terminate POD. Close SSH connection.
+ """
+ if self._ssh_client is not None:
+ self._ssh_client.disconnect()
+
+ try:
+ self.k8s_CoreV1Api.delete_namespaced_pod(name = self._name,
+ namespace = self._namespace)
+ except client.rest.ApiException as e:
+ if e.reason != "Not Found":
+ self._log.error("Couldn't delete POD %s!\n%s\n" % (self._name, e.reason))
+
+ def update_admin_ip(self):
+ """Check for admin IP address assigned by k8s.
+ """
+ try:
+ pod = self.k8s_CoreV1Api.read_namespaced_pod_status(name = self._name, namespace = self._namespace)
+ self._admin_ip = pod.status.pod_ip
+ except client.rest.ApiException as e:
+ self._log.error("Couldn't update POD %s admin IP!\n%s\n" % (self._name, e))
+
+ def wait_for_start(self):
+ """Wait for POD to start.
+ """
+ self._log.info("Waiting for POD %s to start..." % self._name)
+ while True:
+ self.get_status()
+ if (self._last_status == "Running" or self._last_status == "Failed"
+ or self._last_status == "Unknown"):
+ break
+ else:
+ time.sleep(3)
+
+ self.update_admin_ip()
+
+ return self._last_status
+
+ def ssh_run_cmd(self, cmd):
+ """Execute command for POD via SSH connection.
+ SSH credentials should be configured before use of this function.
+ """
+ self._ssh_client.run_cmd(cmd)
+
+ def get_name(self):
+ return self._name
+
+ def get_admin_ip(self):
+ return self._admin_ip
+
+ def get_dp_ip(self):
+ return self._dp_ip
+
+ def get_dp_subnet(self):
+ return self._dp_subnet
+
+ def get_dp_mac(self):
+ return self._sriov_vf_mac
+
+ def get_dp_pci_dev(self):
+ return self._sriov_vf
+
+ def get_qat_pci_dev(self):
+ return self.qat_vf
+
+ def get_id(self):
+ return self._id
+
+ def get_status(self):
+ """Get current status fro the pod.
+ """
+ try:
+ pod = self.k8s_CoreV1Api.read_namespaced_pod_status(name = self._name,
+ namespace = self._namespace)
+ except client.rest.ApiException as e:
+ self._log.error("Couldn't read POD %s status!\n%s\n" % (self._name, e))
+
+ self._last_status = pod.status.phase
+ return self._last_status
+
+ def get_qat_dev(self):
+ """Get qat devices if any, assigned by k8s QAT device plugin.
+ """
+ self._log.info("Checking assigned QAT VF for POD %s" % self._name)
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/k8s_qat_device_plugin_envs")
+ if ret != 0:
+ self._log.error("Failed to check assigned QAT VF!"
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+
+ cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
+
+ if cmd_output:
+ self._log.debug("Before: Using QAT VF %s" % self.qat_vf)
+ self._log.debug("Environment variable %s" % cmd_output)
+ for line in cmd_output.splitlines():
+ self.qat_vf.append(line.split("=")[1])
+ self._log.debug("Using QAT VF %s" % self.qat_vf)
+ else:
+ self._log.debug("No QAT devices for this pod")
+ self.qat_vf = None
+
+ def get_sriov_dev_mac(self):
+ """Get assigned by k8s SRIOV network device plugin SRIOV VF devices.
+ Return 0 in case of sucessfull configuration.
+ Otherwise return -1.
+ """
+ self._log.info("Checking assigned SRIOV VF for POD %s" % self._name)
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/k8s_sriov_device_plugin_envs")
+ if ret != 0:
+ self._log.error("Failed to check assigned SRIOV VF!"
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+
+ cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
+ self._log.debug("Environment variable %s" % cmd_output)
+
+ # Parse environment variable
+ cmd_output = cmd_output.split("=")[1]
+ self._sriov_vf = cmd_output.split(",")[0]
+ self._log.debug("Using first SRIOV VF %s" % self._sriov_vf)
+
+ # find DPDK version
+ self._log.info("Checking DPDK version for POD %s" % self._name)
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/dpdk_version")
+ if ret != 0:
+ self._log.error("Failed to check DPDK version"
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+ dpdk_version = self._ssh_client.get_output().decode("utf-8").rstrip()
+ self._log.debug("DPDK version %s" % dpdk_version)
+ if (dpdk_version >= '20.11.0'):
+ allow_parameter = 'allow'
+ else:
+ allow_parameter = 'pci-whitelist'
+
+ self._log.info("Getting MAC address for assigned SRIOV VF %s" % \
+ self._sriov_vf)
+ self._ssh_client.run_cmd("sudo /opt/rapid/port_info_app -n 4 \
+ --{} {}".format(allow_parameter, self._sriov_vf))
+ if ret != 0:
+ self._log.error("Failed to get MAC address!"
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+
+ # Parse MAC address
+ cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
+ self._log.debug(cmd_output)
+ cmd_output = cmd_output.splitlines()
+ for line in cmd_output:
+ if line.startswith("Port 0 MAC: "):
+ self._sriov_vf_mac = line[12:]
+
+ self._log.debug("MAC %s" % self._sriov_vf_mac)
+
+ def set_dp_ip(self, dp_ip):
+ self._dp_ip = dp_ip
+
+ def set_dp_subnet(self, dp_subnet):
+ self._dp_subnet = dp_subnet
+
+ def set_id(self, pod_id):
+ self._id = pod_id
+
+ def set_nodeselector(self, hostname):
+ """Set hostname on which POD will be executed.
+ """
+ self._nodeSelector_hostname = hostname
+
+ def set_spec_file_name(self, file_name):
+ """Set pod spec filename.
+ """
+ self._spec_filename = file_name
+
+ def set_ssh_credentials(self, user, rsa_private_key):
+ """Set SSH credentials for the SSH connection to the POD.
+ """
+ self.update_admin_ip()
+ self._ssh_client.set_credentials(ip = self._admin_ip,
+ user = user,
+ rsa_private_key = rsa_private_key)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
new file mode 100644
index 00000000..1ad54273
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import logging
+from logging.handlers import RotatingFileHandler
+from logging import handlers
+import os
+import sys
+import time
+
+class bcolors(object):
+ # ANSI escape sequences used to colorize and format terminal output.
+ HEADER = '\033[95m' # bright magenta
+ OKBLUE = '\033[94m' # bright blue
+ OKGREEN = '\033[92m' # bright green
+ WARNING = '\033[93m' # bright yellow
+ FAIL = '\033[91m' # bright red
+ ENDC = '\033[0m' # reset all attributes
+ BOLD = '\033[1m' # bold
+ UNDERLINE = '\033[4m' # underline
+ FLASH = '\033[5m' # slow blink
+
+class RapidLog(object):
+ """
+ Class to deal with rapid logging
+ """
+ # Module logger shared by all static helpers; set by log_init()
+ log = None
+
+ @staticmethod
+ def log_init(log_file, loglevel, screenloglevel, version):
+ """Initialize the module logger: a stdout handler at screenloglevel
+ and a rotating file handler at loglevel writing to log_file.
+ Existing handlers are reused so repeated calls do not duplicate
+ output; an already existing log_file is rolled over on startup.
+ """
+ log = logging.getLogger(__name__)
+ makeFileHandler = True
+ makeStreamHandler = True
+ if len(log.handlers) > 0:
+ for handler in log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ makeFileHandler = False
+ elif isinstance(handler, logging.StreamHandler):
+ makeStreamHandler = False
+ if makeStreamHandler:
+ # create formatters
+ screen_formatter = logging.Formatter("%(message)s")
+ # create a console handler
+ # and set its log level to the command-line option
+ #
+ console_handler = logging.StreamHandler(sys.stdout)
+ #console_handler.setLevel(logging.INFO)
+ numeric_screenlevel = getattr(logging, screenloglevel.upper(), None)
+ if not isinstance(numeric_screenlevel, int):
+ raise ValueError('Invalid screenlog level: %s' % screenloglevel)
+ console_handler.setLevel(numeric_screenlevel)
+ console_handler.setFormatter(screen_formatter)
+ # add handler to the logger
+ #
+ log.addHandler(console_handler)
+ if makeFileHandler:
+ # create formatters
+ file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
+ # get a top-level logger,
+ # set its log level,
+ # BUT PREVENT IT from propagating messages to the root logger
+ #
+ numeric_level = getattr(logging, loglevel.upper(), None)
+ if not isinstance(numeric_level, int):
+ raise ValueError('Invalid log level: %s' % loglevel)
+ log.setLevel(numeric_level)
+ log.propagate = 0
+
+
+ # create a file handler
+ # and set its log level
+ # No maxBytes is given, so the file only rotates through the
+ # explicit doRollover() call below; backupCount keeps up to 10
+ # old log files.
+ file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
+ file_handler.setLevel(numeric_level)
+ file_handler.setFormatter(file_formatter)
+
+ # add handler to the logger
+ #
+ log.addHandler(file_handler)
+
+ # Check if log exists and should therefore be rolled
+ needRoll = os.path.isfile(log_file)
+
+
+ # This is a stale log, so roll it
+ if needRoll:
+ # Add timestamp
+ log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
+
+ # Roll over on application start
+ file_handler.doRollover()
+
+ # Add timestamp
+ log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
+
+ log.debug("rapid version: " + version)
+ RapidLog.log = log
+
+ @staticmethod
+ def log_close():
+ """Close and detach all file handlers from the logger."""
+ for handler in RapidLog.log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ handler.close()
+ RapidLog.log.removeHandler(handler)
+
+ @staticmethod
+ def exception(exception_info):
+ # NOTE(review): exit(1) terminates the whole process after logging;
+ # sys.exit() would be the conventional spelling
+ RapidLog.log.exception(exception_info)
+ exit(1)
+
+ @staticmethod
+ def critical(critical_info):
+ # Logs at CRITICAL level and terminates the process
+ RapidLog.log.critical(critical_info)
+ exit(1)
+
+ @staticmethod
+ def error(error_info):
+ RapidLog.log.error(error_info)
+
+ @staticmethod
+ def debug(debug_info):
+ RapidLog.log.debug(debug_info)
+
+ @staticmethod
+ def info(info):
+ RapidLog.log.info(info)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
new file mode 100644
index 00000000..47f858d0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from rapid_log import RapidLog
+from prox_ctrl import prox_ctrl
+import os
+import re
+import uuid
+
+class RapidMachine(object):
+ """
+ Class to deal with a PROX instance (VM, bare metal, container)
+ """
+ def __init__(self, key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly):
+ self.name = machine_params['name']
+ self.ip = machine_params['admin_ip']
+ self.key = key
+ self.user = user
+ self.password = password
+ self.rundir = rundir
+ self.resultsdir = resultsdir
+ self.dp_ports = []
+ self.dpdk_port_index = []
+ self.configonly = configonly
+ index = 1
+ while True:
+ ip_key = 'dp_ip{}'.format(index)
+ mac_key = 'dp_mac{}'.format(index)
+ if ip_key in machine_params.keys():
+ if mac_key in machine_params.keys():
+ dp_port = {'ip': machine_params[ip_key], 'mac' : machine_params[mac_key]}
+ else:
+ dp_port = {'ip': machine_params[ip_key], 'mac' : None}
+ self.dp_ports.append(dict(dp_port))
+ self.dpdk_port_index.append(index - 1)
+ index += 1
+ else:
+ break
+ self.machine_params = machine_params
+ self.vim = vim
+ self.cpu_mapping = None
+ if 'config_file' in self.machine_params.keys():
+ PROXConfigfile = open (self.machine_params['config_file'], 'r')
+ PROXConfig = PROXConfigfile.read()
+ PROXConfigfile.close()
+ self.all_tasks_for_this_cfg = set(re.findall("task\s*=\s*(\d+)",PROXConfig))
+
+ def get_cores(self):
+ return (self.machine_params['cores'])
+
+ def expand_list_format(self, list):
+ """Expand cpuset list format provided as comma-separated list of
+ numbers and ranges of numbers. For more information please see
+ https://man7.org/linux/man-pages/man7/cpuset.7.html
+ """
+ list_expanded = []
+ for num in list.split(','):
+ if '-' in num:
+ num_range = num.split('-')
+ list_expanded += range(int(num_range[0]), int(num_range[1]) + 1)
+ else:
+ list_expanded.append(int(num))
+ return list_expanded
+
+ def read_cpuset(self):
+ """Read list of cpus on which we allowed to execute
+ """
+ cpu_set_file = '/sys/fs/cgroup/cpuset.cpus'
+ cmd = 'test -e {0} && echo exists'.format(cpu_set_file)
+ if (self._client.run_cmd(cmd).decode().rstrip()):
+ cmd = 'cat {}'.format(cpu_set_file)
+ else:
+ cpu_set_file = '/sys/fs/cgroup/cpuset/cpuset.cpus'
+ cmd = 'test -e {0} && echo exists'.format(cpu_set_file)
+ if (self._client.run_cmd(cmd).decode().rstrip()):
+ cmd = 'cat {}'.format(cpu_set_file)
+ else:
+ RapidLog.critical('{Cannot determine cpuset')
+ cpuset_cpus = self._client.run_cmd(cmd).decode().rstrip()
+ RapidLog.debug('{} ({}): Allocated cpuset: {}'.format(self.name, self.ip, cpuset_cpus))
+ self.cpu_mapping = self.expand_list_format(cpuset_cpus)
+ RapidLog.debug('{} ({}): Expanded cpuset: {}'.format(self.name, self.ip, self.cpu_mapping))
+
+ # Log CPU core mapping for user information
+ cpu_mapping_str = ''
+ for i in range(len(self.cpu_mapping)):
+ cpu_mapping_str = cpu_mapping_str + '[' + str(i) + '->' + str(self.cpu_mapping[i]) + '], '
+ cpu_mapping_str = cpu_mapping_str[:-2]
+ RapidLog.debug('{} ({}): CPU mapping: {}'.format(self.name, self.ip, cpu_mapping_str))
+
+ def remap_cpus(self, cpus):
+ """Convert relative cpu ids provided as function parameter to match
+ cpu ids from allocated list
+ """
+ cpus_remapped = []
+ for cpu in cpus:
+ cpus_remapped.append(self.cpu_mapping[cpu])
+ return cpus_remapped
+
+ def remap_all_cpus(self):
+ """Convert relative cpu ids for different parameters (mcore, cores)
+ """
+ if self.cpu_mapping is None:
+ RapidLog.debug('{} ({}): cpu mapping is not defined! Please check the configuration!'.format(self.name, self.ip))
+ return
+
+ if 'mcore' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['mcore'])
+ RapidLog.debug('{} ({}): mcore {} remapped to {}'.format(self.name, self.ip, self.machine_params['mcore'], cpus_remapped))
+ self.machine_params['mcore'] = cpus_remapped
+
+ if 'cores' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['cores'])
+ RapidLog.debug('{} ({}): cores {} remapped to {}'.format(self.name, self.ip, self.machine_params['cores'], cpus_remapped))
+ self.machine_params['cores'] = cpus_remapped
+
+ if 'altcores' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['altcores'])
+ RapidLog.debug('{} ({}): altcores {} remapped to {}'.format(self.name, self.ip, self.machine_params['altcores'], cpus_remapped))
+ self.machine_params['altcores'] = cpus_remapped
+
+ def devbind(self):
+ # Script to bind the right network interface to the poll mode driver
+ for index, dp_port in enumerate(self.dp_ports, start = 1):
+ DevBindFileName = self.rundir + '/devbind-{}-port{}.sh'.format(self.ip, index)
+ self._client.scp_put('./devbind.sh', DevBindFileName)
+ cmd = 'sed -i \'s/MACADDRESS/' + dp_port['mac'] + '/\' ' + DevBindFileName
+ result = self._client.run_cmd(cmd)
+ RapidLog.debug('devbind.sh MAC updated for port {} on {} {}'.format(index, self.name, result))
+ if ((not self.configonly) and self.machine_params['prox_launch_exit']):
+ result = self._client.run_cmd(DevBindFileName)
+ RapidLog.debug('devbind.sh running for port {} on {} {}'.format(index, self.name, result))
+
+ def generate_lua(self, appendix = ''):
+ self.LuaFileName = 'parameters-{}.lua'.format(self.ip)
+ with open(self.LuaFileName, "w") as LuaFile:
+ LuaFile.write('require "helper"\n')
+ LuaFile.write('name="%s"\n'% self.name)
+ for index, dp_port in enumerate(self.dp_ports, start = 1):
+ LuaFile.write('local_ip{}="{}"\n'.format(index, dp_port['ip']))
+ LuaFile.write('local_hex_ip{}=convertIPToHex(local_ip{})\n'.format(index, index))
+ if self.vim in ['kubernetes']:
+ cmd = 'cat /opt/rapid/dpdk_version'
+ dpdk_version = self._client.run_cmd(cmd).decode().rstrip()
+ if (dpdk_version >= '20.11.0'):
+ allow_parameter = 'allow'
+ else:
+ allow_parameter = 'pci-whitelist'
+ eal_line = 'eal=\"--file-prefix {}{} --{} {} --force-max-simd-bitwidth=512'.format(
+ self.name, str(uuid.uuid4()), allow_parameter,
+ self.machine_params['dp_pci_dev'])
+ looking_for_qat = True
+ index = 0
+ while (looking_for_qat):
+ if 'qat_pci_dev{}'.format(index) in self.machine_params:
+ eal_line += ' --{} {}'.format(allow_parameter,
+ self.machine_params['qat_pci_dev{}'.format(index)])
+ index += 1
+ else:
+ looking_for_qat = False
+ eal_line += '"\n'
+ LuaFile.write(eal_line)
+ else:
+ LuaFile.write("eal=\"\"\n")
+ if 'mcore' in self.machine_params.keys():
+ LuaFile.write('mcore="%s"\n'% ','.join(map(str,
+ self.machine_params['mcore'])))
+ if 'cores' in self.machine_params.keys():
+ LuaFile.write('cores="%s"\n'% ','.join(map(str,
+ self.machine_params['cores'])))
+ if 'altcores' in self.machine_params.keys():
+ LuaFile.write('altcores="%s"\n'% ','.join(map(str,
+ self.machine_params['altcores'])))
+ if 'ports' in self.machine_params.keys():
+ LuaFile.write('ports="%s"\n'% ','.join(map(str,
+ self.machine_params['ports'])))
+ if 'dest_ports' in self.machine_params.keys():
+ for index, dest_port in enumerate(self.machine_params['dest_ports'], start = 1):
+ LuaFile.write('dest_ip{}="{}"\n'.format(index, dest_port['ip']))
+ LuaFile.write('dest_hex_ip{}=convertIPToHex(dest_ip{})\n'.format(index, index))
+ if dest_port['mac']:
+ LuaFile.write('dest_hex_mac{}="{}"\n'.format(index ,
+ dest_port['mac'].replace(':',' ')))
+ if 'gw_vm' in self.machine_params.keys():
+ for index, gw_ip in enumerate(self.machine_params['gw_ips'],
+ start = 1):
+ LuaFile.write('gw_ip{}="{}"\n'.format(index, gw_ip))
+ LuaFile.write('gw_hex_ip{}=convertIPToHex(gw_ip{})\n'.
+ format(index, index))
+ LuaFile.write(appendix)
+ self._client.scp_put(self.LuaFileName, self.rundir + '/parameters.lua')
+ self._client.scp_put('helper.lua', self.rundir + '/helper.lua')
+
+ def start_prox(self, autostart=''):
+ if self.machine_params['prox_socket']:
+ self._client = prox_ctrl(self.ip, self.key, self.user,
+ self.password)
+ self._client.test_connection()
+ if self.vim in ['OpenStack']:
+ self.devbind()
+ if self.vim in ['kubernetes']:
+ self.read_cpuset()
+ self.remap_all_cpus()
+ _, prox_config_file_name = os.path.split(self.
+ machine_params['config_file'])
+ if self.machine_params['prox_launch_exit']:
+ self.generate_lua()
+ self._client.scp_put(self.machine_params['config_file'], '{}/{}'.
+ format(self.rundir, prox_config_file_name))
+ if not self.configonly:
+ cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(self.rundir,
+ autostart, self.rundir, prox_config_file_name)
+ RapidLog.debug("Starting PROX on {}: {}".format(self.name,
+ cmd))
+ result = self._client.run_cmd(cmd)
+ RapidLog.debug("Finished PROX on {}: {}".format(self.name,
+ cmd))
+
+ def close_prox(self):
+ if (not self.configonly) and self.machine_params[
+ 'prox_socket'] and self.machine_params['prox_launch_exit']:
+ self.socket.quit_prox()
+ self._client.scp_get('/prox.log', '{}/{}.prox.log'.format(
+ self.resultsdir, self.name))
+
+ def connect_prox(self):
+ if self.machine_params['prox_socket']:
+ self.socket = self._client.connect_socket()
+
+ def start(self):
+ self.socket.start(self.get_cores())
+
+ def stop(self):
+ self.socket.stop(self.get_cores())
+
+ def reset_stats(self):
+ self.socket.reset_stats()
+
+ def core_stats(self):
+ return (self.socket.core_stats(self.get_cores(), self.all_tasks_for_this_cfg))
+
+ def multi_port_stats(self):
+ return (self.socket.multi_port_stats(self.dpdk_port_index))
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
new file mode 100644
index 00000000..143323b8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from rapid_log import RapidLog
+from past.utils import old_div
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
+import ast
+inf = float("inf")
+
+class RapidConfigParser(object):
+ """
+ Class to deal with rapid configuration files
+ """
+ @staticmethod
+ def parse_config(test_params):
+ testconfig = configparser.RawConfigParser()
+ testconfig.read(test_params['test_file'])
+ test_params['required_number_of_test_machines'] = int(testconfig.get(
+ 'TestParameters', 'total_number_of_test_machines'))
+ test_params['number_of_tests'] = int(testconfig.get('TestParameters',
+ 'number_of_tests'))
+ test_params['TestName'] = testconfig.get('TestParameters', 'name')
+ if testconfig.has_option('TestParameters', 'lat_percentile'):
+ test_params['lat_percentile'] = old_div(float(
+ testconfig.get('TestParameters', 'lat_percentile')),100.0)
+ else:
+ test_params['lat_percentile'] = 0.99
+ RapidLog.info('Latency percentile at {:.0f}%'.format(
+ test_params['lat_percentile']*100))
+ if testconfig.has_option('TestParameters', 'sleep_time'):
+ test_params['sleep_time'] = int(testconfig.get('TestParameters', 'sleep_time'))
+ if test_params['sleep_time'] < 2:
+ test_params['sleep_time'] = 2
+ else:
+ test_params['sleep_time'] = 2
+
+ if testconfig.has_option('TestParameters', 'ipv6'):
+ test_params['ipv6'] = testconfig.getboolean('TestParameters','ipv6')
+ else:
+ test_params['ipv6'] = False
+ config = configparser.RawConfigParser()
+ config.read(test_params['environment_file'])
+ test_params['vim_type'] = config.get('Varia', 'vim')
+ test_params['user'] = config.get('ssh', 'user')
+ if config.has_option('ssh', 'key'):
+ test_params['key'] = config.get('ssh', 'key')
+ else:
+ test_params['key'] = None
+ if config.has_option('ssh', 'password'):
+ test_params['password'] = config.get('ssh', 'password')
+ else:
+ test_params['password'] = None
+ test_params['total_number_of_machines'] = int(config.get('rapid',
+ 'total_number_of_machines'))
+ tests = []
+ test = {}
+ for test_index in range(1, test_params['number_of_tests']+1):
+ test.clear()
+ section = 'test%d'%test_index
+ options = testconfig.options(section)
+ for option in options:
+ if option in ['imix','imixs','flows', 'warmupimix']:
+ test[option] = ast.literal_eval(testconfig.get(section,
+ option))
+ elif option in ['maxframespersecondallingress','stepsize',
+ 'flowsize','warmupflowsize','warmuptime', 'steps']:
+ test[option] = int(testconfig.get(section, option))
+ elif option in ['startspeed', 'step', 'drop_rate_threshold',
+ 'generator_threshold','lat_avg_threshold','lat_perc_threshold',
+ 'lat_max_threshold','accuracy','maxr','maxz',
+ 'ramp_step','warmupspeed','mis_ordered_threshold']:
+ test[option] = float(testconfig.get(section, option))
+ else:
+ test[option] = testconfig.get(section, option)
+ tests.append(dict(test))
+ for test in tests:
+ if test['test'] in ['flowsizetest', 'TST009test', 'increment_till_fail']:
+ if 'drop_rate_threshold' not in test.keys():
+ test['drop_rate_threshold'] = 0
+ thresholds = ['generator_threshold','lat_avg_threshold', \
+ 'lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']
+ for threshold in thresholds:
+ if threshold not in test.keys():
+ test[threshold] = inf
+ test_params['tests'] = tests
+ if test_params['required_number_of_test_machines'] > test_params[
+ 'total_number_of_machines']:
+ RapidLog.exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
+ raise Exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
+ map_info = test_params['machine_map_file'].strip('[]').split(',')
+ map_info_length = len(map_info)
+ # If map_info is a list where the first entry is numeric, we assume we
+ # are dealing with a list of machines and NOT the machine.map file
+ if map_info[0].isnumeric():
+ if map_info_length < test_params[
+ 'required_number_of_test_machines']:
+ RapidLog.exception('Not enough machine indices in --map \
+ parameter: {}. Needing {} entries'.format(map_info,
+ test_params['required_number_of_test_machines']))
+ machine_index = list(map(int,map_info))
+ else:
+ machine_map = configparser.RawConfigParser()
+ machine_map.read(test_params['machine_map_file'])
+ machine_index = []
+ for test_machine in range(1,
+ test_params['required_number_of_test_machines']+1):
+ machine_index.append(int(machine_map.get(
+ 'TestM%d'%test_machine, 'machine_index')))
+ machine_map = configparser.RawConfigParser()
+ machine_map.read(test_params['machine_map_file'])
+ machines = []
+ machine = {}
+ for test_machine in range(1, test_params[
+ 'required_number_of_test_machines']+1):
+ machine.clear()
+ section = 'TestM%d'%test_machine
+ options = testconfig.options(section)
+ for option in options:
+ if option in ['prox_socket','prox_launch_exit','monitor']:
+ machine[option] = testconfig.getboolean(section, option)
+ elif option in ['mcore', 'cores', 'gencores', 'latcores',
+ 'altcores']:
+ machine[option] = ast.literal_eval(testconfig.get(
+ section, option))
+ elif option in ['bucket_size_exp']:
+ machine[option] = int(testconfig.get(section, option))
+ if machine[option] < 11:
+ RapidLog.exception(
+ "Minimum Value for bucket_size_exp is 11")
+ else:
+ machine[option] = testconfig.get(section, option)
+ for key in ['prox_socket','prox_launch_exit']:
+ if key not in machine.keys():
+ machine[key] = True
+ if 'monitor' not in machine.keys():
+ machine['monitor'] = True
+ section = 'M%d'%machine_index[test_machine-1]
+ options = config.options(section)
+ for option in options:
+ machine[option] = config.get(section, option)
+ machines.append(dict(machine))
+ for machine in machines:
+ dp_ports = []
+ if 'dest_vm' in machine.keys():
+ index = 1
+ while True:
+ dp_ip_key = 'dp_ip{}'.format(index)
+ dp_mac_key = 'dp_mac{}'.format(index)
+ if dp_ip_key in machines[int(machine['dest_vm'])-1].keys():
+ if dp_mac_key in machines[int(machine['dest_vm'])-1].keys():
+ dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
+ 'mac' : machines[int(machine['dest_vm'])-1][dp_mac_key]}
+ else:
+ dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
+ 'mac' : None}
+ dp_ports.append(dict(dp_port))
+ index += 1
+ else:
+ break
+ machine['dest_ports'] = list(dp_ports)
+ gw_ips = []
+ if 'gw_vm' in machine.keys():
+ index = 1
+ while True:
+ gw_ip_key = 'dp_ip{}'.format(index)
+ if gw_ip_key in machines[int(machine['gw_vm'])-1].keys():
+ gw_ip = machines[int(machine['gw_vm'])-1][gw_ip_key]
+ gw_ips.append(gw_ip)
+ index += 1
+ else:
+ break
+ machine['gw_ips'] = list(gw_ips)
+ test_params['machines'] = machines
+ return (test_params)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
new file mode 100644
index 00000000..8157ddf2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class PortStatsTest(RapidTest):
+    """
+    Class to manage the portstatstesting
+    """
+    def __init__(self, test_param, runtime, testname, environment_file,
+            machines):
+        # machines: list of machine objects whose port counters are polled
+        super().__init__(test_param, runtime, testname, environment_file)
+        self.machines = machines
+
+    def run(self):
+        # Poll the NIC port counters of every PROX machine for roughly
+        # self.test['runtime'] seconds, log the per-interval deltas and
+        # push each sample to the configured results endpoint.
+        # Returns (True, <last posted result record>).
+        result_details = {'Details': 'Nothing'}
+        RapidLog.info("+---------------------------------------------------------------------------+")
+        RapidLog.info("| Measuring port statistics on 1 or more PROX instances                     |")
+        RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
+        RapidLog.info("| PROX ID   |    Time   |    RX      |     TX     | no MBUFS   | ierr&imiss |")
+        RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
+        duration = float(self.test['runtime'])
+        # Parallel per-machine counter snapshots: previous and current
+        old_rx = []; old_tx = []; old_no_mbufs = []; old_errors = []; old_tsc = []
+        new_rx = []; new_tx = []; new_no_mbufs = []; new_errors = []; new_tsc = []
+        machines_to_go = len (self.machines)
+        for machine in self.machines:
+            machine.reset_stats()
+            old_rx.append(0); old_tx.append(0); old_no_mbufs.append(0); old_errors.append(0); old_tsc.append(0)
+            old_rx[-1], old_tx[-1], old_no_mbufs[-1], old_errors[-1], old_tsc[-1] = machine.multi_port_stats()
+            new_rx.append(0); new_tx.append(0); new_no_mbufs.append(0); new_errors.append(0); new_tsc.append(0)
+        while (duration > 0):
+            time.sleep(0.5)
+            # Get statistics after some execution time
+            for i, machine in enumerate(self.machines, start=0):
+                new_rx[i], new_tx[i], new_no_mbufs[i], new_errors[i], new_tsc[i] = machine.multi_port_stats()
+                rx = new_rx[i] - old_rx[i]
+                tx = new_tx[i] - old_tx[i]
+                no_mbufs = new_no_mbufs[i] - old_no_mbufs[i]
+                errors = new_errors[i] - old_errors[i]
+                tsc = new_tsc[i] - old_tsc[i]
+                # tsc unchanged: PROX has not refreshed this machine's
+                # counters yet, so skip it this round
+                if tsc == 0 :
+                    continue
+                machines_to_go -= 1
+                old_rx[i] = new_rx[i]
+                old_tx[i] = new_tx[i]
+                old_no_mbufs[i] = new_no_mbufs[i]
+                old_errors[i] = new_errors[i]
+                old_tsc[i] = new_tsc[i]
+                RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
+                result_details = {'test': self.test['test'],
+                        'environment_file': self.test['environment_file'],
+                        'PROXID': i,
+                        'StepSize': duration,
+                        'Received': rx,
+                        'Sent': tx,
+                        'NoMbufs': no_mbufs,
+                        'iErrMiss': errors}
+                result_details = self.post_data(result_details)
+            # Only count down once every machine reported a fresh sample
+            if machines_to_go == 0:
+                duration = duration - 1
+                machines_to_go = len (self.machines)
+        RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
+        return (True, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
new file mode 100644
index 00000000..6ecdb277
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
@@ -0,0 +1,49 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAgEArNsWTFD70ljjL+WnXc0GblN7KliciiuGS2Cg/tcP8zZHvzk8/lkR
+85EcXGpvYrHkTF1daZCbQUy3is0KvP27OholrxVv9HAn4BkA2ugWxp2FaePHKp0FBkMgup
+GHFVhzeg4hA4oFtjpaM95ATMcWTB++7nul6dW+f5/vhxzya5ypEg19ywtZmDooiXz6fWoa
+WgSqjy0NiLFoJEoNE5JYjz2XHTgBDKZ7Sr+oAto9/cOe3G5JsCyMFvCIIhrm/YIs8pwkqJ
+sPMEPg6DbG6P6S1YbnL6rM/BswVjp1IoWpPVbmZhDbhlNSk/4ZDIrMtbKBQPHP90Ku+C5i
+jY6ZNJ4gD7Cwm+ZLp4qdIqJoNoezmG8C0YvO8WvfMLRoyUChwSL3PmUGl02JdWJgYG/B37
+fJQbm80d6HOvAE5rvO5Z9dbwBvzZC0Yp5dX130OtNajpOhfBRN1qbIYYGgpIuLEgQUKC39
+/i1hGMNTOVDjJ4GNbiSUhUkbc64j0k2B+uYs947tfuwrotNumJIuDmwtqxUHwCuKNThUVh
+A3U1tblCWMS6ExVY4zawElXBT/preiAYaFlzFuYoHjzuWXN0WOv08tiRJL1lrfMis8Z9so
+fYc3qBSqlLgAsW5dtB5PMIy3JxXWqjFQIdgjlxWZ54Bu9t5fqPSggS+dNjDacl0v1e6ByB
+kAAAdQW2kXgltpF4IAAAAHc3NoLXJzYQAAAgEArNsWTFD70ljjL+WnXc0GblN7KliciiuG
+S2Cg/tcP8zZHvzk8/lkR85EcXGpvYrHkTF1daZCbQUy3is0KvP27OholrxVv9HAn4BkA2u
+gWxp2FaePHKp0FBkMgupGHFVhzeg4hA4oFtjpaM95ATMcWTB++7nul6dW+f5/vhxzya5yp
+Eg19ywtZmDooiXz6fWoaWgSqjy0NiLFoJEoNE5JYjz2XHTgBDKZ7Sr+oAto9/cOe3G5JsC
+yMFvCIIhrm/YIs8pwkqJsPMEPg6DbG6P6S1YbnL6rM/BswVjp1IoWpPVbmZhDbhlNSk/4Z
+DIrMtbKBQPHP90Ku+C5ijY6ZNJ4gD7Cwm+ZLp4qdIqJoNoezmG8C0YvO8WvfMLRoyUChwS
+L3PmUGl02JdWJgYG/B37fJQbm80d6HOvAE5rvO5Z9dbwBvzZC0Yp5dX130OtNajpOhfBRN
+1qbIYYGgpIuLEgQUKC39/i1hGMNTOVDjJ4GNbiSUhUkbc64j0k2B+uYs947tfuwrotNumJ
+IuDmwtqxUHwCuKNThUVhA3U1tblCWMS6ExVY4zawElXBT/preiAYaFlzFuYoHjzuWXN0WO
+v08tiRJL1lrfMis8Z9sofYc3qBSqlLgAsW5dtB5PMIy3JxXWqjFQIdgjlxWZ54Bu9t5fqP
+SggS+dNjDacl0v1e6ByBkAAAADAQABAAACABLHepSv96vSnFwHxzcZnyk9SJRBLECWmfB2
+fwcwtjrmGsVbopS/eIPNsBcaOR+v0+239v4RB80AWLBrtk7yAfU+AfoTiiY0SSC/lqgxrs
+fFNUlbxbeLd5BGmreqN9LJ2UHZZxzLUfOKQ2J/Mt0kg/ehO00Ngej1n8ydw5gaPPwT+QpN
+DO2SPhmbt+u3+D7H2DUPbLhBXMcM/xNyOBl4PMbTGifCfdqx+5MTX11v+GwpZIjuMnNBY7
+baSu/pnE7OZbO14wWuUugbd8PCr7mAbtNj5Jn5JGv/SDEWCMPHYauYVU+hZTgitUX+xRnn
+unXC/uffXYivZfLwlyRp6Zsd0r2z3dY+bjhZ/SBheAmP3FaKy4ZA1ggn7VHCM/RWywJJlP
+/xdKHWQs2j/kF+s84Z5+eb6r1p3xBS7Dv3Lt9KQPN/nLciJNWYwUHiVXo3BtFw4IRosP+k
+W4Km3bfmfs0yrgrAdypUeLHbD9fyYu/BjhdcDqCj9ntlxUnDfo4WQga1J1kY/5zUDOpVCV
+LYit6y4SCvFM1H8mIHX9n3jxEfs1fdx52OhcahfGc7Qg8EbMJFt3CqXcc4ErVkUxC61sWX
+7mfFqzp0eho1QrGU5a+1l9UaVTJhN1B0ruhEfdBm1FahcQ91ZEn2m6Wf1P0+RImI7m0cH1
+FZ0WDdX+DETUWNHr0BAAABAGEBn6UfyzTYtk/HWW8Px+ae60U4BJCcQ8m/ARSMGGLds2f3
+5NJjm6KliZJ+b7sdN4UYj2hm9zxjef+kwFXUEYmYVm16NufQRR1svF7YqLzNnOQ7eXluZS
+S3SEj1siziCveQ6kyLYrfedNtX/TErdR5SFqcbuanMzd7mqw1vMpejoEGKriSpYOSohsZW
+7Rkcej3XSR4jt5pzxfzUObcKrm5mWAYddINbflAYVswpT/LxNl7jduUsQd3Ul6fOBX4sBK
+rWYMv3Qo4z25oShqvWOJbvvQ1voTOiDF8LTOu60/YbbOfF116J6BcWTHbwe8z+Du8SxdVi
+1N4tFcadL7HqsZEAAAEBAN4ma7nbSI0fA3QM1IK9h5cN/h0qMk91Syh7+vFyNfe/DILFnJ
+0TGNaYhAow1jNMOQKeyEJOfuZkeMdR9/ohtfwSvzSJml/k0JV9aIZHehncZOMt93Gi6WtC
++Os2owyhcXMJN7MbKo1e3Ln21OyaAJi6TAdwSDivFSytvNCKoX8NncQu/UIPzNQVJcrvJn
+SZ+0AHFeuZVl9HgxZY1fUvIs24m9QnYH3HpMiYc2p8UT1hEOqq1bJpgKx9WHhj0fNCBsZ1
+6zTnCDa/HiDADHmlif6pyEu7nD+3MHAeGxS7LJjmMSvtbH/ltrYaz6wFSowlr/RiX7Z8pT
+Ib1lf7KPYulYUAAAEBAMcxzoKSEZt/eYz5w4h9Bs6tdBEBnmSzwni8P0DTv1q0sDan1g4Q
++Mcuo42lSXS9aTmfI+hJDRSuRraLE9xzmxUJ+R2bQkpOLgG6QOF1uU36ZtMoxtptII8pXT
+yQtIW2sHSz9Kgv16PFp98EaEfwzmdk/C8A6NxoGW7EpzAXzXZYLRSwgAr6wVE83jUsbIu5
+lAN6DG6vIm62PLsxmpDZuS5idQwxP8DP4itHMMRh2jE0+msQAWHRQ514nCTqeuy/ORbNSO
+4A1yMy1KxXBH6hQ/oE8ZXqtBqJ3CbINPEyuLK9PYj9e2zABoEOcXTaJcvmVve97xhhw6om
+zVgd4qw70oUAAAAVeWt5bHVsaW5AMGJkODI0NDk5MTYwAQIDBAUG
+-----END OPENSSH PRIVATE KEY-----
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub
new file mode 100644
index 00000000..c735d178
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCs2xZMUPvSWOMv5addzQZuU3sqWJyKK4ZLYKD+1w/zNke/OTz+WRHzkRxcam9iseRMXV1pkJtBTLeKzQq8/bs6GiWvFW/0cCfgGQDa6BbGnYVp48cqnQUGQyC6kYcVWHN6DiEDigW2Oloz3kBMxxZMH77ue6Xp1b5/n++HHPJrnKkSDX3LC1mYOiiJfPp9ahpaBKqPLQ2IsWgkSg0TkliPPZcdOAEMpntKv6gC2j39w57cbkmwLIwW8IgiGub9gizynCSomw8wQ+DoNsbo/pLVhucvqsz8GzBWOnUihak9VuZmENuGU1KT/hkMisy1soFA8c/3Qq74LmKNjpk0niAPsLCb5kunip0iomg2h7OYbwLRi87xa98wtGjJQKHBIvc+ZQaXTYl1YmBgb8Hft8lBubzR3oc68ATmu87ln11vAG/NkLRinl1fXfQ601qOk6F8FE3WpshhgaCki4sSBBQoLf3+LWEYw1M5UOMngY1uJJSFSRtzriPSTYH65iz3ju1+7Cui026Yki4ObC2rFQfAK4o1OFRWEDdTW1uUJYxLoTFVjjNrASVcFP+mt6IBhoWXMW5igePO5Zc3RY6/Ty2JEkvWWt8yKzxn2yh9hzeoFKqUuACxbl20Hk8wjLcnFdaqMVAh2COXFZnngG723l+o9KCBL502MNpyXS/V7oHIGQ== default@default
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py
new file mode 100644
index 00000000..d8aeacc1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py
@@ -0,0 +1,164 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import paramiko
+from scp import SCPClient
+import logging
+
+class SSHClient:
+ """Wrapper class for paramiko module to connect via SSH
+ """
+ _log = None
+
+ _ip = None
+ _user = None
+ _rsa_private_key = None
+ _timeout = None
+ _ssh = None
+ _connected = False
+
+ _output = None
+ _error = None
+
+ def __init__(self, ip=None, user=None, rsa_private_key=None, timeout=15,
+ logger_name=None, password = None):
+ self._ip = ip
+ self._user = user
+ self._password = password
+ self._rsa_private_key = rsa_private_key
+ self._timeout = timeout
+
+ if (logger_name is not None):
+ self._log = logging.getLogger(logger_name)
+
+ self._connected = False
+
+ def set_credentials(self, ip, user, rsa_private_key, password = None):
+ self._ip = ip
+ self._user = user
+ self._password = password
+ self._rsa_private_key = rsa_private_key
+
+ def connect(self):
+
+ if self._connected:
+ if (self._log is not None):
+ self._log.debug("Already connected!")
+ return
+ if ((self._ip is None) or (self._user is None) or
+ ((self._rsa_private_key is None) ==
+ (self._password is None))):
+ if (self._log is not None):
+ self._log.error("Wrong parameter! IP %s, user %s, RSA private key %s"
+ % (self._ip, self._user, self._rsa_private_key))
+ self._connected = False
+ return
+
+ self._ssh = paramiko.SSHClient()
+ self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ if (self._rsa_private_key is not None):
+ private_key = paramiko.RSAKey.from_private_key_file(self._rsa_private_key)
+ else:
+ private_key = None
+
+ try:
+ self._ssh.connect(hostname = self._ip, username = self._user,
+ password = self._password, pkey = private_key)
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to connect to the host! IP %s, user %s, RSA private key %s\n%s"
+ % (self._ip, self._user, self._rsa_private_key, e))
+ self._connected = False
+ self._ssh.close()
+ return
+
+ self._connected = True
+
+ def disconnect(self):
+ if self._connected:
+ self._connected = False
+ self._ssh.close()
+
+ def run_cmd(self, cmd):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ _stdin, stdout, stderr = self._ssh.exec_command(cmd, timeout = self._timeout)
+ self._output = stdout.read()
+ self._error = stderr.read()
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def scp_put(self, src, dst):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ scp = SCPClient(self._ssh.get_transport())
+ scp.put(src, dst)
+ self._output = stdout.read()
+ self._error = stderr.read()
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def scp_get(self, src, dst):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ scp = SCPClient(self._ssh.get_transport())
+ scp.get(src, dst)
+ self._output = stdout.read()
+ self._error = stderr.read()
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def get_output(self):
+ return self._output
+
+ def get_error(self):
+ return self._error
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
new file mode 100644
index 00000000..deba695f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import yaml
+import requests
+import time
+import os
+import copy
+from past.utils import old_div
+from rapid_log import RapidLog
+from rapid_log import bcolors
+inf = float("inf")
+from datetime import datetime as dt
+
+_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
+
+class RapidTest(object):
+ """
+ Class to manage the testing
+ """
+    def __init__(self, test_param, runtime, testname, environment_file ):
+        # test_param: dict with this test's configuration options
+        # runtime: requested test duration in seconds
+        # testname: name used when reporting results
+        # environment_file: path of the environment file (reported with results)
+        self.test = test_param
+        self.test['runtime'] = runtime
+        self.test['testname'] = testname
+        self.test['environment_file'] = environment_file
+        # Defaults when not configured: maxr = 1, maxz = infinity
+        # (semantics defined by the test implementations; not shown here)
+        if 'maxr' not in self.test.keys():
+            self.test['maxr'] = 1
+        if 'maxz' not in self.test.keys():
+            self.test['maxz'] = inf
+        # format.yaml describes how results are formatted/posted (post_data)
+        with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
+            self.data_format = yaml.load(f, Loader=yaml.FullLoader)
+
+ @staticmethod
+ def get_percentageof10Gbps(pps_speed,size):
+ # speed is given in pps, returning % of 10Gb/s
+ # 12 bytes is the inter packet gap
+ # pre-amble is 7 bytes
+ # SFD (start of frame delimiter) is 1 byte
+ # Total of 20 bytes overhead per packet
+ return (pps_speed / 1000000.0 * 0.08 * (size+20))
+
+ @staticmethod
+ def get_pps(speed,size):
+ # speed is given in % of 10Gb/s, returning Mpps
+ # 12 bytes is the inter packet gap
+ # pre-amble is 7 bytes
+ # SFD (start of frame delimiter) is 1 byte
+ # Total of 20 bytes overhead per packet
+ return (speed * 100.0 / (8*(size+20)))
+
+ @staticmethod
+ def get_speed(packet_speed,size):
+ # return speed in Gb/s
+ # 12 bytes is the inter packet gap
+ # pre-amble is 7 bytes
+ # SFD (start of frame delimiter) is 1 byte
+ # Total of 20 bytes overhead per packet
+ return (packet_speed / 1000.0 * (8*(size+20)))
+
+    @staticmethod
+    def set_background_flows(background_machines, number_of_flows):
+        # Configure the number of flows on every background traffic machine.
+        for machine in background_machines:
+            _ = machine.set_flows(number_of_flows)
+
+    @staticmethod
+    def set_background_speed(background_machines, speed):
+        # Set the generator speed on every background traffic machine.
+        for machine in background_machines:
+            machine.set_generator_speed(speed)
+
+    @staticmethod
+    def set_background_size(background_machines, imix):
+        # imixs is a list of packet sizes
+        for machine in background_machines:
+            machine.set_udp_packet_size(imix)
+
+    @staticmethod
+    def start_background_traffic(background_machines):
+        # Start traffic generation on every background machine.
+        for machine in background_machines:
+            machine.start()
+
+    @staticmethod
+    def stop_background_traffic(background_machines):
+        # Stop traffic generation on every background machine.
+        for machine in background_machines:
+            machine.stop()
+
+    @staticmethod
+    def parse_data_format_dict(data_format, variables):
+        # Recursively walk the (possibly nested) data_format dict and
+        # replace every leaf value that matches a key in 'variables' by
+        # that variable's value. Mutates data_format in place.
+        for k, v in data_format.items():
+            if type(v) is dict:
+                RapidTest.parse_data_format_dict(v, variables)
+            else:
+                if v in variables.keys():
+                    data_format[k] = variables[v]
+
+ def post_data(self, variables):
+ test_type = type(self).__name__
+ var = copy.deepcopy(self.data_format)
+ self.parse_data_format_dict(var, variables)
+ if var.keys() >= {'URL', test_type, 'Format'}:
+ URL=''
+ for value in var['URL'].values():
+ URL = URL + value
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
+ if var['Format'] == 'PushGateway':
+ data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
+ response = requests.post(url=URL, data=data,headers=HEADERS)
+ elif var['Format'] == 'Xtesting':
+ data = var[test_type]
+ response = requests.post(url=URL, json=data)
+ if (response.status_code >= 300):
+ RapidLog.info('Cannot send metrics to {}'.format(URL))
+ RapidLog.info(data)
+ return (var[test_type])
+
+    @staticmethod
+    def report_result(flow_number, size, data, prefix):
+        # Build one formatted result-table row from the measurement dict
+        # 'data'. 'prefix' holds per-column color/marker strings. Fields
+        # that were not measured (None) are rendered as 'NA'.
+        # A negative flow_number is shown in parentheses.
+        if flow_number < 0:
+            flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
+        else:
+            flow_number_str = '|{:>7} |'.format(flow_number)
+        if data['pps_req_tx'] is None:
+            pps_req_tx_str = '{0: >14}'.format('   NA     |')
+        else:
+            pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
+        if data['pps_tx'] is None:
+            pps_tx_str = '{0: >14}'.format('   NA     |')
+        else:
+            pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
+        if data['pps_sut_tx'] is None:
+            pps_sut_tx_str = '{0: >14}'.format('   NA     |')
+        else:
+            pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
+        if data['pps_rx'] is None:
+            pps_rx_str = '{0: >25}'.format('NA        |')
+        else:
+            pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
+                RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
+        if data['abs_dropped'] is None:
+            tot_drop_str = ' |       NA  | '
+        else:
+            tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
+        # lat_perc_max indicates the percentile landed in the last bucket,
+        # so the value is a lower bound ('>' marker)
+        if data['lat_perc'] is None:
+            lat_perc_str = '|{:^10.10}|'.format('NA')
+        elif data['lat_perc_max'] == True:
+            lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
+                float(data['lat_perc']), bcolors.ENDC)
+        else:
+            lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
+                float(data['lat_perc']), bcolors.ENDC)
+        if data['actual_duration'] is None:
+            elapsed_time_str = ' NA |'
+        else:
+            elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
+        if data['mis_ordered'] is None:
+            mis_ordered_str = '    NA   '
+        else:
+            mis_ordered_str = '{:>9.0f} '.format(data['mis_ordered'])
+        # Assemble the full row; drop rate is computed from abs_tx/abs_rx
+        return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
+            + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
+            pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
+            pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
+            ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
+            + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
+            ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
+            tot_drop_str + prefix['drop_rate'] +
+            '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + ' |' +
+            prefix['mis_ordered'] + mis_ordered_str + bcolors.ENDC +
+            ' |' + elapsed_time_str)
+
+ def run_iteration(self, requested_duration, flow_number, size, speed):
+ BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
+ sleep_time = self.test['sleep_time']
+ LAT_PERCENTILE = self.test['lat_percentile']
+ iteration_data= {}
+ time_loop_data= {}
+ iteration_data['r'] = 0;
+
+ while (iteration_data['r'] < self.test['maxr']):
+ self.gen_machine.start_latency_cores()
+ time.sleep(sleep_time)
+ # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
+ t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
+ t1_dp_rx = t1_rx - t1_non_dp_rx
+ t1_dp_tx = t1_tx - t1_non_dp_tx
+ self.gen_machine.set_generator_speed(0)
+ self.gen_machine.start_gen_cores()
+ self.set_background_speed(self.background_machines, 0)
+ self.start_background_traffic(self.background_machines)
+ if 'ramp_step' in self.test.keys():
+ ramp_speed = self.test['ramp_step']
+ else:
+ ramp_speed = speed
+ while ramp_speed < speed:
+ self.gen_machine.set_generator_speed(ramp_speed)
+ self.set_background_speed(self.background_machines, ramp_speed)
+ time.sleep(2)
+ ramp_speed = ramp_speed + self.test['ramp_step']
+ self.gen_machine.set_generator_speed(speed)
+ self.set_background_speed(self.background_machines, speed)
+ iteration_data['speed'] = speed
+ time_loop_data['speed'] = speed
+ time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
+ start_bg_gen_stats = []
+ for bg_gen_machine in self.background_machines:
+ bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
+ bg_gen_stat = {
+ "bg_dp_rx" : bg_rx - bg_non_dp_rx,
+ "bg_dp_tx" : bg_tx - bg_non_dp_tx,
+ "bg_tsc" : bg_tsc
+ }
+ start_bg_gen_stats.append(dict(bg_gen_stat))
+ if self.sut_machine!= None:
+ t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
+ t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
+ tx = t2_tx - t1_tx
+ iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
+ iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
+ iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
+ if tx == 0:
+ RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
+ if iteration_data['abs_tx'] == 0:
+ RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
+ # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
+ # Measure latency statistics per second
+ iteration_data.update(self.gen_machine.lat_stats())
+ t2_lat_tsc = iteration_data['lat_tsc']
+ sample_count = 0
+ for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
+ break
+ iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
+ iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
+ time_loop_data['bucket_size'] = iteration_data['bucket_size']
+ iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
+ if self.test['test'] == 'fixed_rate':
+ iteration_data['pps_req_tx'] = None
+ iteration_data['pps_tx'] = None
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['pps_rx'] = None
+ iteration_data['lat_perc'] = None
+ iteration_data['actual_duration'] = None
+ iteration_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'mis_ordered' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(flow_number, size,
+ iteration_data, iteration_prefix ))
+ tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
+ iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
+ tot_lat_measurement_duration = float(0)
+ iteration_data['actual_duration'] = float(0)
+ tot_sut_core_measurement_duration = float(0)
+ tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
+ lat_avail = core_avail = sut_avail = False
+ while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
+ time.sleep(0.5)
+ time_loop_data.update(self.gen_machine.lat_stats())
+ # Get statistics after some execution time
+ if time_loop_data['lat_tsc'] != t2_lat_tsc:
+ single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
+ # A second has passed in between to lat_stats requests. Hence we need to process the results
+ tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
+ if iteration_data['lat_min'] > time_loop_data['lat_min']:
+ iteration_data['lat_min'] = time_loop_data['lat_min']
+ if iteration_data['lat_max'] < time_loop_data['lat_max']:
+ iteration_data['lat_max'] = time_loop_data['lat_max']
+ iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
+ iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weigth.
+ sample_count = 0
+ for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
+ break
+ time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
+ time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
+ iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
+ t2_lat_tsc = time_loop_data['lat_tsc']
+ lat_avail = True
+ t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
+ if t3_tsc != t2_tsc:
+ time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
+ iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
+ delta_rx = t3_rx - t2_rx
+ tot_rx += delta_rx
+ delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
+ tot_non_dp_rx += delta_non_dp_rx
+ delta_tx = t3_tx - t2_tx
+ tot_tx += delta_tx
+ delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
+ tot_non_dp_tx += delta_non_dp_tx
+ delta_dp_tx = delta_tx -delta_non_dp_tx
+ delta_dp_rx = delta_rx -delta_non_dp_rx
+ time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
+ iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
+ delta_drop = t3_drop - t2_drop
+ tot_drop += delta_drop
+ t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
+ core_avail = True
+ if self.sut_machine!=None:
+ t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
+ if t3_sut_tsc != t2_sut_tsc:
+ single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
+ tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
+ tot_sut_rx += t3_sut_rx - t2_sut_rx
+ tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
+ delta_sut_tx = t3_sut_tx - t2_sut_tx
+ tot_sut_tx += delta_sut_tx
+ delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
+ tot_sut_non_dp_tx += delta_sut_non_dp_tx
+ t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
+ sut_avail = True
+ if self.test['test'] == 'fixed_rate':
+ if lat_avail == core_avail == True:
+ lat_avail = core_avail = False
+ time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
+ time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
+ if self.sut_machine != None and sut_avail:
+ time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
+ sut_avail = False
+ else:
+ time_loop_data['pps_sut_tx'] = None
+ time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
+ time_loop_data['abs_tx'] = delta_dp_tx
+ time_loop_data['abs_rx'] = delta_dp_rx
+ time_loop_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'mis_ordered' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(flow_number, size, time_loop_data,
+ time_loop_prefix))
+ time_loop_data['test'] = self.test['testname']
+ time_loop_data['environment_file'] = self.test['environment_file']
+ time_loop_data['Flows'] = flow_number
+ time_loop_data['Size'] = size
+ time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
+ _ = self.post_data(time_loop_data)
+ end_bg_gen_stats = []
+ for bg_gen_machine in self.background_machines:
+ bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
+ bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
+ "bg_dp_tx" : bg_tx - bg_non_dp_tx,
+ "bg_tsc" : bg_tsc,
+ "bg_hz" : bg_hz
+ }
+ end_bg_gen_stats.append(dict(bg_gen_stat))
+ self.stop_background_traffic(self.background_machines)
+ i = 0
+ bg_rates =[]
+ while i < len(end_bg_gen_stats):
+ bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
+ start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
+ start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
+ i += 1
+ if len(bg_rates):
+ iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
+ RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
+ else:
+ iteration_data['avg_bg_rate'] = None
+ #Stop generating
+ self.gen_machine.stop_gen_cores()
+ time.sleep(3.5)
+ self.gen_machine.stop_latency_cores()
+ iteration_data['r'] += 1
+ iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
+ iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
+ t4_tsc = t2_tsc
+ while t4_tsc == t2_tsc:
+ t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
+ if self.test['test'] == 'fixed_rate':
+ iteration_data['lat_tsc'] = t2_lat_tsc
+ while iteration_data['lat_tsc'] == t2_lat_tsc:
+ iteration_data.update(self.gen_machine.lat_stats())
+ sample_count = 0
+ for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
+ break
+ iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
+ iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
+ delta_rx = t4_rx - t2_rx
+ delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
+ delta_tx = t4_tx - t2_tx
+ delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
+ delta_dp_tx = delta_tx -delta_non_dp_tx
+ delta_dp_rx = delta_rx -delta_non_dp_rx
+ iteration_data['abs_tx'] = delta_dp_tx
+ iteration_data['abs_rx'] = delta_dp_rx
+ iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
+ iteration_data['pps_req_tx'] = None
+ iteration_data['pps_tx'] = None
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
+ iteration_data['actual_duration'] = None
+ break ## Not really needed since the while loop will stop when evaluating the value of r
+ else:
+ sample_count = 0
+ for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
+ break
+ iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
+ iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
+ iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
+ iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
+ iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
+ if self.sut_machine != None and sut_avail:
+ iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
+ else:
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
+ iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
+ iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
+ iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
+ if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
+ break
+ self.gen_machine.stop_latency_cores()
+ iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
+ return (iteration_data)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
new file mode 100644
index 00000000..a86ce806
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class WarmupTest(RapidTest):
+    """
+    Class to manage the warmup testing.
+    Sends low-speed traffic before the real tests so that ARP resolution
+    has completed in all intermediate switches/routers.
+    """
+    def __init__(self, test_param, gen_machine):
+        # test_param: dict providing the warmupimix/warmupflowsize/
+        #   warmupspeed/warmuptime keys read in run() below
+        # gen_machine: generator machine used to send the warmup traffic
+        self.test = test_param
+        self.gen_machine = gen_machine
+
+    def run(self):
+        # Running at low speed to make sure the ARP messages can get through.
+        # If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
+        # Note however that if we would run the test steps during a very long time, the ARP would expire in the switch.
+        # PROX will send a new ARP request every second so chances are very low that they will all fail to get through
+        imix = self.test['warmupimix']
+        FLOWSIZE = self.test['warmupflowsize']
+        WARMUPSPEED = self.test['warmupspeed']
+        WARMUPTIME = self.test['warmuptime']
+        self.gen_machine.set_generator_speed(WARMUPSPEED)
+        self.gen_machine.set_udp_packet_size(imix)
+        # gen_machine['socket'].set_value(gencores,0,56,1,1)
+        # A falsy warmupflowsize means: keep the generator's current flows
+        if FLOWSIZE:
+            _ = self.gen_machine.set_flows(FLOWSIZE)
+        self.gen_machine.start()
+        time.sleep(WARMUPTIME)
+        self.gen_machine.stop()
+        # gen_machine['socket'].set_value(gencores,0,56,50,1)
+        # NOTE(review): a second sleep of the same warmup duration follows the
+        # stop; presumably to let the system settle -- confirm this is intended
+        time.sleep(WARMUPTIME)
+        return (True, None)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py
new file mode 100644
index 00000000..2f6b9443
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# pylint: disable=missing-docstring
+
+import json
+import os
+import sys
+import time
+
+from xtesting.core import testcase
+from runrapid import RapidTestManager
+from rapid_cli import RapidCli
+from rapid_log import RapidLog
+
+class RapidXt(testcase.TestCase):
+    """Xtesting testcase wrapper that drives a full rapid test run and
+    records result, details and timing on the TestCase instance.
+    """
+
+    def run(self, **kwargs):
+        """Execute the rapid tests.
+
+        kwargs override the default test parameters (e.g. test_file,
+        environment_file) before the run starts. On any exception the
+        result is set to 0 (failed) and the stop time is still recorded.
+        """
+        try:
+            test_params = RapidTestManager.get_defaults()
+            # Caller-supplied kwargs take precedence over the defaults
+            for key in kwargs:
+                test_params[key] = kwargs[key]
+            os.makedirs(self.res_dir, exist_ok=True)
+            test_params['resultsdir'] = self.res_dir
+            _, test_file_name = os.path.split(test_params['test_file'])
+            _, environment_file_name = os.path.split(
+                test_params['environment_file'])
+            # Log file name combines the environment and test file names
+            log_file = '{}/RUN{}.{}.log'.format(self.res_dir,
+                environment_file_name, test_file_name)
+            RapidLog.log_init(log_file, test_params['loglevel'],
+                test_params['screenloglevel'] , test_params['version'] )
+            test_manager = RapidTestManager()
+            self.start_time = time.time()
+            self.result, self.details = test_manager.run_tests(test_params)
+            self.stop_time = time.time()
+            RapidLog.log_close()
+
+        except Exception: # pylint: disable=broad-except
+            # Report failure to xtesting instead of propagating
+            print("Unexpected error:", sys.exc_info()[0])
+            self.result = 0
+            self.stop_time = time.time()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
new file mode 100755
index 00000000..7ec270a1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python3
+
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+from __future__ import print_function
+from __future__ import print_function
+from __future__ import division
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+import os
+import sys
+import concurrent.futures
+from concurrent.futures import ALL_COMPLETED
+from rapid_cli import RapidCli
+from rapid_log import RapidLog
+from rapid_parser import RapidConfigParser
+from rapid_defaults import RapidDefaults
+from rapid_machine import RapidMachine
+from rapid_generator_machine import RapidGeneratorMachine
+from rapid_flowsizetest import FlowSizeTest
+from rapid_corestatstest import CoreStatsTest
+from rapid_portstatstest import PortStatsTest
+from rapid_impairtest import ImpairTest
+from rapid_irqtest import IrqTest
+from rapid_warmuptest import WarmupTest
+
+class RapidTestManager(object):
+    """
+    RapidTestManager Class: instantiates the PROX machines described in the
+    parsed test parameters, starts/connects PROX on each of them, runs the
+    configured tests in order and returns the accumulated result.
+    """
+    def __init__(self):
+        """
+        Init Function
+        """
+        # List of all RapidMachine/RapidGeneratorMachine instances created
+        # by run_tests(); closed again in __del__.
+        self.machines = []
+
+    def __del__(self):
+        # Best-effort cleanup when the manager is garbage collected.
+        # NOTE(review): __del__ timing is not deterministic; run_tests()
+        # already closes the machines on the normal path.
+        for machine in self.machines:
+            machine.close_prox()
+
+    @staticmethod
+    def get_defaults():
+        # Returns the shared default test parameter dict (not a copy).
+        return (RapidDefaults.test_params)
+
+    def run_tests(self, test_params):
+        """Create the machines, start PROX everywhere and run all tests.
+
+        test_params: raw parameter dict; it is first expanded by
+            RapidConfigParser.parse_config().
+        Returns (result, result_details): the summed per-test results and
+            the details of the last test, or (None, None) when interrupted.
+        """
+        test_params = RapidConfigParser.parse_config(test_params)
+        monitor_gen = monitor_sut = False
+        background_machines = []
+        sut_machine = gen_machine = None
+        configonly = test_params['configonly']
+        machine_names = []
+        machine_counter = {}
+        for machine_params in test_params['machines']:
+            # Make machine names unique by appending a per-name counter
+            # (e.g. two machines named 'gen' become 'gen' and 'gen_2')
+            if machine_params['name'] not in machine_names:
+                machine_names.append(machine_params['name'])
+                machine_counter[machine_params['name']] = 1
+            else:
+                machine_counter[machine_params['name']] += 1
+                machine_params['name'] = '{}_{}'.format(machine_params['name'],
+                    machine_counter[machine_params['name']])
+            # A machine with 'gencores' configured is a traffic generator
+            if 'gencores' in machine_params.keys():
+                machine = RapidGeneratorMachine(test_params['key'],
+                    test_params['user'], test_params['password'],
+                    test_params['vim_type'], test_params['rundir'],
+                    test_params['resultsdir'], machine_params, configonly,
+                    test_params['ipv6'])
+                if machine_params['monitor']:
+                    # Only one generator may be the monitored one; the
+                    # others generate background traffic
+                    if monitor_gen:
+                        RapidLog.exception("Can only monitor 1 generator")
+                        raise Exception("Can only monitor 1 generator")
+                    else:
+                        monitor_gen = True
+                        gen_machine = machine
+                else:
+                    background_machines.append(machine)
+            else:
+                machine = RapidMachine(test_params['key'], test_params['user'],
+                    test_params['password'], test_params['vim_type'],
+                    test_params['rundir'], test_params['resultsdir'],
+                    machine_params, configonly)
+                if machine_params['monitor']:
+                    if monitor_sut:
+                        RapidLog.exception("Can only monitor 1 sut")
+                        raise Exception("Can only monitor 1 sut")
+                    else:
+                        monitor_sut = True
+                        # Only a SUT running a PROX socket can be queried
+                        if machine_params['prox_socket']:
+                            sut_machine = machine
+            self.machines.append(machine)
+        RapidLog.debug(test_params)
+        try:
+            # Start PROX on all machines in parallel; the futures stay
+            # pending until close_prox() lets the remote processes exit
+            prox_executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.machines))
+            self.future_to_prox = {prox_executor.submit(machine.start_prox): machine for machine in self.machines}
+            if configonly:
+                # Config-only run: deploy the configs and exit the process
+                concurrent.futures.wait(self.future_to_prox,return_when=ALL_COMPLETED)
+                sys.exit()
+            # Open the PROX control sockets in parallel as well
+            socket_executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.machines))
+            future_to_connect_prox = {socket_executor.submit(machine.connect_prox): machine for machine in self.machines}
+            concurrent.futures.wait(future_to_connect_prox,return_when=ALL_COMPLETED)
+            result = 0
+            # NOTE(review): if test_params['tests'] is empty, result_details
+            # (and 'test' below) are never assigned and the final return
+            # would raise NameError -- confirm the parser guarantees >= 1 test
+            for test_param in test_params['tests']:
+                RapidLog.info(test_param['test'])
+                if test_param['test'] in ['flowsizetest', 'TST009test',
+                        'fixed_rate', 'increment_till_fail']:
+                    test = FlowSizeTest(test_param,
+                            test_params['lat_percentile'],
+                            test_params['runtime'],
+                            test_params['TestName'],
+                            test_params['environment_file'],
+                            gen_machine,
+                            sut_machine, background_machines,
+                            test_params['sleep_time'])
+                elif test_param['test'] in ['corestatstest']:
+                    test = CoreStatsTest(test_param,
+                            test_params['runtime'],
+                            test_params['TestName'],
+                            test_params['environment_file'],
+                            self.machines)
+                elif test_param['test'] in ['portstatstest']:
+                    test = PortStatsTest(test_param,
+                            test_params['runtime'],
+                            test_params['TestName'],
+                            test_params['environment_file'],
+                            self.machines)
+                elif test_param['test'] in ['impairtest']:
+                    test = ImpairTest(test_param,
+                            test_params['lat_percentile'],
+                            test_params['runtime'],
+                            test_params['TestName'],
+                            test_params['environment_file'],
+                            gen_machine,
+                            sut_machine, background_machines)
+                elif test_param['test'] in ['irqtest']:
+                    test = IrqTest(test_param,
+                            test_params['runtime'],
+                            test_params['TestName'],
+                            test_params['environment_file'],
+                            self.machines)
+                elif test_param['test'] in ['warmuptest']:
+                    test = WarmupTest(test_param,
+                            gen_machine)
+                else:
+                    # Unknown test name: log it; the 'test' variable keeps
+                    # its previous value (or is unbound on the first loop)
+                    RapidLog.debug('Test name ({}) is not valid:'.format(
+                        test_param['test']))
+                single_test_result, result_details = test.run()
+                result = result + single_test_result
+            for machine in self.machines:
+                machine.close_prox()
+            concurrent.futures.wait(self.future_to_prox,
+                    return_when=ALL_COMPLETED)
+        except (ConnectionError, KeyboardInterrupt) as e:
+            # Abort: tear down both executors without waiting for the
+            # (possibly blocked) worker threads.
+            # NOTE(review): if the exception fires before socket_executor is
+            # created, this handler itself raises NameError -- confirm
+            result = result_details = None
+            socket_executor.shutdown(wait=False)
+            socket_executor._threads.clear()
+            prox_executor.shutdown(wait=False)
+            prox_executor._threads.clear()
+            concurrent.futures.thread._threads_queues.clear()
+            RapidLog.error("Test interrupted: {} {}".format(
+                type(e).__name__,e))
+        return (result, result_details)
+
+def main():
+ """Main function.
+ """
+ test_params = RapidTestManager.get_defaults()
+ # When no cli is used, the process_cli can be replaced by code modifying
+ # test_params
+ test_params = RapidCli.process_cli(test_params)
+ _, test_file_name = os.path.split(test_params['test_file'])
+ _, environment_file_name = os.path.split(test_params['environment_file'])
+ if 'resultsdir' in test_params:
+ res_dir = test_params['resultsdir']
+ log_file = '{}/RUN{}.{}.log'.format(res_dir,environment_file_name,
+ test_file_name)
+ else:
+ log_file = 'RUN{}.{}.log'.format(environment_file_name, test_file_name)
+ RapidLog.log_init(log_file, test_params['loglevel'],
+ test_params['screenloglevel'] , test_params['version'] )
+ test_manager = RapidTestManager()
+ test_result, _ = test_manager.run_tests(test_params)
+ RapidLog.log_close()
+
+if __name__ == "__main__":
+ main()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg
new file mode 100644
index 00000000..bac49bd5
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg
@@ -0,0 +1,16 @@
+[metadata]
+name = rapidxt
+version = 1
+
+[files]
+packages = .
+package_dir = .
+
+[options.data_files]
+. = format.yaml
+
+[entry_points]
+xtesting.testcase =
+ rapidxt = rapidxt:RapidXt
+[options.packages.find]
+where = .
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py
new file mode 100644
index 00000000..fa9d59ac
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+# All package metadata lives in setup.cfg; pbr reads it at build time.
+setuptools.setup(
+    setup_requires=['pbr>=2.0.0'],
+    pbr=True)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh
new file mode 100755
index 00000000..c2c4ab07
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh
@@ -0,0 +1,31 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+## This code will help in using tshark to decode packets that were dumped
+## in the prox.log file as a result of dump, dump_tx or dump_rx commands
+
+#egrep '^[0-9]{4}|^[0-9]+\.' prox.log | text2pcap -q - - | tshark -r -
+# Extract hex-dump lines and timestamps from prox.log into tempshark.log,
+# then feed that through text2pcap/tshark for decoding.
+while read -r line ; do
+    # Hex dump line: starts with a 4-digit offset
+    # NOTE(review): \s inside bash [[ =~ ]] is not POSIX ERE; presumably
+    # works on the targeted GNU systems -- confirm ([[:space:]] is portable)
+    if [[ $line =~ (^[0-9]{4}\s.*) ]] ;
+    then
+        echo "$line" >> tempshark.log
+    fi
+    # Epoch timestamp line: convert to %H:%M:%S.%N for text2pcap -t
+    if [[ $line =~ (^[0-9]+\.[0-9]+)(.*) ]] ;
+    then
+        date -d@"${BASH_REMATCH[1]}" -u +%H:%M:%S.%N >> tempshark.log
+    fi
+done < <(cat prox.log)
+text2pcap -t "%H:%M:%S." -q tempshark.log - | tshark -r -
+rm tempshark.log
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
new file mode 100755
index 00000000..7038ab66
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import os_client_config
+import heatclient
+from heatclient.client import Client as Heat_Client
+from keystoneclient.v3 import Client as Keystone_Client
+from heatclient.common import template_utils
+from novaclient import client as NovaClient
+import yaml
+import os
+import time
+import sys
+from collections import OrderedDict
+from rapid_log import RapidLog
+
+class StackDeployment(object):
+    """Deployment class to create VMs for test execution in OpenStack
+    environment. Deploys a Heat stack, creates the SSH keypair when needed
+    and writes a rapid .env file describing the created servers.
+    """
+    def __init__(self, cloud_name):
+        # cloud_name: name of the cloud in clouds.yaml (os_client_config)
+        # RapidLog.log_init('CREATEStack.log', 'DEBUG', 'INFO', '2020.05.05')
+        self.dp_ips = []
+        self.dp_macs = []
+        self.mngmt_ips = []
+        self.names = []
+        self.number_of_servers = 0
+        self.cloud_name = cloud_name
+        self.heat_template = 'L6_heat_template.yaml'
+        self.heat_param = 'params_rapid.yaml'
+        self.cloud_config = os_client_config.OpenStackConfig().get_all_clouds()
+        ks_client = None
+        for cloud in self.cloud_config:
+            if cloud.name == self.cloud_name:
+                ks_client = Keystone_Client(**cloud.config['auth'])
+                break
+        # NOTE(review): exits silently when the cloud name is unknown --
+        # an error message before sys.exit() would help diagnosis
+        if ks_client == None:
+            sys.exit()
+        heat_endpoint = ks_client.service_catalog.url_for(service_type='orchestration',
+            endpoint_type='publicURL')
+        self.heatclient = Heat_Client('1', heat_endpoint, token=ks_client.auth_token)
+        self.nova_client = NovaClient.Client(2, **cloud.config['auth'])
+
+    def generate_paramDict(self):
+        """Collect server names, management IPs, dataplane IPs/MACs and the
+        server count from the outputs of the deployed Heat stack."""
+        for output in self.stack.output_list()['outputs']:
+            output_value = self.stack.output_show(output['output_key'])['output']['output_value']
+            for server_group_output in output_value:
+                if (output['output_key'] == 'number_of_servers'):
+                    self.number_of_servers += int (server_group_output)
+                elif (output['output_key'] == 'mngmt_ips'):
+                    for ip in server_group_output:
+                        self.mngmt_ips.append(ip)
+                elif (output['output_key'] == 'data_plane_ips'):
+                    for dps in server_group_output:
+                        self.dp_ips.append(dps)
+                elif (output['output_key'] == 'data_plane_macs'):
+                    for mac in server_group_output:
+                        self.dp_macs.append(mac)
+                elif (output['output_key'] == 'server_name'):
+                    for name in server_group_output:
+                        self.names.append(name)
+
+    def print_paramDict(self, user, dataplane_subnet_mask):
+        """Write the <stack_name>.env file describing all created servers.
+
+        user: SSH user name written to the [ssh] section.
+        dataplane_subnet_mask: prefix length appended to each dp_ip.
+        """
+        # NOTE(review): exits silently when the collected lists are
+        # inconsistent in length -- confirm this is the intended behavior
+        if not(len(self.dp_ips) == len(self.dp_macs) == len(self.mngmt_ips)):
+            sys.exit()
+        _ENV_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
+        env_file = os.path.join(_ENV_FILE_DIR, self.stack.stack_name)+ '.env'
+        with open(env_file, 'w') as env_file:
+            env_file.write('[rapid]\n')
+            env_file.write('total_number_of_machines = {}\n'.format(str(self.number_of_servers)))
+            env_file.write('\n')
+            for count in range(self.number_of_servers):
+                env_file.write('[M' + str(count+1) + ']\n')
+                env_file.write('name = {}\n'.format(str(self.names[count])))
+                env_file.write('admin_ip = {}\n'.format(str(self.mngmt_ips[count])))
+                # A server can have one or multiple dataplane interfaces
+                if type(self.dp_ips[count]) == list:
+                    for i, dp_ip in enumerate(self.dp_ips[count], start = 1):
+                        env_file.write('dp_ip{} = {}/{}\n'.format(i, str(dp_ip),
+                            dataplane_subnet_mask))
+                else:
+                    env_file.write('dp_ip1 = {}/{}\n'.format(str(self.dp_ips[count]),
+                        dataplane_subnet_mask))
+                if type(self.dp_macs[count]) == list:
+                    for i, dp_mac in enumerate(self.dp_macs[count], start = 1):
+                        env_file.write('dp_mac{} = {}\n'.format(i, str(dp_mac)))
+                else:
+                    env_file.write('dp_mac1 = {}\n'.format(str(self.dp_macs[count])))
+                env_file.write('\n')
+            env_file.write('[ssh]\n')
+            env_file.write('key = {}\n'.format(self.key_name))
+            env_file.write('user = {}\n'.format(user))
+            env_file.write('\n')
+            env_file.write('[Varia]\n')
+            env_file.write('vim = OpenStack\n')
+            env_file.write('stack = {}\n'.format(self.stack.stack_name))
+
+    def create_stack(self, stack_name, stack_file_path, heat_parameters):
+        """Create the Heat stack and block until it is complete.
+
+        Returns the stack object on CREATE_COMPLETE; otherwise logs an
+        exception (and implicitly returns None).
+        """
+        files, template = template_utils.process_template_path(stack_file_path)
+        stack_created = self.heatclient.stacks.create(stack_name = stack_name,
+            template = template, parameters = heat_parameters,
+            files = files)
+        stack = self.heatclient.stacks.get(stack_created['stack']['id'],
+            resolve_outputs=True)
+        # Poll at 5 second intervals, until the status is no longer 'BUILD'
+        while stack.stack_status == 'CREATE_IN_PROGRESS':
+            print('waiting..')
+            time.sleep(5)
+            stack = self.heatclient.stacks.get(stack_created['stack']['id'], resolve_outputs=True)
+        if stack.stack_status == 'CREATE_COMPLETE':
+            return stack
+        else:
+            RapidLog.exception('Error in stack deployment')
+
+    def create_key(self):
+        """Create the Nova keypair; reuse an existing local public key when
+        present, otherwise let Nova generate the pair and store the private
+        key locally with 0600 permissions."""
+        if os.path.exists(self.key_name):
+            public_key_file = "{}.pub".format(self.key_name)
+            # Private key exists locally; the matching public key must too
+            # NOTE(review): execution continues after RapidLog.critical --
+            # presumably critical() raises/exits; confirm, otherwise the
+            # open() below fails on the missing .pub file
+            if not os.path.exists(public_key_file):
+                RapidLog.critical('Keypair {}.pub does not exist'.format(
+                    self.key_name))
+            with open(public_key_file, mode='rb') as public_file:
+                public_key = public_file.read()
+        else:
+            # No local key: passing public_key=None makes Nova generate one
+            public_key = None
+        keypair = self.nova_client.keypairs.create(name = self.key_name,
+            public_key = public_key)
+        # Create a file for writing that can only be read and written by owner
+        if not os.path.exists(self.key_name):
+            fp = os.open(self.key_name, os.O_WRONLY | os.O_CREAT, 0o600)
+            with os.fdopen(fp, 'w') as f:
+                f.write(keypair.private_key)
+        RapidLog.info('Keypair {} created'.format(self.key_name))
+
+    def IsDeployed(self, stack_name):
+        """Return True (and remember the stack) when a stack with this name
+        already exists in the cloud."""
+        for stack in self.heatclient.stacks.list():
+            if stack.stack_name == stack_name:
+                RapidLog.info('Stack already existing: {}'.format(stack_name))
+                self.stack = stack
+                return True
+        return False
+
+    def IsKey(self):
+        """Return True when a Nova keypair named self.key_name exists."""
+        keypairs = self.nova_client.keypairs.list()
+        if next((x for x in keypairs if x.name == self.key_name), None):
+            RapidLog.info('Keypair {} already exists'.format(self.key_name))
+            return True
+        return False
+
+    def deploy(self, stack_name, heat_template, heat_param):
+        """Deploy the stack (and keypair) unless it already exists.
+
+        heat_param: YAML file whose 'parameters' mapping is passed to Heat;
+        BaseLoader keeps all parameter values as plain strings.
+        """
+        heat_parameters_file = open(heat_param)
+        heat_parameters = yaml.load(heat_parameters_file,
+            Loader=yaml.BaseLoader)['parameters']
+        heat_parameters_file.close()
+        self.key_name = heat_parameters['PROX_key']
+        if not self.IsDeployed(stack_name):
+            if not self.IsKey():
+                self.create_key()
+            self.stack = self.create_stack(stack_name, heat_template,
+                heat_parameters)
+
+    def generate_env_file(self, user = 'centos', dataplane_subnet_mask = '24'):
+        """Collect the stack outputs and write the rapid .env file."""
+        self.generate_paramDict()
+        self.print_paramDict(user, dataplane_subnet_mask)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
new file mode 100755
index 00000000..78772dd2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# Persist the SR-IOV and QAT device-plugin environment variables so they
+# survive for later (SSH) sessions inside the container.
+function save_k8s_envs()
+{
+    printenv | grep "PCIDEVICE" > /opt/rapid/k8s_sriov_device_plugin_envs
+    printenv | grep "QAT[0-9]" > /opt/rapid/k8s_qat_device_plugin_envs
+}
+
+# Create the TUN device node (char 10:200) needed by PROX/DPDK tap usage.
+function create_tun()
+{
+    mkdir -p /dev/net
+    mknod /dev/net/tun c 10 200
+    chmod 600 /dev/net/tun
+}
+
+save_k8s_envs
+create_tun
+
+# Ready for testing
+touch /opt/rapid/system_ready_for_rapid
+
+# Start SSH server in background
+echo "mkdir -p /var/run/sshd" >> /etc/rc.local
+service ssh start
+
+# Allow the rapid user passwordless sudo for the test scripts
+echo "rapid ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
+
+# Keep the container alive; all further interaction happens over SSH
+sleep infinity
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README
new file mode 100644
index 00000000..9e26fdb1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README
@@ -0,0 +1,194 @@
+##
+## Copyright (c) 2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# This README is describing the format of all the rapid test files that you can
+# find in this directory.
+# These files can be specified as a parameter for the runrapid.py script, using
+# the --test [testfile] option. The default file name is specified in
+# rapid_defaults.py and is basicrapid.test.
+#
+# There are 3 types of sections in this config file:
+# - the [TestParameters] section, which defines how many [TestMx] sections and
+# how many [testy] sections need to be present in this file.
+# - at least one TestMachine section [TestMx], where x is the index of the Test
+# machines starting at index 1
+# - at least one tests definition section [testy], where y is the index of the
+# test to be run. Index starts at 1.
+
+[TestParameters]
+# The name of this test. Can be chosen freely to describe this test
+name = BasicSwapTesting
+
+# Defines how many different tests will be executed when running this test file.
+# This is usually set to 1. You need to define as many [testy] sections as
+# defined in this parameter.
+number_of_tests = 1
+
+# The next parameter defines how many PROX instances are needed to run this test.
+# You need to define as many [TestMx] sections as defined in this parameter.
+total_number_of_test_machines = 2
+
+# Some rapid tests are reporting the latency percentile statistics. This
+# parameter defines which latency percentile will be used for this test.
+lat_percentile = 99
+
+# When doing ipv6 testing, this parameter needs to be set to True, default is
+# False. This is used by the generator code to calculate the proper packet
+# header length offsets.
+ipv6 = True
+
+# The following section describes the role of the first Test Machine. Note that
+# the connection details for each PROX instance are defined in the environment
+# file (default: rapid.env). There is a --map parameter for runrapid.py that
+# specifies how the Test machines are mapped onto the available PROX instances.
+[TestM1]
+# Name can be freely chosen
+name = Generator
+# the PROX configuration files that will be used to start PROX on this Test
+# machine. This configuration file will define the role that PROX will play in
+# this Test machine.
+config_file = configs/gen.cfg
+# The values of the remaining parameters in this section are passed on to the
+# PROX configuration file through a file called parameters.lua
+#
+# The next parameter defines the destination Test machine index. This will be
+# used by a generator to define which destination MAC or IP addresses should be
+# used in the generated packets. The fact that we use MAC or IP addresses is
+# defined by the use of l2 or l3.
+dest_vm = 2
+# The next parameter defines the GW Test machine index. This will be
+# used by a generator to define which GW MAC or IP addresses should be
+# used in the generated packets. The fact that we use MAC or IP addresses is
+# defined by the use of l2 or l3.
+#gw_vm = 2
+# mcore defines which master core PROX will use. It is not advised to change
+# this. The PROX instances are optimized to use core 0 for the master and all
+# other cores for DPDK usage.
+mcore = [0]
+# gencores defines which cores will be used to generate packets. If the
+# generator is not able to generate enough packets, you might want to assign
+# more cores to the generator. Make sure not to use more cores in these
+# variables than you have available in your PROX instance.
+gencores = [1]
+# latcores defines the cores that will do the task of measuring latency,
+# reordering and other statistics.
+latcores = [3]
+# Non generator Test machines only require the cores parameter to find out on
+# which cores they need to place the PROX tasks.
+# cores = [1-3]
+# cores = [1,2,3]
+# The bucket_size_exp parameter is only needed for generator machines when
+# collecting percentile latency statistics. PROX is assigning every packet to
+# one of the 128 latency buckets. The size of the latency buckets depends on
+# the processor frequency and this parameter using some complicated formula.
+# iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) /
+# (old_div(float(iteration_data['lat_hz']),float(10**6)))
+# The result is expressing the width of each bucket in micro-seconds.
+# The minimum value (which is also the default value) for this parameter is 11.
+# For a processor with a frequency of 2Ghz, and a parameter of 11, this results
+# in a bucket size of 1.024 us. Since we have 128 buckets, the maximum latency
+# that can be stored in the buckets is in theory 128 * 1.024 = 131.072 us. We
+# will however place every measurement with a latency higher than 131.072 us in
+# the last bucket. When you are dealing with higher latency, you will have to
+# increase this parameter. Each time you increase this parameter by 1, you will
+# double the bucket size.
+#bucket_size_exp = 12
+# We can only monitor one generator and one reflector (swap) Test machine.
+# Monitoring means that we will use the statistics coming from these Test
+# machines to report statistics and make decisions on the success of a test.
+# Test machines not playing a role in this process, need to have the monitor
+# parameter set to false. You can only have 1 generator machine and 1 SUT Test
+# machine. The parameter can be set to false for background traffic Test
+# machines, GW Test machines, etc... Default is true
+#monitor = false
+# The prox_socket parameter instructs the rapid scripts to connect to the PROX
+# instance and collect statistics. Default is true. If set to none, we will not
+# collect any statistics from this machine
+#prox_socket = false
+# The prox_launch_exit parameter instructs the script to actually start PROX at
+# the beginning of a test, and to stop it at the end. The default is true. You
+# can set this parameter to false in case you want to start PROX manually and
+# inspect the PROX UI, while the rapid scripts are driving the testing.
+#prox_launch_exit = false
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = false
+#prox_launch_exit = false
+
+# The following section describes the first test that will run. You need at
+# least 1 test section. In most cases, you will only have one.
+[test1]
+# The test that we will run. A limited set of tests are available: you need to
+# select from the available tests as you can see in the runrapid.py code.
+# At the moment of the writing of this text, we have the following tests
+# available: flowsizetest, TST009test, fixed_rate, increment_till_fail,
+# corestatstest, portstatstest, impairtest, irqtest, warmuptest
+test=flowsizetest
+# The next warmup parameters, are used to warm up the system before the actual
+# test is started. This is to make sure ARP is being resolved in PROX and in the
+# underlying infrastructure so that this does not influence the results.
+# warmupflowsize instruct how many parallel flows need to be generated during
+# warmup
+warmupflowsize=512
+# Give the imix packet size that will be used during warmup. It is a list of
+# packet sizes
+warmupimix=[64, 300, 250, 250, 80]
+# The speed at which we will generate packets during the warmup phase. The speed
+# is expressed as a percentage of 10Gb/s. You could say this is expressed in
+# units of 100Mb/s.
+warmupspeed=1
+# warmuptime is the time this warmup phase will run. It is expressed in seconds.
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+# Each element in this list will result in a separate test.
+flows=[64,500000]
+# The drop_rate_threshold defines the maximum amount of packets that can be
+# dropped without declaring the test as failed. This number is expressed as a
+# percentage of the total amount of packets being sent by the generator. If this
+# number is set to 0, the test will only be declared successful if zero packets
+# were dropped during this test
+drop_rate_threshold = 0.1
+# Setting one of the following thresholds to infinity (inf), results in the
+# criterion not being evaluated to rate the test as successful. The latency
+# thresholds are expressed in micro-seconds.
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+# When we run binary searches, we are always trying at a new speed, halfway
+# between the last failed speed and the last successful speed (initially, we
+# consider 0 as that last successful speed). We stop doing this binary search
+# when the difference between the last speed and the new speed is less than
+# what is defined by accuracy, expressed in percentages.
+accuracy = 1
+# Speed at which we will start the binary search, expressed in percentage of
+# 10Gb/s.
+startspeed = 50
+# When using ramp_step, we will at the beginning of each measurement, increase
+# the traffic slowly, till we reach the requested speed. Can be used with
+# certain soft switches that are reconfiguring the resource usage, based on the
+# actual traffic. In order not to influence the measurement, we then slowly go
+# to the requested traffic rate.
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test
new file mode 100644
index 00000000..8b765e7d
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test
@@ -0,0 +1,54 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[8,1024]
+drop_rate_threshold = 0
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
new file mode 100644
index 00000000..27794a12
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[64]
+drop_rate_threshold = 0
+lat_avg_threshold = inf
+lat_perc_threshold = inf
+lat_max_threshold = inf
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test
new file mode 100644
index 00000000..69e4ebc7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64,256,64,1024,64,128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[16384]
+drop_rate_threshold = 0
+lat_avg_threshold = 120
+lat_perc_threshold = 220
+lat_max_threshold = inf
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test
new file mode 100644
index 00000000..ff902de6
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test
@@ -0,0 +1,61 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+ipv6 = True
+
+[TestM1]
+name = Generator
+config_file = configs/genv6.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swapv6.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# DO NOT USE IMIX FOR IPV6 TESTING. THE LIST OF IMIXS CAN ONLY CONTAIN LISTS
+# WITH ONE ELEMENT!!!
+# PACKET SIZE NEEDS TO BE AT LEAST 84 (66 + 18) FOR IPV6
+# 18 bytes needed for UDP LATENCY AND COUNTER CONTENT
+imixs=[[84],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[8,1024]
+drop_rate_threshold = 0
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test
new file mode 100644
index 00000000..803c65e7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test
@@ -0,0 +1,51 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+[TestParameters]
+name = BareTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+
+[TestM1]
+name = Generator
+config_file = configs/l2gen_bare.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = Swap
+config_file = configs/l2swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=10
+warmuptime=2
+imixs=[[64],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512,1]
+drop_rate_threshold = 0
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test
new file mode 100644
index 00000000..9874de47
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test
@@ -0,0 +1,65 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test
new file mode 100644
index 00000000..a876a049
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test
@@ -0,0 +1,73 @@
+##
+## Copyright (c) 2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapWithGatewayTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+gw_vm = 2
+dest_vm = 3
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Gateway
+monitor = false
+prox_socket = false
+prox_launch_exit = false
+
+[TestM3]
+name = Swap
+config_file = configs/swap_gw.cfg
+gw_vm = 2
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test
new file mode 100644
index 00000000..927ecf35
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test
@@ -0,0 +1,63 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = CGNATTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+dest_vm = 3
+gw_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = CGNAT
+config_file = configs/cgnat.cfg
+dest_vm = 3
+mcore = [0]
+cores = [1]
+monitor = false
+prox_socket = true
+prox_launch_exit = true
+
+[TestM3]
+name = PublicSide
+config_file = configs/public_server.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512]
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test
new file mode 100644
index 00000000..660f79b0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test
@@ -0,0 +1,31 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = CoreStatistics
+number_of_tests = 1
+total_number_of_test_machines = 1
+
+[TestM1]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=corestatstest
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test
new file mode 100644
index 00000000..bc5e96b8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test
@@ -0,0 +1,70 @@
+##
+## Copyright (c) 2023 luc.provoost@gmail.com
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = EncryptionDecryption
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+bucket_size_exp = 16
+#prox_launch_exit = false
+
+[TestM2]
+name = Encrypt
+config_file = configs/esp.cfg
+dest_vm = 1
+mcore = [0]
+cores = [1]
+altcores=[2]
+#prox_socket = true
+#prox_launch_exit = false
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+#imixs=[[64],[64,250,800,800]]
+imixs=[[1500],[512],[256],[128]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.5
+lat_avg_threshold = inf
+lat_perc_threshold = inf
+lat_max_threshold = inf
+accuracy = 5
+startspeed = 250
+#ramp_step = 1
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test
index 3042e722..898062c9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2018 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,43 +13,50 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
-[DEFAULT]
+[TestParameters]
name = impairTesting
-number_of_tests = 1
+number_of_tests = 2
total_number_of_test_machines = 3
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 1
-accuracy = 0.01
-
[TestM1]
name = Generator
-machine_index = 1
-config_file = gen_gw.cfg
+config_file = configs/gen_gw.cfg
gw_vm = 2
dest_vm = 3
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
+mcore = [0]
+gencores = [1]
+latcores = [3]
[TestM2]
name = ImpairGW
-machine_index = 2
-config_file = impair.cfg
-group1cores = [1]
+config_file = configs/impair.cfg
+mcore = [0]
+cores = [1]
+monitor = False
[TestM3]
name = Swap
-machine_index = 3
-config_file = swap.cfg
-group1cores = [1]
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
[test1]
-cmd=run_speedtest(sock[0],sock[2])
+test=warmuptest
+warmupflowsize=1024
+warmupimix=[64]
+warmupspeed=10
+warmuptime=2
+
+[test2]
+test=impairtest
+steps=5
+imix=[64]
+flowsize=64
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 5
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test
new file mode 100644
index 00000000..cb673de2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test
@@ -0,0 +1,64 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = IncrementTillFailTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=increment_till_fail
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# Each number of flows in the list needs to be a power of 2, max 2^30.
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+step = 0.5
+startspeed = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test
new file mode 100644
index 00000000..f0330589
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test
@@ -0,0 +1,65 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+ipv6 = True
+
+[TestM1]
+name = Generator
+config_file = configs/genv6.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swapv6.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[84]
+warmupspeed=1
+warmuptime=2
+# DO NOT USE IMIX FOR IPV6 TESTING. THE LIST OF IMIXS CAN ONLY CONTAIN LISTS
+# WITH ONE ELEMENT!!!
+# PACKET SIZE NEEDS TO BE AT LEAST 84 (66 + 18) FOR IPV6
+# 18 bytes needed for UDP LATENCY AND COUNTER CONTENT
+imixs=[[84],[250]]
+# Each number of flows in the list needs to be a power of 2, max 2^30.
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test
new file mode 100644
index 00000000..77c9cbec
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test
@@ -0,0 +1,37 @@
+##
+## Copyright (c) 2010-2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+[TestParameters]
+name = IRQTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+
+[TestM1]
+name = InterruptTestMachine1
+config_file = configs/irq.cfg
+mcore = [0]
+cores = [1,2,3]
+monitor = False
+
+[TestM2]
+name = InterruptTestMachine2
+config_file = configs/irq.cfg
+mcore = [0]
+cores = [1,2,3]
+monitor = False
+
+[test1]
+test=irqtest
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2zeroloss.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test
index 1ea7f0a2..542fe634 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2zeroloss.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2018 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,41 +13,30 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
-[DEFAULT]
+[TestParameters]
name = L2BasicSwapTesting
-number_of_tests = 3
+number_of_tests = 1
total_number_of_test_machines = 2
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 0
-accuracy = 0.1
[TestM1]
name = Generator
-machine_index = 1
-config_file = l2gen.cfg
+config_file = configs/l2gen.cfg
dest_vm = 2
-script_control = true
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
+mcore = [0]
+gencores = [1]
+latcores = [3]
[TestM2]
name = Swap
-machine_index = 2
-config_file = l2swap.cfg
-group1cores = [1]
+config_file = configs/l2swap.cfg
+mcore = [0]
+cores = [1]
[test1]
-cmd=run_speedtest(sock[0],sock[1])
-[test2]
-cmd=run_sizetest(sock[0],sock[1])
-[test3]
-cmd=run_flowtest(sock[0],sock[1])
-
+test=fixed_rate
+startspeed = 10
+imixs=[[256]]
+flows=[64]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test
new file mode 100644
index 00000000..d3a2ba7c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test
@@ -0,0 +1,60 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = L2BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+
+[TestM1]
+name = Generator
+config_file = configs/l2gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = Swap
+config_file = configs/l2swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64]]
+# Each number of flows in the list needs to be a power of 2, max 2^30.
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[512]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0
+lat_avg_threshold = 500
+lat_perc_threshold = 800
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test
index 6a9998c6..f0db6b28 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2018 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,42 +13,37 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
-[DEFAULT]
-name = BasicSwapTesting
-number_of_tests = 3
+[TestParameters]
+name = L3FrameRateTesting
+number_of_tests = 1
total_number_of_test_machines = 2
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 0.1
-accuracy = 0.01
-
[TestM1]
name = Generator
-machine_index = 1
-config_file = gen.cfg
+config_file = configs/gen.cfg
dest_vm = 2
-script_control = true
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
+mcore = [0]
+gencores = [1]
+latcores = [3]
[TestM2]
name = Swap
-machine_index = 2
-config_file = swap.cfg
-group1cores = [1]
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
[test1]
-cmd=run_speedtest(sock[0],sock[1])
-[test2]
-cmd=run_sizetest(sock[0],sock[1])
-[test3]
-cmd=run_flowtest(sock[0],sock[1])
-
+test=fixed_rate
+warmupflowsize=64
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64],[128]]
+# Each number of flows in the list needs to be a power of 2, max 2^20.
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[1,64]
+startspeed=5
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test
new file mode 100644
index 00000000..20d66209
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test
@@ -0,0 +1,32 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = PortStats
+number_of_tests = 1
+total_number_of_test_machines = 1
+
+[TestM1]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+ports = [0]
+
+[test1]
+test=portstatstest
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test
new file mode 100644
index 00000000..e4bddad0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test
@@ -0,0 +1,60 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = GWTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+dest_vm = 3
+gw_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = GW1
+config_file = configs/secgw1.cfg
+dest_vm = 3
+mcore = [0]
+cores = [1]
+
+[TestM3]
+name = GW2
+config_file = configs/secgw2.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# Each number of flows in the list needs to be a power of 2, max 2^20.
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512]
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile
new file mode 100644
index 00000000..8a092def
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile
@@ -0,0 +1,28 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+FROM opnfv/xtesting
+
+RUN apk upgrade --update
+
+ENV RAPID_TEST=rapid_tst009_throughput
+
+RUN git clone https://git.opnfv.org/samplevnf /samplevnf
+WORKDIR /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid
+RUN chmod 400 /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+RUN apk add python3-dev openssh-client && cd /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/ && git init && pip3 install .
+CMD ["run_tests", "-t", "all"]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml
new file mode 100644
index 00000000..92fc7b4c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: rapidxt
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: rapidxt
+ tests:
+ - rapid_tst009
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml
new file mode 100644
index 00000000..3cdda7d7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml
@@ -0,0 +1,54 @@
+---
+tiers:
+ -
+ name: IRQ_rapid_benchmarking
+ order: 1
+ description: 'IRQ Rapid Testing'
+ testcases:
+ -
+ case_name: rapid_irq
+ project_name: rapidxt
+ criteria: 499500
+      # Criterion for irq is defined as 500000 minus the maximal allowed interrupt time per PMD loop (in us)
+ blocking: true
+ clean_flag: false
+ description: 'IRQ test'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/irq.test
+ runtime: 5
+ environment_file: config/rapid.env
+ -
+ name: TST009_rapid_benchmarking
+ order: 2
+ description: 'TST009 Rapid Testing'
+ testcases:
+ -
+ case_name: rapid_tst009_64b_64f
+ project_name: rapidxt
+ criteria: 0.5
+          # Criterion for TST009 testing is defined as the minimum packets per second received in the generator, expressed in Mpps
+ blocking: true
+ clean_flag: false
+ description: 'TST009 test, 64 byte packets, 64 flows'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/TST009_Throughput_64B_64F.test
+ runtime: 5
+ environment_file: config/rapid.env
+ -
+ case_name: rapid_tst009_acaeab_16384f
+ project_name: rapidxt
+ criteria: 0.2
+          # Criterion for TST009 testing is defined as the minimum packets per second received in the generator, expressed in Mpps
+ blocking: true
+ clean_flag: false
+ description: 'TST009 test, imix acaeab, 16384 flows'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/TST009_Throughput_acaeab_16384F.test
+ runtime: 5
+ environment_file: config/rapid.env