Diffstat (limited to 'VNFs/DPPD-PROX/helper-scripts')
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py  53
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py  74
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/README  41
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/config.py  178
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py  78
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py  35
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py  243
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py  229
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py  140
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/progress.py  67
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/prox.py  253
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py  61
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py  258
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/proxmaxssprobe.py  34
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py  54
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py  69
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py  58
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py  210
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py  84
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py  61
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py  73
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py  176
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py  39
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py  60
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl  271
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl  266
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/README  57
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg  64
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py  218
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh  24
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh  24
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py  445
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml  105
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg  51
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/start_vm.py  143
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py  457
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py  681
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py  681
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl  90
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py  57
-rwxr-xr-x  VNFs/DPPD-PROX/helper-scripts/trailing.sh  69
-rw-r--r--  VNFs/DPPD-PROX/helper-scripts/vm-cores.py  20
42 files changed, 6351 insertions, 0 deletions
diff --git a/VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py
new file mode 100644
index 00000000..f9250d21
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py
@@ -0,0 +1,53 @@
+#!/bin/env python2.7
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import socket
+
+class prox:
+ def __init__(self, ip):
+ self._ip = ip;
+ self._dat = ""
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect((self._ip, 8474))
+ except:
+ raise Exception("Failed to connect to PROX on " + self._ip + ":8474")
+ self._sock = sock;
+
+ def send(self, msg):
+ self._sock.sendall(msg + "\n");
+ return self
+ def recv(self):
+ ret_str = "";
+ done = 0;
+ while done == 0:
+ if (len(self._dat) == 0):
+ self._dat = self._sock.recv(256);
+
+ while(len(self._dat)):
+ if (self._dat[0] == '\n'):
+ done = 1
+ self._dat = self._dat[1:]
+ break;
+ else:
+ ret_str += self._dat[0];
+ self._dat = self._dat[1:]
+ return ret_str;
+
+ def wait_cmd_finished(self):
+ self.send("stats hz").recv();
diff --git a/VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py
new file mode 100644
index 00000000..112b583a
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py
@@ -0,0 +1,74 @@
+#!/bin/env python2.7
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from prox import *
+from decimal import *
+from time import *
+
+class data_point:
+ value = 0;
+ tsc = 0;
+ def __init__(self, value, tsc):
+ self.value = value;
+ self.tsc = tsc;
+
+def measure_tx(prox_instance, port_id):
+ port_tx_pkt = "port(" + str(port_id) + ").tx.packets"
+ port_tsc = "port(" + str(port_id) + ").tsc";
+ cmd = "stats " + port_tx_pkt + "," + port_tsc;
+ reply = prox_instance.send(cmd).recv().split(",");
+
+ return data_point(int(reply[0]), int(reply[1]));
+
+def get_rate(first, second, hz):
+ tsc_diff = second.tsc - first.tsc;
+ value_diff = second.value - first.value;
+
+ return int(Decimal(value_diff * hz) / tsc_diff)
+
+# make sure that prox has been started with the -t parameter
+prox_instance = prox("127.0.0.1")
+print "Connected to prox"
+
+hz = int(prox_instance.send("stats hz").recv());
+
+print "System is running at " + str(hz) + " Hz"
+
+print "Showing TX pps on port 0"
+
+update_interval = 0.1
+
+print "Requesting new data every " + str(update_interval) + "s"
+
+measure = measure_tx(prox_instance, 0);
+while (True):
+ sleep(update_interval)
+ measure2 = measure_tx(prox_instance, 0);
+
+ # Since PROX takes measurements at a configured rate (through the
+ # update interval command or through the -r command line parameter),
+ # two consecutive measurements may report the same values. To get
+ # updates at a frequency higher than 1 Hz, reconfigure PROX as
+ # mentioned above.
+
+ if (measure.tsc == measure2.tsc):
+ continue;
+
+ print get_rate(measure, measure2, hz);
+
+ measure = measure2;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/README b/VNFs/DPPD-PROX/helper-scripts/dpi/README
new file mode 100644
index 00000000..f1100757
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/README
@@ -0,0 +1,41 @@
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+The scripts in this directory characterize a DPI-enabled VNF. The
+characterization is split into two steps. The first step (dpi1.py)
+searches for the traffic profile parameter boundaries. The second step
+(dpi2.py) takes the output of the first step as input and searches for
+the maximum sustainable throughput of a DPI-enabled VNF.
+
+To run the first script, use:
+
+ python2.7 ./dpi1.py -t TEST_SYSTEM_DESCRIPTIONS -o OUTPUT1
+
+TEST_SYSTEM_DESCRIPTIONS is a comma-separated list of systems where
+the syntax of defining each system is shown below:
+
+ user@ip:proxDir:cfgDir
+
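+For example (hypothetical addresses and paths), two test systems could
+be passed as:
+
+ root@10.0.0.1:/root/prox:/root/prox-configs,root@10.0.0.2:/root/prox:/root/prox-configs
+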
+To run the second script, use:
+
+ python2.7 ./dpi2.py -t TEST_SYSTEM_DESCRIPTIONS \
+ -s SYSTEM_UNDER_TEST_DESCRIPTIONS \
+ -o OUTPUT2 -d \
+ -i OUTPUT1
+
+Finally, the results can be processed using the following command:
+
+ python2.7 ./maketable.py -i OUTPUT1 -j OUTPUT2 -o FINAL_TABLE
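+
+Both OUTPUT1 and OUTPUT2 are plain CSV files. Judging from the field
+lists defined in dpi2.py and maketable.py, their rows are expected to
+take the following form:
+
+ OUTPUT1: msr,conn,ss,bw
+ OUTPUT2: cores,msr,conn,ss,downrate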
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/config.py b/VNFs/DPPD-PROX/helper-scripts/dpi/config.py
new file mode 100644
index 00000000..ee3f04c6
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/config.py
@@ -0,0 +1,178 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import getopt
+import sys
+from systemconfig import *
+
+class Config:
+ _debug = False;
+ _test_systems = [];
+ _output_file_name = None;
+ _input_file_name = None
+ _input_file_name2 = None
+ _max_port_rate = 0.85
+ _sut = None
+ _accuracy = 2;
+ _threshold = 0.95
+ _once = None
+ _skipTime = 10
+ _testLength = 120
+ _dpiCoreList = range(1, 5)
+ _checkConditions = False;
+ _interCheckDuration = float(1)
+
+ def getInputFileName(self):
+ return self._input_file_name
+
+ def getInputFileName2(self):
+ return self._input_file_name2
+
+ def toString(self):
+ ret = ""
+ ret += "Test systems: \n"
+ for ts in self._test_systems:
+ ret += ts.toString();
+
+ if (self._sut is not None):
+ ret += "SUT: \n"
+ ret += self._sut.toString();
+
+ ret += "Output file name: " + str(self._output_file_name) + "\n"
+ ret += "Max port rate: " + str(self._max_port_rate) + "\n"
+ ret += "Accuracy: " + str(self._accuracy) + " digits after point"
+ return ret
+
+ def getErrorTestOne(self):
+ if (len(self._test_systems) == 0):
+ return "Missing test systems";
+ if (self._output_file_name is None):
+ return "No output file or input file defined";
+ return None
+
+ def getErrorTestTwo(self):
+ if (self._input_file_name is None):
+ return "Input file is missing"
+ if (self._input_file_name == self._output_file_name):
+ return "Input file and output file are the same"
+ return self.getErrorTestOne();
+
+ def getErrorMakeTable(self):
+ if (self._input_file_name is None):
+ return "Missing input file"
+ if (self._input_file_name2 is None):
+ return "Missing file with performance results"
+ if (self._output_file_name is None):
+ return "No output file or input file defined";
+ if (self._input_file_name2 == self._input_file_name):
+ return "Input file used multiple times"
+ if (self._input_file_name == self._output_file_name):
+ return "output file is the same as the input file"
+ if (self._input_file_name2 == self._output_file_name):
+ return "output file is the same as the input file 2"
+
+ return None
+
+ def usageAndExit(self, argv0):
+ print "Usage: " + str(argv0)
+ print "-t Add a test system, syntax: " + SystemConfig.expectedSyntax()
+ print "-s Add SUT, syntax: " + SystemConfig.expectedSyntax()
+ print "-o Output file name"
+ print "-a Accuracy, number of digits after the decimal point"
+ print "-i Input file"
+ print "-j File with performance results"
+ print "-m Maximum per port rate, by default 0.85 (85%)"
+ print "-d Enable debugging"
+ print "-w Fraction of connections to reach, by default is 0.95 (95%)"
+ print "-h Show help"
+ print "-q Run a single test iteration, syntax of argument"
+ print "   is msr,conn,ss (i.e. -q 4000,100000,38.91)"
+ print "-b Skip time, by default 10 sec"
+ print "-l Test length, by default 120 sec"
+ print "-n Maximum number of DPI cores to test"
+ print "-k Period between checking conditions, 1 second by default"
+ print "-c Check conditions during 10 second period after convergence"
+ exit(-1);
+
+ def parse(self, programName, args):
+ try:
+ opts, args = getopt.getopt(args, "t:s:o:a:i:q:m:dhw:j:b:l:n:k:c")
+ except getopt.GetoptError as err:
+ print str(err)
+ return;
+ for option, arg in opts:
+ if(option == "-t"):
+ for ts in arg.split(","):
+ syntaxErr = SystemConfig.checkSyntax(ts)
+ if (syntaxErr != ""):
+ print syntaxErr
+ exit(-1);
+ self._test_systems.append(SystemConfig(ts));
+ elif(option == "-s"):
+ syntaxErr = SystemConfig.checkSyntax(arg)
+ if (syntaxErr != ""):
+ print syntaxErr
+ exit(-1);
+ self._sut = SystemConfig(arg);
+ elif(option == "-w"):
+ self._threshold = float(arg)
+ elif(option == "-o"):
+ self._output_file_name = arg;
+ elif(option == '-a'):
+ self._accuracy = int(arg);
+ elif(option == "-i"):
+ self._input_file_name = arg;
+ elif(option == "-j"):
+ self._input_file_name2 = arg;
+ elif(option == "-q"):
+ self._once = arg.split(",")
+ elif(option == "-c"):
+ self._checkConditions = True;
+ elif(option == "-m"):
+ self._max_port_rate = float(arg);
+ elif(option == "-k"):
+ self._interCheckDuration = float(arg);
+ elif(option == "-d"):
+ self._debug = True
+ elif(option == '-h'):
+ self.usageAndExit(programName)
+ elif(option == '-b'):
+ self._skipTime = int(arg)
+ elif(option == '-l'):
+ self._testLength = int(arg)
+ elif(option == '-n'):
+ self._dpiCoreList = self.strToList(arg)
+ else:
+ self.usageAndExit(programName);
+
+ def strToList(self, arg):
+ elements = [];
+ tokens = arg.split(",");
+
+ for a in tokens:
+ if (a.count('-') == 0):
+ elements.append(int(a))
+ elif (a.count('-') == 1):
+ beg = int(a.split('-')[0]);
+ end = int(a.split('-')[1]);
+ if (beg > end):
+ raise Exception("Invalid list input format")
+ elements += range(beg, end + 1);
+ else:
+ raise Exception("Invalid list input format")
+ return elements;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py b/VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py
new file mode 100644
index 00000000..b0b650dc
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py
@@ -0,0 +1,78 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from decimal import *
+
+class CsvReaderError:
+ def __init__(self, msg):
+ self._msg = msg;
+
+ def __str__(self):
+ return self._msg;
+
+class CsvReader:
+ def __init__(self, fieldTypes = None):
+ self._file_name = None;
+ self._fieldTypes = fieldTypes;
+
+ def open(self, file_name):
+ self._file = open(file_name, 'r');
+ self._file_name = file_name;
+
+ def read(self):
+ line = "#"
+ while (len(line) != 0 and line[0] == "#"):
+ line = self._file.readline();
+
+ if (len(line) != 0):
+ return self._lineToEntry(line)
+ else:
+ return None;
+
+ def _lineToEntry(self, line):
+ split = line.strip().split(',');
+ if (self._fieldTypes is None):
+ return split;
+ have = len(split)
+ expected = len(self._fieldTypes)
+ if (have != expected):
+ raise CsvReaderError("Invalid number of fields %d != %d" % (have, expected))
+
+ entry = {};
+ for i in range(len(self._fieldTypes)):
+ curFieldType = self._fieldTypes[i][1]
+ curFieldName = self._fieldTypes[i][0];
+ if (curFieldType == "int"):
+ entry[curFieldName] = int(split[i])
+ elif (curFieldType == "Decimal"):
+ entry[curFieldName] = Decimal(split[i])
+ else:
+ raise CsvReaderError("Invalid field type %s" % curFieldType);
+ return entry;
+
+ def readAll(self):
+ ret = []
+ line = self.read();
+ while (line != None):
+ ret.append(line);
+ line = self.read();
+ return ret;
+
+ def close(self):
+ self._file.close();
+ self._file = None;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py b/VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py
new file mode 100644
index 00000000..a5f055e8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py
@@ -0,0 +1,35 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+class CsvWriter:
+ def __init__(self):
+ self._file_name = None;
+
+ def open(self, file_name):
+ self._file = open(file_name, 'w');
+ self._file_name = file_name;
+
+ def write(self, elements):
+ elements_str = map(lambda x: str(x), elements);
+ line = ",".join(elements_str);
+ self._file.write(line + "\n");
+ self._file.flush();
+
+ def close(self):
+ self._file.close();
+ self._file = None;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py
new file mode 100644
index 00000000..ec3e4a03
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py
@@ -0,0 +1,243 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from testerset import *
+from time import sleep
+from time import time
+from decimal import *
+import copy
+from os import system
+import socket
+from itertools import chain
+from math import *
+from csvwriter import *
+from config import *
+from progress import *
+from proxmaxssprobe import *
+
+def runTest(minSetupRate, testParam):
+ print "Running test with following parameters:"
+ print testParam.toString();
+
+ testers = testerSet(config._test_systems, config._max_port_rate, testParam);
+
+ thresh = testParam.getConnections();
+ p = Progress(thresh, ["connections", "setup rate", "reTX"], False);
+ loop_count = 0;
+ converged = False;
+
+ testers.startForkJoin();
+ testers.wait_links_up();
+ testers.start_cores();
+
+ print "Running until convergence (%s connections)" % str(thresh)
+ while (not converged):
+ sleep(config._interCheckDuration)
+ testers.update_stats();
+ tot = testers.get_total_connections();
+ tot_retx = testers.get_total_retx();
+ rates = testers.get_rates();
+ curSetupRate = testers.get_setup_rate();
+ ierrors = testers.getIerrors();
+
+ converged = tot >= thresh;
+ if (not converged):
+ if (loop_count > 0 and curSetupRate < minSetupRate):
+ reason = str(curSetupRate) + " < " + str(minSetupRate);
+ print "Current setup rate is lower than min setup rate: " + reason
+ testers.killProx();
+ return False, [];
+ if (not testers.conditionsGood()):
+ print "conditions are bad: " + testers.getReason();
+ testers.killProx();
+ return False, [];
+
+ if (config._debug):
+ p.setProgress(tot, [tot, curSetupRate, tot_retx]);
+ print p.toString();
+ loop_count += 1;
+ print "converged"
+
+ skipTime = config._skipTime
+ print "Connection threshold reached, waiting for " + str(skipTime) + "s, conditions checked = " + str(config._checkConditions)
+ while (skipTime > 0):
+ skipTime -= config._interCheckDuration
+ sleep(config._interCheckDuration)
+ testers.update_stats();
+ if (config._checkConditions and not testers.conditionsGood()):
+ print "conditions are bad: " + testers.getReason();
+ testers.killProx();
+ return False, [];
+
+ testers.tx_rate_meassurement();
+
+ testLength = config._testLength
+ print "Waiting final " + str(testLength) + "s"
+ while (testLength > 0):
+ testLength -= config._interCheckDuration
+ sleep(config._interCheckDuration)
+ testers.update_stats();
+ if (not testers.conditionsGood()):
+ print "conditions are bad: " + testers.getReason();
+ testers.killProx();
+ return False, [];
+
+ rates = testers.tx_rate_meassurement();
+
+ testers.killProx();
+ return True, rates;
+
+def find_ss(tot_conn, maxSetupRate, ss_max):
+ iterationCount = 0;
+ valid_ss = []
+ speed_ss = [];
+
+ # The setup rate must be in [0.2% of total connections, maxSetupRate]
+ # Also, it must not be higher than 50% of the total connections
+ min_setup_rate = tot_conn / 500;
+
+ if (min_setup_rate > maxSetupRate):
+ print "min setup rate > max setup rate: " + str(min_setup_rate) + " > " + str(maxSetupRate);
+ return valid_ss, speed_ss;
+ if (maxSetupRate > tot_conn / 2):
+ print "maximum setup rate (" + str(maxSetupRate) + ") is more than 50% of " + str(tot_conn)
+ return valid_ss, speed_ss;
+
+ accuracy = 10**config._accuracy
+ ss_lo = 1
+ ss_hi = int(round(ss_max * accuracy,0))
+
+ iterationOverride = [ss_hi, ss_lo];
+ # Binary search for highest speed scaling
+ while (ss_lo <= ss_hi):
+ if (iterationCount < len(iterationOverride)):
+ ss = iterationOverride[iterationCount]
+ else:
+ ss = (ss_lo + ss_hi)/2;
+
+ testParam = TestParameters(maxSetupRate, tot_conn, float(ss)/accuracy);
+
+ success, rates = runTest(min_setup_rate, testParam);
+ print "success = " + str(success) + ", rates = " + str(rates)
+ if (success == True):
+ valid_ss.append(float(ss)/accuracy);
+ speed_ss.append(sum(rates)/len(rates))
+ ss_lo = ss + 1
+ else:
+ ss_hi = ss - 1;
+ iterationCount += 1
+ return valid_ss, speed_ss;
+
+def get_highest_ss_and_speed(valid_ss, speed_ss):
+ highest_ss = None;
+ highest_speed = None;
+
+ for i in range(len(valid_ss)):
+ if(highest_ss == None or highest_ss < valid_ss[i]):
+ highest_ss = valid_ss[i];
+ highest_speed = speed_ss[i];
+ return highest_ss, highest_speed;
+
+def get_max_ss():
+ ts = config._test_systems[0];
+ test_system = ProxMaxSSProbe(ts);
+ max_ss = test_system.getMaxSS();
+
+ return floor((max_ss * (10**config._accuracy)))/(10**config._accuracy)
+
+config = Config();
+config.parse(sys.argv[0], sys.argv[1:])
+
+err = config.getErrorTestOne();
+if (err is not None):
+ print "Invalid configuration: " + err;
+ exit(-1);
+else:
+ print config.toString()
+
+if (config._once is not None):
+ maxSetupRate = int(config._once[0])
+ minSetupRate = maxSetupRate/500
+ connections = int(config._once[1])
+ speedScaling = float(config._once[2])
+
+ testParam = TestParameters(maxSetupRate, connections, speedScaling)
+ success, rates = runTest(minSetupRate, testParam)
+ print "success = " + str(success) + ", port rates = " + str(rates)
+ exit(0);
+
+msr_list = []
+msr_list += range(4000, 20000, 2000)
+msr_list += range(20000, 100000, 20000)
+msr_list += range(100000, 300000, 50000)
+msr_list += range(300000, 800001, 100000);
+
+conn_list = [1*10**5, 2*10**5, 4*10**5, 8*10**5, 1*10**6, 2*10**6]
+
+summary_file = CsvWriter()
+summary_file.open(config._output_file_name)
+
+tot_it = 0;
+for tot_conn in conn_list:
+ for msr in msr_list:
+ if (msr >= tot_conn/2):
+ break;
+ tot_it += 1
+
+cnt = -1;
+print "Search will include " + str(tot_it) + " parameter combinations"
+print "Will search for highest link utilization"
+
+# If the lowest msr for n connections was a, then the lowest msr
+# for n + 1 connections can't be lower than a.
+low_sr = msr_list[0];
+
+max_ss = get_max_ss()
+
+high_ss = Decimal(max_ss)
+
+globalProgress = Progress(tot_it)
+globalProgress.setProgress(0);
+for tot_conn in conn_list:
+ had_success = False;
+ all_ss = []
+ for msr in msr_list:
+ globalProgress.incrProgress();
+
+ if (msr < low_sr):
+ print "skipping " + str(msr) + " since it is lower than " + str(low_sr)
+ continue;
+
+ print globalProgress.toString();
+
+ valid_ss, speed_ss = find_ss(tot_conn, msr, high_ss)
+ print "valid ss = " + str(valid_ss)
+ print "valid speeds = " + str(speed_ss)
+
+ if (len(valid_ss) > 0):
+ highest_ss, highest_speed = get_highest_ss_and_speed(valid_ss, speed_ss);
+ summary_file.write([msr, tot_conn, highest_ss, highest_speed]);
+
+ if (not had_success):
+ low_sr = msr;
+
+ had_success = True;
+ all_ss = all_ss + valid_ss;
+
+ if (len(all_ss) > 0):
+ high_ss = max(all_ss);
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py
new file mode 100644
index 00000000..65473f61
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py
@@ -0,0 +1,229 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from testerset import *
+from proxdpisut import *
+from statsconsfile import *
+from time import sleep
+from time import time
+from decimal import *
+import copy
+from os import system
+import socket
+from itertools import chain
+from math import *
+from csvwriter import *
+from csvreader import *
+from config import *
+from progress import *
+from resultprocessor import *
+
+def runTest(coreCount, testParam):
+ print "Running test with following parameters:"
+ print testParam.toString();
+
+
+ testers = testerSet(config._test_systems, config._max_port_rate, testParam);
+
+ ret = TestResult(testers.getCount());
+ thresh = testParam.getConnections() * config._threshold;
+ converged = False;
+
+ sut = ProxDpiSut(config._sut, coreCount);
+
+ testers.startFork();
+ sut.startFork();
+ testers.startJoin();
+ sut.startJoin();
+ testers.wait_links_up();
+ sut.startAllCores();
+ sut.waitCmdFinished();
+ testers.start_cores();
+
+ ret.addTimeTS(testers.getTsc());
+ ret.addTimeSUT(sut.getTsc());
+
+ print "Running until convergence (%s connections)" % str(thresh)
+ p = Progress(thresh, ["connections", "setup rate", "reTX"], False);
+ while (not converged):
+ sleep(config._interCheckDuration)
+ testers.update_stats();
+
+ tot = testers.get_total_connections();
+ tot_retx = testers.get_total_retx();
+ rates = testers.get_rates();
+ cur_setup_rate = testers.get_setup_rate();
+ ierrors = testers.getIerrors();
+ converged = tot >= thresh;
+
+ if (not converged and not testers.conditionsGood()):
+ print "conditions are bad: " + testers.getReason();
+ sut.forceQuit();
+ sut.killProx();
+ testers.killProx();
+ return None;
+
+ if (sut.getIerrors() != 0):
+ testers.killProx();
+ print "Sending quit"
+ try:
+ sut.forceQuit();
+ except:
+ print "Sending quit failed"
+ sut.killProx();
+ return None;
+
+ if (config._debug):
+ p.setProgress(tot, [tot, cur_setup_rate, tot_retx]);
+ print p.toString();
+
+ skipTime = config._skipTime
+ print "Connection threshold reached, waiting for " + str(skipTime) + "s, conditions checked = " + str(config._checkConditions)
+ while (skipTime > 0):
+ skipTime -= config._interCheckDuration
+ sleep(config._interCheckDuration)
+ testers.update_stats();
+ if (config._checkConditions and not testers.conditionsGood()):
+ print "conditions are bad: " + testers.getReason();
+ sut.forceQuit();
+ sut.killProx();
+ testers.killProx();
+ return None;
+
+ ret.addTimeTS(testers.getTsc());
+ ret.addTimeSUT(sut.getTsc());
+
+ testers.tx_rate_meassurement();
+
+ testLength = config._testLength
+ print "Waiting final " + str(testLength) + "s"
+ while (testLength > 0):
+ testLength -= config._interCheckDuration
+ testers.update_stats();
+ if (not testers.conditionsGood()):
+ print "conditions are bad: " + testers.getReason();
+ sut.forceQuit();
+ sut.killProx();
+ testers.killProx();
+ return None;
+
+ if (sut.getIerrors() != 0):
+ testers.killProx();
+ print "Sending quit"
+ try:
+ sut.forceQuit();
+ except:
+ print "Sending quit failed"
+ sut.killProx();
+ return None;
+
+ sleep(config._interCheckDuration)
+
+ rates = testers.tx_rate_meassurement();
+ ret.addTimeTS(testers.getTsc());
+ ret.addTimeSUT(sut.getTsc());
+
+ print "Quitting Prox on SUT"
+ # make sure stats are flushed
+ sut.quitProx();
+ print "Quitting Prox on test system(s)"
+ testers.quitProx()
+
+ ret.rates = rates
+
+ sutStatsDump = "stats_dump_sut"
+ tsStatsDumpBaseName = "stats_dump_ts"
+
+ sut.scpStatsDump(sutStatsDump);
+ tsStatsDump = testers.scpStatsDump(tsStatsDumpBaseName);
+
+ ret.setTSStatsDump(tsStatsDump);
+ ret.setSUTStatsDump(sutStatsDump);
+ return ret
+
+def meassurePerf(coreCount, maxSetupRate, total_connections, ss_hi):
+ iterationCount = 0;
+ accuracy = 10**config._accuracy
+ ss_lo = 1
+ ss_hi = int(round(ss_hi * accuracy, 0))
+ success = True;
+
+ downrate = float(0)
+ highest_ss = 0
+ iterationOverride = [ss_hi, ss_lo];
+ while (ss_lo <= ss_hi):
+ if (iterationCount < len(iterationOverride)):
+ ss = iterationOverride[iterationCount]
+ else:
+ ss = (ss_lo + ss_hi)/2;
+
+ testParam = TestParameters(maxSetupRate, total_connections, float(ss)/accuracy);
+
+ result = runTest(coreCount, testParam);
+
+ if (result is None):
+ success = False
+ else:
+ rp = ResultProcessor(result)
+ rp.process();
+ success = rp.percentHandled() > 0.99999
+
+ print "test result = " + str(success)
+ if (success):
+ ss_lo = ss + 1;
+ highest_ss = max(highest_ss, ss);
+ print result.rates
+ downrate = sum(result.rates)/len(result.rates)
+ else:
+ ss_hi = ss - 1;
+ iterationCount += 1
+
+ return downrate, float(highest_ss)/accuracy
+
+config = Config();
+config.parse(sys.argv[0], sys.argv[1:])
+
+err = config.getErrorTestTwo();
+if (err is not None):
+ print "Invalid configuration: " + err;
+ exit(-1);
+else:
+ print config.toString()
+
+infileFields = []
+infileFields += [("msr", "int")]
+infileFields += [("conn", "int")]
+infileFields += [("ss", "Decimal")]
+infileFields += [("bw", "Decimal")]
+
+infile = CsvReader(infileFields);
+infile.open(config.getInputFileName())
+inputs = infile.readAll()
+infile.close();
+
+summary = CsvWriter();
+summary.open(config._output_file_name);
+
+print "Will test SUT configurations with " + str(config._dpiCoreList) + " DPI cores"
+
+for a in inputs:
+ for coreCount in config._dpiCoreList:
+ downrate, ss = meassurePerf(coreCount, a["msr"], a["conn"], a["ss"]);
+ summary.write([coreCount, a["msr"], a["conn"], ss, downrate]);
+
+summary.close()
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py b/VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py
new file mode 100644
index 00000000..f8b7bdc0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py
@@ -0,0 +1,140 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+from config import *
+from csvreader import *
+from sets import Set
+from csvwriter import *
+
+class ResultEntry:
+ def __init__(self):
+ self.boundary = None;
+ self.cores = {}
+
+ def setBoundary(self, val):
+ self.boundary = val;
+
+ def addCoreResult(self, core, val):
+ self.cores[core] = val
+
+ def getCoreResult(self, core):
+ if (core in self.cores):
+ return self.cores[core];
+ return None;
+
+ def getBoundary(self):
+ return self.boundary;
+
+ def getCores(self):
+ return self.cores
+
+ def getMsr(self):
+ return self.msr;
+
+class DictEntry:
+ def __init__(self, key):
+ self.dictionary = {}
+ self.entries = []
+ self.key = key;
+
+config = Config();
+config.parse(sys.argv[0], sys.argv[1:])
+
+err = config.getErrorMakeTable();
+
+if (err is not None):
+ print err
+ exit(-1);
+
+if (config._debug):
+ print "Performance data: " + config.getInputFileName2()
+ print "Boundaries: " + config.getInputFileName()
+
+allData = {}
+
+infileFields = []
+infileFields += [("msr", "int")]
+infileFields += [("conn", "int")]
+infileFields += [("ss", "Decimal")]
+infileFields += [("bw", "Decimal")]
+
+boundariesFile = CsvReader(infileFields)
+boundariesFile.open(config.getInputFileName());
+boundaries = boundariesFile.readAll();
+
+cores = Set()
+
+orderedResults = []
+finalResults = {}
+
+for a in boundaries:
+ key = a["conn"]
+ if (key not in finalResults):
+ newDict = DictEntry(key)
+ finalResults[key] = newDict
+ orderedResults.append(newDict)
+
+for a in boundaries:
+ table = finalResults[a["conn"]]
+ key = a["msr"]
+ value = ResultEntry()
+ value.msr = a["msr"]
+ value.conn = a["conn"]
+ value.boundary = a["bw"]
+ table.dictionary[key] = value
+ table.entries.append(value)
+
+infileFields2 = []
+infileFields2 += [("cores", "int")]
+infileFields2 += [("msr", "int")]
+infileFields2 += [("conn", "int")]
+infileFields2 += [("ss", "Decimal")]
+infileFields2 += [("down", "Decimal")]
+
+resultsFile = CsvReader(infileFields2)
+resultsFile.open(config.getInputFileName2())
+
+for a in resultsFile.readAll():
+ table = finalResults[a["conn"]]
+ key = a["msr"]
+ table.dictionary[key].addCoreResult(a["cores"], a["down"])
+ cores.add(a["cores"]);
+
+
+outputFile = CsvWriter()
+
+outputFile.open(config._output_file_name)
+
+title = ["setup rate", "maximum"]
+for e in sorted(cores):
+ title += [str(e)]
+
+for a in orderedResults:
+ outputFile.write(["connections = " + str(a.key)])
+ outputFile.write(title)
+
+ for e in a.entries:
+ line = [str(e.getMsr())]
+ line += [str(e.getBoundary())]
+ for c in sorted(cores):
+ if (e.getCoreResult(c) is not None):
+ line += [str(e.getCoreResult(c))]
+ else:
+ line += [""]
+ outputFile.write(line)
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/progress.py b/VNFs/DPPD-PROX/helper-scripts/dpi/progress.py
new file mode 100644
index 00000000..5e44c678
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/progress.py
@@ -0,0 +1,67 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from decimal import *
+from time import time
+
+class Progress:
+ def __init__(self, limit, fieldNames = [], overallETA = True):
+ self._fieldNames = fieldNames;
+ self._limit = limit;
+ self._progress = 0;
+ self._prevProgress = 0;
+ self._prevTime = 0;
+ self._progressSetCount = 0;
+ self._time = 0;
+ self._overallETA = overallETA;
+
+ def setProgress(self, progress, fieldValues = []):
+ self._fieldValues = fieldValues;
+ if (self._overallETA == True):
+ self._progress = progress
+ self._time = time();
+ if (self._progressSetCount == 0):
+ self._prevProgress = self._progress;
+ self._prevTime = self._time;
+ else:
+ self._prevProgress = self._progress;
+ self._prevTime = self._time;
+ self._progress = progress;
+ self._time = time();
+ self._progressSetCount += 1
+
+ def incrProgress(self):
+ self.setProgress(self._progress + 1);
+
+ def toString(self):
+ ret = ""
+ ret += str(self._getETA()) + " seconds left"
+ for f,v in zip(self._fieldNames, self._fieldValues):
+ ret += ", %s=%s" % (str(f),str(v))
+ return ret;
+
+ def _getETA(self):
+ if (self._progressSetCount < 2):
+ return "N/A"
+ diff = self._progress - self._prevProgress;
+ t_diff = Decimal(self._time - self._prevTime);
+ if (t_diff < 0.001 or diff <= 0):
+ return "N/A"
+ rate = Decimal(diff)/t_diff
+ remaining = Decimal(self._limit - self._progress);
+ return round(remaining/rate, 2);
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/prox.py b/VNFs/DPPD-PROX/helper-scripts/dpi/prox.py
new file mode 100644
index 00000000..60ef7592
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/prox.py
@@ -0,0 +1,253 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import threading
+from time import *
+from proxsocket import *
+from remotesystem import *
+
+class ProxStarter:
+ def __init__(self, remoteSystem, cmd):
+ self._remoteSystem = remoteSystem
+ self._cmd = cmd
+ self._thread = None
+ self._prox = None;
+ self._result = None;
+ self._startDuration = None
+
+ def startThreaded(self):
+ self._start_thread = threading.Thread(target = self._run, args = (self, 1))
+ self._start_thread.start();
+
+ def joinThreaded(self):
+ self._start_thread.join();
+ return self._result;
+
+ def getResult(self):
+ return self._result;
+
+ def getStartDuration(self):
+ return self._startDuration;
+ def getProx(self):
+ return self._prox;
+
+ def _run(self, a, b):
+ before = time.time()
+ self._remoteSystem.run("sudo killall -w -q -9 prox")
+
+ self._result = self._remoteSystem.run(self._cmd);
+
+ sleep(1)
+ after = time.time()
+ self._startDuration = after - before;
+
+class StatsCmd(object):
+ def __init__(self, prox):
+ self._cmd = ""
+ self._parts = []
+ self._beforeParts = []
+ self._prox = prox;
+
+ def sendRecv(self):
+ cmd = self.getCmd()
+ reply = self._prox._send(cmd)._recv()
+ self.setReply(reply)
+
+ def add(self, stats):
+ if (len(self._cmd) != 0):
+ self._cmd += ","
+ self._cmd += stats
+
+ if (len(self._parts) == 0):
+ self._beforeParts += [0]
+ else:
+ before = self._parts[-1] + self._beforeParts[-1];
+ self._beforeParts += [before]
+
+ self._parts += [stats.count(",") + 1];
+
+ def getCmd(self):
+ return "stats " + self._cmd;
+
+ def setReply(self, reply):
+ self._reply = reply.split(",");
+
+ def getResult(self, idx):
+ start = self._beforeParts[idx];
+ end = start + self._parts[idx];
+ return self._reply[start:end]
+
+class Prox(object):
+ def __init__(self, systemConfig):
+ self._systemConfig = systemConfig;
+ self._proxStarter = None
+
+ user = self._systemConfig._user
+ ip = self._systemConfig._ip
+ self._remoteSystem = remoteSystem(user, ip);
+
+ self.resetArguments()
+
+ def resetArguments(self):
+ self._args = []
+
+ def addArgument(self, arg):
+ self._args.append(arg);
+
+ def startFork(self):
+ cmd = self.getCmd();
+ self._proxStarter = ProxStarter(self._remoteSystem, cmd)
+ self._proxStarter.startThreaded();
+
+ def startJoin(self):
+ ret = self.startJoinNoConnect();
+ self._connectSocket();
+ self._querySetup();
+ return self._proxStarter.getStartDuration();
+
+ def startJoinNoConnect(self):
+ return self._proxStarter.joinThreaded();
+
+ def getCmd(self):
+ proxDir = self._systemConfig.getProxDir();
+ cfgFile = self._systemConfig.getCfgFile();
+
+ cmd = "cd " + proxDir + "; "
+ cmd += "sudo ./build/prox "
+ cmd += "-f " + cfgFile
+
+ for arg in self._args:
+ cmd += " " + arg
+ return cmd
+
+ def getLog(self):
+ proxDir = self._systemConfig.getProxDir()
+ cmd = "cat " + proxDir + "/prox.log";
+ return self._remoteSystem.run(cmd)["out"];
+
+ def getIP(self):
+ return self._systemConfig._ip;
+
+ def getHz(self):
+ return self._hz;
+
+ def getBeg(self):
+ return self._beg;
+
+ def getPorts(self):
+ return self._ports;
+
+ def getIerrors(self):
+ sc = StatsCmd(self)
+ sc.add(self._buildIerrorsCmd());
+ sc.sendRecv()
+ return self._parseIerrorsReply(sc.getResult(0));
+
+ def _parseIerrorsReply(self, rep):
+ tot_ierrors = 0;
+ for e in rep:
+ tot_ierrors += int(e);
+ return tot_ierrors;
+
+ def _buildIerrorsCmd(self):
+ cmd = ""
+ for port in self._ports:
+ if (len(cmd)):
+ cmd += ","
+ cmd += "port(%s).ierrors" % str(port)
+ return cmd;
+
+ def waitCmdFinished(self):
+ self._send("stats hz")._recv();
+
+ def waitAllLinksUp(self):
+ link_down = True;
+ while (link_down):
+ link_down = False;
+ for port in self._ports:
+ cmd = "port link state %s" % str(port)
+ link_state = self._send(cmd)._recv();
+ if (link_state == "down"):
+ link_down = True;
+ print "Link down on port " + str(port) + ", waiting one second"
+ break;
+ sleep(1);
+
+ def startAllCores(self):
+ self._send("start all");
+
+ def stopAllCores(self):
+ self._send("stop all");
+
+ def forceQuit(self):
+ self._send("quit_force")._recv();
+
+ def killProx(self):
+ self._remoteSystem.run("sudo killall -w -q -9 prox")
+
+ def getTsc(self):
+ return self._getTsc();
+
+ def _getTsc(self):
+ return int(self._send("stats global.tsc")._recv());
+
+ def scpStatsDump(self, dst):
+ proxDir = self._systemConfig.getProxDir()
+
+ src = proxDir + "/stats_dump";
+ print "Copying " + src + " to " + dst
+ self._remoteSystem.scp(src, dst);
+
+ def _querySetup(self):
+ print "Query setup on " + str(self.getIP())
+ self._queryHz()
+ self._queryBeg()
+ self._queryPorts()
+ self._querySetup2()
+
+ def _querySetup2(self):
+ print "running query 2"
+ pass
+
+ def quitProx(self):
+ self._send("quit")._recv();
+
+ def _queryHz(self):
+ self._hz = int(self._send("stats hz")._recv());
+
+ def _queryBeg(self):
+ self._beg = self._getTsc();
+
+ def _queryPorts(self):
+ self._ports = []
+ port_info_all = self._send("port info all")._recv();
+ port_info_list = port_info_all.split(',');
+
+ for port_info in port_info_list:
+ if (len(port_info) > 0):
+ self._ports.append(int(port_info.split(":")[0]));
+
+ def _connectSocket(self):
+ self._proxSocket = ProxSocket(self.getIP())
+
+ def _send(self, msg):
+ self._proxSocket.send(msg);
+ return self
+
+ def _recv(self):
+ return self._proxSocket.recv();
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py
new file mode 100644
index 00000000..aae900b0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py
@@ -0,0 +1,61 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from prox import *
+from remotesystem import *
+from time import *
+from decimal import *
+
+class ProxDpiSut(Prox):
+ def __init__(self, ts, coreCount):
+ super(ProxDpiSut, self).__init__(ts)
+
+ self._setDefaultArguments();
+ self._setDpiCoreCount(coreCount);
+
+ def _setDefaultArguments(self):
+ self.addArgument("-e");
+ self.addArgument("-t");
+ self.addArgument("-k");
+ self.addArgument("-d");
+ self.addArgument("-r 0.01");
+
+ def _setDpiCoreCount(self, count):
+ self.addArgument("-q dpi_core_count=" + str(count))
+
+ def _querySetup2(self):
+ self._query_cores();
+
+ def _query_cores(self):
+ print "querying cores"
+ self._wk = self._get_core_list("$wk");
+
+ def _get_core_list(self, var):
+ ret = []
+ result = self._send("echo " + var)._recv();
+ for e in result.split(","):
+ ret += [e];
+ return ret;
+
+ def getTsc(self):
+ cmd = "stats task.core(%s).task(0).tsc" % self._wk[-1]
+ res = int(self._send(cmd)._recv());
+ if (res == 0):
+ return self._getTsc();
+ else:
+ return res;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py
new file mode 100644
index 00000000..19b08c92
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py
@@ -0,0 +1,258 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from prox import *
+from remotesystem import *
+from time import *
+from decimal import *
+from timeseriespoint import *
+
+class TestParameters:
+ def __init__(self, max_setup_rate, total_connections, ss):
+ self.max_setup_rate = max_setup_rate;
+ self.total_connections = total_connections;
+ self.ss = ss;
+
+ def toString(self):
+ ret = ""
+ ret += "\tMaximum setup rate = %d\n" % self.max_setup_rate
+ ret += "\tTotal number of connections = %d\n" % self.total_connections
+ ret += "\tSpeed scaling = %s\n" % str(self.ss)
+ return ret;
+
+ def getPerSystem(self, count):
+ msr = self.max_setup_rate / count
+ cnn = self.total_connections / count
+ return TestParameters(msr, cnn, self.ss);
+
+ def getConnections(self):
+ return self.total_connections;
+
+class ProxDpiTester(Prox):
+ TENGIGABITBYTESPERSECOND = 1250000000
+
+ def __init__(self, ts, testParam, ID):
+ super(ProxDpiTester, self).__init__(ts)
+
+ self._sc = None
+ self._lastTot = None
+ self._prevTot = None;
+ self._prevBytesClient = None
+ self._lastBytesClient = None
+ self._prevBytesTxMeassurement = None
+ self._lastBytesTxMeassurement = None
+
+ self._setDefaultArguments();
+ self._setMsr(testParam.max_setup_rate)
+ self._setConnections(testParam.total_connections);
+ self._setSpeedScaling(testParam.ss);
+ self._setID(ID);
+
+ def _setDefaultArguments(self):
+ self.addArgument("-e")
+ self.addArgument("-t")
+ self.addArgument("-k")
+ self.addArgument("-d")
+ self.addArgument("-r 0.01");
+
+ def _setMsr(self, msr):
+ self.addArgument("-q max_setup_rate=" + str(msr))
+
+ def _setConnections(self, connections):
+ self.addArgument("-q connections=" + str(connections))
+
+ def _setID(self, ID):
+ self.addArgument("-q test_system_id=" + str(ID))
+
+ def _setSpeedScaling(self, ss):
+ self.addArgument("-q ss=" + str(ss))
+
+ def _querySetup2(self):
+ self._query_client_ports();
+ self._query_server_ports();
+ self._query_cores();
+
+ def _query_client_ports(self):
+ self._client_ports = []
+ for i in range(0, len(self._ports), 2):
+ self._client_ports.append(self._ports[i]);
+
+ def _query_server_ports(self):
+ self._server_ports = []
+ for i in range(1, len(self._ports), 2):
+ self._server_ports.append(self._ports[i]);
+
+ def _query_cores(self):
+ self._query_ld();
+ self._query_servers();
+ self._query_clients();
+
+ def _query_ld(self):
+ self._ld = self._get_core_list("$all_ld");
+
+ def _query_servers(self):
+ self._servers = self._get_core_list("$all_servers")
+
+ def _query_clients(self):
+ self._clients = self._get_core_list("$all_clients")
+
+ def _get_core_list(self, var):
+ ret = []
+ result = self._send("echo " + var)._recv();
+ for e in result.split(","):
+ ret += [e];
+ return ret;
+
+ def start_all_ld(self):
+ self._send("start $all_ld");
+
+ def start_all_workers(self):
+ self._send("start $all_workers");
+
+ def stop_all_ld(self):
+ self._send("stop $all_ld");
+
+ def stop_all_workers(self):
+ self._send("stop $all_workers");
+
+ def update_stats(self):
+ if (self._sc is None):
+ self._sc = StatsCmd(self)
+ self._sc.add(self._buildTotalConnectionsCmd())
+ self._sc.add(self._buildReTXCmd())
+ self._sc.add(self._buildIerrorsCmd())
+ self._sc.add(self._buildBytesPerPortCmd(self._client_ports, "rx"));
+
+ self._sc.sendRecv()
+
+ self._updateTotalConnections(self._sc.getResult(0))
+ self._updateReTX(self._sc.getResult(1))
+ self._updateIerrors(self._sc.getResult(2))
+ self._update_rates_client_ports(self._sc.getResult(3));
+
+ def _buildTotalConnectionsCmd(self):
+ cmd = "l4gen(%s).tsc" % str(self._clients[0])
+
+ for core in self._clients:
+ if (len(cmd) > 0):
+ cmd += ","
+ cmd += "l4gen(%s).created,l4gen(%s).finished" % (str(core), str(core))
+ return cmd;
+
+ def _updateTotalConnections(self, rep):
+ instant = Decimal(int(rep[0]) - self._beg)/self._hz
+ rep = rep[1:]
+ tot = 0;
+ for i in range(0,len(rep), 2):
+ tot += int(rep[i]) - int(rep[i + 1]);
+
+ prev = self._lastTot;
+ last = TimeSeriesPoint(tot, instant);
+
+ if (prev == None):
+ prev = last;
+
+ self._prevTot = prev
+ self._lastTot = last;
+
+ def _buildReTXCmd(self):
+ cmd = ""
+ for core in self._clients + self._servers:
+ if (len(cmd) > 0):
+ cmd += ","
+ cmd += "l4gen(%s).retx" % str(core)
+ return cmd;
+
+ def _updateReTX(self, rep):
+ retx = 0;
+ for i in rep:
+ retx += int(i);
+ self._retx = retx;
+
+ def _updateIerrors(self, rep):
+ self._ierrors = self._parseIerrorsReply(rep)
+
+ def get_total_connections(self):
+ return self._lastTot.getValue()
+
+ def getCurrentSetupRate(self):
+ return int(self._lastTot.getRateOfChange(self._prevTot));
+
+ def get_total_retx(self):
+ return self._retx
+
+ def get_rates_client_ports(self):
+ return self._calcLinkUtilization(self._prevBytesClient, self._lastBytesClient);
+
+ def getIerrorsCached(self):
+ return self._ierrors;
+
+ def _update_rates_client_ports(self, rep):
+ prevBytes = self._lastBytesClient
+ lastBytes = self._parseTimeSeries(rep);
+
+ if (prevBytes == None):
+ prevBytes = lastBytes;
+
+ self._prevBytesClient = prevBytes;
+ self._lastBytesClient = lastBytes;
+
+ def _getBytesPerPort(self, ports, rxOrTx):
+ sc = StatsCmd(self);
+ sc.add(self._buildBytesPerPortCmd(ports, rxOrTx))
+ sc.sendRecv();
+
+ rep = sc.getResult(0);
+
+ return self._parseTimeSeries(rep);
+
+ def _buildBytesPerPortCmd(self, ports, rxOrTx):
+ cmd = ""
+ for port in ports:
+ if (len(cmd) > 0):
+ cmd += ","
+ cmd += "port(%s).%s.bytes,port(%s).tsc" % (str(port), rxOrTx, str(port));
+ return cmd
+
+ def tx_rate_meassurement(self):
+ prev = self._lastBytesTxMeassurement
+ last = self._getBytesPerPort(self._server_ports, "tx");
+
+ if (prev == None):
+ prev = last;
+
+ self._prevBytesTxMeassurement = prev
+ self._lastBytesTxMeassurement = last
+
+ return self._calcLinkUtilization(prev, last);
+
+ def _parseTimeSeries(self, rep):
+ ret = []
+ for i in range(0, len(rep), 2):
+ val = int(rep[i])
+ instant = Decimal(int(rep[i + 1]) - self._beg)/self._hz
+ ret.append(TimeSeriesPoint(val, instant));
+ return ret;
+
+ def _calcLinkUtilization(self, prev, last):
+ ret = []
+ for i in range(0, len(prev)):
+ bytesPerSecond = last[i].getRateOfChange(prev[i]);
+ linkFraction = Decimal(bytesPerSecond)/self.TENGIGABITBYTESPERSECOND
+ ret.append(round(linkFraction,2));
+ return ret;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxmaxssprobe.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxmaxssprobe.py
new file mode 100644
index 00000000..27c470c4
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxmaxssprobe.py
@@ -0,0 +1,34 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from decimal import *
+from prox import *
+
+class ProxMaxSSProbe(Prox):
+ def __init__(self, ts):
+ super(ProxMaxSSProbe, self).__init__(ts)
+
+ def getMaxSS(self):
+ self.addArgument("-q max_ss_and_quit=true");
+ self.addArgument("-q test_system_id=0");
+ self.startFork();
+ ret = self.startJoinNoConnect();
+ last_occur = ret["out"].rfind("\n") + 1;
+ last_line = ret["out"][last_occur:];
+
+ return Decimal(last_line.split("=")[1])
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py
new file mode 100644
index 00000000..fd4cc737
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py
@@ -0,0 +1,54 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import socket
+
+class ProxSocket:
+ def __init__(self, ip):
+ self._ip = ip;
+ self._dat = ""
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect((self._ip, 8474))
+ except:
+ raise Exception("Failed to connect to prox on " + self._ip)
+ self._sock = sock;
+
+ def send(self, msg):
+ self._sock.sendall(msg + "\n");
+ return self
+
+ def recv(self):
+ ret_str = "";
+ done = 0;
+ while done == 0:
+ if (len(self._dat) == 0):
+ self._dat = self._sock.recv(256);
+ if (self._dat == ''):
+ return '';
+
+ while(len(self._dat)):
+ if (self._dat[0] == '\n'):
+ done = 1
+ self._dat = self._dat[1:]
+ break;
+ else:
+ ret_str += self._dat[0];
+ self._dat = self._dat[1:]
+ return ret_str;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py b/VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py
new file mode 100644
index 00000000..41d8ad53
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py
@@ -0,0 +1,69 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+from decimal import *
+
+def usage(progName):
+ print "usage: " + progName + " config [up|down]"
+ print " The script reads a lua configuration"
+ print " and outputs a histogram with 21 buckets."
+ print " The first 20 buckets cover values up to the 70th percentile."
+ print " The last bucket contains the remaining items."
+ exit(-1);
+
+if (len(sys.argv) != 3):
+ usage(sys.argv[0])
+
+if (sys.argv[2] == "down"):
+ match = "dn_bps"
+elif (sys.argv[2] == "up"):
+ match = "up_bps"
+else:
+ usage(sys.argv[0])
+
+values = []
+for line in open(sys.argv[1]).readlines():
+ line = line.strip();
+
+ if line.find(match) != -1:
+ v = line.split(" = ")[1].strip(",")
+ values.append(Decimal(v));
+
+values = sorted(values)
+
+threshold = values[int(len(values)*0.7)]
+
+buckets = [0]*21;
+
+for v in values:
+	if (v > threshold):
+		buckets[20] += 1
+	else:
+		buckets[int(v * 20 / threshold)] += 1
+
+stepSize = threshold / 20;
+
+print "# bucket range, count"
+for i in range(len(buckets) - 1):
+	beg = str(int(i * stepSize))
+	end = str(int((i + 1) * stepSize - 1))
+	print beg + "-" + end + "," + str(buckets[i])
+
+# the last bucket holds everything above the 70th percentile threshold
+i = len(buckets) - 1
+print str(int(i * stepSize)) + "+," + str(buckets[i])
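+
+# Example invocation (the file name is illustrative; it should be a lua
+# configuration containing "up_bps"/"dn_bps" fields as parsed above):
+#   ./ratedistribution.py cpe_table.lua down > histogram.csv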
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py b/VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py
new file mode 100644
index 00000000..adbb288c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py
@@ -0,0 +1,58 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import os
+import time
+import socket
+
+def ssh(user, ip, cmd):
+ # print cmd;
+ ssh_options = ""
+ ssh_options += "-o StrictHostKeyChecking=no "
+ ssh_options += "-o UserKnownHostsFile=/dev/null "
+ ssh_options += "-o LogLevel=quiet "
+ running = os.popen("ssh " + ssh_options + " " + user + "@" + ip + " \"" + cmd + "\"");
+ ret = {};
+ ret['out'] = running.read().strip();
+ ret['ret'] = running.close();
+ if (ret['ret'] == None):
+ ret['ret'] = 0;
+
+ return ret;
+
+def ssh_check_quit(obj, user, ip, cmd):
+ ret = ssh(user, ip, cmd);
+ if (ret['ret'] != 0):
+ obj._err = True;
+ obj._err_str = ret['out'];
+ exit(-1);
+
+class remoteSystem:
+ def __init__(self, user, ip):
+ self._ip = ip;
+ self._user = user;
+
+ def run(self, cmd):
+ return ssh(self._user, self._ip, cmd);
+
+ def scp(self, src, dst):
+ running = os.popen("scp " + self._user + "@" + self._ip + ":" + src + " " + dst);
+ return running.close();
+
+ def getIP(self):
+ return self._ip
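+
+# Usage sketch (user, IP, command and paths are only an illustration; commands
+# run over ssh with host key checking disabled, as configured above):
+#   rs = remoteSystem("root", "192.168.1.10")
+#   kernel = rs.run("uname -r")['out']
+#   rs.scp("/root/dppd.stats", "./dppd.stats.0")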
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py b/VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py
new file mode 100644
index 00000000..ad196035
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py
@@ -0,0 +1,210 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from sutstatsconsfile import *
+from tsstatsconsfile import *
+from csvwriter import *
+
+class TestResult:
+ class Times:
+ def __init__(self):
+ self.serie = []
+ def addTime(self, val):
+ self.serie.append(val)
+ def getTime(self, i):
+ return self.serie[i]
+
+ def __init__(self, testSystemCount):
+ self.rates = None;
+ self.tsStatsDump = [];
+ self.tsTimes = [];
+ for i in range(testSystemCount):
+ self.tsStatsDump.append("");
+ self.tsTimes.append(TestResult.Times());
+
+ self.sutStatsDump = None;
+ self.sutTime = TestResult.Times();
+
+ def getTSCount(self):
+ return len(self.tsTimes)
+
+ def setTSStatsDump(self, filePaths):
+ self.tsStatsDump = filePaths;
+
+ def setSUTStatsDump(self, filePath):
+ self.sutStatsDump = filePath;
+
+ def getTSStatsDump(self):
+ return self.tsStatsDump;
+
+ def getSUTStatsDump(self):
+ return self.sutStatsDump;
+
+ def addTimeTS(self, times):
+ for i in range(len(times)):
+ self.tsTimes[i].addTime(times[i])
+
+ def addTimeSUT(self, time):
+ self.sutTime.addTime(time);
+
+
+class ResultProcessor:
+ def __init__(self, testResult):
+ self._testResults = testResult;
+
+ def process(self):
+ self._readStatsConsLogs();
+ self._mergeTsStats();
+ self._calcSetupRate();
+
+ def percentHandled(self):
+ converged_tsc = self._testResults.sutTime.getTime(1) - self._testResults.sutTime.getTime(0)
+ end_tsc = self._testResults.sutTime.getTime(2) - self._testResults.sutTime.getTime(0)
+
+ converged = converged_tsc/Decimal(self._sutHz)
+ end = end_tsc/Decimal(self._sutHz);
+
+ rx_converged = -1
+ tx_converged = -1
+ rx_end = -1
+ tx_end = -1
+
+ for entry in self._sutStats:
+ timeStamp = entry[3]
+ if (rx_converged == -1):
+ if (timeStamp > converged):
+ rx_converged = entry[0]
+ tx_converged = entry[1] - entry[2]
+ else:
+ continue;
+ else:
+ if (timeStamp > end):
+ rx_end = entry[0]
+ tx_end = entry[1] - entry[2]
+ break;
+ return (tx_end - tx_converged)/Decimal(rx_end - rx_converged)
+
+ def toFile(self, fileName):
+ outFile = CsvWriter();
+
+ outFile.open(fileName)
+
+ for entry in self._sutStats:
+ timeStamp = round(entry[3], 3);
+ rx = entry[0]
+ tx = entry[1]
+ drop = entry[2]
+
+ outFile.write([timeStamp, rx, tx, drop, "", ""])
+
+ for entry in self._tsStats:
+ timeStamp = round(entry[-1], 3);
+ connections = entry[0]
+ setupRate = entry[3]
+ outFile.write([timeStamp,"","","", connections, setupRate]);
+ outFile.close();
+
+ def _readStatsConsLogs(self):
+ print "Reading SUT stats"
+ self._sutStats = self._readSutStats();
+ print "Reading TS stats"
+ self._tsAllStats = self._readAllTSStats();
+
+ def _mergeTsStats(self):
+ # The first test system is the reference system. The totals
+ # will be accumulated by repeatedly taking the closest
+ # available data from other systems
+ ret = []
+ for entry in self._tsAllStats[0]:
+ ret.append(entry)
+
+ interSampleTime = ret[1][-1] - ret[0][-1];
+
+ mergedSampleCount = 0;
+ if (len(self._tsAllStats) == 1):
+ mergedSampleCount = len(ret)
+
+		for i in range(1, len(self._tsAllStats)):
+ prev = 0;
+ for entry in ret:
+ timeStamp = entry[-1]
+ found = False;
+
+ for idx in range(prev, len(self._tsAllStats[i])):
+ diff = abs(self._tsAllStats[i][idx][-1] - timeStamp)
+ if (diff < interSampleTime):
+ found = True;
+ prev = idx;
+ break;
+
+ if (found):
+ entry[0] += self._tsAllStats[i][prev][0]
+ entry[1] += self._tsAllStats[i][prev][1]
+ mergedSampleCount += 1;
+ else:
+ break;
+
+ self._tsStats = ret[0: mergedSampleCount];
+
+ def _calcSetupRate(self):
+ for i in range(0, len(self._tsStats)):
+ prevCreated = 0
+ prevTime = 0
+ if (i > 0):
+ prevCreated = self._tsStats[i - 1][1];
+ prevTime = self._tsStats[i - 1][-1];
+ curCreated = self._tsStats[i][1];
+ curTime = self._tsStats[i][-1];
+
+ setupRate = (curCreated - prevCreated)/(curTime - prevTime)
+
+ self._tsStats[i].append(setupRate);
+
+
+ def _readSutStats(self):
+ ret = []
+ fileName = self._testResults.getSUTStatsDump();
+ beg = self._testResults.sutTime.getTime(0);
+ f = SutStatsConsFile(fileName, beg);
+ entry = f.readNext();
+ self._sutHz = f.getHz();
+ while (entry is not None):
+ ret.append(entry);
+ entry = f.readNext();
+ f.close();
+ return ret;
+
+ def _readAllTSStats(self):
+ stats = []
+ for i in range(self._testResults.getTSCount()):
+ fileName = self._testResults.getTSStatsDump()[i]
+ beg = self._testResults.tsTimes[i].getTime(0)
+ tsStat = self._readTSStats(fileName, beg)
+ stats.append(tsStat);
+ return stats;
+
+ def _readTSStats(self, fileName, beg):
+ ret = []
+ f = TSStatsConsFile(fileName, beg)
+
+ entry = f.readNext()
+ while (entry is not None):
+ ret.append(entry);
+ entry = f.readNext();
+ f.close()
+ return ret;
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py b/VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py
new file mode 100644
index 00000000..a25c1232
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py
@@ -0,0 +1,84 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import os
+import struct
+
+class StatsConsFile:
+ def __init__(self, file_name, tsc = None):
+ self._file = open(file_name, "rb");
+ try:
+ data = self._file.read(4*8);
+ dataUnpacked = struct.unpack("<qqqq", data);
+
+ self._hz = dataUnpacked[0]
+ if (tsc is None):
+ self._tsc = dataUnpacked[1]
+ else:
+ self._tsc = tsc;
+
+ self._entryCount = dataUnpacked[2]
+ fieldCount = dataUnpacked[3]
+
+ data = self._file.read(fieldCount);
+ fmt = "b" * fieldCount;
+
+ dataUnpacked = struct.unpack("<" + fmt, data);
+ self._entryFmt = "<";
+ self._entrySize = 0;
+
+ for e in dataUnpacked:
+ if (e == 4):
+ self._entryFmt += "i"
+ elif (e == 8):
+ self._entryFmt += "q"
+ else:
+ raise Exception("Unknown field format: " + str(e))
+ self._entrySize += e
+ except:
+ print "except"
+ self._file.close();
+
+ def setBeg(self, tsc):
+ self._tsc = tsc
+
+ def getBeg(self):
+ return self._tsc;
+
+ def getHz(self):
+ return self._hz
+
+ def readNext(self):
+ ret = []
+ for i in range(self._entryCount):
+ entry = self._readNextEntry()
+ if (entry == None):
+ return None;
+ ret.append(entry);
+ return ret;
+
+ def _readNextEntry(self):
+ try:
+ entry = self._file.read(self._entrySize);
+ entryUnpacked = struct.unpack(self._entryFmt, entry);
+ return list(entryUnpacked)
+ except:
+ return None;
+
+ def close(self):
+ self._file.close();
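+
+# File layout, as read above: a header of four little-endian int64 values
+# (hz, start tsc, entry count per sample, field count), then one byte per field
+# giving its size in bytes (4 or 8), then packed little-endian samples of
+# 'entry count' entries each.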
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py b/VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py
new file mode 100644
index 00000000..82bca9a8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py
@@ -0,0 +1,61 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from statsconsfile import *
+from decimal import *
+
+class SutStatsConsFile:
+ def __init__(self, fileName, offset):
+ self.offset = offset;
+ self.statsConsFile = StatsConsFile(fileName)
+
+ def readNext(self):
+ entry = self._readNextEntry();
+
+ if (entry is None):
+ return None;
+
+ while (entry is not None and entry[-1] <= 0):
+ entry = self._readNextEntry();
+ return entry;
+
+ def getHz(self):
+ return self.statsConsFile.getHz();
+
+ def _readNextEntry(self):
+ entry = self.statsConsFile.readNext();
+ if (entry is None):
+ return None;
+
+ rx = 0;
+ tx = 0;
+ drop = 0;
+ last_tsc = 0;
+
+ for i in range(0, len(entry), 2):
+ rx += entry[i][2]
+ tx += entry[i][3]
+ drop += entry[i][4]
+ last_tsc = entry[i][5]
+
+ last_tsc -= self.offset;
+ last_tsc = Decimal(last_tsc) / self.statsConsFile.getHz();
+ return [rx, tx, drop, last_tsc];
+
+ def close(self):
+ self.statsConsFile.close();
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py b/VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py
new file mode 100644
index 00000000..9e35576f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py
@@ -0,0 +1,73 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+class SystemConfig:
+ _user = None
+ _ip = None
+ _proxDir = None
+ _cfgFile = None
+ def __init__(self, user, ip, proxDir, configDir):
+ self._user = user;
+ self._ip = ip;
+ self._proxDir = proxDir;
+ self._cfgFile = configDir;
+ def __init__(self, text):
+ self._user = text.split("@")[0];
+ text = text.split("@")[1];
+ self._ip = text.split(":")[0];
+ self._proxDir = text.split(":")[1];
+ self._cfgFile = text.split(":")[2];
+
+ def getUser(self):
+ return self._user;
+
+ def getIP(self):
+ return self._ip;
+
+ def getProxDir(self):
+ return self._proxDir;
+
+ def getCfgFile(self):
+ return self._cfgFile;
+
+ @staticmethod
+ def checkSyntax(text):
+ split = text.split("@");
+ if (len(split) != 2):
+ return SystemConfig.getSyntaxError(text);
+ after = split[1].split(":");
+ if (len(after) != 3):
+ return SystemConfig.getSyntaxError(text);
+ return ""
+ def toString(self):
+ ret = "";
+ ret += " " + self._user + "@" + self._ip + "\n"
+ ret += " " + "prox dir: " + self._proxDir + "\n"
+ ret += " " + "cfg dir: " + self._cfgFile + "\n"
+ return ret;
+
+ @staticmethod
+ def getSyntaxError(text):
+		ret = "Invalid system syntax"
+ ret += ", got: " + str(text)
+ ret += ", expected: " + str(SystemConfig.expectedSyntax())
+ return ret;
+
+ @staticmethod
+ def expectedSyntax():
+ return "user@ip:proxDir:cfgFile"
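+
+# Usage sketch (values are illustrative; the string format matches
+# expectedSyntax() above):
+#   cfg = SystemConfig("root@10.0.0.5:/root/dppd-prox:/root/config/pe.cfg")
+#   print cfg.toString()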
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py b/VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py
new file mode 100644
index 00000000..fe3dce72
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py
@@ -0,0 +1,176 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from proxdpitester import *
+
+class testerSet:
+ def __init__(self, test_systems, maxRate, testParam):
+ self._test_systems = [];
+ self._reason = ""
+ self._maxRate = maxRate
+
+ testParamPerSystem = testParam.getPerSystem(len(test_systems));
+
+ for i in range(len(test_systems)):
+ ts = test_systems[i];
+ to_add = ProxDpiTester(ts, testParamPerSystem, i);
+ self.add_test_system(to_add);
+
+ def getCount(self):
+ return len(self._test_systems);
+
+ def add_test_system(self, test_system):
+ self._test_systems.append(test_system);
+
+ def startFork(self):
+ print "Starting test systems:"
+ for ts in self._test_systems:
+ print "\t" + str(ts.getIP())
+ ts.startFork();
+
+ def startJoin(self):
+ for ts in self._test_systems:
+ elapsed = ts.startJoin();
+ if (elapsed == None):
+ print "Failed to start on " + str(ts.getIP())
+ else:
+ print "Started on " + str(ts.getIP())
+ sleep(1);
+
+ def startForkJoin(self):
+ self.startFork();
+ self.startJoin();
+
+ def update_stats(self):
+ for ts in self._test_systems:
+ ts.update_stats();
+
+ def wait_links_up(self):
+ for ts in self._test_systems:
+ ts.waitAllLinksUp();
+ sleep(1);
+
+ def start_cores(self):
+ for ts in self._test_systems:
+ ts.start_all_ld();
+ ts.waitCmdFinished();
+ for ts in self._test_systems:
+ ts.start_all_workers();
+ for ts in self._test_systems:
+ ts.waitCmdFinished();
+
+ def stop_cores(self):
+ for ts in self._test_systems:
+ ts.stop_all_workers();
+ ts.stop_all_ld();
+
+ for ts in self._test_systems:
+ ts.waitCmdFinished();
+
+ def getTsc(self):
+ ret = []
+ for ts in self._test_systems:
+ ret += [ts.getTsc()]
+ return ret;
+
+ def get_setup_rate(self):
+ total = 0;
+ for ts in self._test_systems:
+ total += ts.getCurrentSetupRate();
+ return total
+
+ def get_total_connections(self):
+ total = 0;
+ for ts in self._test_systems:
+ ts_tot_conn = ts.get_total_connections();
+ total += ts_tot_conn
+
+ return total;
+
+ def get_total_retx(self):
+ total = 0;
+ for ts in self._test_systems:
+ total += ts.get_total_retx();
+ return total;
+
+ def getIerrors(self):
+ total = 0;
+ for ts in self._test_systems:
+ total += ts.getIerrorsCached();
+ return total;
+
+ def get_rates(self):
+ rates = [];
+ for ts in self._test_systems:
+ rates += ts.get_rates_client_ports();
+ return rates;
+
+ def tx_rate_meassurement(self):
+ rates = []
+ for ts in self._test_systems:
+ rates += ts.tx_rate_meassurement();
+ return rates;
+
+ def scpStatsDump(self, dst):
+ ret = []
+ for i in range(len(self._test_systems)):
+ dstFileName = dst + str(i);
+ ret.append(dstFileName);
+ self._test_systems[i].scpStatsDump(dstFileName)
+ return ret;
+
+ def conditionsGood(self):
+ tot_retx = self.get_total_retx();
+ rates = self.get_rates();
+ ierrors = self.getIerrors();
+
+ if (tot_retx > 100):
+ self._reason = "Too many reTX (" + str(tot_retx) + ")"
+ return False;
+ if (ierrors > 0):
+ self._reason = "Too many ierrors (" + str(ierrors) + ")"
+ return False;
+ for i in range(0, len(rates)):
+ if (rates[i] > self._maxRate):
+ self._setReason(i, rates)
+ return False;
+ return True;
+
+ def _setReason(self, port, rates):
+ portStr = str(port);
+ rateStr = str(rates[port])
+ maxRateStr = str(self._maxRate);
+ allRatesStr = str(rates);
+
+ fmt = "Rate on port %s = %s > %s, rate on all = %s"
+ self._reason = fmt % (portStr, rateStr, maxRateStr, allRatesStr)
+
+ def getReason(self):
+ return self._reason;
+
+ def quitProx(self):
+ for ts in self._test_systems:
+ ts.quitProx();
+
+ def killProx(self):
+ for ts in self._test_systems:
+ ts.stop_all_workers();
+ for ts in self._test_systems:
+ ts.stop_all_ld();
+ for ts in self._test_systems:
+ ts.killProx();
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py b/VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py
new file mode 100644
index 00000000..521a0893
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py
@@ -0,0 +1,39 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from decimal import *
+
+class TimeSeriesPoint:
+ def __init__(self, value, instant):
+ self._value = value;
+ self._instant = instant;
+
+ def getValue(self):
+ return self._value;
+
+ def getInstant(self):
+ return self._instant;
+
+ def getRateOfChange(self, other):
+ diff = self.getValue() - other.getValue();
+ t_diff = self.getInstant() - other.getInstant();
+
+ if (diff == 0 or abs(t_diff) <= 0.00001):
+ return Decimal(0)
+ else:
+ return Decimal(diff)/t_diff
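+
+# Usage sketch: rate of change between two samples, e.g. connections created
+# per second (values are illustrative):
+#   prev = TimeSeriesPoint(1000, Decimal(10))
+#   cur = TimeSeriesPoint(2500, Decimal(11))
+#   rate = cur.getRateOfChange(prev)   # Decimal(1500) per second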
diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py b/VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py
new file mode 100644
index 00000000..10e48a68
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py
@@ -0,0 +1,60 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from statsconsfile import *
+from decimal import *
+
+class TSStatsConsFile:
+ def __init__(self, fileName, offset):
+ self.offset = offset;
+ self.statsConsFile = StatsConsFile(fileName)
+
+ def readNext(self):
+ entry = self._readNextEntry();
+ if (entry is None):
+ return None;
+
+ while (entry is not None and entry[-1] <= 0):
+ entry = self._readNextEntry();
+
+ return entry;
+
+ def _readNextEntry(self):
+ entry = self.statsConsFile.readNext();
+ if (entry is None):
+ return None;
+
+ rx = 0;
+ tx = 0;
+ active = 0;
+ created = 0;
+ last_tsc = 0;
+ for i in range(0, len(entry), 2):
+ active += entry[i][2]
+ created += entry[i][3]
+ rx += entry[i][4]
+ tx += entry[i][5]
+ last_tsc = entry[i][6]
+
+ last_tsc -= self.offset;
+ last_tsc = Decimal(last_tsc) / self.statsConsFile.getHz();
+
+ return [active, created, rx, tx, last_tsc];
+
+ def close(self):
+ self.statsConsFile.close();
diff --git a/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl
new file mode 100755
index 00000000..8e42eeba
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl
@@ -0,0 +1,271 @@
+#!/usr/bin/perl
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+use strict vars;
+use Getopt::Long;
+use Pod::Usage;
+use Net::Pcap;
+use Net::Frame::Layer;
+use Net::Frame::Layer::ETH qw(:consts);
+use Net::Frame::Layer::IPv6 qw(:consts);
+use Net::Frame::Layer::IPv4 qw(:consts);
+use Net::Frame::Layer::UDP;
+use Socket qw(AF_INET AF_INET6 inet_ntop inet_pton);
+
+use constant NUM_PACKETS => 30000;
+
+use constant ETHER_ADDR_LEN => 6;
+use constant ETHER_TYPE_LEN => 2;
+use constant ETHER_HDR_LEN => ( 2 * ETHER_ADDR_LEN ) + ETHER_TYPE_LEN;
+use constant ETHER_STATIC_MAC => "78acdddddddd";
+
+use constant UDP_HDR_LEN => 8;
+use constant UDP_STATIC_PORT => 0x6666;
+
+use constant IPv6_HOP_LIMIT => 4;
+use constant IPv6_STATIC_IP => "2222:2222:2222:2222:2222:2222:2222:2222";
+
+use constant IPv4_TIME_TO_LIVE => 32;
+use constant IPv4_STATIC_IP => "68.68.68.68";
+
+srand;
+
+my $type = 'tun';
+my $pkt_count = NUM_PACKETS;
+
+GetOptions(
+ 'inet' => sub { $type = 'inet'},
+ 'tun' => sub { $type = 'tun'},
+ 'count=i' => \$pkt_count,
+ 'in=s' => \(my $in = 'ip6_tun_bind.lua'),
+ 'out=s' => \(my $out = 'output.pcap'),
+ 'size=s' => \(my $size = 0)
+) or exit;
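+
+# Example invocation (values are illustrative): generate 1000 tunnel packets
+# with a 64-byte payload from the default bindings file:
+#   ./gen_4over6.pl --tun --count 1000 --size 64 --in ip6_tun_bind.lua --out tun.pcap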
+
+my $pcap = pcap_open_dead( DLT_EN10MB, 65535 );
+my $dumper = pcap_dump_open($pcap, $out ) or die 'Could not create output file: ' . $out;
+
+if( $type eq 'inet' ) {
+ gen_inet_pcap( $in, $pkt_count );
+}
+if( $type eq 'tun' ) {
+ gen_tun_pcap( $in, $pkt_count );
+}
+
+pcap_dump_close( $dumper );
+pcap_close( $pcap );
+
+# Trim string
+sub trim {
+ my ( $str ) = @_;
+
+ $str =~ s/^\s+|\s+$//g;
+
+ return $str;
+}
+
+# Generate random port based on $port and $port_mask
+sub rand_port {
+ my ( $port, $port_mask ) = @_;
+
+ return ( $port | int( rand( 0xFFFF ) & $port_mask ) );
+}
+
+# Generate packet originating from CPE
+sub gen_tun_packet {
+ my ( $sz, $ether, $ipv6, $ipv4, $udp ) = @_;
+
+ my $hdr_ether = Net::Frame::Layer::ETH->new(
+ src => $ether->{'src'},
+ dst => $ether->{'dst'},
+ type => NF_ETH_TYPE_IPv6
+ )->pack;
+
+ my $hdr_ipv6 = Net::Frame::Layer::IPv6->new(
+ nextHeader => NF_IPv6_PROTOCOL_IPIP,
+ hopLimit => IPv6_HOP_LIMIT,
+ src => $ipv6->{'src'},
+ dst => $ipv6->{'dst'},
+ payloadLength => $sz + NF_IPv4_HDR_LEN + UDP_HDR_LEN
+ )->pack;
+
+ my $hdr_ipv4 = Net::Frame::Layer::IPv4->new(
+ length => $sz + UDP_HDR_LEN + NF_IPv4_HDR_LEN,
+ ttl => IPv4_TIME_TO_LIVE,
+ protocol => NF_IPv4_PROTOCOL_UDP,
+ src => $ipv4->{'src'},
+ dst => $ipv4->{'dst'}
+ )->pack;
+
+ my $hdr_udp = Net::Frame::Layer::UDP->new(
+ src => $udp->{'src'},
+ dst => $udp->{'dst'},
+ length => $sz + UDP_HDR_LEN
+ )->pack;
+
+ my $pkt = pack( "H*", "de" x $sz );
+ $pkt = $hdr_ether . $hdr_ipv6 . $hdr_ipv4 . $hdr_udp . $pkt;
+
+ my $pkt_size = length( $pkt );
+
+ my $hdr = {
+ tv_sec => 0,
+ tv_usec => 0,
+ len => $pkt_size,
+ caplen => $pkt_size
+ };
+
+ return ( $hdr, $pkt );
+}
+
+# Generate packet originating from the internet
+sub gen_inet_packet {
+ my ( $sz, $ether, $ipv4, $udp ) = @_;
+
+ my $hdr_ether = Net::Frame::Layer::ETH->new(
+ src => $ether->{'src'},
+ dst => $ether->{'dst'},
+ type => NF_ETH_TYPE_IPv4
+ )->pack;
+
+ my $hdr_ipv4 = Net::Frame::Layer::IPv4->new(
+ length => $sz + UDP_HDR_LEN + NF_IPv4_HDR_LEN,
+ ttl => IPv4_TIME_TO_LIVE,
+ protocol => NF_IPv4_PROTOCOL_UDP,
+ src => $ipv4->{'src'},
+ dst => $ipv4->{'dst'}
+ )->pack;
+
+ my $hdr_udp = Net::Frame::Layer::UDP->new(
+ src => $udp->{'src'},
+ dst => $udp->{'dst'},
+ length => $sz + UDP_HDR_LEN
+ )->pack;
+
+ my $pkt = pack( "H*", "de" x $sz );
+ $pkt = $hdr_ether . $hdr_ipv4 . $hdr_udp . $pkt;
+
+ my $pkt_size = length( $pkt );
+
+ my $hdr = {
+ tv_sec => 0,
+ tv_usec => 0,
+ len => $pkt_size,
+ caplen => $pkt_size
+ };
+
+ return ( $hdr, $pkt );
+}
+
+# Read bindings file
+sub read_bindings {
+ my ( $file ) = @_;
+
+ print "Reading bindings file...\n";
+
+ my @rows;
+
+ open my $fh, "<:encoding(utf8)", $file or die $file . ": $!";
+LINE: while ( my $line = <$fh> ) {
+ next if ($line =~ /^--.*/); # Skip comments
+
+ my ($ip6, $mac, $ip4, $port);
+ if ($line =~ /\s*\{.*\},\s*$/) { # Weak check for a data line...
+
+ $line =~ /ip6\s*=\s*ip6\("([^\)]*)"\)/ && do { $ip6 = trim($1); };
+ unless ( inet_pton( AF_INET6, $ip6 ) ) { print "ERROR - Invalid ipv6: $ip6\n"; next LINE; }
+
+ $line =~ /ip\s*=\s*ip\("([^\)]*)"\)/ && do { $ip4 = trim($1); };
+ unless ( inet_pton( AF_INET, $ip4 ) ) { print "ERROR - Invalid ipv4: $ip4\n"; next LINE; }
+
+ $line =~ /mac\s*=\s*mac\("([^\)]*)"\)/ && do { $mac = trim($1); };
+ unless ( $mac =~ /^([0-9a-f]{2}([:-]|$)){6}$/i ) { print "ERROR - Invalid mac: $mac\n"; next LINE; }
+
+ $line =~ /port\s*=\s*([0-9]*)/ && do { $port = trim($1); };
+ unless ( int($port) ) { print "ERROR - Invalid port number: $port\n"; next LINE; }
+
+ push @rows, {
+ ipv6 => $ip6,
+ mac => $mac,
+ ipv4 => $ip4,
+ port => $port
+ }
+ }
+ }
+ close $fh;
+
+ return @rows;
+}
+
+# Generate packets originating from CPE
+sub gen_tun_pcap {
+ my ( $binding_file, $pkt_count ) = @_;
+ my @bind = read_bindings($binding_file);
+ my $idx = 0;
+ my $row;
+ my $public_port = 0;
+
+ print "Generating $pkt_count Tunnel packets...\n";
+
+ my $max = @bind;
+ for( my $i=0; $i<$pkt_count; $i++ ) {
+
+ $idx = rand $max;
+	$row = $bind[$idx];
+
+ $public_port = rand_port( $row->{port}, 0x3f );
+
+ my ( $hdr, $pkt ) = gen_tun_packet(
+ $size,
+ { src => $row->{mac}, dst => ETHER_STATIC_MAC },
+ { src => $row->{ipv6}, dst => IPv6_STATIC_IP },
+ { src => $row->{ipv4}, dst => IPv4_STATIC_IP },
+ { src => $public_port, dst => UDP_STATIC_PORT }
+ );
+
+ pcap_dump( $dumper, $hdr, $pkt );
+ }
+}
+
+# Generate packets originating from the internet
+sub gen_inet_pcap {
+ my ( $binding_file, $pkt_count ) = @_;
+ my @bind = read_bindings($binding_file);
+ my $idx = 0;
+ my $row;
+ my $public_port = 0;
+
+ print "Generating $pkt_count Internet packets...\n";
+
+ my $max = @bind;
+ for( my $i=0; $i<$pkt_count; $i++ ) {
+
+ $idx = rand $max;
+	$row = $bind[$idx];
+
+ $public_port = rand_port( $row->{port}, 0x3f );
+
+ my ( $hdr, $pkt ) = gen_inet_packet(
+ $size,
+ { src => ETHER_STATIC_MAC, dst => $row->{mac} },
+ { src => IPv4_STATIC_IP, dst => $row->{ipv4} },
+ { src => UDP_STATIC_PORT, dst => $public_port }
+ );
+
+ pcap_dump( $dumper, $hdr, $pkt );
+ }
+}
diff --git a/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl
new file mode 100755
index 00000000..02af5103
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl
@@ -0,0 +1,266 @@
+#!/usr/bin/perl
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+=head1 NAME
+
+ipv6_tun_bindings.pl
+
+=head1 SYNOPSIS
+
+ ipv6_tun_bindings.pl [-n <num_entries>] [-tun_ip <ipv6>] [-mac <next_hop_mac>]
+ [-pub_ip <ipv4>] [-port <begin>-<end>] [-set <num_ports>]
+ [-suffix <suffix>] [-test <num_entries>] [-sym|-nosym]
+ [-help]
+
+=head1 DESCRIPTION
+
+This script can be used to generate a binding table for the IPv6 Tunnel
+task implemented in PROX (ipv6_encap and ipv6_decap).
+The entries in this table bind a specific tunnel endpoint (lwB4 in lw4over6
+architecture) to a public IPv4 address and port set.
+The port set is actually derived from the port specified in the table
+and a port bitmask in the PROX task configuration ("lookup port mask").
+
+The ipv6_encap task uses the binding table to know where to tunnel IPv4
+traffic to. The ipv6_decap task uses the table to verify tunnel packets
+have a valid public IPv4 and port combination for the originating tunnel.
+
+The table uses the Lua syntax so it can be loaded into PROX. Example:
+
+ return {
+   {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0000"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4608},
+   {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0001"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4672},
+   {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0002"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4736},
+   {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0003"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4800},
+ }
+
+The script generates consecutive entries, starting from a given IP address
+and assigning ports within a given range, increasing the port number by a
+fixed amount which should correspond to the port lookup mask being used.
+
+UDF table: In addition to the binding table itself, the script can optionally
+generate accompanying UDF tables for generating test traffic matching the
+binding table. Such UDF tables can then be used in a traffic generation tool.
+
+=head1 OPTIONS
+
+=over 22
+
+=item -n <num_entries>
+
+How many entries in the binding table
+
+=item -tun_ip <ipv6>
+
+Starting tunnel endpoint IPv6 address (will be incremented)
+
+=item -mac <next_hop_mac>
+
+MAC address of the next hop to reach the tunnel endpoints
+
+=item -pub_ip <ipv4>
+
+Starting public IPv4 address
+
+=item -port <begin>-<end>
+
+Range of ports where to assign Port Sets
+
+=item -set <num_ports>
+
+Number of ports in set (should be a power of 2 because bitmasking is used
+in lwAFTR)
+
+=item -suffix <suffix>
+
+Filename suffix to use for the generated file(s)
+
+=item -test <num_entries>
+
+Number of random entries to put into test UDF table
+
+=item -sym
+
+Whether the same random entry from the table should be inserted into both
+traffic sides or if different entries should be used
+
+=item -help
+
+Shows the full script documentation.
+
+=back
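+
+=head1 EXAMPLE
+
+Generate 100 bindings with port sets of 64 ports (matching a "lookup port mask"
+of 0x3f in the PROX task configuration) and a 10000-entry test UDF table; the
+values below are only an illustration:
+
+ ipv6_tun_bindings.pl -n 100 -set 64 -port 4608-11968 -test 10000 -suffix _demo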
+
+=head1 AUTHOR
+
+ Copyright(c) 2010-2017 Intel Corporation.
+ All rights reserved.
+
+=cut
+
+
+use strict vars;
+use Getopt::Long;
+use Pod::Usage;
+use Socket qw(AF_INET AF_INET6 inet_ntop inet_pton);
+
+sub parse_ip
+{
+ my ($str, $ip_ref, $family) = @_;
+
+ my $packed = inet_pton($family, $str);
+ return 0 if (!defined($packed));
+
+ if ($family == AF_INET6) {
+ #print unpack("H*", $packed). "\n";
+ my @w = unpack("NNNN", $packed);
+ my ($high, $low) = (($w[0] << 32) | $w[1], ($w[2] << 32) | $w[3]);
+ @$ip_ref = ($high, $low);
+ }
+ else {
+ $$ip_ref = unpack("N", $packed);
+ }
+ return 1;
+}
+
+sub ntop6
+{
+ my ($in) = @_;
+ my $packed = pack('NNNN', $in->[0] >> 32, $in->[0] & 0xffffffff,
+ $in->[1] >> 32, $in->[1] & 0xffffffff);
+ return inet_ntop(AF_INET6, $packed);
+}
+
+sub ntop6_expanded
+{
+ my ($in) = @_;
+ return sprintf('%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x',
+ ($in->[0] >> 48) & 0xffff, ($in->[0] >> 32) & 0xffff,
+ ($in->[0] >> 16) & 0xffff, ($in->[0] ) & 0xffff,
+ ($in->[1] >> 48) & 0xffff, ($in->[1] >> 32) & 0xffff,
+ ($in->[1] >> 16) & 0xffff, ($in->[1] ) & 0xffff);
+}
+
+my ($tun_ip_str, $pub_ip_str, $ports_str);
+
+GetOptions(
+ 'help' => sub () { Pod::Usage::pod2usage( -verbose => 2 ); exit; },
+ 'n=i' => \(my $num_B4s = 10),
+ 'tun_ip=s' => \(my $tun_ip_str = 'fe80:0000:0000:0000:0200:00ff:0000:0000'),
+ 'pub_ip=s' => \(my $pub_ip_str = '171.205.239.1'),
+ 'mac=s' => \(my $next_hop_mac = 'fe:80:00:00:00:00'),
+ 'port=s' => \(my $ports_str='4608-11968'),
+ 'set=n' => \(my $port_set_sz = 64),
+ 'suffix=s' => \(my $suffix = ''),
+ 'test=n' => \(my $num_test_lines = 200000),
+	'sym!'       => \(my $symmetric_traffic = 1),
+) or pod2usage(-verbose => 1) && exit;
+
+my @tun_ip;
+parse_ip($tun_ip_str, \@tun_ip, AF_INET6) or print("Invalid starting tunnel IP: $tun_ip_str\n") && pod2usage(-verbose => 1) && exit;
+parse_ip($pub_ip_str, \(my $pub_ip), AF_INET) or print("Invalid starting public IP: $pub_ip_str\n") && pod2usage(-verbose => 1) && exit;
+my @port_range;
+if ($ports_str =~ /^(\d+)\s*-\s*(\d+)$/) {
+ @port_range = ($1, $2);
+}
+else { print "Invalid port range: $ports_str\n"; pod2usage(-verbose => 1); exit }
+
+# Summary of input data
+print "File suffix: $suffix\n" if ($suffix);
+print "Starting Tunnel IP: " . ntop6(\@tun_ip) . "\n";
+print "Starting Public IP: ".inet_ntop(AF_INET, pack("N", $pub_ip)) . "\n";
+print "Public Port Range: $port_range[0]-$port_range[1] by blocks of $port_set_sz\n";
+
+my @data; # Holds generated binding table, so we can later generate test traffic for it
+
+# Binding table for PROX IPv6 Tunnel
+my $filename = 'ip6_tun_bind'.$suffix.'.lua';
+print "\nGenerating binding table with $num_B4s entries into $filename ... ";
+open(my $fh, '>', $filename) or die "Could not open file '$filename' $!";
+print $fh "-- Bindings for lwaftr: lwB4 IPv6 address, next hop MAC address\n";
+print $fh "-- towards lwB4, IPv4 Public address, IPv4 Public Port Set\n";
+print $fh "\n";
+print $fh "return {" . "\n";
+my $port = $port_range[0];
+for (my $B4_id = 0; $B4_id < $num_B4s; $B4_id++) {
+ $data[$B4_id]{'b4_ipv6'} = ntop6_expanded(\@tun_ip);
+ $data[$B4_id]{'pub_ipv4'} = "" . (($pub_ip >> 24) & 0xff) . "." . (($pub_ip >> 16) & 0xff) . "." . (($pub_ip >> 8) & 0xff) . "." . ($pub_ip & 0xff);
+ $data[$B4_id]{'pub_port'} = $port;
+ $data[$B4_id]{'next_hop_mac'} = $next_hop_mac;
+
+ print $fh " {";
+ print $fh "ip6 = ip6(\"" . $data[$B4_id]{'b4_ipv6'} . "\")";
+ print $fh ", mac = mac(\"" . $data[$B4_id]{'next_hop_mac'} . "\")";
+ print $fh ", ip = ip(\"" . $data[$B4_id]{'pub_ipv4'} . "\")";
+ print $fh ", port = " . $data[$B4_id]{'pub_port'};
+ print $fh "},\n";
+
+ $port += $port_set_sz;
+ if ($port > $port_range[1]) {
+ $pub_ip++;
+ $port = $port_range[0];
+ }
+
+ # Move to next Tunnel address
+	if ($tun_ip[1] < 0xffffffffffffffff) {
+		$tun_ip[1]++;
+	} else {
+		$tun_ip[0]++;
+		$tun_ip[1] = 0;
+	}
+}
+print $fh "}" . "\n";
+close $fh;
+print "[DONE]\n";
+
+# Test traffic "UDF Tables"
+if ($num_test_lines) {
+ print "Generating $num_test_lines lines of test UDF table into lwAFTR_tun|inet".$suffix.".csv ... ";
+
+ # Tunnel Packets from B4 to lwAFTR
+ my $filename = 'lwAFTR_tun' . $suffix . '.csv';
+ open(my $fh_tun, '>', $filename) or die "Could not open file '$filename' $!";
+ print $fh_tun "b4_ip,pub_ip,pub_port\n";
+ print $fh_tun "22,66,74\n"; # Offsets
+ print $fh_tun "16,4,2\n"; # Sizes
+ print $fh_tun "6,5,3\n"; # Format (IPv6, IPv4, Decimal)
+ print $fh_tun ",,\n";
+
+	# Internet Packets towards the lwAFTR, to be sent to the corresponding lwB4 over the tunnel
+ my $filename = 'lwAFTR_inet' . $suffix . '.csv';
+ open(my $fh_inet, '>', $filename) or die "Could not open file '$filename' $!";
+ print $fh_inet "pub_ip,pub_port\n";
+ print $fh_inet "30,36\n"; # Offsets
+ print $fh_inet "4,2\n"; # Sizes
+	print $fh_inet "5,3\n";		# Format (IPv4, Decimal)
+ print $fh_inet ",,\n";
+
+ for (my $i = 0; $i < $num_test_lines; $i++) {
+ my $B4_id = int(rand($num_B4s));
+ my $port = $data[$B4_id]{'pub_port'} + int(rand($port_set_sz));
+ printf $fh_tun $data[$B4_id]{'b4_ipv6'} . "," . $data[$B4_id]{'pub_ipv4'} . "," . $port . "\n";
+
+ if (! $symmetric_traffic) {
+ $B4_id = int(rand($num_B4s));
+ $port = $data[$B4_id]{'pub_port'} + int(rand($port_set_sz));
+ }
+ printf $fh_inet $data[$B4_id]{'pub_ipv4'} . "," . $port . "\n";
+ }
+
+ close $fh_tun;
+ close $fh_inet;
+ print "[DONE]\n";
+}
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README
new file mode 100644
index 00000000..49d819d8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+rapid (Rapid Automated Performance Indication for Dataplane)
+************************************************************
+
+rapid is a set of files offering an easy way to do a sanity check of the
+dataplane performance of an OpenStack environment.
+
+Copy the files into a directory on a machine that can run the OpenStack CLI
+commands and that can reach the OpenStack public network. Also create a qcow2
+image in the same directory with the following characteristics:
+* Name of the qcow2 file should be: rapidVM.qcow2
+ This default name can be changed on the rapid command line
+* Should have DPDK and PROX installed. PROX should be in the /root/prox/ directory
+* Image should have cloud-init installed
+
+Source the openrc file of the OpenStack environment so that the OpenStack CLI
+commands can be run:
+ # source openrc
+Now you can run the rapid.py file. Use help for more info on the usage:
+ # ./rapid.py --help
+
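+For example, to use a non-default stack name and qcow2 image file (the names
+below are only an illustration):
+   # ./rapid.py --stack mytest --image_file myVM.qcow2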
+rapid will use the OpenStack CLI to create the flavor, key-pair, network, image,
+stack, ...
+Then it will connect to the 2 VMs that have been instantiated and it will launch
+PROX in both VMs.
+Once that is done it will connect to the PROX tcp socket and start sending
+commands to run the actual test.
+It will print test results on the screen while running.
+The PROX instance in the Generator VM will generate packets which will arrive in
+the PROX instance running on the SUT (System Under Test) VM. The SUT will then
+send the packets back to the generator by swapping source and destination.
+
+Notes about prox_gen_user_data.sh and prox_sut_user_data.sh scripts:
+- These scripts contain commands that will be executed using cloud-init at
+ startup of the VMs. They contain a hard-coded PCI address for the DPDK
+ interface that will be used by PROX. You might want to check that this is
+ actually the right PCI address.
+- These scripts also assume a specific DPDK directory and tools, which might
+  change between DPDK releases. They have been tested with DPDK-17.02.
+- These scripts also assume that this interface is on the "dpdk-network"
+  network managed by OpenStack.
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg
new file mode 100644
index 00000000..522eb801
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg
@@ -0,0 +1,64 @@
+;;
+;; Copyright (c) 2010-2017 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+dofile("parameters.lua")
+
+[port 0]
+name=p0
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=4K
+
+[global]
+name=Basic Gen
+
+[core 0]
+mode=master
+
+[core 1]
+name=p0
+task=0
+mode=gen
+sub mode=l3
+rx ring=yes
+tx port=p0
+bps=1250000000
+pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d ${gen_hex_ip} ${sut_hex_ip} 0b b8 0b b9 00 08 55 7b
+gateway ipv4=${sut_ip}
+local ipv4=${gen_ip}
+min bulk size=$mbs
+;random=XXXXXXXXXXXXXXXX
+;random=0000000000XXXXXX ; 64 possibilities
+;rand_offset=34 ; SOURCE UDP PORT
+;random=XXXXXXXXXXXXXXXX
+;random=000000000XXXXXXX ; 128
+;rand_offset=36 ; DESTINATION UDP PORT
+
+[core 2]
+task=0
+mode=arp
+rx port=p0,p0,p0,p0
+tx port=p0
+tx cores=1t0
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
new file mode 100644
index 00000000..b384e9f0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
@@ -0,0 +1,218 @@
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from __future__ import print_function
+
+import os
+import subprocess
+import socket
+
+class prox_ctrl(object):
+ def __init__(self, ip, key=None, user=None):
+ self._ip = ip
+ self._key = key
+ self._user = user
+ self._children = []
+ self._proxsock = []
+
+ def ip(self):
+ return self._ip
+
+ def connect(self):
+ """Simply try to run 'true' over ssh on remote system.
+ On failure, raise RuntimeWarning exception when possibly worth
+ retrying, and raise RuntimeError exception otherwise.
+ """
+ return self.run_cmd('true', True)
+
+ def close(self):
+ """Must be called before program termination."""
+ for prox in self._proxsock:
+ prox.quit()
+ children = len(self._children)
+ if children == 0:
+ return
+ if children > 1:
+ print('Waiting for %d child processes to complete ...' % children)
+ for child in self._children:
+ ret = os.waitpid(child[0], os.WNOHANG)
+ if ret[0] == 0:
+ print("Waiting for child process '%s' to complete ..." % child[1])
+ ret = os.waitpid(child[0], 0)
+ rc = ret[1]
+ if os.WIFEXITED(rc):
+ if os.WEXITSTATUS(rc) == 0:
+ print("Child process '%s' completed successfully" % child[1])
+ else:
+ print("Child process '%s' returned exit status %d" % (
+ child[1], os.WEXITSTATUS(rc)))
+ elif os.WIFSIGNALED(rc):
+ print("Child process '%s' exited on signal %d" % (
+ child[1], os.WTERMSIG(rc)))
+ else:
+ print("Wait status for child process '%s' is 0x%04x" % (
+ child[1], rc))
+
+ def run_cmd(self, command, _connect=False):
+ """Execute command over ssh on remote system.
+ Wait for remote command completion.
+ Return command output (combined stdout and stderr).
+ _connect argument is reserved for connect() method.
+ """
+ cmd = self._build_ssh(command)
+ try:
+ return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as ex:
+ if _connect and ex.returncode == 255:
+ raise RuntimeWarning(ex.output.strip())
+ raise RuntimeError('ssh returned exit status %d:\n%s'
+ % (ex.returncode, ex.output.strip()))
+
+ def fork_cmd(self, command, name=None):
+ """Execute command over ssh on remote system, in a child process.
+ Do not wait for remote command completion.
+ Return child process id.
+ """
+ if name is None:
+ name = command
+ cmd = self._build_ssh(command)
+ pid = os.fork()
+ if (pid != 0):
+ # In the parent process
+ self._children.append((pid, name))
+ return pid
+ # In the child process: use os._exit to terminate
+ try:
+ # Actually ignore output on success, but capture stderr on failure
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as ex:
+ raise RuntimeError("Child process '%s' failed:\n"
+ 'ssh returned exit status %d:\n%s'
+ % (name, ex.returncode, ex.output.strip()))
+ os._exit(0)
+
+ def prox_sock(self, port=8474):
+ """Connect to the PROX instance on remote system.
+ Return a prox_sock object on success, None on failure.
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect((self._ip, port))
+ prox = prox_sock(sock)
+ self._proxsock.append(prox)
+ return prox
+ except:
+ return None
+
+ def scp_put(self, src, dst):
+ """Copy src file from local system to dst on remote system."""
+ cmd = [ 'scp',
+ '-B',
+ '-oStrictHostKeyChecking=no',
+ '-oUserKnownHostsFile=/dev/null',
+ '-oLogLevel=ERROR' ]
+ if self._key is not None:
+ cmd.extend(['-i', self._key])
+ cmd.append(src)
+ remote = ''
+ if self._user is not None:
+ remote += self._user + '@'
+ remote += self._ip + ':' + dst
+ cmd.append(remote)
+ try:
+ # Actually ignore output on success, but capture stderr on failure
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as ex:
+ raise RuntimeError('scp returned exit status %d:\n%s'
+ % (ex.returncode, ex.output.strip()))
+
+ def _build_ssh(self, command):
+ cmd = [ 'ssh',
+ '-oBatchMode=yes',
+ '-oStrictHostKeyChecking=no',
+ '-oUserKnownHostsFile=/dev/null',
+ '-oLogLevel=ERROR' ]
+ if self._key is not None:
+ cmd.extend(['-i', self._key])
+ remote = ''
+ if self._user is not None:
+ remote += self._user + '@'
+ remote += self._ip
+ cmd.append(remote)
+ cmd.append(command)
+ return cmd
+
+class prox_sock(object):
+ def __init__(self, sock):
+ self._sock = sock
+ self._rcvd = b''
+
+ def quit(self):
+ if self._sock is not None:
+ self._send('quit')
+ self._sock.close()
+ self._sock = None
+
+ def start(self, cores):
+ self._send('start %s' % ','.join(map(str, cores)))
+
+ def stop(self, cores):
+ self._send('stop %s' % ','.join(map(str, cores)))
+
+ def speed(self, speed, cores, tasks=None):
+ if tasks is None:
+ tasks = [ 0 ] * len(cores)
+ elif len(tasks) != len(cores):
+ raise ValueError('cores and tasks must have the same len')
+ for (core, task) in zip(cores, tasks):
+ self._send('speed %s %s %s' % (core, task, speed))
+
+ def reset_stats(self):
+ self._send('reset stats')
+
+ def core_stats(self, cores, task=0):
+ rx = tx = drop = tsc = hz = 0
+ self._send('core stats %s %s' % (','.join(map(str, cores)), task))
+ for core in cores:
+ stats = self._recv().split(',')
+ rx += int(stats[0])
+ tx += int(stats[1])
+ drop += int(stats[2])
+ tsc = int(stats[3])
+ hz = int(stats[4])
+ return rx, tx, drop, tsc, hz
+
+ def set_random(self, cores, task, offset, mask, length):
+ self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, mask, length))
+
+ def _send(self, cmd):
+ """Append LF and send command to the PROX instance."""
+ if self._sock is None:
+ raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd)
+ self._sock.sendall(cmd.encode() + b'\n')
+
+ def _recv(self):
+ """Receive response from PROX instance, and return it with LF removed."""
+ if self._sock is None:
+ raise RuntimeError("PROX socket closed, cannot receive anymore")
+ pos = self._rcvd.find(b'\n')
+ while pos == -1:
+ self._rcvd += self._sock.recv(256)
+ pos = self._rcvd.find(b'\n')
+ rsp = self._rcvd[:pos]
+ self._rcvd = self._rcvd[pos+1:]
+ return rsp.decode()
+
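+# Usage sketch (IP address, key file, user and core list are illustrative):
+#   client = prox_ctrl('192.168.120.10', key='prox.pem', user='centos')
+#   client.connect()
+#   sock = client.prox_sock()
+#   if sock is not None:
+#       rx, tx, drop, tsc, hz = sock.core_stats([1, 2])
+#   client.close()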
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh
new file mode 100644
index 00000000..e7f58a9f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+echo 128 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
+mount -t hugetlbfs nodev /mnt/huge
+modprobe uio
+insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+/root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio 00:04.0
+iptables -F
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh
new file mode 100644
index 00000000..e7f58a9f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+echo 128 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
+mount -t hugetlbfs nodev /mnt/huge
+modprobe uio
+insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+/root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio 00:04.0
+iptables -F
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py
new file mode 100755
index 00000000..1a0ea41c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py
@@ -0,0 +1,445 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import subprocess
+import getopt
+from prox_ctrl import prox_ctrl
+
+version="17.04.19"
+stack = "rapidTestEnv" #Default string for stack
+yaml = "rapid.yaml" #Default string for yaml file
+key = "prox" # This is also the default in the yaml file....
+flavor = "prox_flavor" # This is also the default in the yaml file....
+image = "rapidVM" # This is also the default in the yaml file....
+image_file = "rapidVM.qcow2"
+network = "dpdk-network" # This is also the default in the yaml file....
+subnet = "dpdk-subnet" #Hardcoded at this moment
+
+def usage():
+ print("usage: rapid [--version] [-v]")
+ print(" [--stack STACK_NAME]")
+ print(" [--yaml YAML_FILE]")
+ print(" [--key KEY_NAME]")
+ print(" [--flavor FLAVOR_NAME]")
+ print(" [--image IMAGE_NAME]")
+ print(" [--image_file IMAGE_FILE]")
+ print(" [--network NETWORK]")
+ print(" [-h] [--help]")
+ print("")
+ print("Command-line interface to RAPID")
+ print("")
+ print("optional arguments:")
+ print(" -v, --version Show program's version number and exit")
+ print(" --stack STACK_NAME Specify a name for the heat stack. Default is rapidTestEnv.")
+ print(" --yaml YAML_FILE Specify the yaml file to be used. Default is rapid.yaml.")
+ print(" --key KEY_NAME Specify the key to be used. Default is prox.")
+ print(" --flavor FLAVOR_NAME Specify the flavor to be used. Default is prox_flavor.")
+ print(" --image IMAGE_NAME Specify the image to be used. Default is rapidVM.")
+ print(" --image_file IMAGE_FILE Specify the image qcow2 file to be used. Default is rapidVM.qcow2.")
+ print(" --network NETWORK Specify the network name to be used for the dataplane. Default is dpdk-network.")
+ print(" -h, --help Show help message and exit.")
+ print("")
+ print("To delete the rapid stack, type the following command")
+	print("   openstack stack delete --yes --wait rapidTestEnv")
+	print("Note that rapidTestEnv is the default stack name. Replace it with your STACK_NAME if needed")
+
+try:
+	opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "yaml=","stack=","key=","flavor=","image=","image_file=","network="])
+except getopt.GetoptError as err:
+ print("===========================================")
+	print(str(err))
+ print("===========================================")
+ usage()
+ sys.exit(2)
+if args:
+ usage()
+ sys.exit(2)
+for opt, arg in opts:
+	if opt in ("-h", "--help"):
+		usage()
+		sys.exit()
+	if opt in ("-v", "--version"):
+		print("Rapid Automated Performance Indication for Dataplane "+version)
+		sys.exit()
+	if opt in ("--stack",):
+		stack = arg
+		print ("Using '"+stack+"' as name for the stack")
+	elif opt in ("--yaml",):
+		yaml = arg
+		print ("Using yaml file: "+yaml)
+	elif opt in ("--key",):
+		key = arg
+		print ("Using key: "+key)
+	elif opt in ("--flavor",):
+		flavor = arg
+		print ("Using flavor: "+flavor)
+	elif opt in ("--image",):
+		image = arg
+		print ("Using image: "+image)
+	elif opt in ("--image_file",):
+		image_file = arg
+		print ("Using qcow2 file: "+image_file)
+	elif opt in ("--network",):
+		network = arg
+		print ("Using network: "+ network)
+
+print("Checking image: "+image)
+cmd = 'openstack image show '+image+' |grep "status " | tr -s " " | cut -d" " -f 4'
+ImageExist = subprocess.check_output(cmd , shell=True).strip()
+if ImageExist == 'active':
+ print("Image already available")
+else:
+ print('Creating image ...')
+ cmd = 'openstack image create --disk-format qcow2 --container-format bare --public --file ./'+image_file+ ' ' +image+' |grep "status " | tr -s " " | cut -d" " -f 4'
+ ImageExist = subprocess.check_output(cmd , shell=True).strip()
+ if ImageExist == 'active':
+ print('Image created and active')
+ cmd = 'openstack image set --property hw_vif_multiqueue_enabled="true" ' +image
+ subprocess.check_call(cmd , shell=True)
+ else :
+ raise Exception("Failed to create image")
+
+print("Checking key: "+key)
+cmd = 'openstack keypair show '+key+' |grep "name " | tr -s " " | cut -d" " -f 4'
+KeyExist = subprocess.check_output(cmd , shell=True).strip()
+if KeyExist == key:
+ print("Key already installed")
+else:
+ print('Creating key ...')
+ cmd = 'openstack keypair create '+ key + '>' +key+'.pem'
+ subprocess.check_call(cmd , shell=True)
+ cmd = 'chmod 600 ' +key+'.pem'
+ subprocess.check_call(cmd , shell=True)
+ cmd = 'openstack keypair show '+key+' |grep "name " | tr -s " " | cut -d" " -f 4'
+ KeyExist = subprocess.check_output(cmd , shell=True).strip()
+ if KeyExist == key:
+ print("Key created")
+ else :
+ raise Exception("Failed to create key: " + key)
+
+print("Checking flavor: "+flavor)
+cmd = 'openstack flavor show '+flavor+' |grep "name " | tr -s " " | cut -d" " -f 4'
+FlavorExist = subprocess.check_output(cmd , shell=True).strip()
+if FlavorExist == flavor:
+ print("Flavor already installed")
+else:
+ print('Creating flavor ...')
+ cmd = 'openstack flavor create '+flavor+' --ram 8192 --disk 80 --vcpus 4 |grep "name " | tr -s " " | cut -d" " -f 4'
+ FlavorExist = subprocess.check_output(cmd , shell=True).strip()
+ if FlavorExist == flavor:
+ cmd = 'openstack flavor set '+ flavor +' --property hw:mem_page_size="large" --property hw:cpu_policy="dedicated" --property hw:cpu_threads_policy="isolate"'
+ subprocess.check_call(cmd , shell=True)
+ print("Flavor created")
+ else :
+ raise Exception("Failed to create flavor: " + flavor)
+
+print("Checking network: "+network)
+cmd = 'openstack network show '+network+' |grep "status " | tr -s " " | cut -d" " -f 4'
+NetworkExist = subprocess.check_output(cmd , shell=True).strip()
+if NetworkExist == 'ACTIVE':
+ print("Network already active")
+else:
+ print('Creating network ...')
+ cmd = 'openstack network create '+network+' |grep "status " | tr -s " " | cut -d" " -f 4'
+ NetworkExist = subprocess.check_output(cmd , shell=True).strip()
+ if NetworkExist == 'ACTIVE':
+ print("Network created")
+ else :
+ raise Exception("Failed to create network: " + network)
+
+print("Checking subnet: "+subnet)
+cmd = 'neutron subnet-show '+ subnet+' |grep "name " | tr -s " " | cut -d" " -f 4'
+SubnetExist = subprocess.check_output(cmd , shell=True).strip()
+if SubnetExist == subnet:
+ print("Subnet already exists")
+else:
+ print('Creating subnet ...')
+ cmd = 'neutron subnet-create --name '+ subnet+ ' ' +network+' 10.10.10.0/24 |grep "name " | tr -s " " | cut -d" " -f 4'
+ SubnetExist = subprocess.check_output(cmd , shell=True).strip()
+ if SubnetExist == subnet:
+ print("Subnet created")
+ else :
+ raise Exception("Failed to create subnet: " + subnet)
+
+print("Checking Stack: "+stack)
+cmd = 'openstack stack show '+stack+' |grep "stack_status " | tr -s " " | cut -d" " -f 4'
+StackRunning = subprocess.check_output(cmd , shell=True).strip()
+if StackRunning == '':
+ print('Creating Stack ...')
+ cmd = 'openstack stack create -t '+ yaml + ' --parameter flavor="'+flavor +'" --parameter key="'+ key + '" --parameter image="'+image + '" --parameter dpdk_network="'+network+'" --wait '+stack +' |grep "stack_status " | tr -s " " | cut -d" " -f 4'
+ StackRunning = subprocess.check_output(cmd , shell=True).strip()
+if StackRunning != 'CREATE_COMPLETE':
+ raise Exception("Failed to create stack")
+
+print('Stack running')
+genName=stack+'-gen'
+sutName=stack+'-sut'
+cmd = 'nova list | grep '+ genName +' | tr -s " " | cut -d " " -f 4'
+genVMName = subprocess.check_output(cmd , shell=True).strip()
+print('Generator: '+ genVMName)
+cmd = 'nova list | grep '+ sutName +' | tr -s " " | cut -d " " -f 4'
+sutVMName = subprocess.check_output(cmd , shell=True).strip()
+print('SUT: '+ sutVMName)
+cmd='nova show ' + genVMName + ' | grep "' + network + '" | tr -s " " | cut -d" " -f 5'
+genDPIP = subprocess.check_output(cmd , shell=True).strip()
+cmd='nova show ' + genVMName + ' | grep "admin_internal_net" | tr -s " " | cut -d" " -f 6'
+genAdminIP = subprocess.check_output(cmd , shell=True).strip()
+cmd='nova show ' + sutVMName + ' | grep "' + network + '" | tr -s " " | cut -d" " -f 5'
+sutDPIP = subprocess.check_output(cmd , shell=True).strip()
+cmd='nova show ' + sutVMName + ' | grep "admin_internal_net" | tr -s " " | cut -d" " -f 6'
+sutAdminIP = subprocess.check_output(cmd , shell=True).strip()
+
+#========================================================================
+def connect_socket(client):
+ attempts = 1
+ print("Trying to connect to PROX (just launched) on %s, attempt: %d"
+ % (client.ip(), attempts))
+ sock = None
+ while True:
+ sock = client.prox_sock()
+ if sock is not None:
+ break
+ attempts += 1
+ if attempts > 20:
+ raise Exception("Failed to connect to PROX on %s after %d attempts"
+ % (client.ip(), attempts))
+ time.sleep(10)
+ print("Trying to connect to PROX (just launched) on %s, attempt: %d"
+ % (client.ip(), attempts))
+ print("Connected to PROX on %s" % client.ip())
+ return sock
+
+def connect_client(client):
+ attempts = 1
+ print ("Trying to connect to VM which was just launched on %s, attempt: %d"
+ % (client.ip(), attempts))
+ while True:
+ try:
+ client.connect()
+ break
+ except RuntimeWarning, ex:
+ attempts += 1
+ if attempts > 20:
+ raise Exception("Failed to connect to VM after %d attempts:\n%s"
+ % (attempts, ex))
+ time.sleep(15)
+ print ("Trying to connect to VM which was just launched on %s, attempt: %d"
+ % (client.ip(), attempts))
+ print("Connected to VM on %s" % client.ip())
+
+
+def run_testA():
+ global genclient
+ global sutclient
+ ip = genDPIP.split('.')
+ hexgenDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2)
+ ip = sutDPIP.split('.')
+ hexsutDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2)
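+ # Illustrative example: a dataplane IP of 10.10.10.5 is converted to the hex byte string "0a 0a 0a 05" before being written to parameters.lua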
+ with open("parameters.lua", "w") as f:
+ f.write('gen_hex_ip="'+hexgenDPIP+'"\n')
+ f.write('sut_hex_ip="'+hexsutDPIP+'"\n')
+ f.write('gen_ip="'+genDPIP+'"\n')
+ f.write('sut_ip="'+sutDPIP+'"\n')
+ f.close()
+ genclient.scp_put('./gen.cfg', '/root/gen.cfg')
+ sutclient.scp_put('./sut.cfg', '/root/sut.cfg')
+ genclient.scp_put('./parameters.lua', '/root/parameters.lua')
+ sutclient.scp_put('./parameters.lua', '/root/parameters.lua')
+ print("Config files copied")
+ cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg'
+ genclient.fork_cmd(cmd, 'PROX GEN')
+ cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg'
+ sutclient.fork_cmd(cmd, 'PROX SUT')
+ gensock = connect_socket(genclient)
+ sutsock = connect_socket(sutclient)
+ new_speed = 100
+ attempts = 0
+ cores = [1,2]
+ gencores = [1]
+ gensock.reset_stats()
+ sutsock.reset_stats()
+ gensock.start([2])
+ print("+---------------------------------------------------------------------------------------------------------+")
+ print("| Generator is sending UDP (1 flow) packets (64 bytes) to SUT. SUT sends packets back |")
+ print("+------+-----------------+----------------+----------------+----------------+----------------+------------+")
+ print("| Test | Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. by Gen | Result |")
+ print("+------+-----------------+----------------+----------------+----------------+----------------+------------+")
+ while (new_speed > 0.1):
+ attempts += 1
+ # Start generating packets at requested speed (in % of a 10Gb/s link)
+ gensock.speed(new_speed, gencores)
+ gensock.start(gencores)
+ time.sleep(1)
+ # Get statistics now that traffic generation is stable and no more ARP messages are being exchanged
+ old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats([1])
+ old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(cores)
+ time.sleep(10)
+ # Get statistics after some execution time
+ new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(cores)
+ new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats([1])
+ time.sleep(1)
+ # Stop generating
+ gensock.stop(gencores)
+ drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals the number of packets received by this VM
+ rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
+ tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
+ tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
+ sut_rx = new_sut_rx - old_sut_rx
+ sut_tx = new_sut_tx - old_sut_tx
+ sut_tsc = new_sut_tsc - old_sut_tsc
+ if (tx == 0):
+ raise Exception("TX = 0")
+ drop_rate = round(((drop-rx) * 100.0)/(tx+drop-rx),1)
+ pps_req_tx = round((tx+drop-rx)*tsc_hz*1.0/(tsc*1000000),5)
+ pps_tx = round(tx*tsc_hz*1.0/(tsc*1000000),5)
+ pps_rx = round(rx*tsc_hz*1.0/(tsc*1000000),5)
+ pps_sut_tx = round(sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000),5)
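+ # Note: tsc/tsc_hz is the measurement window in seconds, so e.g. 25,000,000 packets counted over a 10 s window corresponds to 2.5 Mpps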
+ if ((drop_rate) < 1):
+ # This will stop the test when number of dropped packets is below a certain percentage
+ print("+------+-----------------+----------------+----------------+----------------+----------------+------------+")
+ print('|{:>5}'.format(str(attempts))+" | "+ '{:>14}'.format(str(new_speed)) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(str(pps_sut_tx)) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps | SUCCESS |")
+ print("+------+-----------------+----------------+----------------+----------------+----------------+------------+")
+ break
+ else:
+ print('|{:>5}'.format(str(attempts))+" | "+ '{:>14}'.format(str(new_speed)) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(str(pps_sut_tx)) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps | FAILED |")
+ # Following calculates the ratio for the new speed to be applied
+ # On the Y axis, we will find the ratio, a number between 0 and 1
+ # On the x axis, we find the % of dropped packets, a number between 0 and 100
+ # Two lines are drawn and we take the minimum of these lines to calculate the ratio
+ # One line goes through (0,y0) and (p,q)
+ # The second line goes through (p,q) and (100,y100)
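+ # Worked example with the constants below (y0=0.99, y100=0.1, p=15, q=0.9):
+ #   drop_rate = 0%   -> ratio = 0.99 (back off only slightly)
+ #   drop_rate = 15%  -> ratio = 0.90 (the two lines meet in (p,q))
+ #   drop_rate = 100% -> ratio = 0.10 (cut the requested speed by 90%)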
+ y0=0.99
+ y100=0.1
+ p=15
+ q=.9
+ ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100))
+ new_speed = (int(new_speed*ratio*100)+0.5)/100
+ gensock.quit()
+ sutsock.quit()
+ time.sleep(2)
+ print("")
+
+def run_testB():
+ global genclient
+ global sutclient
+ ip = genDPIP.split('.')
+ hexgenDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2)
+ ip = sutDPIP.split('.')
+ hexsutDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2)
+ with open("parameters.lua", "w") as f:
+ f.write('gen_hex_ip="'+hexgenDPIP+'"\n')
+ f.write('sut_hex_ip="'+hexsutDPIP+'"\n')
+ f.write('gen_ip="'+genDPIP+'"\n')
+ f.write('sut_ip="'+sutDPIP+'"\n')
+ f.close()
+ genclient.scp_put('./gen.cfg', '/root/gen.cfg')
+ sutclient.scp_put('./sut.cfg', '/root/sut.cfg')
+ genclient.scp_put('./parameters.lua', '/root/parameters.lua')
+ sutclient.scp_put('./parameters.lua', '/root/parameters.lua')
+ print("Config files copied")
+ cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg'
+ genclient.fork_cmd(cmd, 'PROX GEN')
+ cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg'
+ sutclient.fork_cmd(cmd, 'PROX SUT')
+ gensock = connect_socket(genclient)
+ sutsock = connect_socket(sutclient)
+ print("+----------------------------------------------------------------------------------------------+")
+ print("| UDP, 64 bytes, different number of flows by randomizing SRC & DST UDP port |")
+ print("+--------+-----------------+----------------+----------------+----------------+----------------+")
+ print("| Flows | Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. by Gen |")
+ print("+--------+-----------------+----------------+----------------+----------------+----------------+")
+ cores = [1,2]
+ gencores = [1]
+ gensock.start([2])
+ new_speed = 100
+ # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable.
+ flows={128:['0000000000000XXX','000000000000XXXX'],1024:['00000000000XXXXX','00000000000XXXXX'],8192:['0000000000XXXXXX','000000000XXXXXXX'],65535:['00000000XXXXXXXX','00000000XXXXXXXX'],524280:['0000000XXXXXXXXX','000000XXXXXXXXXX']}
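+ # e.g. the 128-flow entry randomizes 3 source-port bits and 4 destination-port bits, giving 2^(3+4) = 128 combinations; the dictionary keys are the (approximate) resulting flow counts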
+ for flow_number in sorted(flows.iterkeys()):
+ # new_speed = 100 is intentionally not executed here: since we now try more flows, the achievable speed will not exceed the speed reached in the previous loop
+ attempts = 0
+ gensock.reset_stats()
+ sutsock.reset_stats()
+ source_port,destination_port = flows[flow_number]
+ gensock.set_random(gencores,0,34,source_port,2)
+ gensock.set_random(gencores,0,36,destination_port,2)
+ while (new_speed > 0.1):
+ attempts += 1
+ # Start generating packets at requested speed (in % of a 10Gb/s link)
+ gensock.speed(new_speed, gencores)
+ gensock.start(gencores)
+ time.sleep(1)
+ # Get statistics now that traffic generation is stable and no more ARP messages are being exchanged
+ old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats([1])
+ old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(cores)
+ time.sleep(10)
+ # Get statistics after some execution time
+ new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(cores)
+ new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats([1])
+ time.sleep(1)
+ # Stop generating
+ gensock.stop(gencores)
+ drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals the number of packets received by this VM
+ rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
+ tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
+ tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
+ sut_rx = new_sut_rx - old_sut_rx
+ sut_tx = new_sut_tx - old_sut_tx
+ sut_tsc = new_sut_tsc - old_sut_tsc
+ if (tx == 0):
+ raise Exception("TX = 0")
+ drop_rate = round(((drop-rx) * 100.0)/(tx+drop-rx),1)
+ pps_req_tx = round((tx+drop-rx)*tsc_hz*1.0/(tsc*1000000),5)
+ pps_tx = round(tx*tsc_hz*1.0/(tsc*1000000),5)
+ pps_rx = round(rx*tsc_hz*1.0/(tsc*1000000),5)
+ pps_sut_tx = round(sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000),5)
+ if ((drop_rate) < 1):
+ # This will stop the test when number of dropped packets is below a certain percentage
+ print('|{:>7}'.format(str(flow_number))+" | "+ '{:>14}'.format(str(new_speed)) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(str(pps_sut_tx)) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps |")
+ print("+--------+-----------------+----------------+----------------+----------------+----------------+")
+ break
+ # Following calculates the ratio for the new speed to be applied
+ # On the Y axis, we will find the ratio, a number between 0 and 1
+ # On the x axis, we find the % of dropped packets, a number between 0 and 100
+ # Two lines are drawn and we take the minimum of these lines to calculate the ratio
+ # One line goes through (0,y0) and (p,q)
+ # The second line goes through (p,q) and (100,y100)
+ y0=0.99
+ y100=0.1
+ p=15
+ q=.9
+ ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100))
+ new_speed = (int(new_speed*ratio*100)+0.5)/100
+ gensock.quit()
+ sutsock.quit()
+ time.sleep(2)
+ print("")
+
+#========================================================================
+genclient = prox_ctrl(genAdminIP, key+'.pem')
+connect_client(genclient)
+sutclient = prox_ctrl(sutAdminIP, key+'.pem')
+connect_client(sutclient)
+#####################################################################################
+run_testA()
+run_testB()
+#####################################################################################
+genclient.close()
+sutclient.close()
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml
new file mode 100644
index 00000000..eab957f5
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml
@@ -0,0 +1,105 @@
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+heat_template_version: 2016-04-08
+description: RAPID stack (Rapid Automated Performance Indication for Dataplane)
+parameters:
+ image:
+ type: string
+ label: Image name or ID
+ description: Image to be used for compute instance
+ default: RapidVM
+ flavor:
+ type: string
+ label: Flavor
+ description: Type of instance (flavor) to be used
+ default: prox_flavor
+ key:
+ type: string
+ label: Key name
+ description: Name of key-pair to be used for compute instance
+ default: prox
+ dpdk_network:
+ type: string
+ label: Private network name or ID
+ description: Network to attach instance to.
+ default: dpdk-network
+ private_network:
+ type: string
+ label: Private network name or ID
+ description: Network to attach instance to.
+ default: admin_internal_net
+ availability_zone:
+ type: string
+ description: The Availability Zone to launch the instance.
+ default: nova
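+# Note: all defaults above can be overridden at stack creation time, e.g. (illustrative):
+#   openstack stack create -t rapid.yaml --parameter flavor=prox_flavor --parameter image=rapidVM --wait rapidTestEnv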
+
+resources:
+ sut:
+ type: OS::Nova::Server
+ properties:
+ availability_zone: { get_param: availability_zone }
+ user_data:
+ get_file: prox_sut_user_data.sh
+ key_name: { get_param: key }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ networks:
+ - network: { get_param: private_network }
+ - network: { get_param: dpdk_network }
+ gen:
+ type: OS::Nova::Server
+ properties:
+ availability_zone: { get_param: availability_zone }
+ user_data:
+ get_file: prox_gen_user_data.sh
+ key_name: { get_param: key }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ networks:
+ - network: { get_param: private_network }
+ - network: { get_param: dpdk_network }
+
+ sut_floating_ip:
+ type: OS::Nova::FloatingIP
+ properties:
+ pool: admin_floating_net
+
+ gen_floating_ip:
+ type: OS::Nova::FloatingIP
+ properties:
+ pool: admin_floating_net
+
+ sut_association:
+ type: OS::Nova::FloatingIPAssociation
+ properties:
+ floating_ip: { get_resource: sut_floating_ip }
+ server_id: { get_resource: sut }
+
+ gen_association:
+ type: OS::Nova::FloatingIPAssociation
+ properties:
+ floating_ip: { get_resource: gen_floating_ip }
+ server_id: { get_resource: gen }
+
+outputs:
+ sut_ip:
+ description: IP address of the instance
+ value: { get_attr: [sut, first_address] }
+ gen_ip:
+ description: IP address of the instance
+ value: { get_attr: [gen, first_address] }
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg
new file mode 100644
index 00000000..2937a749
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg
@@ -0,0 +1,51 @@
+;;
+;; Copyright (c) 2010-2017 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+dofile("parameters.lua")
+
+[port 0]
+name=if0
+mac=hardware
+
+[defaults]
+mempool size=2K
+
+[global]
+name=NOP forwarding
+
+[core 0]
+mode=master
+
+[core 1]
+name=swap
+task=0
+mode=arp
+sub mode=local
+rx port=if0
+tx port=if0
+tx cores=1t1
+local ipv4=${sut_ip}
+task=1
+mode=swap
+rx ring=yes
+tx port=if0
+drop=no
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/start_vm.py b/VNFs/DPPD-PROX/helper-scripts/start_vm.py
new file mode 100755
index 00000000..7af7df9c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/start_vm.py
@@ -0,0 +1,143 @@
+#!/bin/env python2.7
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from os import system
+from os import fork, _exit
+from subprocess import check_output
+import socket
+from time import sleep
+import json
+import sys
+
+# This script starts qemu with the CPU layout specified by the cores
+# array below. Each element in the array represents a core. To enable
+# hyper-threading (i.e. two logical cores per core), each element in
+# the array should be an array of length two. The values stored inside
+# the array define to which host cores the guest cores should be
+# affinitized. All arguments of this script are passed to qemu
+# directly. Porting an existing qemu command line setup to make use of
+# this script requires removing the -smp parameters and -qmp
+# parameters if those were used. These are built by the script based
+# on the cores array.
+
+# After successfully starting qemu, this script will connect through
+# QMP and affinitize all cores within the VM to match cores on the
+# host.
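+# Illustrative example (the actual array is defined in vm-cores.py): with
+# cores = [[0,20],[1,21]] the guest gets 2 cores with 2 threads each, and
+# guest core 0 is pinned to host logical CPUs 0 and 20.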
+
+execfile("./vm-cores.py")
+
+def build_mask(cores):
+ ret = 0;
+ for core in cores:
+ for thread in core:
+ ret += 1 << thread;
+ return ret;
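+# e.g. build_mask([[0,20],[1,21]]) sets bits 0, 1, 20 and 21, i.e. 0x300003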
+
+n_cores = len(cores);
+n_threads = len(cores[0]);
+
+mask = hex(build_mask(cores)).rstrip("L") # strip the 'L' suffix Python 2 appends to long integers
+
+smp_str = str(n_cores*n_threads)
+smp_str += ",cores=" + str(n_cores)
+smp_str += ",sockets=1"
+smp_str += ",threads=" + str(n_threads)
+
+try:
+ qmp_sock = check_output(["mktemp", "--tmpdir", "qmp-sock-XXXX"]).strip()
+except:
+ qmp_sock = "/tmp/qmp-sock"
+
+qemu_cmdline = ""
+qemu_cmdline += "taskset " + mask + " qemu-system-x86_64 -smp " + smp_str
+qemu_cmdline += " -qmp unix:" + qmp_sock + ",server,nowait"
+qemu_cmdline += " -daemonize"
+
+for a in sys.argv[1:]:
+ qemu_cmdline += " " + a
+
+try:
+ pid = fork()
+except OSError, e:
+ sys.exit("Failed to fork: " + e.strerror)
+
+if (pid != 0):
+ # In the parent process
+ ret = system(qemu_cmdline)
+ if (ret != 0):
+ sys.exit("Failed to run QEMU: exit status " + str(ret) + ". Command line was:\n" + qemu_cmdline)
+ # Parent process done
+ sys.exit(0)
+
+# In the child process: use _exit to terminate
+retry = 0
+s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+while (retry < 10):
+ sleep(1);
+ try:
+ s.connect(qmp_sock)
+ print "Connected to QMP"
+ break;
+ except:
+ pass
+ retry = retry + 1
+ print "Failed to connect to QMP, attempt " + str(retry)
+if (retry >= 10):
+ print "Failed to connect to QMP"
+ _exit(1)
+
+# skip info about protocol
+dat = s.recv(100000)
+# need to run qmp_capabilities before next command works
+s.send("{\"execute\" : \"qmp_capabilities\" }")
+dat = s.recv(100000)
+# Get the PID for each guest core
+s.send("{\"execute\" : \"query-cpus\"}")
+dat = s.recv(100000)
+a = json.loads(dat)["return"];
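+# query-cpus returns one entry per vCPU, each carrying the host "thread_id" used below for pinning,
+# e.g. [{"CPU": 0, "thread_id": 12345, ...}, ...] (shape shown for illustration)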
+
+if (len(a) != n_cores*n_threads):
+ print "Configuration mismatch: " + str(len(a)) + " vCPU reported by QMP, instead of expected " + str(n_cores*n_threads)
+ _exit(1)
+print "QMP reported " + str(len(a)) + " vCPU, as expected"
+
+if (n_threads == 1):
+ idx = 0;
+ for core in a:
+ cm = str(hex(1 << cores[idx][0]))
+ pid = str(core["thread_id"])
+ system("taskset -p " + cm + " " + pid + " > /dev/null")
+ idx = idx + 1
+elif (n_threads == 2):
+ idx = 0;
+ prev = 0;
+ for core in a:
+ cm = str(hex(1 << cores[idx][prev]))
+ pid = str(core["thread_id"])
+ system("taskset -p " + cm + " " + pid + " > /dev/null")
+ prev = prev + 1;
+ if (prev == 2):
+ idx = idx + 1;
+ prev = 0
+else:
+ print "Not implemented yet: more than 2 threads per core"
+ _exit(1)
+
+print "Core affinitization completed"
+_exit(0)
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py
new file mode 100755
index 00000000..f26d0db6
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py
@@ -0,0 +1,457 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import socket
+import sys
+import os
+from time import *
+from datetime import datetime
+from optparse import OptionParser
+import time
+from remote_system import *
+from math import log
+
+# General parameters
+accuracy = 0.1 # in percent of line rate
+max_dropped = 0.1 # in percent
+all_pkt_size = [64,128,256,512,1024,1280,1494]
+all_ip_src = [0,6,12,18]
+all_ip_dst = [0,6,12,18]
+
+# Speed ramp-up parameters
+step_time = 0.001 # in seconds
+step_delta = 10 # in percent of line rate
+
+##### Use case 1: packet loss and latency #####
+low_steps_delta_for_loss = 0.01 # Use increment of 0.01% from 0 to low_steps
+medium_steps_delta_for_loss = 0.1 # Use increment of 0.1% from low_steps to medium_steps
+normal_steps_delta_for_loss = 1.0 # Use increment of 1% from medium_steps till 100%
+low_steps = 0.1
+medium_steps = 1.0
+
+# Prox parameters
+tx_port0 = [4]
+tx_port1 = [6]
+tx_port2 = [8]
+tx_port3 = [10]
+tx_port4 = [12]
+tx_port5 = [14]
+tx_port6 = [16]
+tx_port7 = [18]
+tx_task = 0
+
+all_rx_cores = [20,22,24,26,28,30,32,34]
+rx_lat_cores = [20,22,24,26,28,30,32,34]
+rx_task = 0
+
+# Some variables, do not change
+
+# Program arguments
+parser = OptionParser()
+parser.add_option("-d", "--duration", dest="test_duration", help="Duration of each steps", metavar="integer", default=10)
+parser.add_option("-s", "--speed", dest="init_speed", help="Initial speed", metavar="integer", default=100)
+parser.add_option("-r", "--run", dest="run", help="Run test", metavar="integer", default=0)
+parser.add_option("-c", "--configure", dest="configure", help="Configure Test", metavar="integer", default=0)
+(options, args) = parser.parse_args()
+
+init_speed = int(options.init_speed)
+test_duration = int(options.test_duration)
+configure = int(options.configure)
+run = int(options.run)
+
+nb_cores_per_interface = len(tx_port0)
+max_speed = (100.0/nb_cores_per_interface)
+init_speed = (init_speed * 1.0/nb_cores_per_interface)
+accuracy = (accuracy * 1.0/nb_cores_per_interface)
+normal_steps_delta_for_loss = (normal_steps_delta_for_loss /nb_cores_per_interface)
+medium_steps_delta_for_loss = (medium_steps_delta_for_loss /nb_cores_per_interface)
+low_steps_delta_for_loss = (low_steps_delta_for_loss /nb_cores_per_interface)
+medium_steps = (medium_steps /nb_cores_per_interface)
+low_steps = (low_steps /nb_cores_per_interface)
+
+max_dropped = max_dropped / 100
+
+def to_str(arr):
+ ret = ""
+ first = 1;
+ for a in arr:
+ if (first == 0):
+ ret += ","
+
+ ret += str(a)
+ first = 0;
+ return ret;
+
+tx_cores_cpe = tx_port0 + tx_port1 + tx_port2 + tx_port3
+tx_cores_inet = tx_port4 + tx_port5 + tx_port6 + tx_port7
+tx_cores = tx_cores_cpe + tx_cores_inet
+
+def send_all_pkt_size(cores, pkt_size):
+ for c in cores:
+ sock.sendall("pkt_size " + str(c) + " 0 " + str(pkt_size) + "\n");
+
+def send_all_value(cores, offset, value, len):
+ for c in cores:
+ sock.sendall("set value " + str(c) + " 0 " + str(offset) + " " + str(value) + " " + str(len)+ "\n");
+
+def send_all_random(cores, offset, rand_str, len):
+ for c in cores:
+ sock.sendall("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n");
+ #print("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n");
+
+def send_all_speed(cores, speed_perc):
+ for c in cores:
+ sock.sendall("speed " + str(c) + " 0 " + str(speed_perc) + "\n");
+
+def send_reset_random():
+ sock.sendall("reset randoms all" + "\n");
+
+def send_reset_value():
+ sock.sendall("reset values all" + "\n");
+
+def rx_stats(tx_cores, tx_task, rx_cores, rx_task):
+ rx = tx = drop = tsc = tsc_hz = ierrors = 0
+ for e in tx_cores:
+ sock.sendall("core stats " + str(e) + " " + str(tx_task) + "\n")
+ recv = recv_once()
+ rx += int(recv.split(",")[0])
+ tx += int(recv.split(",")[1])
+ drop += int(recv.split(",")[2])
+ tsc = int(recv.split(",")[3])
+ tsc_hz = int(recv.split(",")[4])
+ for e in rx_cores:
+ sock.sendall("core stats " + str(e) + " " + str(rx_task) + "\n")
+ recv = recv_once()
+ rx += int(recv.split(",")[0])
+ tx += int(recv.split(",")[1])
+ drop += int(recv.split(",")[2])
+ tsc = int(recv.split(",")[3])
+ tsc_hz = int(recv.split(",")[4])
+ # Also get the ierrors as generators might be the bottleneck...
+ sock.sendall("tot ierrors tot\n")
+ recv = recv_once()
+ ierrors += int(recv.split(",")[0])
+ rx+=ierrors
+ return rx,tx,drop,tsc,tsc_hz
+
+def lat_stats(cores,task):
+ lat_min = [0 for e in range(127)]
+ lat_max = [0 for e in range(127)]
+ lat_avg = [0 for e in range(127)]
+ for e in cores:
+ sock.sendall("lat stats " + str(e) + " " + str(task) + " " + "\n")
+ recv = recv_once()
+ lat_min[e] = int(recv.split(",")[0])
+ lat_max[e] = int(recv.split(",")[1])
+ lat_avg[e] = int(recv.split(",")[2])
+ return lat_min, lat_max, lat_avg
+
+def recv_once():
+ ret_str = "";
+ done = 0;
+ while done == 0:
+ dat = sock.recv(256);
+ i = 0;
+ while(i < len(dat)):
+ if (dat[i] == '\n'):
+ done = 1
+ else:
+ ret_str += dat[i];
+ i = i + 1;
+ return ret_str
+
+def set_pkt_sizes(tx_cores, p):
+ send_all_pkt_size(tx_cores, p-4)
+ # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size
+ send_all_value(tx_cores, 16, p - 18, 2) # 14 for MAC (12) EthType (2)
+ send_all_value(tx_cores, 38, p - 38, 2) # 34 for MAC (12) EthType (2) IP (20)
+
+def set_pkt_sizes_cpe(tx_cores, p):
+ send_all_pkt_size(tx_cores, p-4)
+ # For all cores, need to adapt IP Length (byte 24) and UDP Length (byte 46) to pkt size
+ send_all_value(tx_cores, 24, p - 26, 2) # 22 for QinQ (8) MAC (12) EthType (2)
+ send_all_value(tx_cores, 46, p - 46, 2) # 42 for QinQ (8) MAC (12) EthType (2) IP (20)
+
+def set_pkt_sizes_inet(tx_cores, p):
+ send_all_pkt_size(tx_cores, p+24-4)
+ # For all cores, need to adapt the size-dependent header fields to the (encapsulated) pkt size; see the offsets below
+ send_all_value(tx_cores, 20, p + 2, 2) # 14 for MAC (12) EthType (2)
+ send_all_value(tx_cores, 48, p - 26, 2) # 14 for MAC (12) EthType (2)
+ send_all_value(tx_cores, 70, p - 46, 2) # 34 for MAC (12) EthType (2) IP (20)
+
+def run_measure_throughput(speed, speed_cpe):
+ done = 0
+ # Initialize tests by stopping cores and resetting stats
+ step=0
+ steps_done = 0
+ sock.sendall("start " + to_str(all_rx_cores) + "\n")
+ sleep(2)
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+ sock.sendall("reset stats\n")
+ print "Speed = " + str(speed * nb_cores_per_interface)
+ sleep(1);
+
+ send_all_speed(tx_cores, step);
+
+ # Now starting the steps. First go to the common speed, then increase steps for the faster one.
+ sock.sendall("start " + to_str(tx_cores) + "," + to_str(rx_lat_cores) + "\n")
+ while (steps_done == 0):
+ sleep(step_time)
+ if (step + step_delta <= speed):
+ step+=step_delta
+ else:
+ steps_done = 1;
+ send_all_speed(tx_cores, step)
+
+ # Steps are now OK. Set speed
+ send_all_speed(tx_cores_inet, speed);
+ send_all_speed(tx_cores_cpe, speed_cpe);
+ sleep(2);
+
+ # Getting statistics to calculate PPS at right speed....
+ rx_pps_beg,tx_pps_beg,drop_pps_beg,tsc_pps_beg,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ sleep(test_duration);
+
+ # Collect statistics before test stops...and stop the test. Important to get stats before stopping as stops take some time...
+ rx_pps_end,tx_pps_end,drop_pps_end,tsc_pps_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ lat_min,lat_max,lat_avg = lat_stats(rx_lat_cores, rx_task)
+ sock.sendall("stop " + to_str(tx_cores) + "\n")
+ sock.sendall("start " + to_str(all_rx_cores) + "\n")
+ sleep(3);
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+
+ rx_end, tx_end,drop_end,tsc_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ rx = rx_pps_end - rx_pps_beg
+ tsc = tsc_pps_end - tsc_pps_beg
+ mpps = rx / (tsc/float(tsc_hz)) / 1000000
+ tx = tx_pps_end - tx_pps_beg
+ tx_mpps = tx / (tsc/float(tsc_hz)) / 1000000
+
+ #print "Runtime = " + str((tsc)/float(tsc_hz));
+ if (tx_end == 0):
+ dropped_tot = tx_end - rx_end
+ dropped_pct = 0
+ else:
+ dropped_tot = tx_end - rx_end
+ dropped_pct = ((dropped_tot) * 1.0) / tx_end
+
+ if (dropped_tot > 0):
+ if (dropped_pct >= max_dropped):
+ print "** FAILED **: lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ else:
+ print "OK but lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ else:
+ if (dropped_tot < 0):
+ print "Something wrong happened - received more packets than transmitted"
+ else:
+ print "** OK **: RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ print "MPPS = " + str(mpps)
+ print "===================================================="
+ return dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg
+
+def write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg):
+ f.write(str(pkt_size) + "; " + str(tx_mpps) + "; " + str(mpps) + "; " + str(100 * dropped_pct) + "; " + str(dropped_tot) + "; " + str(speed * nb_cores_per_interface) + "; " + str(number_flows) + "; " )
+ for e in rx_lat_cores:
+ f.write(str(lat_min[e]) + "; " + str(lat_max[e]) + "; " + str(lat_avg[e]) + "; ")
+ f.write("\n");
+ f.flush()
+
+def run_dicho_search(number_flows, pkt_size):
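+ # Binary search for the highest speed with an acceptable drop rate:
+ # previous_success_speed and previous_error_speed bracket the answer and the
+ # search stops once the bracket is narrower than 'accuracy'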
+ previous_success_speed = 0.0
+ previous_error_speed = max_speed
+ speed = init_speed * 1.0
+ done = 0;
+ good_tx_mpps = 0
+ good_mpps = 0
+ good_dropped_pct = 0
+ good_dropped_tot = 0
+ good_speed = 0
+ good_lat_min = [0 for e in range(127)]
+ good_lat_max = [0 for e in range(127)]
+ good_lat_avg = [0 for e in range(127)]
+
+ while done == 0:
+ speed_cpe = (speed * (pkt_size + 20)) / (pkt_size + 24 + 20)
+ dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed, speed_cpe)
+ if ((dropped_tot >= 0) and (dropped_pct <= max_dropped)):
+ good_tx_mpps = tx_mpps
+ good_mpps = mpps
+ good_dropped_pct = dropped_pct
+ good_dropped_tot = dropped_tot
+ good_speed = speed
+ good_lat_min = lat_min
+ good_lat_max = lat_max
+ good_lat_avg = lat_avg
+ write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg);
+ write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg);
+ else:
+ write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg);
+
+ if ((speed == max_speed) and (dropped_pct <= max_dropped)):
+ write_results(f_minimal, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg);
+ done = 1
+ if (dropped_pct <= max_dropped):
+ previous_success_speed = speed
+ if (speed > max_speed - accuracy):
+ speed = max_speed
+ else:
+ if (previous_error_speed - speed < accuracy):
+ write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_flows, good_lat_min, good_lat_max, good_lat_avg);
+ done = 1
+ else:
+ speed = speed + (previous_error_speed - speed)/2;
+ else:
+ previous_error_speed = speed
+ if (speed - previous_success_speed < accuracy):
+ write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_flows, good_lat_min, good_lat_max, good_lat_avg);
+ done = 1
+ else:
+ speed = speed - (speed - previous_success_speed) / 2;
+
+
+def set_source_destination_ip(nb_sources, nb_destinations):
+ # Destination addresses: "00XXXXXX" "XXXXXXXX" "XXXXXXXX" "XXXXXX10"
+ # Starting with 00 to be in class A and skipping 0.x.y.z and 127.x.y.z
+ # Ending with 10 to avoid x.y.z.0 and x.y.z.255
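+ # e.g. nb_destinations = 6 yields dst_mask = "000000000000000000000000XXXXXX10" (24 fixed bits, 6 random bits, trailing "10"), i.e. 64 distinct destination addresses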
+
+ dst_mask = "10"
+ for i in range (nb_destinations):
+ dst_mask = "X" + str(dst_mask)
+ for i in range (32 - nb_destinations - 2):
+ dst_mask = "0" + str(dst_mask)
+
+ src_mask = "10"
+ for i in range (nb_sources):
+ src_mask = "X" + str(src_mask)
+ for i in range (32 - nb_sources - 2):
+ src_mask = "0" + str(src_mask)
+
+ for c in tx_port0:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port1:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port2:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port3:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port4:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port5:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port6:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+ for c in tx_port7:
+ send_all_random([c], 26, src_mask, 4)
+ send_all_random([c], 30, dst_mask, 4)
+
+#========================================================================
+class TestDefinition():
+ "Stores test parameters"
+ def __init__(self, number_ip_src, number_ip_dst, pkt_size):
+ self.number_ip_src = number_ip_src
+ self.number_ip_dst = number_ip_dst
+ self.pkt_size = pkt_size
+
+#========================================================================
+def run_use_case(number_ip_src, number_ip_dst, pkt_size):
+ number_flows = (2 ** number_ip_src) * (2 ** number_ip_dst)
+# send_reset_random()
+# send_reset_value()
+# set_source_destination_ip(number_ip_src, number_ip_dst)
+ set_pkt_sizes_inet(tx_cores_inet, pkt_size)
+ set_pkt_sizes_cpe(tx_cores_cpe, pkt_size)
+ print "Running test with pkt size= " + str(pkt_size) + " number_ip_src = " + str(number_ip_src) + " number_ip_dst = " + str(number_ip_dst) + " Number flows = " + str(number_flows) + "; \n"
+ run_dicho_search(number_flows, pkt_size)
+ sleep(3)
+
+#========================================================================
+def run_all_use_cases():
+ use_case_nb = 1
+ # Connect to dppd
+ file_path = '/tmp/prox.sock'
+ sock.connect(file_path)
+
+ f.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n")
+ f_all.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n")
+ f_minimal.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n")
+ f.flush();
+ f_all.flush();
+ f_minimal.flush();
+
+ # Starting tests
+ print "Stopping all cores and resetting all values and randoms before starting\n"
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+ sock.sendall("stop " + to_str(tx_cores) + "\n")
+ #sock.sendall("stop all")
+ sock.sendall("reset stats\n")
+ sleep(3);
+ for line in file_tests:
+ info = line.split(';')
+ if (len(info[0].strip()) == 0):
+ break
+ if (info[0][0] == '#'):
+ continue
+ number_ip_src = int(info[0])
+ number_ip_dst = int(info[1])
+ pkt_size = int(info[2])
+ run_use_case(number_ip_src, number_ip_dst, pkt_size)
+
+#========================================================================
+def configure_use_case():
+ Tests = []
+ number_ip_dst = 0
+ number_ip_src = 0
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(number_ip_src, number_ip_dst, pkt_size))
+
+ pkt_size = 64
+ while (pkt_size < 1494):
+ Tests.append(TestDefinition(number_ip_src, number_ip_dst, pkt_size))
+ pkt_size = (pkt_size *11) / 10
+
+ file_tests = open('test_description.txt', 'w')
+ file_tests.write("# Number_ip_src; number_ip_dst; pkt_size; \n")
+ for test in Tests:
+ file_tests.write(str(test.number_ip_src) + "; " + str(test.number_ip_dst) + "; " + str(test.pkt_size) + "; " + ";\n")
+ file_tests.close()
+
+#========================================================================
+if ((configure == 0) and (run == 0)):
+ print "Nothing to do - please use -r 1 or -c 1"
+if (configure == 1):
+ configure_use_case()
+if (run == 1):
+ print "****************************************************************************************************************"
+ print "** Running Characterization with " + str(test_duration) + " seconds steps and starting at " + str(init_speed) + " percent of line rate **"
+ print "****************************************************************************************************************"
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ f_all = open('all_results.txt', 'w')
+ f = open('detailed_results.txt', 'w')
+ f_minimal = open('minimal_results.txt', 'w')
+ file_tests = open('test_description.txt', 'r')
+ run_all_use_cases()
+ f.close();
+ sock.close();
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py
new file mode 100755
index 00000000..f4d211f6
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py
@@ -0,0 +1,681 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import socket
+import sys
+import os
+from time import *
+from datetime import datetime
+from optparse import OptionParser
+import time
+from remote_system import *
+from math import log
+
+# General parameters
+accuracy = 0.1 # in percent of line rate
+max_dropped = 0.001 # in percent
+all_pkt_size = [64,128,256,512,1024,1280,1518]
+#all_pkt_size = [64]
+
+# vRouter parameters, in case commands must be sent
+vRouter_host = "192.168.1.96"
+
+# Speed ramp-up parameters
+step_time = 0.01 # in seconds
+step_delta = 0.025 # in percent of line rate
+
+# Use case dependent parameters
+##### Use case 0: influence of number of routes and next hops #####
+max_number_next_hops = 256 # Maximum number of next-hops per interface
+max_number_routes = 8192 # Maximum number of routes per interface
+max_number_addresses_local_network = 262144
+
+##### Use case 1: packet loss and latency #####
+low_steps_delta_for_loss = 0.01 # Use increment of 0.01% from 0 to low_steps
+medium_steps_delta_for_loss = 0.1 # Use increment of 0.1% from low_steps to medium_steps
+normal_steps_delta_for_loss = 1.0 # Use increment of 1% from medium_steps till 100%
+low_steps = 0.1
+medium_steps = 1.0
+
+# Prox parameters
+tx_port4 = [19,27,55,63]
+tx_port5 = [20,28,56,64]
+tx_port6 = [21,29,57,65]
+tx_port7 = [22,30,58,66]
+tx_port2 = [23,31,59,67]
+tx_port3 = [24,32,60,68]
+tx_port0 = [25,33,61,69]
+tx_port1 = [26,34,62,70]
+tx_task = 0
+
+all_rx_cores = [1,2,3,4,5,6,7,10]
+rx_lat_cores = [1,2,3,4,5,6,7,10]
+rx_task = 1
+
+# Some variables, do not change
+
+# Program arguments
+parser = OptionParser()
+parser.add_option("-d", "--duration", dest="test_duration", help="Duration of each steps", metavar="integer", default=10)
+parser.add_option("-s", "--speed", dest="init_speed", help="Initial speed", metavar="integer", default=100)
+parser.add_option("-u", "--use-case", dest="use_case", help="Use Case Number", metavar="integer", default=0)
+parser.add_option("-r", "--run", dest="run", help="Run test", metavar="integer", default=0)
+parser.add_option("-c", "--configure", dest="configure", help="Configure Test", metavar="integer", default=0)
+(options, args) = parser.parse_args()
+
+init_speed = int(options.init_speed)
+test_duration = int(options.test_duration)
+use_case = int(options.use_case)
+configure = int(options.configure)
+run = int(options.run)
+
+nb_cores_per_interface = len(tx_port0)
+max_speed = (100.0/nb_cores_per_interface)
+init_speed = (init_speed * 1.0/nb_cores_per_interface)
+accuracy = (accuracy * 1.0/nb_cores_per_interface)
+normal_steps_delta_for_loss = (normal_steps_delta_for_loss /nb_cores_per_interface)
+medium_steps_delta_for_loss = (medium_steps_delta_for_loss /nb_cores_per_interface)
+low_steps_delta_for_loss = (low_steps_delta_for_loss /nb_cores_per_interface)
+medium_steps = (medium_steps /nb_cores_per_interface)
+low_steps = (low_steps /nb_cores_per_interface)
+
+max_dropped = max_dropped / 100
+
+def to_str(arr):
+ ret = ""
+ first = 1;
+ for a in arr:
+ if (first == 0):
+ ret += ","
+
+ ret += str(a)
+ first = 0;
+ return ret;
+
+tx_cores = tx_port0 + tx_port1 + tx_port2 + tx_port3 + tx_port4 + tx_port5 + tx_port6 + tx_port7
+
+def send_all_pkt_size(cores, pkt_size):
+ for c in cores:
+ sock.sendall("pkt_size " + str(c) + " 0 " + str(pkt_size) + "\n");
+
+def send_all_value(cores, offset, value, len):
+ for c in cores:
+ sock.sendall("set value " + str(c) + " 0 " + str(offset) + " " + str(value) + " " + str(len)+ "\n");
+
+def send_all_random(cores, offset, rand_str, len):
+ for c in cores:
+ sock.sendall("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n");
+ #print("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n");
+
+def send_all_speed(cores, speed_perc):
+ for c in cores:
+ sock.sendall("speed " + str(c) + " 0 " + str(speed_perc) + "\n");
+
+def send_reset_random():
+ sock.sendall("reset randoms all" + "\n");
+
+def send_reset_value():
+ sock.sendall("reset values all" + "\n");
+
+def rx_stats(tx_cores, tx_task, rx_cores, rx_task):
+ rx = tx = drop = tsc = tsc_hz = ierrors = 0
+ for e in tx_cores:
+ sock.sendall("core stats " + str(e) + " " + str(tx_task) + "\n")
+ recv = recv_once()
+ rx += int(recv.split(",")[0])
+ tx += int(recv.split(",")[1])
+ drop += int(recv.split(",")[2])
+ tsc = int(recv.split(",")[3])
+ tsc_hz = int(recv.split(",")[4])
+ for e in rx_cores:
+ sock.sendall("core stats " + str(e) + " " + str(rx_task) + "\n")
+ recv = recv_once()
+ rx += int(recv.split(",")[0])
+ tx += int(recv.split(",")[1])
+ drop += int(recv.split(",")[2])
+ tsc = int(recv.split(",")[3])
+ tsc_hz = int(recv.split(",")[4])
+ # Also get the ierrors as generators might be the bottleneck...
+ sock.sendall("tot ierrors tot\n")
+ recv = recv_once()
+ ierrors += int(recv.split(",")[0])
+ rx+=ierrors
+ return rx,tx,drop,tsc,tsc_hz
+
+def lat_stats(cores,task):
+ lat_min = [0 for e in range(127)]
+ lat_max = [0 for e in range(127)]
+ lat_avg = [0 for e in range(127)]
+ for e in cores:
+ sock.sendall("lat stats " + str(e) + " " + str(task) + " " + "\n")
+ recv = recv_once()
+ lat_min[e] = int(recv.split(",")[0])
+ lat_max[e] = int(recv.split(",")[1])
+ lat_avg[e] = int(recv.split(",")[2])
+ return lat_min, lat_max, lat_avg
+
+def recv_once():
+ ret_str = "";
+ done = 0;
+ while done == 0:
+ dat = sock.recv(256);
+ i = 0;
+ while(i < len(dat)):
+ if (dat[i] == '\n'):
+ done = 1
+ else:
+ ret_str += dat[i];
+ i = i + 1;
+ return ret_str
+
+def wait_vRouter_restarted(host):
+ while (1):
+ ret = os.system("ping " + host + " -c 1 > /dev/null")
+ if ret == 0:
+ print "still up..."
+ else:
+ break;
+ sleep(1)
+
+ while (1):
+ ret = os.system("ping " + host + " -c 1 > /dev/null")
+ if (ret == 0):
+ print "UP"
+ break;
+ else:
+ print "still down..."
+ sleep(1)
+
+def reload_vRouter_config(config):
+ print "connecting to vRouter...and copying " + str(config)
+ sut = remote_system("root", vRouter_host)
+ cmd = "cp /config/prox/" + str(config) + " /config/config.boot"
+ sut.run(cmd)
+ print "Rebooting system at " + str(datetime.now().time())
+ sut.run_forked("reboot")
+ sleep(5)
+ wait_vRouter_restarted(vRouter_host)
+ print "Waiting for last startup scripts to start..."
+ last_script = "l2tp"
+ while(1):
+ dmesg = str(sut.run("dmesg"))
+ if last_script in dmesg:
+ print "found l2tp - UP"
+ break;
+ sleep(1)
+ print "vRouter started - waiting 5 last seconds before starting test"
+ sleep(5)
+ print datetime.now().time()
+
+def set_pkt_sizes(tx_cores, p):
+ send_all_pkt_size(tx_cores, p-4)
+ # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size
+ send_all_value(tx_cores, 16, p - 18, 2) # 14 for MAC (12) EthType (2)
+ send_all_value(tx_cores, 38, p - 38, 2) # 34 for MAC (12) EthType (2) IP (20)
+
+def run_measure_throughput(speed):
+ done = 0
+ # Initialize tests by stopping cores and resetting stats
+ step=0
+ steps_done = 0
+ sock.sendall("start " + to_str(all_rx_cores) + "\n")
+ sleep(2)
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+ sock.sendall("reset stats\n")
+ print "Speed = " + str(speed * nb_cores_per_interface)
+ sleep(1);
+
+ send_all_speed(tx_cores, step);
+
+ # Now starting the steps. First go to the common speed, then increase steps for the faster one.
+ sock.sendall("start " + to_str(tx_cores) + "," + to_str(rx_lat_cores) + "\n")
+ while (steps_done == 0):
+ sleep(step_time)
+ if (step + step_delta <= speed):
+ step+=step_delta
+ else:
+ steps_done = 1;
+ send_all_speed(tx_cores, step)
+
+ # Steps are now OK. Set speed
+ send_all_speed(tx_cores, speed);
+ sleep(2);
+
+ # Getting statistics to calculate PPS at right speed....
+ rx_pps_beg,tx_pps_beg,drop_pps_beg,tsc_pps_beg,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ sleep(test_duration);
+
+ # Collect statistics before test stops...and stop the test. Important to get stats before stopping as stops take some time...
+ rx_pps_end,tx_pps_end,drop_pps_end,tsc_pps_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ lat_min,lat_max,lat_avg = lat_stats(rx_lat_cores, rx_task)
+ sock.sendall("stop " + "," + to_str(tx_cores) + "\n")
+ sock.sendall("start " + to_str(all_rx_cores) + "\n")
+ sleep(3);
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+
+ rx_end, tx_end,drop_end,tsc_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ rx = rx_pps_end - rx_pps_beg
+ tsc = tsc_pps_end - tsc_pps_beg
+ mpps = rx / (tsc/float(tsc_hz)) / 1000000
+ tx = tx_pps_end - tx_pps_beg
+ tx_mpps = tx / (tsc/float(tsc_hz)) / 1000000
+
+ #print "Runtime = " + str((tsc)/float(tsc_hz));
+ if (tx_end == 0):
+ dropped_tot = tx_end - rx_end
+ dropped_pct = 0
+ else:
+ dropped_tot = tx_end - rx_end
+ dropped_pct = ((dropped_tot) * 1.0) / tx_end
+
+ if (dropped_tot > 0):
+ if (dropped_pct >= max_dropped):
+ print "** FAILED **: lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ else:
+ print "OK but lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ else:
+ if (dropped_tot < 0):
+ print "Something wrong happened - received more packets than transmitted"
+ else:
+ print "** OK **: RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ print "MPPS = " + str(mpps)
+ print "===================================================="
+ return dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg
+
+def write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg):
+ f.write(str(pkt_size) + "; " + str(tx_mpps) + "; " + str(mpps) + "; " + str(100 * dropped_pct) + "; " + str(dropped_tot) + "; " + str(speed * nb_cores_per_interface) + "; " + str(number_next_hops) + "; " + str(number_routes) + "; " + str(traffic) + "; ")
+ for e in rx_lat_cores:
+ f.write(str(lat_min[e]) + "; " + str(lat_max[e]) + "; " + str(lat_avg[e]) + "; ")
+ f.write("\n");
+ f.flush()
+
+def run_loss_graph(number_next_hops, number_routes, pkt_size, traffic):
+ speed = init_speed * 1.0
+ done = 0;
+ while done == 0:
+ dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed)
+ write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ if (speed <= low_steps_delta_for_loss):
+ done = 1
+ return
+ if (speed >= (medium_steps+normal_steps_delta_for_loss)):
+ speed -= normal_steps_delta_for_loss
+ else:
+ if (speed >= (low_steps+medium_steps_delta_for_loss)):
+ speed -= medium_steps_delta_for_loss
+ else:
+ speed -= low_steps_delta_for_loss
+
+def run_dicho_search(number_next_hops, number_routes, pkt_size, traffic):
+ previous_success_speed = 0.0
+ previous_error_speed = max_speed
+ speed = init_speed * 1.0
+ done = 0;
+ good_tx_mpps = 0
+ good_mpps = 0
+ good_dropped_pct = 0
+ good_dropped_tot = 0
+ good_speed = 0
+ good_lat_min = [0 for e in range(127)]
+ good_lat_max = [0 for e in range(127)]
+ good_lat_avg = [0 for e in range(127)]
+
+ while done == 0:
+ dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed)
+ if ((dropped_tot >= 0) and (dropped_pct <= max_dropped)):
+ good_tx_mpps = tx_mpps
+ good_mpps = mpps
+ good_dropped_pct = dropped_pct
+ good_dropped_tot = dropped_tot
+ good_speed = speed
+ good_lat_min = lat_min
+ good_lat_max = lat_max
+ good_lat_avg = lat_avg
+ write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ else:
+ write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+
+ if ((speed == max_speed) and (dropped_pct <= max_dropped)):
+ write_results(f_minimal, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ done = 1
+ if (dropped_pct <= max_dropped):
+ previous_success_speed = speed
+ if (speed > max_speed - accuracy):
+ speed = max_speed
+ else:
+ if (previous_error_speed - speed < accuracy):
+ write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg);
+ done = 1
+ else:
+ speed = speed + (previous_error_speed - speed)/2;
+ else:
+ previous_error_speed = speed
+ if (speed - previous_success_speed < accuracy):
+ write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg);
+ done = 1
+ else:
+ speed = speed - (speed - previous_success_speed) / 2;
+
+
+def set_destination_ip(use_case, nb_destinations, traffic):
+ # minimum 8 routes i.e. 1 per interface
+ # Destination addresses: "00XXXYY1" "Z00ZZ0ZZ" "AA0AA0AA" "BBBBBB10"
+ # Where X = interface id. Starting with 00 to be in class A and skipping 0.x.y.z and 127.x.y.z
+ # Y, Z and A = additional routes
+ # B = IP in routes. 10 to avoid x.y.z.0 and x.y.z.255
+ # Gaps in A and B to avoid "too good" distributions, e.g. when using LPM
+ # First changing Y
+
+ mask = ""
+ for i in range (2):
+ mask = str(mask)+"0"
+ end_mask = ""
+ if (use_case != 2):
+ end_mask = "XXXXXX10" # Last 8 bits
+
+ if (nb_destinations == 1):
+ end_mask = "0010000000000000000" + str(end_mask)
+ elif (nb_destinations == 2):
+ end_mask = "X010000000000000000" + str(end_mask)
+ elif (nb_destinations == 4):
+ end_mask = "XX10000000000000000" + str(end_mask)
+ elif (nb_destinations == 8):
+ end_mask = "XX1X000000000000000" + str(end_mask)
+ elif (nb_destinations == 16):
+ end_mask = "XX1X00X000000000000" + str(end_mask)
+ elif (nb_destinations == 32):
+ end_mask = "XX1X00XX00000000000" + str(end_mask)
+ elif (nb_destinations == 64):
+ end_mask = "XX1X00XX0X000000000" + str(end_mask)
+ elif (nb_destinations == 128):
+ end_mask = "XX1X00XX0XX00000000" + str(end_mask)
+ elif (nb_destinations == 256):
+ end_mask = "XX1X00XX0XXX0000000" + str(end_mask)
+ elif (nb_destinations == 512):
+ end_mask = "XX1X00XX0XXXX000000" + str(end_mask)
+ elif (nb_destinations == 1024):
+ end_mask = "XX1X00XX0XXXX0X0000" + str(end_mask)
+ elif (nb_destinations == 2048):
+ end_mask = "XX1X00XX0XXXX0XX000" + str(end_mask)
+ elif (nb_destinations == 4096):
+ end_mask = "XX1X00XX0XXXX0XX0X0" + str(end_mask)
+ elif (nb_destinations == 8192):
+ end_mask = "XX1X00XX0XXXX0XX0XX" + str(end_mask)
+ else:
+ if (nb_destinations <= 64 * 1):
+ end_mask = "0010000000000000000"
+ n_dest = int(log(nb_destinations, 2))
+ for i in range (n_dest):
+ end_mask = str(end_mask) + "X"
+ for i in range (6 - n_dest):
+ end_mask = str(end_mask) + "0"
+ end_mask = str(end_mask) + "10"
+ else:
+ end_mask = "XXXXXX10" # Last 8 bits
+
+ if (nb_destinations == 64 * 2):
+ end_mask = "001X000000000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 4):
+ end_mask = "001X00X000000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 8):
+ end_mask = "001X00XX00000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 16):
+ end_mask = "001X00XX0X000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 32):
+ end_mask = "001X00XX0XX00000000" + str(end_mask)
+ elif (nb_destinations == 64 * 64):
+ end_mask = "001X00XX0XXX0000000" + str(end_mask)
+ elif (nb_destinations == 64 * 128):
+ end_mask = "001X00XX0XXXX000000" + str(end_mask)
+ elif (nb_destinations == 64 * 256):
+ end_mask = "001X00XX0XXXX0X0000" + str(end_mask)
+ elif (nb_destinations == 64 * 512):
+ end_mask = "001X00XX0XXXX0XX000" + str(end_mask)
+ elif (nb_destinations == 64 * 1024):
+ end_mask = "001X00XX0XXXX0XX0X0" + str(end_mask)
+ elif (nb_destinations == 64 * 2048):
+ end_mask = "001X00XX0XXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 4096):
+ end_mask = "001XX0XX0XXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 8192):
+ end_mask = "001XXXXX0XXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 16384):
+ end_mask = "001XXXXXXXXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 32768):
+ end_mask = "001XXXXXXXXXXXXX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 65536):
+ end_mask = "001XXXXXXXXXXXXXXXX" + str(end_mask)
+
+ if (traffic == 0): # One-to-one. From odd interface to even interface and vice versa, no QPI cross
+ mask1 = str(mask) + "001" + str(end_mask)
+ mask2 = str(mask) + "000" + str(end_mask)
+ mask3 = str(mask) + "011" + str(end_mask)
+ mask4 = str(mask) + "010" + str(end_mask)
+ mask5 = str(mask) + "101" + str(end_mask)
+ mask6 = str(mask) + "100" + str(end_mask)
+ mask7 = str(mask) + "111" + str(end_mask)
+ mask8 = str(mask) + "110" + str(end_mask)
+
+ elif (traffic == 1): # Full mesh within QPI (i.e. 1 to 4)
+ mask1 = str(mask) + "0XX" + str(end_mask)
+ mask2 = str(mask) + "0XX" + str(end_mask)
+ mask3 = str(mask) + "0XX" + str(end_mask)
+ mask4 = str(mask) + "0XX" + str(end_mask)
+ mask5 = str(mask) + "1XX" + str(end_mask)
+ mask6 = str(mask) + "1XX" + str(end_mask)
+ mask7 = str(mask) + "1XX" + str(end_mask)
+ mask8 = str(mask) + "1XX" + str(end_mask)
+
+ elif (traffic == 2): # One to one, crossing QPI (100% QPI)
+ mask1 = str(mask) + "100" + str(end_mask)
+ mask2 = str(mask) + "101" + str(end_mask)
+ mask3 = str(mask) + "110" + str(end_mask)
+ mask4 = str(mask) + "111" + str(end_mask)
+ mask5 = str(mask) + "000" + str(end_mask)
+ mask6 = str(mask) + "001" + str(end_mask)
+ mask7 = str(mask) + "010" + str(end_mask)
+ mask8 = str(mask) + "011" + str(end_mask)
+
+ elif (traffic == 3): # 1 to 4 crossing QPI (100% QPI)
+ mask1 = str(mask) + "1XX" + str(end_mask)
+ mask2 = str(mask) + "1XX" + str(end_mask)
+ mask3 = str(mask) + "1XX" + str(end_mask)
+ mask4 = str(mask) + "1XX" + str(end_mask)
+ mask5 = str(mask) + "0XX" + str(end_mask)
+ mask6 = str(mask) + "0XX" + str(end_mask)
+ mask7 = str(mask) + "0XX" + str(end_mask)
+ mask8 = str(mask) + "0XX" + str(end_mask)
+
+ elif (traffic == 4): # 1 to 4 (50% QPI)
+ mask1 = str(mask) + "XX1" + str(end_mask)
+ mask2 = str(mask) + "XX0" + str(end_mask)
+ mask3 = str(mask) + "XX1" + str(end_mask)
+ mask4 = str(mask) + "XX0" + str(end_mask)
+ mask5 = str(mask) + "XX1" + str(end_mask)
+ mask6 = str(mask) + "XX0" + str(end_mask)
+ mask7 = str(mask) + "XX1" + str(end_mask)
+ mask8 = str(mask) + "XX0" + str(end_mask)
+
+ elif (traffic == 5): # Full mesh (50% QPI)
+ mask1 = str(mask) + "XXX" + str(end_mask)
+ mask2 = str(mask) + "XXX" + str(end_mask)
+ mask3 = str(mask) + "XXX" + str(end_mask)
+ mask4 = str(mask) + "XXX" + str(end_mask)
+ mask5 = str(mask) + "XXX" + str(end_mask)
+ mask6 = str(mask) + "XXX" + str(end_mask)
+ mask7 = str(mask) + "XXX" + str(end_mask)
+ mask8 = str(mask) + "XXX" + str(end_mask)
+
+ for c in tx_port0:
+ send_all_random([c], 30, mask1, 4)
+ for c in tx_port1:
+ send_all_random([c], 30, mask2, 4)
+ for c in tx_port2:
+ send_all_random([c], 30, mask3, 4)
+ for c in tx_port3:
+ send_all_random([c], 30, mask4, 4)
+ for c in tx_port4:
+ send_all_random([c], 30, mask5, 4)
+ for c in tx_port5:
+ send_all_random([c], 30, mask6, 4)
+ for c in tx_port6:
+ send_all_random([c], 30, mask7, 4)
+ for c in tx_port7:
+ send_all_random([c], 30, mask8, 4)
+ for c in tx_cores:
+ send_all_random([c], 34, "0XXXXXXXXXXXXX10", 2)
+ send_all_random([c], 36, "0XXXXXXXXXXXXX10", 2)
+
+#========================================================================
+class TestDefinition():
+ "Stores test parameters"
+ def __init__(self, use_case, next_hops, number_routes, pkt_size, traffic, reload):
+ self.use_case = use_case
+ self.next_hops = next_hops
+ self.number_routes = number_routes
+ self.pkt_size = pkt_size
+ self.traffic = traffic
+ self.reload = reload
+
+#========================================================================
+# Use case 0 increases the input load and measures the output load => shows dropped packets at low loads and the overload behavior
+# Use case 1 and use case 2 run dichotomic searches, looking for 0 packet loss (or whatever loss is configured)
+# Use case 1 shows the effect of the number of routes and next-hops
+# Use case 2 shows the effect of the number of destinations, using a fixed (low) number of routes and next-hops
+#========================================================================
+def run_use_case(use_case, number_next_hops, number_routes, pkt_size, traffic, reload):
+ if (reload):
+ if (use_case == 2):
+ config = "config.1_1" + "_" + str(use_case) + ".boot"
+ else:
+ config = "config." + str(number_routes) + "_" + str(number_next_hops) + ".boot"
+ reload_vRouter_config(config)
+ send_reset_random()
+ send_reset_value()
+ set_destination_ip(use_case, number_routes, traffic)
+ set_pkt_sizes(tx_cores, pkt_size)
+ print "Running test with pkt size= " + str(pkt_size) + " Next hops = " + str(number_next_hops) + "; number of routes = " + str(number_routes) + "; Traffic = " + str(traffic) + " \n"
+ if (use_case == 0):
+ run_loss_graph(number_next_hops, number_routes, pkt_size, traffic)
+ else:
+ run_dicho_search(number_next_hops, number_routes, pkt_size, traffic)
+ sleep(3)
+
+#========================================================================
+def run_all_use_cases():
+ use_case_nb = 1
+ # Connect to dppd
+ file_path = '/tmp/prox.sock'
+ sock.connect(file_path)
+
+ f.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; next_hops; number_routes; traffic; latency per core (min; max; avg)\n")
+ f_all.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; next_hops; number_routes; traffic; latency per core (min; max; avg)\n")
+ f_minimal.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; next_hops; number_routes; traffic; latency per core (min; max; avg)\n")
+ f.flush();
+ f_all.flush();
+ f_minimal.flush();
+
+ # Starting tests
+ print "Stopping all cores and resetting all values and randoms before starting\n"
+ sock.sendall("stop all\n")
+ sock.sendall("reset stats\n")
+ sleep(3);
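+ # Each non-comment line of test_description.txt reads:
+ # use_case; next_hops; routes; pkt_size; traffic; reload;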
+ for line in file_tests:
+ info = line.split(';')
+ if (info[0][0] == '#'):
+ continue
+ if (info[0].strip() == ''):
+ break
+ use_case = int(info[0])
+ next_hops = int(info[1])
+ number_routes = int(info[2])
+ pkt_size = int(info[3])
+ traffic = int(info[4])
+ reload = int(info[5])
+ print str(use_case_nb) + " : Running use case " + str(use_case) + " next_hops = " + str(next_hops) + " routes = " + str(number_routes) + " pkt_size = " + str(pkt_size) + " traffic = " + str(traffic) + " reload = " + str(reload)
+ run_use_case(use_case, next_hops, number_routes, pkt_size, traffic, reload)
+ use_case_nb = use_case_nb + 1
+
+#========================================================================
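+# configure_use_case: enumerate the TestDefinition combinations for the selected
+# use case and write them to test_description.txt, one test per line, so that a
+# later run (-r 1) can replay them.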
+def configure_use_case(use_case):
+ Tests = []
+ if (use_case == 0):
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition("0", "1", "1", pkt_size, "0", "1"))
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition("0", "1", "1", pkt_size, "1", "1"))
+ if (use_case == 1):
+ number_next_hops = 1
+ reload = 0
+
+ number_routes = number_next_hops # At least as many routes as next hops
+ while number_routes <= max_number_routes:
+ reload = 1
+ for traffic in range(6):
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload))
+ reload = 0
+ if (number_routes < max_number_routes / 2):
+ number_routes = number_routes * 4
+ else:
+ number_routes = number_routes * 2
+
+ number_routes = max_number_next_hops
+ while number_next_hops <= max_number_next_hops:
+ reload = 1
+ for traffic in range(6):
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload))
+ reload = 0
+ number_next_hops = number_next_hops * 2
+ if (use_case == 2):
+ number_next_hops = 1
+ reload = 1
+ for traffic in range(6):
+ nb_destinations = 1
+ while nb_destinations <= max_number_addresses_local_network:
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(use_case, number_next_hops, nb_destinations, pkt_size, traffic, reload))
+ reload = 0
+ nb_destinations = nb_destinations * 2
+ reload = 1
+
+ file_tests = open('test_description.txt', 'w')
+ file_tests.write("# Use case; next_hops; routes; pkt_size; traffic; reload;\n")
+ for test in Tests:
+ file_tests.write(str(test.use_case) + "; " + str(test.next_hops) + "; " + str(test.number_routes) + "; " + str(test.pkt_size) + "; " + str(test.traffic) + "; " + str(test.reload) + ";\n")
+ file_tests.close()
+
+#========================================================================
+if ((configure == 0) and (run == 0)):
+ print "Nothing to do - please use -r 1 or -c 1"
+if (configure == 1):
+ configure_use_case(use_case)
+if (run == 1):
+ print "****************************************************************************************************************"
+ print "** Running vRouter Characterization with " + str(test_duration) + " seconds steps and starting at " + str(init_speed) + " percent of line rate **"
+ print "****************************************************************************************************************"
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ f_all = open('all_results.txt', 'w')
+ f = open('detailed_results.txt', 'w')
+ f_minimal = open('minimal_results.txt', 'w')
+ file_tests = open('test_description.txt', 'r')
+ run_all_use_cases()
+ f.close();
+ sock.close();
diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py
new file mode 100755
index 00000000..95eb9811
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py
@@ -0,0 +1,681 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import socket
+import sys
+import os
+from time import *
+from datetime import datetime
+from optparse import OptionParser
+import time
+from remote_system import *
+from math import log
+
+# General parameters
+accuracy = 0.1 # in percent of line rate
+max_dropped = 0.001 # in percent
+all_pkt_size = [64,128,256,512,1024,1280,1518]
+#all_pkt_size = [64]
+
+# vRouter parameters, in case commands must be sent
+vRouter_host = "192.168.1.96"
+
+# Step parameters
+step_time = 0.01 # in seconds
+step_delta = 0.025 # in percent of line rate
+
+# Use case dependent parameters
+##### Use case 0: influence of number of routes and next hops #####
+max_number_next_hops = 256 # Maximum number of next-hops per interface
+max_number_routes = 8192 # Maximum number of routes per interface
+max_number_addresses_local_network = 262144
+
+##### Use case 1: packet loss and latency #####
+low_steps_delta_for_loss = 0.01 # Use increment of 0.01% from 0 to low_steps
+medium_steps_delta_for_loss = 0.1 # Use increment of 0.1% from low_steps to medium_steps
+normal_steps_delta_for_loss = 1.0 # Use increment of 1% from medium_steps till 100%
+low_steps = 0.1
+medium_steps = 1.0
+
+# Prox parameters
+tx_port0 = [19,27,55,63]
+tx_port1 = [20,28,56,64]
+tx_port2 = [21,29,57,65]
+tx_port3 = [22,30,58,66]
+tx_port4 = []
+tx_port5 = []
+tx_port6 = []
+tx_port7 = []
+tx_task = 0
+
+all_rx_cores = [23,24,25,26]
+rx_lat_cores = [23,24,25,26]
+rx_task = 1
+
+# Some variables, do not change
+
+# Program arguments
+parser = OptionParser()
+parser.add_option("-d", "--duration", dest="test_duration", help="Duration of each steps", metavar="integer", default=10)
+parser.add_option("-s", "--speed", dest="init_speed", help="Initial speed", metavar="integer", default=100)
+parser.add_option("-u", "--use-case", dest="use_case", help="Use Case Number", metavar="integer", default=0)
+parser.add_option("-r", "--run", dest="run", help="Run test", metavar="integer", default=0)
+parser.add_option("-c", "--configure", dest="configure", help="Configure Test", metavar="integer", default=0)
+(options, args) = parser.parse_args()
+
+init_speed = int(options.init_speed)
+test_duration = int(options.test_duration)
+use_case = int(options.use_case)
+configure = int(options.configure)
+run = int(options.run)
+
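+# Speeds are expressed per TX core: with nb_cores_per_interface cores driving
+# each port, 100% of line rate corresponds to 100/nb_cores_per_interface per
+# core, so the speed-related parameters below are rescaled accordingly.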
+nb_cores_per_interface = len(tx_port0)
+max_speed = (100.0/nb_cores_per_interface)
+init_speed = (init_speed * 1.0/nb_cores_per_interface)
+accuracy = (accuracy * 1.0/nb_cores_per_interface)
+normal_steps_delta_for_loss = (normal_steps_delta_for_loss /nb_cores_per_interface)
+medium_steps_delta_for_loss = (medium_steps_delta_for_loss /nb_cores_per_interface)
+low_steps_delta_for_loss = (low_steps_delta_for_loss /nb_cores_per_interface)
+medium_steps = (medium_steps /nb_cores_per_interface)
+low_steps = (low_steps /nb_cores_per_interface)
+
+max_dropped = max_dropped / 100
+
+def to_str(arr):
+ ret = ""
+ first = 1;
+ for a in arr:
+ if (first == 0):
+ ret += ","
+
+ ret += str(a)
+ first = 0;
+ return ret;
+
+tx_cores = tx_port0 + tx_port1 + tx_port2 + tx_port3 + tx_port4 + tx_port5 + tx_port6 + tx_port7
+
+def send_all_pkt_size(cores, pkt_size):
+ for c in cores:
+ sock.sendall("pkt_size " + str(c) + " 0 " + str(pkt_size) + "\n");
+
+def send_all_value(cores, offset, value, len):
+ for c in cores:
+ sock.sendall("set value " + str(c) + " 0 " + str(offset) + " " + str(value) + " " + str(len)+ "\n");
+
+def send_all_random(cores, offset, rand_str, len):
+ for c in cores:
+ sock.sendall("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n");
+ #print("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n");
+
+def send_all_speed(cores, speed_perc):
+ for c in cores:
+ sock.sendall("speed " + str(c) + " 0 " + str(speed_perc) + "\n");
+
+def send_reset_random():
+ sock.sendall("reset randoms all" + "\n");
+
+def send_reset_value():
+ sock.sendall("reset values all" + "\n");
+
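+# rx_stats: sum the rx/tx/drop counters over the given TX and RX tasks, add the
+# port ierrors to rx (the generators themselves may be the bottleneck), and
+# return the last TSC snapshot and TSC frequency for rate calculations.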
+def rx_stats(tx_cores, tx_task, rx_cores, rx_task):
+ rx = tx = drop = tsc = tsc_hz = ierrors = 0
+ for e in tx_cores:
+ sock.sendall("core stats " + str(e) + " " + str(tx_task) + "\n")
+ recv = recv_once()
+ rx += int(recv.split(",")[0])
+ tx += int(recv.split(",")[1])
+ drop += int(recv.split(",")[2])
+ tsc = int(recv.split(",")[3])
+ tsc_hz = int(recv.split(",")[4])
+ for e in rx_cores:
+ sock.sendall("core stats " + str(e) + " " + str(rx_task) + "\n")
+ recv = recv_once()
+ rx += int(recv.split(",")[0])
+ tx += int(recv.split(",")[1])
+ drop += int(recv.split(",")[2])
+ tsc = int(recv.split(",")[3])
+ tsc_hz = int(recv.split(",")[4])
+ # Also get the ierrors as generators might be the bottleneck...
+ sock.sendall("tot ierrors tot\n")
+ recv = recv_once()
+ ierrors += int(recv.split(",")[0])
+ rx+=ierrors
+ return rx,tx,drop,tsc,tsc_hz
+
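+# lat_stats: return per-core minimum/maximum/average latency, indexed by core id.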
+def lat_stats(cores,task):
+ lat_min = [0 for e in range(127)]
+ lat_max = [0 for e in range(127)]
+ lat_avg = [0 for e in range(127)]
+ for e in cores:
+ sock.sendall("lat stats " + str(e) + " " + str(task) + " " + "\n")
+ recv = recv_once()
+ lat_min[e] = int(recv.split(",")[0])
+ lat_max[e] = int(recv.split(",")[1])
+ lat_avg[e] = int(recv.split(",")[2])
+ return lat_min, lat_max, lat_avg
+
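+# recv_once: read from the PROX socket until a newline and return the received
+# line without its terminating '\n'.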
+def recv_once():
+ ret_str = "";
+ done = 0;
+ while done == 0:
+ dat = sock.recv(256);
+ i = 0;
+ while(i < len(dat)):
+ if (dat[i] == '\n'):
+ done = 1
+ else:
+ ret_str += dat[i];
+ i = i + 1;
+ return ret_str
+
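+# wait_vRouter_restarted: first wait until the host stops answering pings
+# (reboot in progress), then wait until it answers again.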
+def wait_vRouter_restarted(host):
+ while (1):
+ ret = os.system("ping " + host + " -c 1 > /dev/null")
+ if ret == 0:
+ print "still up..."
+ else:
+ break;
+ sleep(1)
+
+ while (1):
+ ret = os.system("ping " + host + " -c 1 > /dev/null")
+ if (ret == 0):
+ print "UP"
+ break;
+ else:
+ print "still down..."
+ sleep(1)
+
+def reload_vRouter_config(config):
+ print "connecting to vRouter...and copying " + str(config)
+ sut = remote_system("root", vRouter_host)
+ cmd = "cp /config/prox/" + str(config) + " /config/config.boot"
+ sut.run(cmd)
+ print "Rebooting system at " + str(datetime.now().time())
+ sut.run_forked("reboot")
+ sleep(5)
+ wait_vRouter_restarted(vRouter_host)
+ print "Waiting for last startup scripts to start..."
+ last_script = "l2tp"
+ while(1):
+ dmesg = str(sut.run("dmesg"))
+ if last_script in dmesg:
+ print "found l2tp - UP"
+ break;
+ sleep(1)
+ print "vRouter started - waiting 5 more seconds before starting test"
+ sleep(5)
+ print datetime.now().time()
+
+def set_pkt_sizes(tx_cores, p):
+ send_all_pkt_size(tx_cores, p-4)
+ # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size
+ send_all_value(tx_cores, 16, p - 18, 2) # 14 for MAC (12) EthType (2)
+ send_all_value(tx_cores, 38, p - 38, 2) # 34 for MAC (12) EthType (2) IP (20)
+
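+# run_measure_throughput: ramp the TX speed up in step_delta increments every
+# step_time seconds until 'speed' is reached, measure for test_duration seconds,
+# and derive Mpps from the difference between two stats snapshots divided by the
+# elapsed TSC time.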
+def run_measure_throughput(speed):
+ done = 0
+ # Initialize tests by stopping cores and resetting stats
+ step=0
+ steps_done = 0
+ sock.sendall("start " + to_str(all_rx_cores) + "\n")
+ sleep(2)
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+ sock.sendall("reset stats\n")
+ print "Speed = " + str(speed * nb_cores_per_interface)
+ sleep(1);
+
+ send_all_speed(tx_cores, step);
+
+ # Now start the TX and latency cores and ramp the speed up step by step until the target speed is reached.
+ sock.sendall("start " + to_str(tx_cores) + "," + to_str(rx_lat_cores) + "\n")
+ while (steps_done == 0):
+ sleep(step_time)
+ if (step + step_delta <= speed):
+ step+=step_delta
+ else:
+ steps_done = 1;
+ send_all_speed(tx_cores, step)
+
+ # Steps are now OK. Set speed
+ send_all_speed(tx_cores, speed);
+ sleep(2);
+
+ # Getting statistics to calculate PPS at right speed....
+ rx_pps_beg,tx_pps_beg,drop_pps_beg,tsc_pps_beg,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ sleep(test_duration);
+
+ # Collect statistics before test stops...and stop the test. Important to get stats before stopping as stops take some time...
+ rx_pps_end,tx_pps_end,drop_pps_end,tsc_pps_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ lat_min,lat_max,lat_avg = lat_stats(rx_lat_cores, rx_task)
+ sock.sendall("stop " + to_str(tx_cores) + "\n")
+ sock.sendall("start " + to_str(all_rx_cores) + "\n")
+ sleep(3);
+ sock.sendall("stop " + to_str(all_rx_cores) + "\n")
+
+ rx_end, tx_end,drop_end,tsc_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task);
+ rx = rx_pps_end - rx_pps_beg
+ tsc = tsc_pps_end - tsc_pps_beg
+ mpps = rx / (tsc/float(tsc_hz)) / 1000000
+ tx = tx_pps_end - tx_pps_beg
+ tx_mpps = tx / (tsc/float(tsc_hz)) / 1000000
+
+ #print "Runtime = " + str((tsc)/float(tsc_hz));
+ if (tx_end == 0):
+ dropped_tot = tx_end - rx_end
+ dropped_pct = 0
+ else:
+ dropped_tot = tx_end - rx_end
+ dropped_pct = ((dropped_tot) * 1.0) / tx_end
+
+ if (dropped_tot > 0):
+ if (dropped_pct >= max_dropped):
+ print "** FAILED **: lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ else:
+ print "OK but lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ else:
+ if (dropped_tot < 0):
+ print "Something wrong happened - received more packets than transmitted"
+ else:
+ print "** OK **: RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end)
+ print "MPPS = " + str(mpps)
+ print "===================================================="
+ return dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg
+
+def write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg):
+ f.write(str(pkt_size) + "; " + str(tx_mpps) + "; " + str(mpps) + "; " + str(100 * dropped_pct) + "; " + str(dropped_tot) + "; " + str(speed * nb_cores_per_interface) + "; " + str(number_next_hops) + "; " + str(number_routes) + "; " + str(traffic) + "; ")
+ for e in rx_lat_cores:
+ f.write(str(lat_min[e]) + "; " + str(lat_max[e]) + "; " + str(lat_avg[e]) + "; ")
+ f.write("\n");
+ f.flush()
+
+def run_loss_graph(number_next_hops, number_routes, pkt_size, traffic):
+ speed = init_speed * 1.0
+ done = 0;
+ while done == 0:
+ dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed)
+ write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ if (speed <= low_steps_delta_for_loss):
+ done = 1
+ return
+ if (speed >= (medium_steps+normal_steps_delta_for_loss)):
+ speed -= normal_steps_delta_for_loss
+ else:
+ if (speed >= (low_steps+medium_steps_delta_for_loss)):
+ speed -= medium_steps_delta_for_loss
+ else:
+ speed -= low_steps_delta_for_loss
+
+def run_dicho_search(number_next_hops, number_routes, pkt_size, traffic):
+ previous_success_speed = 0.0
+ previous_error_speed = max_speed
+ speed = init_speed * 1.0
+ done = 0;
+ good_tx_mpps = 0
+ good_mpps = 0
+ good_dropped_pct = 0
+ good_dropped_tot = 0
+ good_speed = 0
+ good_lat_min = [0 for e in range(127)]
+ good_lat_max = [0 for e in range(127)]
+ good_lat_avg = [0 for e in range(127)]
+
+ while done == 0:
+ dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed)
+ if ((dropped_tot >= 0) and (dropped_pct <= max_dropped)):
+ good_tx_mpps = tx_mpps
+ good_mpps = mpps
+ good_dropped_pct = dropped_pct
+ good_dropped_tot = dropped_tot
+ good_speed = speed
+ good_lat_min = lat_min
+ good_lat_max = lat_max
+ good_lat_avg = lat_avg
+ write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ else:
+ write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+
+ if ((speed == max_speed) and (dropped_pct <= max_dropped)):
+ write_results(f_minimal, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg);
+ done = 1
+ if (dropped_pct <= max_dropped):
+ previous_success_speed = speed
+ if (speed > max_speed - accuracy):
+ speed = max_speed
+ else:
+ if (previous_error_speed - speed < accuracy):
+ write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg);
+ done = 1
+ else:
+ speed = speed + (previous_error_speed - speed)/2;
+ else:
+ previous_error_speed = speed
+ if (speed - previous_success_speed < accuracy):
+ write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg);
+ done = 1
+ else:
+ speed = speed - (speed - previous_success_speed) / 2;
+
+
+def set_destination_ip(use_case, nb_destinations, traffic):
+ # minimum 8 routes i.e. 1 per interface
+ # Destination addresses: "00XXXYY1" "Z00ZZ0ZZ" "AA0AA0AA" "BBBBBB10"
+ # Where X = interface id. Starting with 00 to be in class A and skipping 0.x.y.z and 127.x.y.z
+ # Y, Z and A = additional routes
+ # B = IP in routes. 10 to avoid x.y.z.0 and x.y.z.255
+ # Gaps in A and B to avoid "too good" distributions, e.g. when using LPM
+ # First changing Y
+
+ mask = ""
+ for i in range (2):
+ mask = str(mask)+"0"
+ end_mask = ""
+ if (use_case != 2):
+ end_mask = "XXXXXX10" # Last 8 bits
+
+ if (nb_destinations == 1):
+ end_mask = "0010000000000000000" + str(end_mask)
+ elif (nb_destinations == 2):
+ end_mask = "X010000000000000000" + str(end_mask)
+ elif (nb_destinations == 4):
+ end_mask = "XX10000000000000000" + str(end_mask)
+ elif (nb_destinations == 8):
+ end_mask = "XX1X000000000000000" + str(end_mask)
+ elif (nb_destinations == 16):
+ end_mask = "XX1X00X000000000000" + str(end_mask)
+ elif (nb_destinations == 32):
+ end_mask = "XX1X00XX00000000000" + str(end_mask)
+ elif (nb_destinations == 64):
+ end_mask = "XX1X00XX0X000000000" + str(end_mask)
+ elif (nb_destinations == 128):
+ end_mask = "XX1X00XX0XX00000000" + str(end_mask)
+ elif (nb_destinations == 256):
+ end_mask = "XX1X00XX0XXX0000000" + str(end_mask)
+ elif (nb_destinations == 512):
+ end_mask = "XX1X00XX0XXXX000000" + str(end_mask)
+ elif (nb_destinations == 1024):
+ end_mask = "XX1X00XX0XXXX0X0000" + str(end_mask)
+ elif (nb_destinations == 2048):
+ end_mask = "XX1X00XX0XXXX0XX000" + str(end_mask)
+ elif (nb_destinations == 4096):
+ end_mask = "XX1X00XX0XXXX0XX0X0" + str(end_mask)
+ elif (nb_destinations == 8192):
+ end_mask = "XX1X00XX0XXXX0XX0XX" + str(end_mask)
+ else:
+ if (nb_destinations <= 64 * 1):
+ end_mask = "0010000000000000000"
+ n_dest = int(log(nb_destinations, 2))
+ for i in range (n_dest):
+ end_mask = str(end_mask) + "X"
+ for i in range (6 - n_dest):
+ end_mask = str(end_mask) + "0"
+ end_mask = str(end_mask) + "10"
+ else:
+ end_mask = "XXXXXX10" # Last 8 bits
+
+ if (nb_destinations == 64 * 2):
+ end_mask = "001X000000000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 4):
+ end_mask = "001X00X000000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 8):
+ end_mask = "001X00XX00000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 16):
+ end_mask = "001X00XX0X000000000" + str(end_mask)
+ elif (nb_destinations == 64 * 32):
+ end_mask = "001X00XX0XX00000000" + str(end_mask)
+ elif (nb_destinations == 64 * 64):
+ end_mask = "001X00XX0XXX0000000" + str(end_mask)
+ elif (nb_destinations == 64 * 128):
+ end_mask = "001X00XX0XXXX000000" + str(end_mask)
+ elif (nb_destinations == 64 * 256):
+ end_mask = "001X00XX0XXXX0X0000" + str(end_mask)
+ elif (nb_destinations == 64 * 512):
+ end_mask = "001X00XX0XXXX0XX000" + str(end_mask)
+ elif (nb_destinations == 64 * 1024):
+ end_mask = "001X00XX0XXXX0XX0X0" + str(end_mask)
+ elif (nb_destinations == 64 * 2048):
+ end_mask = "001X00XX0XXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 4096):
+ end_mask = "001XX0XX0XXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 8192):
+ end_mask = "001XXXXX0XXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 16384):
+ end_mask = "001XXXXXXXXXX0XX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 32768):
+ end_mask = "001XXXXXXXXXXXXX0XX" + str(end_mask)
+ elif (nb_destinations == 64 * 65536):
+ end_mask = "001XXXXXXXXXXXXXXXX" + str(end_mask)
+
+ if (traffic == 0): # One-to-one. From odd interface to even interface and vice versa, no QPI cross
+ mask1 = str(mask) + "001" + str(end_mask)
+ mask2 = str(mask) + "000" + str(end_mask)
+ mask3 = str(mask) + "011" + str(end_mask)
+ mask4 = str(mask) + "010" + str(end_mask)
+ mask5 = str(mask) + "101" + str(end_mask)
+ mask6 = str(mask) + "100" + str(end_mask)
+ mask7 = str(mask) + "111" + str(end_mask)
+ mask8 = str(mask) + "110" + str(end_mask)
+
+ elif (traffic == 1): # Full mesh within QPI (i.e. 1 to 4)
+ mask1 = str(mask) + "0XX" + str(end_mask)
+ mask2 = str(mask) + "0XX" + str(end_mask)
+ mask3 = str(mask) + "0XX" + str(end_mask)
+ mask4 = str(mask) + "0XX" + str(end_mask)
+ mask5 = str(mask) + "1XX" + str(end_mask)
+ mask6 = str(mask) + "1XX" + str(end_mask)
+ mask7 = str(mask) + "1XX" + str(end_mask)
+ mask8 = str(mask) + "1XX" + str(end_mask)
+
+ elif (traffic == 2): # One to one, crossing QPI (100% QPI)
+ mask1 = str(mask) + "100" + str(end_mask)
+ mask2 = str(mask) + "101" + str(end_mask)
+ mask3 = str(mask) + "110" + str(end_mask)
+ mask4 = str(mask) + "111" + str(end_mask)
+ mask5 = str(mask) + "000" + str(end_mask)
+ mask6 = str(mask) + "001" + str(end_mask)
+ mask7 = str(mask) + "010" + str(end_mask)
+ mask8 = str(mask) + "011" + str(end_mask)
+
+ elif (traffic == 3): # 1 to 4 crossing QPI (100% QPI)
+ mask1 = str(mask) + "1XX" + str(end_mask)
+ mask2 = str(mask) + "1XX" + str(end_mask)
+ mask3 = str(mask) + "1XX" + str(end_mask)
+ mask4 = str(mask) + "1XX" + str(end_mask)
+ mask5 = str(mask) + "0XX" + str(end_mask)
+ mask6 = str(mask) + "0XX" + str(end_mask)
+ mask7 = str(mask) + "0XX" + str(end_mask)
+ mask8 = str(mask) + "0XX" + str(end_mask)
+
+ elif (traffic == 4): # 1 to 4 (50% QPI)
+ mask1 = str(mask) + "XX1" + str(end_mask)
+ mask2 = str(mask) + "XX0" + str(end_mask)
+ mask3 = str(mask) + "XX1" + str(end_mask)
+ mask4 = str(mask) + "XX0" + str(end_mask)
+ mask5 = str(mask) + "XX1" + str(end_mask)
+ mask6 = str(mask) + "XX0" + str(end_mask)
+ mask7 = str(mask) + "XX1" + str(end_mask)
+ mask8 = str(mask) + "XX0" + str(end_mask)
+
+ elif (traffic == 5): # Full mesh (50% QPI)
+ mask1 = str(mask) + "XXX" + str(end_mask)
+ mask2 = str(mask) + "XXX" + str(end_mask)
+ mask3 = str(mask) + "XXX" + str(end_mask)
+ mask4 = str(mask) + "XXX" + str(end_mask)
+ mask5 = str(mask) + "XXX" + str(end_mask)
+ mask6 = str(mask) + "XXX" + str(end_mask)
+ mask7 = str(mask) + "XXX" + str(end_mask)
+ mask8 = str(mask) + "XXX" + str(end_mask)
+
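+ # Note: in this 4-port variant tx_port4..tx_port7 are empty, so mask5..mask8
+ # are computed but never applied.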
+ for c in tx_port0:
+ send_all_random([c], 30, mask1, 4)
+ for c in tx_port1:
+ send_all_random([c], 30, mask2, 4)
+ for c in tx_port2:
+ send_all_random([c], 30, mask3, 4)
+ for c in tx_port3:
+ send_all_random([c], 30, mask4, 4)
+ for c in tx_port4:
+ send_all_random([c], 30, mask5, 4)
+ for c in tx_port5:
+ send_all_random([c], 30, mask6, 4)
+ for c in tx_port6:
+ send_all_random([c], 30, mask7, 4)
+ for c in tx_port7:
+ send_all_random([c], 30, mask8, 4)
+ for c in tx_cores:
+ send_all_random([c], 34, "0XXXXXXXXXXXXX10", 2)
+ send_all_random([c], 36, "0XXXXXXXXXXXXX10", 2)
+
+#========================================================================
+class TestDefinition():
+ "Stores test parameters"
+ def __init__(self, use_case, next_hops, number_routes, pkt_size, traffic, reload):
+ self.use_case = use_case
+ self.next_hops = next_hops
+ self.number_routes = number_routes
+ self.pkt_size = pkt_size
+ self.traffic = traffic
+ self.reload = reload
+
+#========================================================================
+# Use case 0 increases the input load and measures the output load => shows dropped packets at low loads and the overload behavior
+# Use case 1 and use case 2 run dichotomic searches, looking for 0 packet loss (or whatever loss is configured)
+# Use case 1 shows the effect of the number of routes and next-hops
+# Use case 2 shows the effect of the number of destinations, using a fixed (low) number of routes and next-hops
+#========================================================================
+def run_use_case(use_case, number_next_hops, number_routes, pkt_size, traffic, reload):
+ if (reload):
+ if (use_case == 2):
+ config = "config.1_1" + "_" + str(use_case) + ".boot"
+ else:
+ config = "config." + str(number_routes) + "_" + str(number_next_hops) + ".boot"
+ reload_vRouter_config(config)
+ send_reset_random()
+ send_reset_value()
+ set_destination_ip(use_case, number_routes, traffic)
+ set_pkt_sizes(tx_cores, pkt_size)
+ print "Running test with pkt size= " + str(pkt_size) + " Next hops = " + str(number_next_hops) + "; number of routes = " + str(number_routes) + "; Traffic = " + str(traffic) + " \n"
+ if (use_case == 0):
+ run_loss_graph(number_next_hops, number_routes, pkt_size, traffic)
+ else:
+ run_dicho_search(number_next_hops, number_routes, pkt_size, traffic)
+ sleep(3)
+
+#========================================================================
+def run_all_use_cases():
+ use_case_nb = 1
+ # Connect to dppd
+ file_path = '/tmp/prox.sock'
+ sock.connect(file_path)
+
+ f.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; next_hops; number_routes; traffic; latency per core (min; max; avg)\n")
+ f_all.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; next_hops; number_routes; traffic; latency per core (min; max; avg)\n")
+ f_minimal.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; next_hops; number_routes; traffic; latency per core (min; max; avg)\n")
+ f.flush();
+ f_all.flush();
+ f_minimal.flush();
+
+ # Starting tests
+ print "Stopping all cores and resetting all values and randoms before starting\n"
+ sock.sendall("stop all\n")
+ sock.sendall("reset stats\n")
+ sleep(3);
+ for line in file_tests:
+ info = line.split(';')
+ if (info[0][0] == '#'):
+ continue
+ if (info[0].strip() == ''):
+ break
+ use_case = int(info[0])
+ next_hops = int(info[1])
+ number_routes = int(info[2])
+ pkt_size = int(info[3])
+ traffic = int(info[4])
+ reload = int(info[5])
+ print str(use_case_nb) + " : Running use case " + str(use_case) + " next_hops = " + str(next_hops) + " routes = " + str(number_routes) + " pkt_size = " + str(pkt_size) + " traffic = " + str(traffic) + " reload = " + str(reload)
+ run_use_case(use_case, next_hops, number_routes, pkt_size, traffic, reload)
+ use_case_nb = use_case_nb + 1
+
+#========================================================================
+def configure_use_case(use_case):
+ Tests = []
+ if (use_case == 0):
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition("0", "1", "1", pkt_size, "0", "1"))
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition("0", "1", "1", pkt_size, "1", "1"))
+ if (use_case == 1):
+ number_next_hops = 1
+ reload = 0
+
+ number_routes = number_next_hops # At least as many routes as next hops
+ while number_routes <= max_number_routes:
+ reload = 1
+ for traffic in range(6):
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload))
+ reload = 0
+ if (number_routes < max_number_routes / 2):
+ number_routes = number_routes * 4
+ else:
+ number_routes = number_routes * 2
+
+ number_routes = max_number_next_hops
+ while number_next_hops <= max_number_next_hops:
+ reload = 1
+ for traffic in range(6):
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload))
+ reload = 0
+ number_next_hops = number_next_hops * 2
+ if (use_case == 2):
+ number_next_hops = 1
+ reload = 1
+ for traffic in range(6):
+ nb_destinations = 1
+ while nb_destinations <= max_number_addresses_local_network:
+ for pkt_size in all_pkt_size:
+ Tests.append(TestDefinition(use_case, number_next_hops, nb_destinations, pkt_size, traffic, reload))
+ reload = 0
+ nb_destinations = nb_destinations * 2
+ reload = 1
+
+ file_tests = open('test_description.txt', 'w')
+ file_tests.write("# Use case; next_hops; routes; pkt_size; traffic; reload;\n")
+ for test in Tests:
+ file_tests.write(str(test.use_case) + "; " + str(test.next_hops) + "; " + str(test.number_routes) + "; " + str(test.pkt_size) + "; " + str(test.traffic) + "; " + str(test.reload) + ";\n")
+ file_tests.close()
+
+#========================================================================
+if ((configure == 0) and (run == 0)):
+ print "Nothing to do - please use -r 1 or -c 1"
+if (configure == 1):
+ configure_use_case(use_case)
+if (run == 1):
+ print "****************************************************************************************************************"
+ print "** Running vRouter Characterization with " + str(test_duration) + " seconds steps and starting at " + str(init_speed) + " percent of line rate **"
+ print "****************************************************************************************************************"
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ f_all = open('all_results.txt', 'w')
+ f = open('detailed_results.txt', 'w')
+ f_minimal = open('minimal_results.txt', 'w')
+ file_tests = open('test_description.txt', 'r')
+ run_all_use_cases()
+ f.close();
+ sock.close();
diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl b/VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl
new file mode 100755
index 00000000..b8baa46b
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl
@@ -0,0 +1,90 @@
+#!/bin/env perl
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# This script creates four sets of files: 2 sets for use cases 0 and 1
+# (which use the same configuration) and 2 sets for use case 2.
+# Each use case is defined by 2 sets of configuration files.
+# interface.txt contains the IP addresses of the DPDK fast path interfaces.
+# route.x.y.txt contains the routing table for the different configurations,
+# with x being the number of routes and y the number of next hops.
+# Those interface.txt and route.x.y.txt files should then be converted
+# to fit the syntax of the vRouter configuration files.
+
+use strict;
+my $max_nb_routes = 8192;
+my $max_nb_next_hops = 1024;
+my $max_nb_interfaces = 4;
+my $nb_next_hops = 1;
+my ($interface, $a1, $a2, $a3, $a4, $fh, $output_route);
+
+# Create interface configuration for use case 0 and 1
+my $interface_config = "interface.txt";
+open($fh, '>', $interface_config) or die "Could not open file '$interface_config' $!";
+print $fh "# interface IP address/prefix\n";
+for ($interface = 0; $interface < $max_nb_interfaces; $interface++) {
+ print $fh ($interface+64).".0.0.240/24\n";
+}
+close $fh;
+
+# Create interface configuration for use case 2
+my $interface_config = "interface_use_case_2.txt";
+open($fh, '>', $interface_config) or die "Could not open file '$interface_config' $!";
+print $fh "# interface IP address/prefix\n";
+for ($interface = 0; $interface < $max_nb_interfaces; $interface++) {
+ print $fh ($interface * 8 + 1).".0.0.240/5\n";
+}
+close $fh;
+
+# Create routes configuration for use case 0 and 1
+while ($nb_next_hops <= $max_nb_next_hops) {
+ my $nb_routes_per_interface = $nb_next_hops;
+ while ($nb_routes_per_interface <= $max_nb_routes) {
+ $output_route = "route.".$nb_routes_per_interface.".".$nb_next_hops.".txt";
+ open($fh, '>', $output_route) or die "Could not open file '$output_route' $!";
+ print $fh "# destination/prefix;next-hop\n";
+
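+ # Spread the bits of $route_nb across the address octets with gaps,
+ # so that consecutive routes do not fall into adjacent prefixes; this mirrors
+ # the gapped destination masks used by the characterization scripts.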
+ for (my $route_nb = 0; $route_nb < $nb_routes_per_interface; $route_nb++) {
+ for ($interface = 0; $interface < $max_nb_interfaces; $interface++) {
+ $a1 = $interface * 8 + 1 + (($route_nb & 1) << 2) + ($route_nb & 2);
+ $a2 = (($route_nb & 4) << 5) + (($route_nb & 8) << 1) + (($route_nb & 0x10) >> 1) + (($route_nb & 0x20) >> 4) + (($route_nb & 0x40) >> 6);
+ $a3 = (($route_nb & 0x80)) + (($route_nb & 0x100) >> 2) + (($route_nb & 0x200) >> 5) + (($route_nb & 0x400) >> 7) + (($route_nb & 0x800) >> 10) + (($route_nb & 0x1000) >> 12);
+ $a4 = 0;
+ print $fh $a1.".".$a2.".".$a3.".".$a4."/24;";
+ print $fh ($interface+64).".0.".(($route_nb % $nb_next_hops) >> 7).".".(1 + (($route_nb % $nb_next_hops) & 0x7f)) ."\n";
+ }
+ }
+ $nb_routes_per_interface = $nb_routes_per_interface * 2;
+ }
+ $nb_next_hops = $nb_next_hops * 2;
+}
+close $fh;
+
+# Create routes configuration for use case 2
+$output_route = "route.1.1.use_case_2.txt";
+open($fh, '>', $output_route) or die "Could not open file '$output_route' $!";
+print $fh "# destination/prefix;next-hop\n";
+
+for ($interface = 0; $interface < $max_nb_interfaces; $interface++) {
+ $a1 = $interface + 64 ;
+ $a2 = 0;
+ $a3 = 0;
+ $a4 = 0;
+ print $fh $a1.".".$a2.".".$a3.".".$a4."/24;";
+ print $fh ($interface * 8 + 1).".0.0.1\n";
+}
+close $fh;
diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py
new file mode 100755
index 00000000..f00ab77b
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py
@@ -0,0 +1,57 @@
+#!/bin/env python
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import os
+import thread
+import time
+import socket
+
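+# ssh: run a command on a remote host with host-key checking disabled and return
+# a dict holding the command output ('out') and its exit status ('ret').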
+def ssh(user, ip, cmd):
+ # print cmd;
+ ssh_options = ""
+ ssh_options += "-o StrictHostKeyChecking=no "
+ ssh_options += "-o UserKnownHostsFile=/dev/null "
+ ssh_options += "-o LogLevel=quiet "
+ running = os.popen("ssh " + ssh_options + " " + user + "@" + ip + " \"" + cmd + "\"");
+ ret = {};
+ ret['out'] = running.read().strip();
+ ret['ret'] = running.close();
+ if (ret['ret'] == None):
+ ret['ret'] = 0;
+
+ return ret;
+
+def ssh_check_quit(obj, user, ip, cmd):
+ ret = ssh(user, ip, cmd);
+ if (ret['ret'] != 0):
+ obj._err = True;
+ obj._err_str = ret['out'];
+ exit(-1);
+
+class remote_system:
+ def __init__(self, user, ip):
+ self._ip = ip;
+ self._user = user;
+ def run(self, cmd):
+ return ssh(self._user, self._ip, cmd);
+ def run_forked(self, cmd):
+ thread.start_new_thread(ssh, (self._user, self._ip, cmd));
+ return 0;
+ def scp(self, src, dst):
+ running = os.popen("scp " + self._user + "@" + self._ip + ":" + src + " " + dst);
+ return running.close();
diff --git a/VNFs/DPPD-PROX/helper-scripts/trailing.sh b/VNFs/DPPD-PROX/helper-scripts/trailing.sh
new file mode 100755
index 00000000..5b64b1d7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/trailing.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+bad_lines=$(grep -nHr -e "[[:space:]]$" *.c *.h gen/*.cfg config/*.cfg)
+
+if [ -n "$bad_lines" ]; then
+ echo "Found trailing white-spaces:"
+ echo $bad_lines
+ exit 1;
+fi
+
+for f in *.c *.h gen/*.cfg config/*.cfg; do
+ result=$(tail -n 1 $f | grep "^$" | wc -l)
+
+ if [ "$result" == "1" ]; then
+ echo "Trailing newlines at end of file $f"
+ exit 1
+ fi
+done;
+
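+# findDuplicate: flag files that contain two consecutive empty lines.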
+prev="dummy"
+function findDuplicate() {
+ line=1
+ while read p; do
+ if [ "$prev" == "" ]; then
+ if [ "$p" == "" ]; then
+ echo "duplicate empty line at $1:$line"
+ bad=1
+ fi
+ fi
+ prev=$p
+ let "line+=1"
+ done <$1
+}
+
+bad=0
+for f in *.c *.h; do
+ findDuplicate $f
+done;
+
+if [ "$bad" != "0" ]; then
+ exit 1
+fi
+
+tab=" "
+bad_lines=$(grep -nHr -e "^$tab$tab$tab$tab$tab$tab$tab" *.c *.h | head -n1)
+
+if [ -n "$bad_lines" ]; then
+ echo "Code nested too deep:"
+ echo $bad_lines
+ exit 1;
+fi
+
+exit 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/vm-cores.py b/VNFs/DPPD-PROX/helper-scripts/vm-cores.py
new file mode 100644
index 00000000..de794998
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/vm-cores.py
@@ -0,0 +1,20 @@
+#!/bin/env python2.7
+
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
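+# Each entry likely pairs a vCPU with its hyperthread sibling as seen inside the
+# VM (e.g. core 0 and core 20); the list is meant to be consumed by the other
+# helper scripts when building PROX core lists.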
+cores = [[0,20], [1,21], [2,22], [3,23], [4,24], [5,25], [6,26], [7,27], [8,28], [9,29]]
+