summaryrefslogtreecommitdiffstats
path: root/VNFs
diff options
context:
space:
mode:
Diffstat (limited to 'VNFs')
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml64
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/increment_till_fail.test68
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py27
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py52
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py31
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py78
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py47
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py25
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py87
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py14
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py8
12 files changed, 373 insertions, 134 deletions
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
index 4644a028..8d627e5e 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
@@ -32,8 +32,6 @@ class RapidStackManager(object):
options = config.options(section)
for option in options:
rapid_stack_params[option] = config.get(section, option)
- if 'push_gateway' not in rapid_stack_params.keys():
- rapid_stack_params['push_gateway'] = None
return (rapid_stack_params)
@staticmethod
@@ -44,10 +42,9 @@ class RapidStackManager(object):
heat_param = rapid_stack_params['heat_param']
keypair_name = rapid_stack_params['keypair_name']
user = rapid_stack_params['user']
- push_gateway = rapid_stack_params['push_gateway']
deployment = StackDeployment(cloud_name)
deployment.deploy(stack_name, keypair_name, heat_template, heat_param)
- deployment.generate_env_file(user, push_gateway)
+ deployment.generate_env_file(user)
def main():
rapid_stack_params = {}
@@ -60,7 +57,6 @@ def main():
#heat_param = 'params_rapid.yaml'
#keypair_name = 'prox_key'
#user = 'centos'
- #push_gateway = None
RapidStackManager.deploy_stack(rapid_stack_params)
if __name__ == "__main__":
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
new file mode 100644
index 00000000..a6e5b0c2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
@@ -0,0 +1,64 @@
+;Format: PushGateway
+;Format: Xtesting
+;URL:
+ part1: http://192.168.36.61:9091/metrics/job/
+ part2: test
+ part3: /instance/
+ part4: environment_file
+;rapid_flowsizetest:
+ Flows: Flows
+ Size: Size
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: CoreGenerated
+ SentByNIC: SentByNIC
+ FwdBySUT: FwdBySUT
+ RevByCore: RevByCore
+ AvgLatency: AvgLatency
+ PCTLatency: PCTLatency
+ MaxLatency: MaxLatency
+ PacketsSent: PacketsSent
+ PacketsReceived: PacketsReceived
+ PacketsLost: PacketsLost
+rapid_flowsizetest:
+ project_name: "vsperf"
+ scenario: "vsperf"
+ start_date: start_date
+ stop_date: stop_date
+ case_name: test
+ pod_name: "intel-pod10"
+ installer: "Fuel"
+ version: "1.0"
+ build_tag: "none"
+ criteria: "PASS"
+ details:
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: CoreGenerated
+ SentByNIC: SentByNIC
+ FwdBySUT: FwdBySUT
+ RevByCore: RevByCore
+ Latency (usec):
+ AvgLatency: AvgLatency
+ PCTLatency: PCTLatency
+ MaxLatency: MaxLatency
+ Absolute Packet Count:
+ PacketsSent: PacketsSent
+ PacketsReceived: PacketsReceived
+ PacketsLost: PacketsLost
+rapid_irqtest:
+ Core: Core
+ LessThan1us : B1
+ LessThan5us : B5
+ LessThan10us : B10
+ LessThan50us : B50
+ LessThan100us : B100
+ LessThan500us : B500
+ LessThan1ms : B1000
+ LessThan5ms : B5000
+ LessThan10ms : B10000
+ LessThan50ms : B50000
+ LessThan100ms : B100000
+ LessThan500ms : B500000
+ MoreThan500ms : BM500000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/increment_till_fail.test b/VNFs/DPPD-PROX/helper-scripts/rapid/increment_till_fail.test
new file mode 100644
index 00000000..29e36bfd
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/increment_till_fail.test
@@ -0,0 +1,68 @@
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+[TestParameters]
+name = IncrementTillFailTesting
+number_of_tests = 2
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = gen.cfg
+dest_vm = 2
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = swap.cfg
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=warmuptest
+flowsize=512
+imix=[64]
+warmupspeed=1
+warmuptime=2
+
+[test2]
+test=increment_till_fail
+# Following parameter defines the success criterion for the test.
+# When this test uses multiple combinations of packet size and flows,
+# all combinations must be meeting the same threshold
+# The threshold is expressed in Mpps
+pass_threshold=0.1
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list needs to be a power of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+step = 0.5
+startspeed = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
index dddd29c6..6b9fdcf3 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
@@ -27,14 +27,12 @@ class CoreStatsTest(RapidTest):
"""
Class to manage the corestatstesting
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file, machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ def __init__(self, test_param, runtime, testname, environment_file,
+ machines):
+ super().__init__(test_param, runtime, testname, environment_file)
self.machines = machines
def run(self):
- # fieldnames = ['PROXID','Time','Received','Sent','NonDPReceived','NonDPSent','Delta','NonDPDelta','Dropped']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
RapidLog.info("+------------------------------------------------------------------------------------------------------------------+")
RapidLog.info("| Measuring core statistics on 1 or more PROX instances |")
RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
@@ -73,15 +71,16 @@ class CoreStatsTest(RapidTest):
old_tsc[i] = new_tsc[i]
tot_drop[i] = tot_drop[i] + tx - rx
RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
- # writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NonDPReceived':non_dp_rx,'NonDPSent':non_dp_tx,'Delta':tx-rx,'NonDPDelta':non_dp_tx-non_dp_rx,'Dropped':tot_drop[i]})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test']+ '/instance/' + self.test['environment_file'] + str(i)
- DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNonDPReceived {}\nNonDPSent {}\nDelta {}\nNonDPDelta {}\nDropped {}\n'.format(i,duration,rx,tx,non_dp_rx,non_dp_tx,tx-rx,non_dp_tx-non_dp_rx,tot_drop[i])
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ variables = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'PROXID': i,
+ 'StepSize': duration,
+ 'Received': rx,
+ 'Sent': tx,
+ 'NonDPReceived': non_dp_rx,
+ 'NonDPSent': non_dp_tx,
+ 'Dropped': tot_drop[i]}
+ self.post_data('rapid_corestatstest', variables)
if machines_to_go == 0:
duration = duration - 1
machines_to_go = len (self.machines)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
index da53742e..c90630ef 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
@@ -16,10 +16,8 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
-
import sys
import time
-import requests
from math import ceil
from statistics import mean
from past.utils import old_div
@@ -32,9 +30,9 @@ class FlowSizeTest(RapidTest):
"""
Class to manage the flowsizetesting
"""
- def __init__(self, test_param, lat_percentile, runtime, pushgateway,
+ def __init__(self, test_param, lat_percentile, runtime, testname,
environment_file, gen_machine, sut_machine, background_machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ super().__init__(test_param, runtime, testname, environment_file)
self.gen_machine = gen_machine
self.sut_machine = sut_machine
self.background_machines = background_machines
@@ -62,6 +60,8 @@ class FlowSizeTest(RapidTest):
def new_speed(self, speed,size,success):
if self.test['test'] == 'fixed_rate':
return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (speed + self.test['step'])
elif 'TST009' in self.test.keys():
if success:
self.test['TST009_L'] = self.test['TST009_m'] + 1
@@ -79,6 +79,8 @@ class FlowSizeTest(RapidTest):
def get_start_speed_and_init(self, size):
if self.test['test'] == 'fixed_rate':
return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (self.test['startspeed'])
elif 'TST009' in self.test.keys():
self.test['TST009_L'] = 0
self.test['TST009_R'] = self.test['TST009_n'] - 1
@@ -128,6 +130,7 @@ class FlowSizeTest(RapidTest):
self.set_background_flows(self.background_machines, flow_number)
endspeed = None
speed = self.get_start_speed_and_init(size)
+ self.record_start_time()
while True:
attempts += 1
endwarning = False
@@ -167,7 +170,7 @@ class FlowSizeTest(RapidTest):
if lat_warning or retry_warning:
endwarning = '| | {:177.177} |'.format(retry_warning + lat_warning)
success = True
- TestPassed = False # fixed rate testing cannot be True, it is just reported numbers every second
+ TestPassed = False # fixed rate testing cannot be True, it is just reporting numbers every second
speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
# The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
# The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
@@ -230,8 +233,12 @@ class FlowSizeTest(RapidTest):
success = False
RapidLog.debug(self.report_result(-attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_perc_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning)
speed = self.new_speed(speed, size, success)
- if self.resolution_achieved():
+ if self.test['test'] == 'increment_till_fail':
+ if not success:
+ break
+ elif self.resolution_achieved():
break
+ self.record_stop_time()
if endspeed is not None:
if TestPassed and (endpps_rx < self.test['pass_threshold']):
TestPassed = False
@@ -240,20 +247,27 @@ class FlowSizeTest(RapidTest):
if endwarning:
RapidLog.info (endwarning)
RapidLog.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
- # writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':self.get_pps(endspeed,size),'GeneratedPPS':endpps_req_tx,'SentPPS':endpps_tx,'ForwardedPPS':endpps_sut_tx,'ReceivedPPS':endpps_rx,'AvgLatencyUSEC':endlat_avg,'MaxLatencyUSEC':endlat_max,'Sent':endabs_tx,'Received':endabs_rx,'Lost':endabs_dropped,'LostTotal':endabs_dropped})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test']+ '/instance/' + self.test['environment_file']
- if endabs_dropped == None:
- ead = 0
- else:
- ead = endabs_dropped
- DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(flow_number,size+4,self.get_pps(endspeed,size),endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_max,endabs_tx,endabs_rx,ead,ead)
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ if self.test['test'] != 'fixed_rate':
+ variables = {'test': self.test['testname'],
+ 'environment_file': self.test['environment_file'],
+ 'start_date': self.start,
+ 'stop_date': self.stop,
+ 'Flows': flow_number,
+ 'Size': size,
+ 'RequestedSpeed': RapidTest.get_pps(speed,size),
+ 'CoreGenerated': endpps_req_tx,
+ 'SentByNIC': endpps_tx,
+ 'FwdBySUT': endpps_sut_tx,
+ 'RevByCore': endpps_rx,
+ 'AvgLatency': endlat_avg,
+ 'PCTLatency': endlat_perc,
+ 'MaxLatency': endlat_max,
+ 'PacketsSent': endabs_tx,
+ 'PacketsReceived': endabs_rx,
+ 'PacketsLost': abs_dropped}
+ self.post_data('rapid_flowsizetest', variables)
else:
RapidLog.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
self.gen_machine.stop_latency_cores()
return (TestPassed)
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
index 82067295..0f925552 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
@@ -28,17 +28,14 @@ class ImpairTest(RapidTest):
"""
Class to manage the impair testing
"""
- def __init__(self, test_param, lat_percentile, runtime, pushgateway,
+ def __init__(self, test_param, lat_percentile, runtime, testname,
environment_file, gen_machine, sut_machine):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ super().__init__(test_param, runtime, testname, environment_file)
self.gen_machine = gen_machine
self.sut_machine = sut_machine
self.test['lat_percentile'] = lat_percentile
def run(self):
- # fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Dropped','DropRate']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
imix = self.test['imix']
size = mean (imix)
flow_number = self.test['flowsize']
@@ -68,14 +65,20 @@ class ImpairTest(RapidTest):
else:
lat_warning = ''
RapidLog.info(self.report_result(attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration))
-# writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':self.get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx_str,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Dropped':abs_dropped,'DropRate':drop_rate})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test'] + '/instance/' + self.test['environment_file']
- DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nDropped {}\nDropRate {}\n'.format(flow_number,size+4,self.get_pps(speed,size),pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,abs_dropped,drop_rate)
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ variables = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'Flows': flow_number,
+ 'Size': size,
+ 'RequestedSpeed': RapidTest.get_pps(speed,size),
+ 'CoreGenerated': pps_req_tx,
+ 'SentByNIC': pps_tx,
+ 'FwdBySUT': pps_sut_tx,
+ 'RevByCore': pps_rx,
+ 'AvgLatency': lat_avg,
+ 'PCTLatency': lat_perc,
+ 'MaxLatency': lat_max,
+ 'PacketsLost': abs_dropped,
+ 'DropRate': drop_rate}
+ self.post_data('rapid_impairtest', variables)
self.gen_machine.stop_latency_cores()
return (True)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
index feabe656..3b3ef949 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
@@ -28,9 +28,9 @@ class IrqTest(RapidTest):
"""
Class to manage the irq testing
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file,
+ def __init__(self, test_param, runtime, testname, environment_file,
machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ super().__init__(test_param, runtime, testname, environment_file)
self.machines = machines
def run(self):
@@ -45,41 +45,53 @@ class IrqTest(RapidTest):
for machine in self.machines:
buckets=machine.socket.show_irq_buckets(1)
print('Measurement ongoing ... ',end='\r')
- machine.stop()
- old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(machine.get_cores())+1)]
- irq = [[0 for x in range(len(buckets)+1)] for y in range(len(machine.get_cores())+1)]
- irq[0][0] = 'bucket us'
- for j,bucket in enumerate(buckets,start=1):
- irq[0][j] = '<'+ bucket
- irq[0][-1] = '>'+ buckets [-2]
- machine.start()
- time.sleep(2)
- for j,bucket in enumerate(buckets,start=1):
- for i,irqcore in enumerate(machine.get_cores(),start=1):
- old_irq[i][j] = machine.socket.irq_stats(irqcore,j-1)
+ machine.start() # PROX cores will be started within 0 to 1 seconds
+ # That is why we sleep a bit over 1 second to make sure all cores
+ # are started
+ time.sleep(1.2)
+ old_irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ column_names = []
+ for bucket in buckets:
+ column_names.append('<{}'.format(bucket))
+ column_names[-1] = '>{}'.format(buckets[-2])
+ for j,bucket in enumerate(buckets):
+ for i,irqcore in enumerate(machine.get_cores()):
+ old_irq[i][j] = machine.socket.irq_stats(irqcore,j)
+            # Measurements in the loop above are updated by PROX every second
+            # This means that taking the same measurement 0.5 second later
+            # might result in the same data or data from the next 1s window
time.sleep(float(self.test['runtime']))
- machine.stop()
- for i,irqcore in enumerate(machine.get_cores(),start=1):
- irq[i][0]='core %s'%irqcore
- for j,bucket in enumerate(buckets,start=1):
- diff = machine.socket.irq_stats(irqcore,j-1) - old_irq[i][j]
+ row_names = []
+ for i,irqcore in enumerate(machine.get_cores()):
+ row_names.append(irqcore)
+ for j,bucket in enumerate(buckets):
+ diff = machine.socket.irq_stats(irqcore,j) - old_irq[i][j]
if diff == 0:
irq[i][j] = '0'
else:
irq[i][j] = str(round(old_div(diff,float(self.test['runtime'])), 2))
+                    # Measurements in the loop above are updated by PROX every second
+                    # This means that taking the same measurement 0.5 second later
+                    # might result in the same data or data from the next 1s window
+                    # Conclusion: we don't know the exact window size.
+                    # Real measurement windows might be wrong by 1 second
+                    # This could be fixed in this script by checking this data every
+                    # 0.5 seconds. Not implemented since we can also run this test for
+                    # a longer time and decrease the error. The absolute number of
+                    # interrupts is not so important.
+ machine.stop()
RapidLog.info('Results for PROX instance %s'%machine.name)
- for row in irq:
- RapidLog.info(''.join(['{:>12}'.format(item) for item in row]))
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test']+ '/instance/' + self.test['environment_file']
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- #DATA = 'Machine {}\n'.format(machine.name)
- for i,irqcore in enumerate(machine.get_cores(),start=1):
- DATA = '{}\n'.format(irq[i][0])
- for j,bucket in enumerate(buckets,start=1):
- DATA = DATA + 'B{} {}\n'.format(irq[0][j].replace(">","M").replace("<","").replace(" ",""),irq[i][j])
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ RapidLog.info('{:>12}'.format('bucket us') + ''.join(['{:>12}'.format(item) for item in column_names]))
+ for j, row in enumerate(irq):
+ RapidLog.info('Core {:>7}'.format(row_names[j]) + ''.join(['{:>12}'.format(item) for item in row]))
+ variables = {}
+ variables['test'] = self.test['test']
+ variables['environment_file'] = self.test['environment_file']
+ variables['Machine'] = machine.name
+ for i,irqcore in enumerate(machine.get_cores()):
+ variables['Core'] = '{}'.format(row_names[i])
+ for j,bucket in enumerate(buckets):
+ variables['B{}'.format(column_names[j].replace(">","M").replace("<","").replace(" ",""))] = irq[i][j]
+ self.post_data('rapid_irqtest', variables)
return (True)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
index df71811d..bdf27032 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
@@ -33,26 +33,25 @@ class RapidConfigParser(object):
def parse_config(test_params):
testconfig = configparser.RawConfigParser()
testconfig.read(test_params['test_file'])
- test_params['required_number_of_test_machines'] = int(testconfig.get('TestParameters', 'total_number_of_test_machines'))
- test_params['number_of_tests'] = int(testconfig.get('TestParameters', 'number_of_tests'))
+ test_params['required_number_of_test_machines'] = int(testconfig.get(
+ 'TestParameters', 'total_number_of_test_machines'))
+ test_params['number_of_tests'] = int(testconfig.get('TestParameters',
+ 'number_of_tests'))
test_params['TestName'] = testconfig.get('TestParameters', 'name')
if testconfig.has_option('TestParameters', 'lat_percentile'):
- test_params['lat_percentile'] = old_div(float(testconfig.get('TestParameters', 'lat_percentile')),100.0)
+ test_params['lat_percentile'] = old_div(float(
+ testconfig.get('TestParameters', 'lat_percentile')),100.0)
else:
test_params['lat_percentile'] = 0.99
- RapidLog.info('Latency percentile at {:.0f}%'.format(test_params['lat_percentile']*100))
+ RapidLog.info('Latency percentile at {:.0f}%'.format(
+ test_params['lat_percentile']*100))
config = configparser.RawConfigParser()
config.read(test_params['environment_file'])
test_params['vim_type'] = config.get('Varia', 'vim')
test_params['key'] = config.get('ssh', 'key')
test_params['user'] = config.get('ssh', 'user')
- test_params['total_number_of_machines'] = int(config.get('rapid', 'total_number_of_machines'))
- #if config.has_option('TestParameters', 'pushgateway'):
- if config.has_option('Varia', 'pushgateway'):
- test_params['pushgateway'] = config.get('Varia', 'pushgateway')
- RapidLog.info('Measurements will be pushed to %s'%test_params['pushgateway'])
- else:
- test_params['pushgateway'] = None
+ test_params['total_number_of_machines'] = int(config.get('rapid',
+ 'total_number_of_machines'))
tests = []
test = {}
for test_index in range(1, test_params['number_of_tests']+1):
@@ -61,11 +60,16 @@ class RapidConfigParser(object):
options = testconfig.options(section)
for option in options:
if option in ['imix','imixs','flows']:
- test[option] = ast.literal_eval(testconfig.get(section, option))
+ test[option] = ast.literal_eval(testconfig.get(section,
+ option))
# test[option] = [int(i) for i in test[option]]
- elif option in ['maxframespersecondallingress','stepsize','flowsize']:
+ elif option in ['maxframespersecondallingress','stepsize',
+ 'flowsize']:
test[option] = int(testconfig.get(section, option))
- elif option in ['startspeed','drop_rate_threshold','lat_avg_threshold','lat_perc_threshold','lat_max_threshold','accuracy','maxr','maxz','pass_threshold']:
+ elif option in ['startspeed', 'step', 'drop_rate_threshold',
+ 'lat_avg_threshold','lat_perc_threshold',
+ 'lat_max_threshold','accuracy','maxr','maxz',
+ 'pass_threshold']:
test[option] = float(testconfig.get(section, option))
else:
test[option] = testconfig.get(section, option)
@@ -75,7 +79,8 @@ class RapidConfigParser(object):
if 'drop_rate_threshold' not in test.keys():
test['drop_rate_threshold'] = 0
test_params['tests'] = tests
- if test_params['required_number_of_test_machines'] > test_params['total_number_of_machines']:
+ if test_params['required_number_of_test_machines'] > test_params[
+ 'total_number_of_machines']:
RapidLog.exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
raise Exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
machine_map = configparser.RawConfigParser()
@@ -84,14 +89,19 @@ class RapidConfigParser(object):
machine = {}
for test_machine in range(1, test_params['required_number_of_test_machines']+1):
machine.clear()
- if not(testconfig.has_option('TestM%d'%test_machine, 'prox_socket') and not testconfig.getboolean('TestM%d'%test_machine, 'prox_socket')):
+ if not(testconfig.has_option('TestM%d'%test_machine, 'prox_socket')
+ and not testconfig.getboolean('TestM%d'%test_machine,
+ 'prox_socket')):
section = 'TestM%d'%test_machine
options = testconfig.options(section)
for option in options:
if option in ['prox_socket','prox_launch_exit','monitor']:
machine[option] = testconfig.getboolean(section, option)
elif option in ['cores', 'gencores','latcores']:
- machine[option] = ast.literal_eval(testconfig.get(section, option))
+ machine[option] = ast.literal_eval(testconfig.get(
+ section, option))
+ elif option in ['bucket_size_exp']:
+ machine[option] = int(testconfig.get(section, option))
else:
machine[option] = testconfig.get(section, option)
for key in ['prox_socket','prox_launch_exit']:
@@ -99,7 +109,8 @@ class RapidConfigParser(object):
machine[key] = True
if 'monitor' not in machine.keys():
machine['monitor'] = True
- index = int(machine_map.get('TestM%d'%test_machine, 'machine_index'))
+ index = int(machine_map.get('TestM%d'%test_machine,
+ 'machine_index'))
section = 'M%d'%index
options = config.options(section)
for option in options:
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
index 6991e879..90bf5b28 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
@@ -27,15 +27,12 @@ class PortStatsTest(RapidTest):
"""
Class to manage the portstatstesting
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file,
+ def __init__(self, test_param, runtime, testname, environment_file,
machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ super().__init__(test_param, runtime, testname, environment_file)
self.machines = machines
def run(self):
- # fieldnames = ['PROXID','Time','Received','Sent','NoMbufs','iErrMiss']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
RapidLog.info("+---------------------------------------------------------------------------+")
RapidLog.info("| Measuring port statistics on 1 or more PROX instances |")
RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
@@ -69,15 +66,15 @@ class PortStatsTest(RapidTest):
old_errors[i] = new_errors[i]
old_tsc[i] = new_tsc[i]
RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
- # writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NoMbufs':no_mbufs,'iErrMiss':errors})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test'] + '/instance/' + self.test['environment_file'] + str(i)
- DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNoMbufs {}\niErrMiss {}\n'.format(i,duration,rx,tx,no_mbufs,errors)
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ variables = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'PROXID': i,
+ 'StepSize': duration,
+ 'Received': rx,
+ 'Sent': tx,
+ 'NoMbufs': no_mbufs,
+ 'iErrMiss': errors}
+ self.post_data('rapid_corestatstest', variables)
if machines_to_go == 0:
duration = duration - 1
machines_to_go = len (self.machines)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
index 0b0b2049..3be07c21 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
@@ -17,25 +17,31 @@
## limitations under the License.
##
+import yaml
+import requests
import time
+import copy
from past.utils import old_div
from rapid_log import RapidLog
from rapid_log import bcolors
inf = float("inf")
+from datetime import datetime as dt
class RapidTest(object):
"""
Class to manage the testing
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file ):
+ def __init__(self, test_param, runtime, testname, environment_file ):
self.test = test_param
self.test['runtime'] = runtime
- self.test['pushgateway'] = pushgateway
+ self.test['testname'] = testname
self.test['environment_file'] = environment_file
if 'maxr' not in self.test.keys():
self.test['maxr'] = 1
if 'maxz' not in self.test.keys():
self.test['maxz'] = inf
+ with open('format.yaml') as f:
+ self.data_format = yaml.load(f, Loader=yaml.FullLoader)
@staticmethod
def get_percentageof10Gbps(pps_speed,size):
@@ -91,6 +97,48 @@ class RapidTest(object):
machine.stop()
@staticmethod
+ def parse_data_format_dict(data_format, variables):
+ for k, v in data_format.items():
+ if type(v) is dict:
+ RapidTest.parse_data_format_dict(v, variables)
+ else:
+ if v in variables.keys():
+ data_format[k] = variables[v]
+
+ def record_start_time(self):
+ self.start = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+
+ def record_stop_time(self):
+ self.stop = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+
+ def post_data(self, test, variables):
+ var = copy.deepcopy(self.data_format)
+ self.parse_data_format_dict(var, variables)
+ if 'URL' not in var.keys():
+ return
+ if test not in var.keys():
+ return
+ URL=''
+ for value in var['URL'].values():
+ URL = URL + value
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
+ if 'Format' in var.keys():
+ if var['Format'] == 'PushGateway':
+ data = "\n".join("{} {}".format(k, v) for k, v in var[test].items()) + "\n"
+ response = requests.post(url=URL, data=data,headers=HEADERS)
+ elif var['Format'] == 'Xtesting':
+ data = var[test]
+ response = requests.post(url=URL, json=data)
+ else:
+ return
+ else:
+ return
+ if (response.status_code != 202) and (response.status_code != 200):
+ RapidLog.info('Cannot send metrics to {}'.format(URL))
+ RapidLog.info(data)
+
+
+ @staticmethod
def report_result(flow_number, size, speed, pps_req_tx, pps_tx, pps_sut_tx,
pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, tx, rx, tot_drop,
elapsed_time,speed_prefix='', lat_avg_prefix='', lat_perc_prefix='',
@@ -114,7 +162,8 @@ class RapidTest(object):
if pps_rx is None:
pps_rx_str = '{0: >25}'.format('NA |')
else:
- pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
+ pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
+ RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
if tot_drop is None:
tot_drop_str = ' | NA | '
else:
@@ -122,14 +171,25 @@ class RapidTest(object):
if lat_perc is None:
lat_perc_str = ' |{:^10.10}|'.format('NA')
elif lat_perc_max == True:
- lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,float(lat_perc), bcolors.ENDC)
+ lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,
+ float(lat_perc), bcolors.ENDC)
else:
- lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,float(lat_perc), bcolors.ENDC)
+ lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,
+ float(lat_perc), bcolors.ENDC)
if elapsed_time is None:
elapsed_time_str = ' NA |'
else:
elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
- return(flow_number_str + '{:>5.1f}'.format(speed) + '% '+speed_prefix +'{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|'+ pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str + pps_rx_str +lat_avg_prefix+ ' {:>6.0f}'.format(lat_avg)+' us'+lat_perc_str+lat_max_prefix+'{:>6.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) + ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) + tot_drop_str +drop_rate_prefix+ '{:>5.2f}'.format(old_div(float(tx-rx),tx)) +bcolors.ENDC+' |' + elapsed_time_str)
+ return(flow_number_str + '{:>5.1f}'.format(speed) + '% ' + speed_prefix
+ + '{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|' +
+ pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
+ pps_rx_str + lat_avg_prefix + ' {:>6.0f}'.format(lat_avg) +
+ ' us' + lat_perc_str +lat_max_prefix+'{:>6.0f}'.format(lat_max)
+ + ' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) +
+ ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) +
+ tot_drop_str +drop_rate_prefix +
+ '{:>5.2f}'.format(old_div(float(tx-rx),tx)) + bcolors.ENDC +
+ ' |' + elapsed_time_str)
def run_iteration(self, requested_duration, flow_number, size, speed):
BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
@@ -252,6 +312,21 @@ class RapidTest(object):
lat_avg_sample, sample_percentile, percentile_max,
lat_max_sample, delta_dp_tx, delta_dp_rx,
tot_dp_drop, single_core_measurement_duration))
+ variables = {
+ 'Flows': flow_number,
+ 'Size': size,
+ 'RequestedSpeed': self.get_pps(speed,size),
+ 'CoreGenerated': pps_req_tx,
+ 'SentByNIC': pps_tx,
+ 'FwdBySUT': pps_sut_tx,
+ 'RevByCore': pps_rx,
+ 'AvgLatency': lat_avg_sample,
+ 'PCTLatency': sample_percentile,
+ 'MaxLatency': lat_max_sample,
+ 'PacketsSent': delta_dp_tx,
+ 'PacketsReceived': delta_dp_rx,
+ 'PacketsLost': tot_dp_drop}
+ self.post_data('rapid_flowsizetest', variables)
#Stop generating
self.gen_machine.stop_gen_cores()
r += 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
index db4e969b..d3885bf7 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
@@ -86,27 +86,29 @@ class RapidTestManager(object):
for test_param in test_params['tests']:
RapidLog.info(test_param['test'])
if test_param['test'] in ['flowsizetest', 'TST009test',
- 'fixed_rate']:
+ 'fixed_rate', 'increment_till_fail']:
test = FlowSizeTest(test_param, test_params['lat_percentile'],
- test_params['runtime'], test_params['pushgateway'],
+ test_params['runtime'],
+ test_params['TestName'],
test_params['environment_file'], gen_machine,
sut_machine, background_machines)
elif test_param['test'] in ['corestats']:
test = CoreStatsTest(test_param, test_params['runtime'],
- test_params['pushgateway'],
+ test_params['TestName'],
test_params['environment_file'], machines)
elif test_param['test'] in ['portstats']:
test = PortStatsTest(test_param, test_params['runtime'],
- test_params['pushgateway'],
+ test_params['TestName'],
test_params['environment_file'], machines)
elif test_param['test'] in ['impairtest']:
test = ImpairTest(test_param, test_params['lat_percentile'],
- test_params['runtime'], test_params['pushgateway'],
+ test_params['runtime'],
+ test_params['TestName'],
test_params['environment_file'], gen_machine,
sut_machine)
elif test_param['test'] in ['irqtest']:
test = IrqTest(test_param, test_params['runtime'],
- test_params['pushgateway'],
+ test_params['TestName'],
test_params['environment_file'], machines)
elif test_param['test'] in ['warmuptest']:
test = WarmupTest(test_param, gen_machine)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
index 525cff1a..2e9c6cc2 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
@@ -75,7 +75,7 @@ class StackDeployment(object):
for name in server_group_output:
self.names.append(name)
- def print_paramDict(self, user, push_gateway):
+ def print_paramDict(self, user):
if not(len(self.dp_ips) == len(self.dp_macs) == len(self.mngmt_ips)):
sys.exit()
_ENV_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -106,8 +106,6 @@ class StackDeployment(object):
env_file.write('[Varia]\n')
env_file.write('vim = OpenStack\n')
env_file.write('stack = {}\n'.format(self.stack.stack_name))
- if push_gateway:
- env_file.write('pushgateway = {}\n'.format(push_gateway))
def create_stack(self, stack_name, stack_file_path, param_file):
files, template = template_utils.process_template_path(stack_file_path)
@@ -158,6 +156,6 @@ class StackDeployment(object):
self.create_key()
self.stack = self.create_stack(stack_name, heat_template, heat_param)
- def generate_env_file(self, user = 'centos', push_gateway = None):
+ def generate_env_file(self, user = 'centos'):
self.generate_paramDict()
- self.print_paramDict(user, push_gateway)
+ self.print_paramDict(user)