author     Gwenael Lambrouin <gwenael.lambrouin@orange.com>  2021-06-28 18:10:41 +0200
committer  Gwenael Lambrouin <gwenael.lambrouin@orange.com>  2021-07-22 16:37:55 +0200
commit     93d8fc2cd18822136a4d848c38d934203e53c034 (patch)
tree       a589c971e6feec909cee7539bdf06b877916743f /behave_tests
parent     6b8818d15c7f88706ba638df0e5320bc68572e19 (diff)
Compare the latency result with a fixed threshold of 1ms
Change-Id: I2b4ea4ee6e6442d4ceac268e7bf3c6bf9277ff54
Signed-off-by: Gwenael Lambrouin <gwenael.lambrouin@orange.com>
Diffstat (limited to 'behave_tests')
-rw-r--r--  behave_tests/features/non-regression.feature |  3
-rw-r--r--  behave_tests/features/steps/steps.py         | 30
2 files changed, 31 insertions, 2 deletions
diff --git a/behave_tests/features/non-regression.feature b/behave_tests/features/non-regression.feature
index 89c3c4d..62daafa 100644
--- a/behave_tests/features/non-regression.feature
+++ b/behave_tests/features/non-regression.feature
@@ -31,8 +31,7 @@ Feature: non-regression
     When NFVbench API is ready
     Then run is started and waiting for result
     And push result to database
-    And verify latency result is in same range as the previous result
-    And verify latency result is in same range as the characterization result
+    And verify latency result is lower than 1000 microseconds
 
   Examples: Frame sizes and throughput percentages
     | frame_size | throughput |
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 8798280..965b0c8 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -226,6 +226,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower than or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run.  It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold.  This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
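
Note on how the new step receives its threshold: behave's default step matcher is built on the parse library, so the {max_avg_latency_usec:g} placeholder in the @then pattern is extracted from the Gherkin step text and passed to the step function as a number. The sketch below is illustrative only and is not part of this commit; the import path and the SimpleNamespace stand-in for behave's context object are assumptions made for the example, since the real context is supplied by behave at run time.

    # Illustrative sketch only -- not part of this commit.
    from types import SimpleNamespace

    import parse  # behave's default "parse" step matcher is built on this library

    # Assumed import path for the step function added by this patch:
    from behave_tests.features.steps.steps import check_latency_result_against_fixed_threshold

    pattern = 'verify latency result is lower than {max_avg_latency_usec:g} microseconds'
    step_text = 'verify latency result is lower than 1000 microseconds'

    # Extract the threshold roughly the way behave would before calling the step function:
    threshold = float(parse.parse(pattern, step_text)['max_avg_latency_usec'])  # 1000.0

    # Minimal stand-in for behave's context object: only 'synthesis' is read by the step.
    fake_context = SimpleNamespace(synthesis={'avg_delay_usec': 42.7})

    # Returns silently because 42.7 usec <= 1000 usec; raises AssertionError otherwise.
    check_latency_result_against_fixed_threshold(fake_context, threshold)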