Diffstat (limited to 'behave_tests/features/steps/steps.py')
-rw-r--r--  behave_tests/features/steps/steps.py | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 8798280..965b0c8 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -226,6 +226,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+ """Check latency result against a fixed threshold.
+
+ Check that the average latency measured during the current scenario run is
+ lower or equal to the provided fixed reference value.
+
+ Args:
+ context: The context data of the current scenario run. It includes the
+ test results for that run.
+
+ max_avg_latency_usec: Reference value to be used as a threshold. This
+ is a maximum average latency expressed in microseconds.
+
+ Raises:
+ AssertionError: The latency result is strictly greater than the reference value.
+
+ """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
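
Usage note (not part of the patch): behave matches the quoted step text with
parse-style patterns, so the {max_avg_latency_usec:g} field accepts the
threshold written as an integer, float, or exponent literal. The sketch below
shows how the new step behaves when driven directly; it assumes the step
function is importable from behave_tests.features.steps.steps, and the
SimpleNamespace stand-in for behave's context object is hypothetical:

    from types import SimpleNamespace

    from behave_tests.features.steps.steps import (
        check_latency_result_against_fixed_threshold)

    # Hypothetical stand-in for the behave context; only the 'synthesis'
    # attribute read by the step is modelled here.
    ctx = SimpleNamespace(synthesis={'avg_delay_usec': 123.4})

    # Feature-file equivalent (hypothetical scenario step):
    #   Then verify latency result is lower than 150 microseconds
    check_latency_result_against_fixed_threshold(ctx, 150.0)  # passes: 123.4 <= 150.0

    try:
        check_latency_result_against_fixed_threshold(ctx, 100.0)
    except AssertionError as exc:
        print(exc)  # Average latency higher than max threshold: 123 usec > 100 usec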