-rw-r--r--  behave_tests/features/non-regression.feature |  3
-rw-r--r--  behave_tests/features/steps/steps.py          | 30
2 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/behave_tests/features/non-regression.feature b/behave_tests/features/non-regression.feature
index 89c3c4d..62daafa 100644
--- a/behave_tests/features/non-regression.feature
+++ b/behave_tests/features/non-regression.feature
@@ -31,8 +31,7 @@ Feature: non-regression
     When NFVbench API is ready
     Then run is started and waiting for result
     And push result to database
-    And verify latency result is in same range as the previous result
-    And verify latency result is in same range as the characterization result
+    And verify latency result is lower than 1000 microseconds
 
     Examples: Frame sizes and throughput percentages
       | frame_size | throughput |
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 8798280..965b0c8 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -226,6 +226,36 @@ def get_latency_result_from_database(context, threshold='90%'):
     if last_result:
         compare_latency_values(context, last_result, threshold)
 
+
+@then('verify latency result is lower than {max_avg_latency_usec:g} microseconds')
+def check_latency_result_against_fixed_threshold(context, max_avg_latency_usec: float):
+    """Check latency result against a fixed threshold.
+
+    Check that the average latency measured during the current scenario run is
+    lower or equal to the provided fixed reference value.
+
+    Args:
+        context: The context data of the current scenario run. It includes the
+            test results for that run.
+
+        max_avg_latency_usec: Reference value to be used as a threshold. This
+            is a maximum average latency expressed in microseconds.
+
+    Raises:
+        AssertionError: The latency result is strictly greater than the reference value.
+
+    """
+    # Get the just measured average latency (a float):
+    new_avg_latency_usec = context.synthesis['avg_delay_usec']
+
+    # Compare measured value to reference:
+    if new_avg_latency_usec > max_avg_latency_usec:
+        raise AssertionError("Average latency higher than max threshold: "
+                             "{avg_latency} usec > {threshold} usec".format(
+                                 avg_latency=round(new_avg_latency_usec),
+                                 threshold=round(max_avg_latency_usec)))
+
+
 @then(
     'verify result is in [{min_reference_value}pps, {max_reference_value}pps] range for throughput')
 def compare_throughput_pps_result_with_range_values(context, min_reference_value,
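
Note: the '{max_avg_latency_usec:g}' placeholder relies on behave's default parse-based
step matcher, where the ':g' type converts the matched number before it reaches the step
implementation. Below is a minimal, standalone sketch of the new threshold check, assuming
only the field the step actually reads (context.synthesis['avg_delay_usec']); the stub
context, the helper name check_fixed_threshold and the sample latency values are
illustrative and not part of the change.

    from types import SimpleNamespace

    def check_fixed_threshold(context, max_avg_latency_usec: float) -> None:
        """Illustrative mirror of check_latency_result_against_fixed_threshold."""
        # Measured average latency of the current run, in microseconds:
        new_avg_latency_usec = context.synthesis['avg_delay_usec']
        # Fail only if the measured value strictly exceeds the threshold:
        if new_avg_latency_usec > max_avg_latency_usec:
            raise AssertionError("Average latency higher than max threshold: "
                                 "{avg_latency} usec > {threshold} usec".format(
                                     avg_latency=round(new_avg_latency_usec),
                                     threshold=round(max_avg_latency_usec)))

    # Stub context carrying a hypothetical measured latency of 123.4 usec:
    context = SimpleNamespace(synthesis={'avg_delay_usec': 123.4})
    check_fixed_threshold(context, 1000)   # passes silently (123.4 <= 1000)
    # check_fixed_threshold(context, 100)  # would raise AssertionError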