author     Gwenael Lambrouin <gwenael.lambrouin@orange.com>   2021-07-16 11:12:38 +0200
committer  Gwenael Lambrouin <gwenael.lambrouin@orange.com>   2021-07-22 17:10:30 +0200
commit     bc825ac3ee6f58791d47f9bfef288624624f700c (patch)
tree       fabb14540291b57f6a9cf2e5517d2b728167ecb7
parent     c80ee07cd1aeb76420c42af1b87b365321ce6298 (diff)
behave_tests: change packet rate for latency non regression tests
Base the rate of the latency test on the latest characterization max throughput
test instead of the latest non-regression max throughput test. The goal is to
use the same packet rate for all latency tests and to avoid variations of the
latency result due to the variation of the max throughput results, i.e. to
decouple max throughput and latency test results.

This is achieved with a new "Given" behave phrase:

    Given packet rate equal to {percentage} of max throughput of last characterization

This new phrase is now used by default in non-regression.feature, but it is
still possible to use the previous behaviour with the phrase:

    Given <throughput> rate of previous scenario

Change-Id: I15b5d7a68cd57c67d01d2119781f65114e6d41ce
Signed-off-by: Gwenael Lambrouin <gwenael.lambrouin@orange.com>
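To illustrate the arithmetic the new step performs, here is a minimal standalone
sketch; the throughput value is hypothetical, not taken from this patch (the real
step delegates to the convert_percentage_str_to_float() and extract_value() helpers
visible in the diff below):

    # Hypothetical illustration of the new step's rate computation.
    # Assume the latest characterization run reported this max throughput (ndr):
    total_tx_rate = 2_857_143      # packets per second, example value only
    percentage = "70%"             # percentage string taken from the feature file

    # Convert "70%" to 0.7, then keep that fraction of the reference throughput
    percentage_float = float(percentage.rstrip('%')) / 100
    rate = round(total_tx_rate * percentage_float)

    # nfvbench expects a rate string such as "2000000pps"
    rate_str = f"{rate}pps"
    print(rate_str)                # -> 2000000pps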
-rw-r--r--   behave_tests/features/non-regression.feature     4
-rw-r--r--   behave_tests/features/steps/steps.py             90
-rw-r--r--   behave_tests/features/steps/testapi.py           186
3 files changed, 278 insertions, 2 deletions
diff --git a/behave_tests/features/non-regression.feature b/behave_tests/features/non-regression.feature
index 62daafa..1c21c47 100644
--- a/behave_tests/features/non-regression.feature
+++ b/behave_tests/features/non-regression.feature
@@ -27,14 +27,14 @@ Feature: non-regression
Given 10 sec run duration
And <frame_size> frame size
And 100k flow count
- And <throughput> rate of previous scenario
+ And packet rate equal to <percentage> of max throughput of last characterization
When NFVbench API is ready
Then run is started and waiting for result
And push result to database
And verify latency result is lower than 1000 microseconds
Examples: Frame sizes and throughput percentages
- | frame_size | throughput |
+ | frame_size | percentage |
| 64 | 70% |
| 64 | 90% |
| 768 | 70% |
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 13a85e1..e7d9b2c 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -17,6 +17,7 @@
from behave import given
from behave import when
from behave import then
+from copy import deepcopy
from requests import RequestException
from retry import retry
import json
@@ -28,6 +29,9 @@ from typing import Optional
from nfvbench.summarizer import Formatter
from nfvbench.traffic_gen.traffic_utils import parse_rate_str
+from testapi import TestapiClient, nfvbench_input_to_str
+
+
STATUS_ERROR = "ERROR"
STATUS_OK = "OK"
@@ -130,6 +134,76 @@ def add_percentage_rate(context, percentage_rate):
context.logger.info(f"add_percentage_rate: {percentage_rate} => rate={rate}")
+@given('packet rate equal to {percentage} of max throughput of last characterization')
+def add_packet_rate(context, percentage: str):
+ """Update nfvbench run config with packet rate based on reference value.
+
+ For the already configured frame size and flow count, retrieve the max
+ throughput obtained during the latest successful characterization run. Then
+ retain `percentage` of this value for the packet rate and update `context`.
+
+ Args:
+ context: The context data of the current scenario run. It includes the
+ testapi endpoints to retrieve the reference values.
+
+ percentage: String representation of the percentage of the reference max
+ throughput. Example: "70%"
+
+ Updates context:
+ context.percentage_rate: percentage of reference max throughput
+ using a string representation. Example: "70%"
+
+ context.json['rate']: packet rate in packets per second using a string
+ representation. Example: "2000pps"
+
+ Raises:
+ ValueError: invalid percentage string
+
+ AssertionError: cannot find reference throughput value
+
+ """
+ # Validate percentage
+ if not percentage.endswith('%'):
+ raise ValueError('Invalid percentage string: "{0}"'.format(percentage))
+ percentage_float = convert_percentage_str_to_float(percentage)
+
+ # Retrieve nfvbench results report from testapi for:
+ # - the latest throughput scenario inside a characterization feature that passed
+ # - the test duration, frame size and flow count given in context.json
+ # - (optionally) the user_label and flavor_type given in context.json
+ # - the 'ndr' rate
+ testapi_params = {"project_name": context.data['PROJECT_NAME'],
+ "case_name": "characterization"}
+ nfvbench_test_conditions = deepcopy(context.json)
+ nfvbench_test_conditions['rate'] = 'ndr'
+ testapi_client = TestapiClient(testapi_url=context.data['TEST_DB_URL'],
+ logger=context.logger)
+ last_result = testapi_client.find_last_result(testapi_params,
+ scenario_tag="throughput",
+ nfvbench_test_input=nfvbench_test_conditions)
+ if last_result is None:
+ error_msg = "No characterization result found for scenario_tag=throughput"
+ error_msg += " and nfvbench test conditions "
+ error_msg += nfvbench_input_to_str(nfvbench_test_conditions)
+ raise AssertionError(error_msg)
+
+ # From the results report, extract the max throughput in packets per second
+ total_tx_rate = extract_value(last_result["output"], "total_tx_rate")
+ context.logger.info("add_packet_rate: max throughput of last characterization (pps): "
+ f"{total_tx_rate:,}")
+
+ # Compute the desired packet rate
+ rate = round(total_tx_rate * percentage_float)
+ context.logger.info(f"add_packet_rate: percentage={percentage} rate(pps)={rate:,}")
+
+ # Build rate string using a representation understood by nfvbench
+ rate_str = str(rate) + "pps"
+
+ # Update context
+ context.percentage_rate = percentage
+ context.json['rate'] = rate_str
+
+
"""When steps."""
@@ -470,6 +544,22 @@ def latency_comparison(context, old_latency=None, threshold=None, reference_valu
def get_result_from_input_values(input, result):
+ """Check test conditions in scenario results input.
+
+ Check whether the input parameters of a behave scenario results record from
+ testapi match the input parameters of the latest test. In other words,
+ check that the test results from testapi come from a test done under the
+ same conditions (frame size, flow count, rate, ...)
+
+ Args:
+ input: input dict of a results dict of a behave scenario from testapi
+
+ result: dict of nfvbench params used during the last test
+
+ Returns:
+ True if test conditions match, else False.
+
+ """
# Select required keys (other keys can be not set or unconsistent between scenarios)
required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
if 'user_label' in result:
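The matching described by this docstring (and reused by equal_test_conditions() in
the new testapi.py module below) comes down to comparing the two dicts restricted
to the required keys. A minimal illustration with invented values, not taken from
a real testapi record:

    # Hypothetical illustration of the key-subset comparison; values are invented.
    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']

    testapi_input = {'duration_sec': 10, 'frame_sizes': '64', 'flow_count': 100000,
                     'rate': 'ndr', 'unrelated_key': 'ignored'}
    last_test_input = {'duration_sec': 10, 'frame_sizes': '64', 'flow_count': 100000,
                       'rate': 'ndr'}

    # Build the two sub-dicts limited to the required keys and compare them;
    # extra keys such as 'unrelated_key' play no role in the decision.
    match = ({k: testapi_input[k] for k in required_keys} ==
             {k: last_test_input[k] for k in required_keys})
    print(match)   # -> True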
diff --git a/behave_tests/features/steps/testapi.py b/behave_tests/features/steps/testapi.py
new file mode 100644
index 0000000..67e5104
--- /dev/null
+++ b/behave_tests/features/steps/testapi.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import json
+import requests
+
+
+class TestapiClient:
+    def __init__(self, testapi_url: str, logger):
+        """
+        Args:
+            testapi_url: testapi URL as a string, for instance
+                "http://172.20.73.203:8000/api/v1/results"
+
+            logger: reference to behave_tests logger.
+
+        """
+        self._base_url = testapi_url
+        self._logger = logger
+
+    def find_last_result(self, testapi_params, scenario_tag: str, nfvbench_test_input):
+        """Search testapi database and return latest result matching filters.
+
+        Look for the most recent testapi result matching testapi params, behave
+        scenario tag and nfvbench test input params, and return that result as a
+        dictionary.
+
+        Args:
+            testapi_params: dict holding the parameters of the testapi request. See
+                `build_testapi_url()` for the list of supported keys.
+
+            scenario_tag: Behave scenario tag to filter results. One of
+                "throughput" or "latency".
+
+            nfvbench_test_input: dict holding nfvbench test parameters and used
+                to filter the testapi results. The following keys are currently
+                supported:
+                - mandatory keys: 'duration_sec', 'frame_sizes', 'flow_count', 'rate'
+                - optional keys: 'user_label', 'flavor_type'
+
+        Returns:
+            None if no result matching the filters can be found, else a dictionary
+            built from testapi JSON test result.
+
+        """
+        self._logger.info(f"find_last_result: filter on scenario tag: {scenario_tag}")
+        nfvbench_input_str = nfvbench_input_to_str(nfvbench_test_input)
+        self._logger.info(f"find_last_result: filter on test conditions: {nfvbench_input_str}")
+
+        page = 1
+        while True:  # While there are results pages to read
+            url = self._build_testapi_url(testapi_params, page)
+            self._logger.info("find_last_result: GET " + url)
+            last_results = self._do_testapi_request(url)
+
+            for result in last_results["results"]:
+                for tagged_result in result["details"]["results"][scenario_tag]:
+                    if tagged_result["output"]["status"] != "OK":
+                        # Drop result if nfvbench status is not OK
+                        # (such result should not have been put in database by behave_tests,
+                        # but let's be cautious)
+                        continue
+                    if equal_test_conditions(tagged_result["input"], nfvbench_test_input):
+                        return tagged_result
+
+            if page >= last_results["pagination"]["total_pages"]:
+                break
+            page += 1
+
+        return None
+
+    def _build_testapi_url(self, testapi_params, page=1):
+        """Build URL for testapi request.
+
+        Build a URL for a testapi HTTP GET request using the provided parameters and
+        limiting the results to the tests whose criteria equals "PASS".
+
+        Args:
+            testapi_params: dictionary holding the parameters of the testapi
+                request:
+                - mandatory keys: "project_name", "case_name"
+                - optional keys: "installer", "pod_name"
+                - ignored keys: "build_tag", "scenario", "version", "criteria".
+
+            page: (Optional) number of the results page to get.
+
+        """
+        url = self._base_url
+        url += f"?project={testapi_params['project_name']}"
+        url += f"&case={testapi_params['case_name']}"
+
+        if "installer" in testapi_params.keys():
+            url += f"&installer={testapi_params['installer']}"
+        if "pod_name" in testapi_params.keys():
+            url += f"&pod={testapi_params['pod_name']}"
+
+        url += '&criteria=PASS'
+        url += f"&page={page}"
+
+        return url
+
+    def _do_testapi_request(self, testapi_url):
+        """Perform HTTP GET request on testapi.
+
+        Perform an HTTP GET request on testapi, check status code and return JSON
+        results as dictionary.
+
+        Args: testapi_url: a complete URL to request testapi results (with base
+            endpoint and parameters)
+
+        Returns:
+            The JSON document from testapi as a Python dictionary
+
+        Raises:
+
+        """
+        response = requests.get(testapi_url)
+        assert response.status_code == 200  # TODO: better error message
+        results = json.loads(response.text)
+        return results
+
+
+def equal_test_conditions(testapi_input, nfvbench_input):
+    """Check test conditions in behave scenario results record.
+
+    Check whether a behave scenario results record from testapi matches a given
+    nfvbench input, ie whether the record comes from a test done under the same
+    conditions (frame size, flow count, ...)
+
+    Args:
+        testapi_input: dict holding the test conditions of a behave scenario
+            results record from testapi
+
+        nfvbench_input: dict of nfvbench test parameters (reference)
+
+        The following dict keys are currently supported:
+        - mandatory keys: 'duration_sec', 'frame_sizes', 'flow_count', 'rate'
+        - optional keys: 'user_label', 'flavor_type'
+
+        Optional keys are taken into account only when they can be found in
+        `nfvbench_input`, else they are ignored.
+
+    Returns:
+        True if test conditions match, else False.
+
+    """
+    # Select required keys (other keys can be not set or unconsistent between scenarios)
+    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
+    if 'user_label' in nfvbench_input:
+        required_keys.append('user_label')
+    if 'flavor_type' in nfvbench_input:
+        required_keys.append('flavor_type')
+
+    try:
+        testapi_subset = {k: testapi_input[k] for k in required_keys}
+        nfvbench_subset = {k: nfvbench_input[k] for k in required_keys}
+        return testapi_subset == nfvbench_subset
+    except KeyError:
+        # Fail the comparison if a required key is missing from one of the dicts
+        return False
+
+
+def nfvbench_input_to_str(nfvbench_input: dict) -> str:
+    """Build string showing nfvbench input parameters used for results search
+
+    Args:
+        nfvbench_input: dict of nfvbench test parameters
+    """
+    string = ""
+    for key in ['user_label', 'flavor_type', 'frame_sizes', 'flow_count', 'rate', 'duration_sec']:
+        if key in nfvbench_input:
+            string += f"{key}={nfvbench_input[key]} "
+    return string
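For completeness, a hedged usage sketch of the new client, mirroring how steps.py
drives it in this patch; the URL, project name and test conditions are placeholders,
not values from a real deployment:

    # Hypothetical usage of TestapiClient; URL and parameter values are placeholders.
    import logging

    from testapi import TestapiClient

    logger = logging.getLogger("behave_tests")
    client = TestapiClient(testapi_url="http://testapi.example.com:8000/api/v1/results",
                           logger=logger)

    last_result = client.find_last_result(
        {"project_name": "nfvbench", "case_name": "characterization"},
        scenario_tag="throughput",
        nfvbench_test_input={"duration_sec": "10", "frame_sizes": "64",
                             "flow_count": "100k", "rate": "ndr"})

    if last_result is None:
        logger.warning("no matching characterization result found")
    else:
        # The reference max throughput (pps) can then be read from last_result["output"]
        logger.info("found a matching characterization result")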