Diffstat (limited to 'behave_tests/features/steps')
-rw-r--r-- | behave_tests/features/steps/steps.py   |  90
-rw-r--r-- | behave_tests/features/steps/testapi.py | 186
2 files changed, 276 insertions, 0 deletions
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index 13a85e1..e7d9b2c 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -17,6 +17,7 @@
 from behave import given
 from behave import when
 from behave import then
+from copy import deepcopy
 from requests import RequestException
 from retry import retry
 import json
@@ -28,6 +29,9 @@ from typing import Optional
 from nfvbench.summarizer import Formatter
 from nfvbench.traffic_gen.traffic_utils import parse_rate_str
 
+from testapi import TestapiClient, nfvbench_input_to_str
+
+
 STATUS_ERROR = "ERROR"
 
 STATUS_OK = "OK"
@@ -130,6 +134,76 @@ def add_percentage_rate(context, percentage_rate):
     context.logger.info(f"add_percentage_rate: {percentage_rate} => rate={rate}")
 
 
+@given('packet rate equal to {percentage} of max throughput of last characterization')
+def add_packet_rate(context, percentage: str):
+    """Update nfvbench run config with a packet rate based on a reference value.
+
+    For the already configured frame size and flow count, retrieve the max
+    throughput obtained during the latest successful characterization run, then
+    retain `percentage` of this value for the packet rate and update `context`.
+
+    Args:
+        context: The context data of the current scenario run.  It includes the
+            testapi endpoints to retrieve the reference values.
+
+        percentage: String representation of the percentage of the reference max
+            throughput.  Example: "70%"
+
+    Updates context:
+        context.percentage_rate: percentage of the reference max throughput,
+            as a string.  Example: "70%"
+
+        context.json['rate']: packet rate in packets per second, as a string.
+            Example: "2000pps"
+
+    Raises:
+        ValueError: invalid percentage string.
+
+        AssertionError: no reference throughput value found.
+
+    """
+    # Validate the percentage string
+    if not percentage.endswith('%'):
+        raise ValueError('Invalid percentage string: "{0}"'.format(percentage))
+    percentage_float = convert_percentage_str_to_float(percentage)
+
+    # Retrieve the nfvbench results report from testapi for:
+    # - the latest throughput scenario inside a characterization feature that passed
+    # - the test duration, frame size and flow count given in context.json
+    # - (optionally) the user_label and flavor_type given in context.json
+    # - the 'ndr' rate
+    testapi_params = {"project_name": context.data['PROJECT_NAME'],
+                      "case_name": "characterization"}
+    nfvbench_test_conditions = deepcopy(context.json)
+    nfvbench_test_conditions['rate'] = 'ndr'
+    testapi_client = TestapiClient(testapi_url=context.data['TEST_DB_URL'],
+                                   logger=context.logger)
+    last_result = testapi_client.find_last_result(testapi_params,
+                                                  scenario_tag="throughput",
+                                                  nfvbench_test_input=nfvbench_test_conditions)
+    if last_result is None:
+        error_msg = "No characterization result found for scenario_tag=throughput"
+        error_msg += " and nfvbench test conditions "
+        error_msg += nfvbench_input_to_str(nfvbench_test_conditions)
+        raise AssertionError(error_msg)
+
+    # From the results report, extract the max throughput in packets per second
+    total_tx_rate = extract_value(last_result["output"], "total_tx_rate")
+    context.logger.info("add_packet_rate: max throughput of last characterization (pps): "
+                        f"{total_tx_rate:,}")
+
+    # Compute the desired packet rate
+    rate = round(total_tx_rate * percentage_float)
+    context.logger.info(f"add_packet_rate: percentage={percentage} rate(pps)={rate:,}")
+
+    # Build the rate string using a representation understood by nfvbench
+    rate_str = str(rate) + "pps"
+
+    # Update the context
+    context.percentage_rate = percentage
+    context.json['rate'] = rate_str
+
+
 """When steps."""
@@ -470,6 +544,22 @@ def latency_comparison(context, old_latency=None, threshold=None, reference_valu
 
 
 def get_result_from_input_values(input, result):
+    """Check test conditions in scenario results input.
+
+    Check whether the input parameters of a behave scenario results record from
+    testapi match the input parameters of the latest test.  In other words,
+    check that the test results from testapi come from a test done under the
+    same conditions (frame size, flow count, rate, ...).
+
+    Args:
+        input: input dict of a results dict of a behave scenario from testapi.
+
+        result: dict of nfvbench params used during the last test.
+
+    Returns:
+        True if test conditions match, else False.
+
+    """
     # Select required keys (other keys can be not set or inconsistent between scenarios)
     required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
     if 'user_label' in result:
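For reference, the rate computation added above boils down to the following sketch. The throughput value is made up, and convert_percentage_str_to_float (a helper defined elsewhere in steps.py) is assumed to map "70%" to 0.7:

    # Hypothetical standalone illustration of the add_packet_rate arithmetic.
    total_tx_rate = 2_866_458   # assumed max throughput (pps) of the last characterization
    percentage_float = 0.7      # assumed result of convert_percentage_str_to_float("70%")

    rate = round(total_tx_rate * percentage_float)  # 2006521
    rate_str = str(rate) + "pps"                    # "2006521pps", the representation nfvbench understands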
diff --git a/behave_tests/features/steps/testapi.py b/behave_tests/features/steps/testapi.py
new file mode 100644
index 0000000..67e5104
--- /dev/null
+++ b/behave_tests/features/steps/testapi.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import json
+
+import requests
+
+
+class TestapiClient:
+    def __init__(self, testapi_url: str, logger):
+        """
+        Args:
+            testapi_url: testapi URL as a string, for instance
+                "http://172.20.73.203:8000/api/v1/results".
+
+            logger: reference to the behave_tests logger.
+
+        """
+        self._base_url = testapi_url
+        self._logger = logger
+
+    def find_last_result(self, testapi_params, scenario_tag: str, nfvbench_test_input):
+        """Search the testapi database and return the latest result matching the filters.
+
+        Look for the most recent testapi result matching the testapi params, behave
+        scenario tag and nfvbench test input params, and return that result as a
+        dictionary.
+
+        Args:
+            testapi_params: dict holding the parameters of the testapi request.  See
+                `_build_testapi_url()` for the list of supported keys.
+
+            scenario_tag: behave scenario tag to filter results.  One of
+                "throughput" or "latency".
+
+            nfvbench_test_input: dict holding nfvbench test parameters, used
+                to filter the testapi results.  The following keys are currently
+                supported:
+                - mandatory keys: 'duration_sec', 'frame_sizes', 'flow_count', 'rate'
+                - optional keys: 'user_label', 'flavor_type'
+
+        Returns:
+            None if no result matching the filters can be found, else a dictionary
+            built from the testapi JSON test result.
+
+        """
+        self._logger.info(f"find_last_result: filter on scenario tag: {scenario_tag}")
+        nfvbench_input_str = nfvbench_input_to_str(nfvbench_test_input)
+        self._logger.info(f"find_last_result: filter on test conditions: {nfvbench_input_str}")
+
+        page = 1
+        while True:  # while there are results pages to read
+            url = self._build_testapi_url(testapi_params, page)
+            self._logger.info("find_last_result: GET " + url)
+            last_results = self._do_testapi_request(url)
+
+            for result in last_results["results"]:
+                for tagged_result in result["details"]["results"][scenario_tag]:
+                    if tagged_result["output"]["status"] != "OK":
+                        # Drop the result if the nfvbench status is not OK
+                        # (such a result should not have been put in the database by
+                        # behave_tests, but let's be cautious)
+                        continue
+                    if equal_test_conditions(tagged_result["input"], nfvbench_test_input):
+                        return tagged_result
+
+            if page >= last_results["pagination"]["total_pages"]:
+                break
+            page += 1
+
+        return None
+
+    def _build_testapi_url(self, testapi_params, page=1):
+        """Build the URL for a testapi request.
+
+        Build the URL for a testapi HTTP GET request using the provided parameters,
+        limiting the results to the tests whose criteria equals "PASS".
+
+        Args:
+            testapi_params: dictionary holding the parameters of the testapi
+                request:
+                - mandatory keys: "project_name", "case_name"
+                - optional keys: "installer", "pod_name"
+                - ignored keys: "build_tag", "scenario", "version", "criteria"
+
+            page: (optional) number of the results page to get.
+
+        Returns:
+            The testapi URL as a string.
+
+        """
+        url = self._base_url
+        url += f"?project={testapi_params['project_name']}"
+        url += f"&case={testapi_params['case_name']}"
+
+        if "installer" in testapi_params.keys():
+            url += f"&installer={testapi_params['installer']}"
+        if "pod_name" in testapi_params.keys():
+            url += f"&pod={testapi_params['pod_name']}"
+
+        url += '&criteria=PASS'
+        url += f"&page={page}"
+
+        return url
+
+    def _do_testapi_request(self, testapi_url):
+        """Perform an HTTP GET request on testapi.
+
+        Perform an HTTP GET request on testapi, check the status code and return
+        the JSON results as a dictionary.
+
+        Args:
+            testapi_url: a complete URL to request testapi results (with base
+                endpoint and parameters).
+
+        Returns:
+            The JSON document from testapi as a Python dictionary.
+
+        Raises:
+            AssertionError: if testapi does not answer with HTTP status code 200.
+
+        """
+        response = requests.get(testapi_url)
+        assert response.status_code == 200  # TODO: better error message
+        results = json.loads(response.text)
+        return results
+
+
+def equal_test_conditions(testapi_input, nfvbench_input):
+    """Check test conditions in a behave scenario results record.
+
+    Check whether a behave scenario results record from testapi matches a given
+    nfvbench input, i.e. whether the record comes from a test done under the same
+    conditions (frame size, flow count, ...).
+
+    Args:
+        testapi_input: dict holding the test conditions of a behave scenario
+            results record from testapi.
+
+        nfvbench_input: dict of nfvbench test parameters (reference).
+
+    The following dict keys are currently supported:
+    - mandatory keys: 'duration_sec', 'frame_sizes', 'flow_count', 'rate'
+    - optional keys: 'user_label', 'flavor_type'
+
+    Optional keys are taken into account only when they can be found in
+    `nfvbench_input`, else they are ignored.
+
+    Returns:
+        True if the test conditions match, else False.
+
+    """
+    # Select required keys (other keys can be not set or inconsistent between scenarios)
+    required_keys = ['duration_sec', 'frame_sizes', 'flow_count', 'rate']
+    if 'user_label' in nfvbench_input:
+        required_keys.append('user_label')
+    if 'flavor_type' in nfvbench_input:
+        required_keys.append('flavor_type')
+
+    try:
+        testapi_subset = {k: testapi_input[k] for k in required_keys}
+        nfvbench_subset = {k: nfvbench_input[k] for k in required_keys}
+        return testapi_subset == nfvbench_subset
+    except KeyError:
+        # Fail the comparison if a required key is missing from one of the dicts
+        return False
+
+
+def nfvbench_input_to_str(nfvbench_input: dict) -> str:
+    """Build a string showing the nfvbench input parameters used for the results search.
+
+    Args:
+        nfvbench_input: dict of nfvbench test parameters.
+
+    Returns:
+        A string of space-separated "key=value" pairs.
+
+    """
+    string = ""
+    for key in ['user_label', 'flavor_type', 'frame_sizes', 'flow_count', 'rate', 'duration_sec']:
+        if key in nfvbench_input:
+            string += f"{key}={nfvbench_input[key]} "
+    return string
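Finally, a minimal sketch of how the new client could be driven outside behave. The testapi URL repeats the example from the __init__ docstring; the project name, test conditions and the use of the standard logging module in place of the behave_tests logger are hypothetical placeholders:

    import logging

    from testapi import TestapiClient, nfvbench_input_to_str

    client = TestapiClient(testapi_url="http://172.20.73.203:8000/api/v1/results",
                           logger=logging.getLogger("behave_tests"))

    # Per _build_testapi_url(), the first GET would hit:
    # http://172.20.73.203:8000/api/v1/results?project=nfvbench&case=characterization&criteria=PASS&page=1
    testapi_params = {"project_name": "nfvbench", "case_name": "characterization"}
    test_conditions = {'duration_sec': 10, 'frame_sizes': '768', 'flow_count': '100k',
                       'rate': 'ndr'}

    last_result = client.find_last_result(testapi_params, scenario_tag="throughput",
                                          nfvbench_test_input=test_conditions)
    if last_result is None:
        print("no matching result: " + nfvbench_input_to_str(test_conditions))
    else:
        # steps.py then extracts the max throughput from last_result["output"]
        print("found: " + nfvbench_input_to_str(last_result["input"]))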