Diffstat (limited to 'xtesting')
 -rw-r--r--  xtesting/ansible/host_vars/127.0.0.1 |   3
 -rw-r--r--  xtesting/ansible/site.yml            |   4
 -rw-r--r--  xtesting/behaveframework.py          | 123
 -rw-r--r--  xtesting/testcases.yaml              |  62
4 files changed, 190 insertions, 2 deletions
diff --git a/xtesting/ansible/host_vars/127.0.0.1 b/xtesting/ansible/host_vars/127.0.0.1
index 9e3f1b9..125032f 100644
--- a/xtesting/ansible/host_vars/127.0.0.1
+++ b/xtesting/ansible/host_vars/127.0.0.1
@@ -1,5 +1,8 @@
 docker_args:
   env: {}
+  params:
+    net: host
+    privileged: true
   volumes:
     - /lib/modules/$(uname -r):/lib/modules/$(uname -r)
     - /usr/src/kernels:/usr/src/kernels -v /dev:/dev
diff --git a/xtesting/ansible/site.yml b/xtesting/ansible/site.yml
index 4643a32..37fa6c3 100644
--- a/xtesting/ansible/site.yml
+++ b/xtesting/ansible/site.yml
@@ -16,5 +16,7 @@
         - container: nfvbench
           tests:
             - 10kpps-pvp-run
+            - characterization
+            - non-regression
           properties:
-            execution-type: SEQUENTIALLY
+            execution-type: SEQUENTIALLY
\ No newline at end of file
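The params added to docker_args above only declare options that are presumably consumed by the Ansible role driven by site.yml; they are not interpreted by anything in this diff itself. As a rough, hedged illustration (not part of this change), the Python docker SDK snippet below shows approximately what net: host and privileged: true amount to when a container is started; the image name and the single volume shown are placeholders, not values taken from this commit.

# Sketch only: approximate runtime effect of the new docker_args params.
# The real container, image and volume list are managed by the deployment
# role, not by this snippet.
import docker

client = docker.from_env()
client.containers.run(
    "opnfv/xtesting",        # placeholder image, not part of this change
    network_mode="host",     # params: net: host
    privileged=True,         # params: privileged: true
    volumes=["/dev:/dev"],   # one of the volumes declared in host_vars
    detach=True,
)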
diff --git a/xtesting/behaveframework.py b/xtesting/behaveframework.py
new file mode 100644
index 0000000..651240d
--- /dev/null
+++ b/xtesting/behaveframework.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# Copyright 2021 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Define classes required to run any Behave test suites."""
+
+from __future__ import division
+
+import logging
+import os
+import time
+
+import json
+import six
+
+from behave.__main__ import main as behave_main
+
+from xtesting.core import testcase
+
+__author__ = "Deepak Chandella <deepak.chandella@orange.com>"
+
+
+class BehaveFramework(testcase.TestCase):
+    """BehaveFramework runner."""
+    # pylint: disable=too-many-instance-attributes
+
+    __logger = logging.getLogger(__name__)
+    dir_results = "/var/lib/xtesting/results"
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.json_file = os.path.join(self.res_dir, 'output.json')
+        self.total_tests = 0
+        self.pass_tests = 0
+        self.fail_tests = 0
+        self.skip_tests = 0
+        self.response = None
+
+    def parse_results(self):
+        """Parse output.json and get the details in it."""
+        with open(self.json_file) as stream_:
+            self.response = json.load(stream_)
+            if self.response:
+                self.total_tests = len(self.response)
+            for item in self.response:
+                if item['status'] == 'passed':
+                    self.pass_tests += 1
+                elif item['status'] == 'failed':
+                    self.fail_tests += 1
+                elif item['status'] == 'skipped':
+                    self.skip_tests += 1
+        self.result = 100 * (
+            self.pass_tests / self.total_tests)
+        self.details = {}
+        self.details['total_tests'] = self.total_tests
+        self.details['pass_tests'] = self.pass_tests
+        self.details['fail_tests'] = self.fail_tests
+        self.details['skip_tests'] = self.skip_tests
+        self.details['tests'] = self.response
+
+    def run(self, **kwargs):
+        """Run the BehaveFramework feature files
+
+        Here are the steps:
+           * create the output directories if required,
+           * run behave features with parameters
+           * get the results in output.json,
+
+        Args:
+            kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            EX_OK if all suites ran well.
+            EX_RUN_ERROR otherwise.
+ """ + try: + suites = kwargs["suites"] + tags = kwargs.get("tags", []) + console = kwargs["console"] if "console" in kwargs else False + except KeyError: + self.__logger.exception("Mandatory args were not passed") + return self.EX_RUN_ERROR + if not os.path.exists(self.res_dir): + try: + os.makedirs(self.res_dir) + except Exception: # pylint: disable=broad-except + self.__logger.exception("Cannot create %s", self.res_dir) + return self.EX_RUN_ERROR + config = ['--tags=' + ','.join(tags), + '--junit', '--junit-directory={}'.format(self.res_dir), + '--format=json', '--outfile={}'.format(self.json_file)] + if six.PY3: + html_file = os.path.join(self.res_dir, 'output.html') + config += ['--format=behave_html_formatter:HTMLFormatter', + '--outfile={}'.format(html_file)] + if console: + config += ['--format=pretty', + '--outfile=-'] + for feature in suites: + config.append(feature) + self.start_time = time.time() + behave_main(config) + self.stop_time = time.time() + + try: + self.parse_results() + self.__logger.info("Results were successfully parsed") + except Exception: # pylint: disable=broad-except + self.__logger.exception("Cannot parse results") + return self.EX_RUN_ERROR + return self.EX_OK diff --git a/xtesting/testcases.yaml b/xtesting/testcases.yaml index cbb5c45..eea60b9 100644 --- a/xtesting/testcases.yaml +++ b/xtesting/testcases.yaml @@ -6,6 +6,65 @@ tiers: description: 'Data Plane Performance Testing' testcases: - + case_name: characterization + project_name: nfvbench + criteria: 100 + blocking: true + clean_flag: false + description: '' + run: + name: 'nfvbench_behaveframework' + args: + suites: + - /opt/nfvbench/behave_tests/features/characterization-full.feature + tags: + - characterization + console: + - true + - + case_name: non-regression + project_name: nfvbench + criteria: 100 + blocking: true + clean_flag: false + description: '' + run: + name: 'nfvbench_behaveframework' + args: + suites: + - /opt/nfvbench/behave_tests/features/non-regression.feature + tags: + - non-regression + console: + - true + + - + name: nfvbench-rapid-characterization + order: 2 + description: 'Data Plane Performance Testing' + testcases: + - + case_name: rapid-characterization + project_name: nfvbench + criteria: 100 + blocking: true + clean_flag: false + description: '' + run: + name: 'nfvbench_behaveframework' + args: + suites: + - /opt/nfvbench/behave_tests/features/characterization-samples.feature + tags: + - characterization + console: + - true + - + name: nfvbench-demo + order: 3 + description: 'Data Plane Performance Testing' + testcases: + - case_name: 10kpps-pvp-run project_name: nfvbench criteria: 100 @@ -17,4 +76,5 @@ tiers: args: cmd: - nfvbench -c /tmp/nfvbench/nfvbench.cfg --rate 10kpps - + console: + - true
\ No newline at end of file
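As a closing note, a minimal sketch of how the new BehaveFramework driver could be exercised directly, mirroring the characterization entry in testcases.yaml. In the normal flow Xtesting itself instantiates the class that run.name 'nfvbench_behaveframework' is registered to, so the direct import path, write access to /var/lib/xtesting/results, and installed behave/behave_html_formatter packages are assumptions of this sketch rather than anything this change guarantees.

# Hedged usage sketch of xtesting/behaveframework.py; the feature path and
# tag are copied from testcases.yaml, everything else is assumed.
from xtesting.behaveframework import BehaveFramework

test = BehaveFramework(case_name='characterization', project_name='nfvbench')
ret = test.run(
    suites=['/opt/nfvbench/behave_tests/features/characterization-full.feature'],
    tags=['characterization'],
    console=True)            # also emit behave's pretty formatter on stdout
if ret == test.EX_OK:
    # parse_results() stores the pass percentage in test.result
    print("pass rate: {}%".format(test.result))

The console=True argument corresponds to the console: - true lists added in testcases.yaml; in run() it simply appends behave's pretty formatter writing to stdout.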