-rw-r--r--  behave_tests/features/quick-test-10kpps.feature    13
-rw-r--r--  behave_tests/features/steps/steps.py                 2
-rw-r--r--  docker/Dockerfile                                    2
-rw-r--r--  requirements.txt                                     2
-rw-r--r--  xtesting/behaveframework.py                        123
-rw-r--r--  xtesting/testcases.yaml                             23
6 files changed, 37 insertions, 128 deletions
diff --git a/behave_tests/features/quick-test-10kpps.feature b/behave_tests/features/quick-test-10kpps.feature
new file mode 100644
index 0000000..d46000c
--- /dev/null
+++ b/behave_tests/features/quick-test-10kpps.feature
@@ -0,0 +1,13 @@
+@quick-test-10kpps
+Feature: quick-test-10kpps
+
+  @throughput
+  Scenario: Run a 10s test at 10kpps with 64-byte frames and 128 flows
+    Given 10 sec run duration
+    And TRex is restarted
+    And 64 frame size
+    And 128 flow count
+    And 10kpps rate
+    When NFVbench API is ready
+    Then 1 runs are started and waiting for maximum result
+    And push result to database
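
Each line of the scenario above is matched against a step definition in behave_tests/features/steps/steps.py (the next hunk touches that file). Purely as an illustrative sketch of how behave binds such a phrase to Python, using hypothetical names rather than the actual nfvbench step code:

# Illustrative only: hypothetical names, not the nfvbench step implementation.
from behave import given

@given('{duration} sec run duration')
def step_set_duration(context, duration):
    # behave fills the {duration} placeholder from the matched text and
    # shares state between steps through the 'context' object.
    context.duration = int(duration)
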
diff --git a/behave_tests/features/steps/steps.py b/behave_tests/features/steps/steps.py
index b20a9cc..f4dda58 100644
--- a/behave_tests/features/steps/steps.py
+++ b/behave_tests/features/steps/steps.py
@@ -284,7 +284,7 @@ def push_result_database(context):
"""Utils methods."""
-@retry(AssertionError, tries=10, delay=5.0, logger=None)
+@retry(AssertionError, tries=24, delay=5.0, logger=None)
def test_nfvbench_api(context):
    try:
        r = requests.get("http://{ip}:{port}/status".format(ip=context.host_ip, port=context.port))
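
The change above raises the retry budget for the readiness check: with tries=24 and delay=5.0, the step polls the NFVbench status endpoint for roughly two minutes (24 attempts, 5 s apart) instead of about fifty seconds before giving up. A minimal sketch of that polling pattern, with placeholder host and port values rather than the ones taken from the behave context:

# Minimal sketch of the polling pattern; host/port are placeholders,
# not the values resolved from the behave context in steps.py.
import requests
from retry import retry

@retry(AssertionError, tries=24, delay=5.0, logger=None)  # up to ~2 min of polling
def wait_for_nfvbench_api(host_ip='127.0.0.1', port=7555):
    r = requests.get("http://{ip}:{port}/status".format(ip=host_ip, port=port))
    assert r.status_code == 200  # AssertionError triggers another attempt
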
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 4336261..e851c76 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -40,8 +40,6 @@ RUN apt-get update && apt-get install -y \
    && wget -O nfvbenchvm-$VM_IMAGE_VER.qcow2 http://artifacts.opnfv.org/nfvbench/images/nfvbenchvm_centos-$VM_IMAGE_VER.qcow2 \
    # Override Xtesting testcases.yaml file by NFVbench default one
    && cp xtesting/testcases.yaml /usr/local/lib/python3.8/dist-packages/xtesting/ci/testcases.yaml \
-    # Temporary override waiting for PR approval : https://gerrit.opnfv.org/gerrit/c/functest-xtesting/+/72431
-    && cp xtesting/behaveframework.py /usr/local/lib/python3.8/dist-packages/xtesting/core/behaveframework.py \
    && python3 ./docker/cleanup_generators.py \
    && rm -rf /opt/nfvbench/.git \
    # Symlink for retrocompatibility 4.x
diff --git a/requirements.txt b/requirements.txt
index 7c74119..a333380 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,7 +22,7 @@ tabulate>=0.7.5
flask>=0.12
fluent-logger>=0.5.3
netaddr>=0.7.19
-xtesting>=0.87.0
+xtesting>=0.92.0
hdrhistogram>=0.8.0
behave>=1.2.6
retry>=0.9.2
\ No newline at end of file
diff --git a/xtesting/behaveframework.py b/xtesting/behaveframework.py
deleted file mode 100644
index 651240d..0000000
--- a/xtesting/behaveframework.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2021 Orange
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""Define classes required to run any Behave test suites."""
-
-from __future__ import division
-
-import logging
-import os
-import time
-
-import json
-import six
-
-from behave.__main__ import main as behave_main
-
-from xtesting.core import testcase
-
-__author__ = "Deepak Chandella <deepak.chandella@orange.com>"
-
-
-class BehaveFramework(testcase.TestCase):
-    """BehaveFramework runner."""
-    # pylint: disable=too-many-instance-attributes
-
-    __logger = logging.getLogger(__name__)
-    dir_results = "/var/lib/xtesting/results"
-
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.json_file = os.path.join(self.res_dir, 'output.json')
-        self.total_tests = 0
-        self.pass_tests = 0
-        self.fail_tests = 0
-        self.skip_tests = 0
-        self.response = None
-
-    def parse_results(self):
-        """Parse output.json and get the details in it."""
-        with open(self.json_file) as stream_:
-            self.response = json.load(stream_)
-            if self.response:
-                self.total_tests = len(self.response)
-            for item in self.response:
-                if item['status'] == 'passed':
-                    self.pass_tests += 1
-                elif item['status'] == 'failed':
-                    self.fail_tests += 1
-                elif item['status'] == 'skipped':
-                    self.skip_tests += 1
-        self.result = 100 * (
-            self.pass_tests / self.total_tests)
-        self.details = {}
-        self.details['total_tests'] = self.total_tests
-        self.details['pass_tests'] = self.pass_tests
-        self.details['fail_tests'] = self.fail_tests
-        self.details['skip_tests'] = self.skip_tests
-        self.details['tests'] = self.response
-
-    def run(self, **kwargs):
-        """Run the BehaveFramework feature files
-
-        Here are the steps:
-           * create the output directories if required,
-           * run behave features with parameters
-           * get the results in output.json,
-
-        Args:
-            kwargs: Arbitrary keyword arguments.
-
-        Returns:
-            EX_OK if all suites ran well.
-            EX_RUN_ERROR otherwise.
-        """
-        try:
-            suites = kwargs["suites"]
-            tags = kwargs.get("tags", [])
-            console = kwargs["console"] if "console" in kwargs else False
-        except KeyError:
-            self.__logger.exception("Mandatory args were not passed")
-            return self.EX_RUN_ERROR
-        if not os.path.exists(self.res_dir):
-            try:
-                os.makedirs(self.res_dir)
-            except Exception:  # pylint: disable=broad-except
-                self.__logger.exception("Cannot create %s", self.res_dir)
-                return self.EX_RUN_ERROR
-        config = ['--tags=' + ','.join(tags),
-                  '--junit', '--junit-directory={}'.format(self.res_dir),
-                  '--format=json', '--outfile={}'.format(self.json_file)]
-        if six.PY3:
-            html_file = os.path.join(self.res_dir, 'output.html')
-            config += ['--format=behave_html_formatter:HTMLFormatter',
-                       '--outfile={}'.format(html_file)]
-        if console:
-            config += ['--format=pretty',
-                       '--outfile=-']
-        for feature in suites:
-            config.append(feature)
-        self.start_time = time.time()
-        behave_main(config)
-        self.stop_time = time.time()
-
-        try:
-            self.parse_results()
-            self.__logger.info("Results were successfully parsed")
-        except Exception:  # pylint: disable=broad-except
-            self.__logger.exception("Cannot parse results")
-            return self.EX_RUN_ERROR
-        return self.EX_OK
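
This bundled copy of behaveframework.py is removed because the behaviour it temporarily provided is expected from the xtesting release now pinned in requirements.txt (>=0.92.0), so the stock module shipped with xtesting is used instead. For orientation, a rough sketch of how an xtesting testcase entry exercises this runner; the keyword arguments mirror the args mapping in testcases.yaml and the method names come from the code above, but the actual wiring is done by xtesting itself:

# Rough sketch only; in practice xtesting instantiates and drives the runner.
from xtesting.core.behaveframework import BehaveFramework

case = BehaveFramework(case_name='quick-test-10kpps', project_name='nfvbench')
ret = case.run(
    suites=['/opt/nfvbench/behave_tests/features/quick-test-10kpps.feature'],
    tags=['quick-test-10kpps'],
    console=True)
assert ret == case.EX_OK  # results are parsed from output.json under res_dir
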
diff --git a/xtesting/testcases.yaml b/xtesting/testcases.yaml
index bb4a521..03579b7 100644
--- a/xtesting/testcases.yaml
+++ b/xtesting/testcases.yaml
@@ -60,8 +60,29 @@ tiers:
                        console:
                            - true
    -
-        name: nfvbench-demo
+        name: quick-test-10kpps
        order: 3
+        description: 'Quick nfvbench test at low packet rate'
+        testcases:
+            -
+                case_name: quick-test-10kpps
+                project_name: nfvbench
+                criteria: 100
+                blocking: true
+                clean_flag: false
+                description: ''
+                run:
+                    name: 'nfvbench_behaveframework'
+                    args:
+                        suites:
+                            - /opt/nfvbench/behave_tests/features/quick-test-10kpps.feature
+                        tags:
+                            - quick-test-10kpps
+                        console:
+                            - true
+    -
+        name: nfvbench-demo
+        order: 4
        description: 'Data Plane Performance Testing'
        testcases:
            -
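
The new tier entry above hands the feature file and tag to the behave runner through args. Leaving aside the JUnit/JSON/HTML report options, the behave invocation it produces is roughly the following, built the same way as the config list in the run() method shown earlier:

# Rough equivalent of what the 'quick-test-10kpps' tier entry runs
# (report options omitted).
from behave.__main__ import main as behave_main

behave_main([
    '--tags=quick-test-10kpps',
    '--format=pretty', '--outfile=-',  # console: true
    '/opt/nfvbench/behave_tests/features/quick-test-10kpps.feature',
])
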