-rwxr-xr-x  docs/testing/user/userguide/01-introduction.rst                              2
-rwxr-xr-x  docs/testing/user/userguide/03-architecture.rst                              2
-rw-r--r--  docs/testing/user/userguide/11-vtc-overview.rst                            128
-rw-r--r--  docs/testing/user/userguide/15-list-of-tcs.rst                               3
-rw-r--r--  docs/testing/user/userguide/glossary.rst                                     3
-rw-r--r--  docs/testing/user/userguide/index.rst                                        1
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc074.rst                       72
-rw-r--r--  docs/testing/user/userguide/references.rst                                   1
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml                           18
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py                           92
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py                54
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py                   1
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py                16
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py                       15
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py                     3
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py                3
-rw-r--r--  yardstick/orchestrator/heat.py                                              10
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_duration.py                     276
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py          340
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py      43
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py        6
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py     5
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py 5
23 files changed, 841 insertions, 258 deletions
diff --git a/docs/testing/user/userguide/01-introduction.rst b/docs/testing/user/userguide/01-introduction.rst
index d846e759c..494b1ef3d 100755
--- a/docs/testing/user/userguide/01-introduction.rst
+++ b/docs/testing/user/userguide/01-introduction.rst
@@ -66,8 +66,6 @@ This document consists of the following chapters:
yardstick report CLI to view the test result in table format and also values
pinned on to a graph
-* Chapter :doc:`11-vtc-overview` provides information on the :term:`VTC`.
-
* Chapter :doc:`12-nsb-overview` describes the methodology implemented by the
Yardstick - Network service benchmarking to test real world usecase for a
given VNF.
diff --git a/docs/testing/user/userguide/03-architecture.rst b/docs/testing/user/userguide/03-architecture.rst
index 622002ee4..886631510 100755
--- a/docs/testing/user/userguide/03-architecture.rst
+++ b/docs/testing/user/userguide/03-architecture.rst
@@ -262,8 +262,6 @@ Yardstick Directory structure
*plugin/* - Plug-in configuration files are stored here.
-*vTC/* - Contains the files for running the virtual Traffic Classifier tests.
-
*yardstick/* - Contains the internals of Yardstick: Runners, Scenario, Contexts,
CLI parsing, keys, plotting tools, dispatcher, plugin
install/remove scripts and so on.
diff --git a/docs/testing/user/userguide/11-vtc-overview.rst b/docs/testing/user/userguide/11-vtc-overview.rst
deleted file mode 100644
index 47582358c..000000000
--- a/docs/testing/user/userguide/11-vtc-overview.rst
+++ /dev/null
@@ -1,128 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, National Center of Scientific Research "Demokritos" and others.
-
-==========================
-Virtual Traffic Classifier
-==========================
-
-Abstract
-========
-
-.. _TNOVA: http://www.t-nova.eu/
-.. _TNOVAresults: http://www.t-nova.eu/results/
-.. _Yardstick: https://wiki.opnfv.org/yardstick
-
-This chapter provides an overview of the virtual Traffic Classifier, a
-contribution to OPNFV Yardstick_ from the EU Project TNOVA_.
-Additional documentation is available in TNOVAresults_.
-
-Overview
-========
-
-The virtual Traffic Classifier (:term:`VTC`) :term:`VNF`, comprises of a
-Virtual Network Function Component (:term:`VNFC`). The :term:`VNFC` contains
-both the Traffic Inspection module, and the Traffic forwarding module, needed
-to run the :term:`VNF`. The exploitation of Deep Packet Inspection
-(:term:`DPI`) methods for traffic classification is built around two basic
-assumptions:
-
-* third parties unaffiliated with either source or recipient are able to
- inspect each IP packet's payload
-
-* the classifier knows the relevant syntax of each application's packet
- payloads (protocol signatures, data patterns, etc.).
-
-The proposed :term:`DPI` based approach will only use an indicative, small
-number of the initial packets from each flow in order to identify the content
-and not inspect each packet.
-
-In this respect it follows the Packet Based per Flow State (term:`PBFS`). This
-method uses a table to track each session based on the 5-tuples (src address,
-dest address, src port,dest port, transport protocol) that is maintained for
-each flow.
-
-Concepts
-========
-
-* *Traffic Inspection*: The process of packet analysis and application
- identification of network traffic that passes through the :term:`VTC`.
-
-* *Traffic Forwarding*: The process of packet forwarding from an incoming
- network interface to a pre-defined outgoing network interface.
-
-* *Traffic Rule Application*: The process of packet tagging, based on a
- predefined set of rules. Packet tagging may include e.g. Type of Service
- (:term:`ToS`) field modification.
-
-Architecture
-============
-
-The Traffic Inspection module is the most computationally intensive component
-of the :term:`VNF`. It implements filtering and packet matching algorithms in
-order to support the enhanced traffic forwarding capability of the :term:`VNF`.
-The component supports a flow table (exploiting hashing algorithms for fast
-indexing of flows) and an inspection engine for traffic classification.
-
-The implementation used for these experiments exploits the nDPI library.
-The packet capturing mechanism is implemented using libpcap. When the
-:term:`DPI` engine identifies a new flow, the flow register is updated with the
-appropriate information and transmitted across the Traffic Forwarding module,
-which then applies any required policy updates.
-
-The Traffic Forwarding moudle is responsible for routing and packet forwarding.
-It accepts incoming network traffic, consults the flow table for classification
-information for each incoming flow and then applies pre-defined policies
-marking e.g. :term:`ToS`/Differentiated Services Code Point (:term:`DSCP`)
-multimedia traffic for Quality of Service (:term:`QoS`) enablement on the
-forwarded traffic.
-It is assumed that the traffic is forwarded using the default policy until it
-is identified and new policies are enforced.
-
-The expected response delay is considered to be negligible, as only a small
-number of packets are required to identify each flow.
-
-Graphical Overview
-==================
-
-.. code-block:: console
-
- +----------------------------+
- | |
- | Virtual Traffic Classifier |
- | |
- | Analysing/Forwarding |
- | ------------> |
- | ethA ethB |
- | |
- +----------------------------+
- | ^
- | |
- v |
- +----------------------------+
- | |
- | Virtual Switch |
- | |
- +----------------------------+
-
-Install
-=======
-
-run the vTC/build.sh with root privileges
-
-Run
-===
-
-::
-
- sudo ./pfbridge -a eth1 -b eth2
-
-
-.. note:: Virtual Traffic Classifier is not support in OPNFV Danube release.
-
-
-Development Environment
-=======================
-
-Ubuntu 14.04 Ubuntu 16.04
diff --git a/docs/testing/user/userguide/15-list-of-tcs.rst b/docs/testing/user/userguide/15-list-of-tcs.rst
index 37ce819f1..0efecebd1 100644
--- a/docs/testing/user/userguide/15-list-of-tcs.rst
+++ b/docs/testing/user/userguide/15-list-of-tcs.rst
@@ -17,8 +17,7 @@ Yardstick test cases are divided in two main categories:
described in :doc:`02-methodology`
* *OPNFV Feature Test Cases* - Test Cases developed to verify one or more
- aspect of a feature delivered by an OPNFV Project, including the test cases
- developed for the :term:`VTC`.
+ aspect of a feature delivered by an OPNFV Project.
Generic NFVI Test Case Descriptions
===================================
diff --git a/docs/testing/user/userguide/glossary.rst b/docs/testing/user/userguide/glossary.rst
index f8ff41887..be98aa6c0 100644
--- a/docs/testing/user/userguide/glossary.rst
+++ b/docs/testing/user/userguide/glossary.rst
@@ -60,6 +60,3 @@ Glossary
ToS
Type of Service
-
- VTC
- Virtual Traffic Classifier
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index b936e723d..1cbd0858f 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -23,7 +23,6 @@ Yardstick User Guide
08-grafana
09-api
10-yardstick-user-interface
- 11-vtc-overview
12-nsb-overview
13-nsb-installation
14-nsb-operation
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
index 92cd51439..261a8bd95 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
@@ -19,16 +19,27 @@ Yardstick Test Case Description TC074
|metric | Storage performance |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Storperf integration with yardstick. The purpose of StorPerf |
-| | is to provide a tool to measure block and object storage |
-| | performance in an NFVI. When complemented with a |
-| | characterization of typical VF storage performance |
-| | requirements, it can provide pass/fail thresholds for test, |
-| | staging, and production NFVI environments. |
-| | |
-| | The benchmarks developed for block and object storage will |
-| | be sufficiently varied to provide a good preview of expected |
-| | storage performance behavior for any type of VNF workload. |
+|test purpose | To evaluate and report on the Cinder volume performance. |
+| | |
+| | This testcase integrates with OPNFV StorPerf to measure |
+| | block performance of the underlying Cinder drivers. Many |
+| | options are supported, and even the root disk (Glance |
+| | ephemeral storage) can be profiled. |
+| | |
+| | The fundamental concept of the test case is to first fill |
+| | the volumes with random data to ensure reported metrics |
+| | are indicative of continued usage and not skewed by |
+| | transitional performance while the underlying storage |
+| | driver allocates blocks. |
+| | The metrics for filling the volumes with random data |
+| | are not reported in the final results. The test also |
+| | ensures the volumes are delivering a consistent level |
+| | of performance by measuring metrics every minute and |
+| | comparing the trend of the metrics over the run. By |
+| | evaluating the min and max values, as well as the slope |
+| | of the trend, it determines whether the metrics are |
+| | stable and not fluctuating beyond industry-standard |
+| | norms. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc074.yaml |
@@ -38,7 +49,8 @@ Yardstick Test Case Description TC074
| | * public_network: "ext-net" - name of public network |
| | * volume_size: 2 - cinder volume size |
| | * block_sizes: "4096" - data block size |
-| | * queue_depths: "4" |
+| | * queue_depths: "4" - the number of simultaneous I/Os |
+| | to perform at all times |
| | * StorPerf_ip: "192.168.200.2" |
| | * query_interval: 10 - state query interval |
| | * timeout: 600 - maximum allowed job time |
@@ -50,7 +62,11 @@ Yardstick Test Case Description TC074
| | performance in an NFVI. |
| | |
| | StorPerf is delivered as a Docker container from |
-| | https://hub.docker.com/r/opnfv/storperf/tags/. |
+| | https://hub.docker.com/r/opnfv/storperf-master/tags/. |
+| | |
+| | The underlying tool used is FIO, and StorPerf supports |
+| | any FIO option in order to tailor the test to the exact |
+| | workload needed. |
| | |
+--------------+--------------------------------------------------------------+
|references | Storperf_ |
@@ -80,9 +96,17 @@ Yardstick Test Case Description TC074
| | - rr: 100% Read, random access |
| | - wr: 100% Write, random access |
| | - rw: 70% Read / 30% write, random access |
-| | * nossd: Do not perform SSD style preconditioning. |
-| | * nowarm: Do not perform a warmup prior to |
| | measurements. |
+| | * workloads={json maps} |
+| | This parameter supersedes the workload and calls the V2.0 |
+| | API in StorPerf. It allows for greater control of the |
+| | parameters to be passed to FIO. For example, running a |
+| | random read/write with a mix of 90% read and 10% write |
+| | would be expressed as follows: |
+| | {"9010randrw": {"rw":"randrw","rwmixread": "90"}} |
+| | Note: This must be passed in as a string, so the quotes |
+| | must be escaped or otherwise handled correctly. |
+| | |
| | * report= [job_id] |
| | Query the status of the supplied job_id and report on |
| | metrics. If a workload is supplied, will report on only |
@@ -92,8 +116,7 @@ Yardstick Test Case Description TC074
| | |
+--------------+--------------------------------------------------------------+
|pre-test | If you do not have an Ubuntu 14.04 image in Glance, you will |
-|conditions | need to add one. A key pair for launching agents is also |
-| | required. |
+|conditions | need to add one. |
| | |
| | Storperf is required to be installed in the environment. |
| | There are two possible methods for Storperf installation: |
@@ -126,10 +149,21 @@ Yardstick Test Case Description TC074
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The Storperf is installed and Ubuntu 14.04 image is stored |
-| | in glance. TC is invoked and logs are produced and stored. |
+|step 1 | Yardstick calls StorPerf to create the heat stack with the |
+| | number of VMs and size of Cinder volumes specified. The |
+| | VMs will be on their own private subnet, and take floating |
+| | IP addresses from the specified public network. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick calls StorPerf to fill all the volumes with |
+| | random data. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Yardstick calls StorPerf to perform the series of tests |
+| | specified by the workload, queue depths and block sizes. |
| | |
-| | Result: Logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 4 | Yardstick calls StorPerf to delete the stack it created. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | None. Storage performance results are fetched and stored. |
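
The steady-state criteria described in the test purpose above (stable min/max values and a near-flat slope of the trend) can be illustrated with a short sketch. This is not StorPerf's implementation; the window size and tolerances below are assumptions chosen for the example::

    # Illustrative steady-state check: range of min/max around the average
    # plus the least-squares slope of the trend. The 20%/10% tolerances are
    # assumptions for this sketch, not StorPerf defaults.
    def is_steady_state(samples, range_tol=0.20, slope_tol=0.10):
        """samples: one metric value per minute, oldest first."""
        n = len(samples)
        if n < 2:
            return False
        average = sum(samples) / n
        # Range criterion: min and max must stay close to the average.
        if max(samples) - min(samples) > range_tol * average:
            return False
        # Slope criterion: the trend over the run must be roughly flat.
        mean_x = (n - 1) / 2.0
        slope = (sum((x - mean_x) * (y - average)
                     for x, y in enumerate(samples)) /
                 sum((x - mean_x) ** 2 for x in range(n)))
        return abs(slope) <= slope_tol * average

    is_steady_state([101.0, 99.5, 100.2, 100.8, 99.9])   # True: stable
    is_steady_state([80.0, 90.0, 100.0, 110.0, 120.0])   # False: trending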
diff --git a/docs/testing/user/userguide/references.rst b/docs/testing/user/userguide/references.rst
index 05729ba75..3e18c96e9 100644
--- a/docs/testing/user/userguide/references.rst
+++ b/docs/testing/user/userguide/references.rst
@@ -13,7 +13,6 @@ OPNFV
* Parser wiki: https://wiki.opnfv.org/parser
* Pharos wiki: https://wiki.opnfv.org/pharos
-* VTC: https://wiki.opnfv.org/vtc
* Yardstick CI: https://build.opnfv.org/ci/view/yardstick/
* Yardstick and ETSI TST001 presentation: https://wiki.opnfv.org/display/yardstick/Yardstick?preview=%2F2925202%2F2925205%2Fopnfv_summit_-_bridging_opnfv_and_etsi.pdf
* Yardstick Project presentation: https://wiki.opnfv.org/display/yardstick/Yardstick?preview=%2F2925202%2F2925208%2Fopnfv_summit_-_yardstick_project.pdf
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
index fe8423d25..d08dbaa6e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -15,20 +15,30 @@ description: >
{% set public_network = public_network or "ext-net" %}
{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
+{% set workload = workload or "" %}
+{% set workloads = workloads or "" %}
+{% set agent_count = agent_count or 1 %}
+{% set block_sizes = block_sizes or "4096" %}
+{% set queue_depths = queue_depths or "4" %}
+{% set steady_state_samples = steady_state_samples or 10 %}
+{% set volume_size = volume_size or 4 %}
scenarios:
-
type: StorPerf
options:
- agent_count: 1
+ agent_count: {{agent_count}}
agent_image: "Ubuntu-16.04"
agent_flavor: "storperf"
public_network: {{public_network}}
- volume_size: 4
- block_sizes: "4096"
- queue_depths: "4"
+ volume_size: {{volume_size}}
+ block_sizes: {{block_sizes}}
+ queue_depths: {{queue_depths}}
StorPerf_ip: {{StorPerf_ip}}
query_interval: 10
timeout: 300
+ workload: {{workload}}
+ workloads: {{workloads}}
+ steady_state_samples: {{steady_state_samples}}
runner:
type: Iteration
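
With the template variables introduced above, a StorPerf v2.0 run can be requested simply by supplying ``workloads`` when the task is started. A hypothetical options block, using only the keys defined in this scenario (values are illustrative)::

    options:
      agent_count: 2
      agent_image: "Ubuntu-16.04"
      agent_flavor: "storperf"
      public_network: "ext-net"
      volume_size: 10
      block_sizes: "4096,16384"
      queue_depths: "2,8"
      StorPerf_ip: "192.168.200.1"
      query_interval: 10
      timeout: 300
      # A JSON map of named FIO workloads; supplying this selects the
      # v2.0 API. It is passed as a string, so the quotes must be
      # escaped when given on the command line.
      workloads: '{"9010randrw": {"rw": "randrw", "rwmixread": "90"}}'
      steady_state_samples: 10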
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index f0b2361d6..8093cd2d2 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -8,15 +8,16 @@
##############################################################################
from __future__ import absolute_import
-import os
import logging
+import os
import time
-import requests
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios import base
+
LOG = logging.getLogger(__name__)
@@ -43,12 +44,6 @@ class StorPerf(base.Scenario):
wr: 100% Write, random access
rw: 70% Read / 30% write, random access
- nossd (Optional):
- Do not perform SSD style preconditioning.
-
- nowarm (Optional):
- Do not perform a warmup prior to measurements.
-
report = [job_id] (Optional):
Query the status of the supplied job_id and report on metrics.
If a workload is supplied, will report on only that subset.
@@ -79,10 +74,13 @@ class StorPerf(base.Scenario):
setup_query_content = jsonutils.loads(
setup_query.content)
- if setup_query_content["stack_created"]:
- self.setup_done = True
+ if ("stack_created" in setup_query_content and
+ setup_query_content["stack_created"]):
LOG.debug("stack_created: %s",
setup_query_content["stack_created"])
+ return True
+
+ return False
def setup(self):
"""Set the configuration."""
@@ -111,9 +109,13 @@ class StorPerf(base.Scenario):
elif setup_res.status_code == 200:
LOG.info("stack_id: %s", setup_res_content["stack_id"])
- while not self.setup_done:
- self._query_setup_state()
- time.sleep(self.query_interval)
+ while not self._query_setup_state():
+ time.sleep(self.query_interval)
+
+ # We do not want to load the results of the disk initialization,
+ # so it is not added to the results here.
+ self.initialize_disks()
+ self.setup_done = True
def _query_job_state(self, job_id):
"""Query the status of the supplied job_id and report on metrics"""
@@ -149,7 +151,8 @@ class StorPerf(base.Scenario):
if not self.setup_done:
self.setup()
- metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+ metadata = {"build_tag": "latest",
+ "test_case": "opnfv_yardstick_tc074"}
metadata_payload_dict = {"pod_name": "NODE_NAME",
"scenario_name": "DEPLOY_SCENARIO",
"version": "YARDSTICK_BRANCH"}
@@ -162,7 +165,9 @@ class StorPerf(base.Scenario):
job_args = {"metadata": metadata}
job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
- "target", "nossd", "nowarm", "workload"]
+ "target", "workload", "workloads",
+ "agent_count", "steady_state_samples"]
+ job_args["deadline"] = self.options["timeout"]
for job_argument in job_args_payload_list:
try:
@@ -170,8 +175,16 @@ class StorPerf(base.Scenario):
except KeyError:
pass
+ api_version = "v1.0"
+
+ if ("workloads" in job_args and
+ job_args["workloads"] is not None and
+ len(job_args["workloads"]) > 0):
+ api_version = "v2.0"
+
LOG.info("Starting a job with parameters %s", job_args)
- job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
+ job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+ api_version),
json=job_args)
job_res_content = jsonutils.loads(job_res.content)
@@ -187,15 +200,6 @@ class StorPerf(base.Scenario):
self._query_job_state(job_id)
time.sleep(self.query_interval)
- terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
- self.target)
-
- if terminate_res.status_code != 200:
- terminate_res_content = jsonutils.loads(
- terminate_res.content)
- raise RuntimeError("Failed to start a job, error message:",
- terminate_res_content["message"])
-
# TODO: Support using ETA to polls for completion.
# Read ETA, next poll in 1/2 ETA time slot.
# If ETA is greater than the maximum allowed job time,
@@ -216,14 +220,46 @@ class StorPerf(base.Scenario):
result.update(result_res_content)
+ def initialize_disks(self):
+ """Fills the target with random data prior to executing workloads"""
+
+ job_args = {}
+ job_args_payload_list = ["target"]
+
+ for job_argument in job_args_payload_list:
+ try:
+ job_args[job_argument] = self.options[job_argument]
+ except KeyError:
+ pass
+
+ LOG.info("Starting initialization with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+ self.target, json=job_args)
+
+ job_res_content = jsonutils.loads(job_res.content)
+
+ if job_res.status_code != 200:
+ raise RuntimeError(
+ "Failed to start initialization job, error message:",
+ job_res_content["message"])
+ elif job_res.status_code == 200:
+ job_id = job_res_content["job_id"]
+ LOG.info("Started initialization as job id: %s...", job_id)
+
+ while not self.job_completed:
+ self._query_job_state(job_id)
+ time.sleep(self.query_interval)
+
+ self.job_completed = False
+
def teardown(self):
"""Deletes the agent configuration and the stack"""
- teardown_res = requests.delete('http://%s:5000/api/v1.0/\
- configurations' % self.target)
+ teardown_res = requests.delete(
+ 'http://%s:5000/api/v1.0/configurations' % self.target)
if teardown_res.status_code == 400:
teardown_res_content = jsonutils.loads(
- teardown_res.content)
+ teardown_res.content)
raise RuntimeError("Failed to reset environment, error message:",
teardown_res_content['message'])
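
For orientation, the full ReST conversation the scenario now drives condenses into a few calls. A sketch only, assuming a reachable StorPerf instance; the status-query parameters are assumed to mirror ``_query_job_state`` and error handling is omitted::

    import time
    import requests

    base = 'http://%s:5000/api' % '192.168.200.2'

    # 1. Create the agent stack (VMs plus Cinder volumes).
    requests.post(base + '/v1.0/configurations',
                  json={'agent_count': 1, 'public_network': 'ext-net',
                        'volume_size': 4})

    # 2. Fill the volumes with random data; these metrics are not reported.
    requests.post(base + '/v1.0/initializations', json={})

    # 3. Start the measurement job; 'workloads' selects the v2.0 API.
    job = requests.post(base + '/v2.0/jobs', json={
        'block_sizes': '4096', 'queue_depths': '4',
        'workloads': {'9010randrw': {'rw': 'randrw', 'rwmixread': '90'}},
    }).json()

    # 4. Poll the job until it completes (query parameters assumed).
    while True:
        status = requests.get(base + '/v1.0/jobs', params={
            'id': job['job_id'], 'type': 'status'}).json()
        if status.get('Status') == 'Completed':
            break
        time.sleep(10)

    # 5. Delete the agent configuration and the stack.
    requests.delete(base + '/v1.0/configurations')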
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index 06c6b0bcb..8274ff9ce 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -34,6 +34,13 @@ PROTO_UDP = 'udp'
PROTO_TCP = 'tcp'
PROTO_VLAN = 'vlan'
+SINGLE_VALUE = "singleValue"
+
+S_VLAN = 0
+C_VLAN = 1
+
+ETHER_TYPE_802_1ad = '0x88a8'
+
IP_VERSION_4_MASK = 24
IP_VERSION_6_MASK = 64
@@ -367,10 +374,28 @@ class IxNextgen(object): # pragma: no cover
traffic_param['outer_l2']['framesize'])
srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01'))
dstmac = str(traffic_param.get('dstmac', '00:00:00:00:00:02'))
- # NOTE(ralonsoh): add QinQ tagging when
- # traffic_param['outer_l2']['QinQ'] exists.
- # s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
- # c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ if traffic_param['outer_l2']['QinQ']:
+ s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
+ c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'etherType')
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', ETHER_TYPE_802_1ad,
+ '-fieldValue', ETHER_TYPE_802_1ad,
+ '-valueType', SINGLE_VALUE)
+
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+
+ self._update_vlan_tag(fg_id, s_vlan, S_VLAN)
+ self._update_vlan_tag(fg_id, c_vlan, C_VLAN)
self.ixnet.setMultiAttribute(
config_element + '/transmissionControl',
@@ -391,6 +416,27 @@ class IxNextgen(object): # pragma: no cover
self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
'sourceAddress', srcmac)
+ def _update_vlan_tag(self, fg_id, params, vlan=0):
+ field_to_param_map = {
+ 'vlanUserPriority': 'priority',
+ 'cfi': 'cfi',
+ 'vlanID': 'id'
+ }
+ for field, param in field_to_param_map.items():
+ value = params.get(param)
+ if value:
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_VLAN)[vlan],
+ field)
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', value,
+ '-fieldValue', value,
+ '-valueType', SINGLE_VALUE)
+
+ self.ixnet.commit()
+
def _update_ipv4_address(self, ip_descriptor, field, ip_address, seed,
mask, count):
"""Set the IPv4 address in a config element stack IP field
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 2086273e6..26dc1fe04 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -88,6 +88,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'outer_l2': {
'framesize': value['outer_l2']['framesize'],
'framesPerSecond': True,
+ 'QinQ': value['outer_l2'].get('QinQ'),
'srcmac': mac['src_mac_{}'.format(port_index)],
'dstmac': mac['dst_mac_{}'.format(port_index)],
},
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 9457096c8..506a880e0 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -88,11 +88,6 @@ class ProxBinSearchProfile(ProxProfile):
theor_max_thruput = actual_max_thruput = 0
result_samples = {}
- rate_samples = {}
- pos_retry = 0
- neg_retry = 0
- total_retry = 0
- ok_retry = 0
# Store one time only value in influxdb
single_samples = {
@@ -110,15 +105,11 @@ class ProxBinSearchProfile(ProxProfile):
"interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
- for test_value in self.bounds_iterator(LOG):
+ for step_id, test_value in enumerate(self.bounds_iterator(LOG)):
pos_retry = 0
neg_retry = 0
total_retry = 0
- rate_samples["MAX_Rate"] = self.current_upper
- rate_samples["MIN_Rate"] = self.current_lower
- rate_samples["Test_Rate"] = test_value
- self.queue.put(rate_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info("Checking MAX %s MIN %s TEST %s",
self.current_upper, self.lower_bound, test_value)
while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
@@ -188,6 +179,11 @@ class ProxBinSearchProfile(ProxProfile):
self.queue.put({'theor_max_throughput': theor_max_thruput})
LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+ samples["MAX_Rate"] = self.current_upper
+ samples["MIN_Rate"] = self.current_lower
+ samples["Test_Rate"] = test_value
+ samples["Step_Id"] = step_id
+ samples["Confirmation_Retry"] = total_retry
self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info(">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 0e1dbd592..b54fc575f 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -70,7 +70,7 @@ class PortPgIDMap(object):
class RFC2544Profile(trex_traffic_profile.TrexProfile):
"""TRex RFC2544 traffic profile"""
- TOLERANCE_LIMIT = 0.05
+ TOLERANCE_LIMIT = 0.01
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
@@ -246,6 +246,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
def get_drop_percentage(self, samples, tol_low, tol_high,
correlated_traffic):
"""Calculate the drop percentage and run the traffic"""
+ completed = False
tx_rate_fps = 0
rx_rate_fps = 0
for sample in samples:
@@ -266,15 +267,15 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
drop_percent = round(
(float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- tol_high = tol_high if tol_high > self.TOLERANCE_LIMIT else tol_high
- tol_low = tol_low if tol_low > self.TOLERANCE_LIMIT else tol_low
+ tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+ tol_low = min(tol_low, self.TOLERANCE_LIMIT)
if drop_percent > tol_high:
self.max_rate = self.rate
elif drop_percent < tol_low:
self.min_rate = self.rate
- # else:
- # NOTE(ralonsoh): the test should finish here
- # pass
+ else:
+ completed = True
+
last_rate = self.rate
self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
@@ -295,4 +296,4 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
'Rate': last_rate,
'Latency': latency
}
- return output
+ return completed, output
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 3ef7c33c5..a09f2a7a9 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -426,7 +426,8 @@ class ClientResourceHelper(ResourceHelper):
iteration_index = 0
while self._terminated.value == 0:
iteration_index += 1
- self._run_traffic_once(traffic_profile)
+ if self._run_traffic_once(traffic_profile):
+ self._terminated.value = 1
mq_producer.tg_method_iteration(iteration_index)
self.client.stop(self.all_ports)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index cdbb41485..7ecb12478 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -45,11 +45,12 @@ class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
time.sleep(self.SAMPLING_PERIOD)
traffic_profile.stop_traffic(self)
- output = traffic_profile.get_drop_percentage(
+ completed, output = traffic_profile.get_drop_percentage(
samples, self.rfc2544_helper.tolerance_low,
self.rfc2544_helper.tolerance_high,
self.rfc2544_helper.correlated_traffic)
self._queue.put(output)
+ return completed
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
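
Taken together, the ``rfc2544.py``, ``sample_vnf.py`` and ``tg_rfc2544_trex.py`` changes let the RFC2544 binary search stop as soon as the measured drop percentage falls inside the tolerance window, rather than iterating for the full duration. In outline (a sketch of the control flow, not the actual helpers)::

    while not terminated:
        completed, output = traffic_profile.get_drop_percentage(
            samples, tol_low, tol_high, correlated_traffic)
        queue.put(output)
        if completed:          # tol_low <= drop_percent <= tol_high
            terminated = True  # _run_traffic_once() returned True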
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index e0c0db262..99a5760a3 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -227,14 +227,10 @@ name (i.e. %s).
def add_volume_attachment(self, server_name, volume_name, mountpoint=None):
"""add to the template an association of volume to instance"""
- log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ", server_name,
- volume_name)
-
+ log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ",
+ server_name, volume_name)
name = "%s-%s" % (server_name, volume_name)
-
- volume_id = op_utils.get_volume_id(volume_name)
- if not volume_id:
- volume_id = {'get_resource': volume_name}
+ volume_id = {'get_resource': volume_name}
self.resources[name] = {
'type': 'OS::Cinder::VolumeAttachment',
'properties': {'instance_uuid': {'get_resource': server_name},
diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py
index 21e3874b8..d4801ef2c 100644
--- a/yardstick/tests/unit/benchmark/runner/test_duration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_duration.py
@@ -11,17 +11,50 @@ import mock
import unittest
import multiprocessing
import os
+import time
from yardstick.benchmark.runners import duration
+from yardstick.common import exceptions as y_exc
class DurationRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
+ BROAD_EXCEPTION_SIDE_EFFECT = 2
+
+ def __init__(self, side_effect=0):
+ self.count = 101
+ self.side_effect = side_effect
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ if self.side_effect == self.SLA_VALIDATION_ERROR_SIDE_EFFECT:
+ raise y_exc.SLAValidationError(case_name='My Case',
+ error_msg='my error message')
+ elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT:
+ raise y_exc.YardstickException
+ return self.count
+
def setUp(self):
self.scenario_cfg = {
'runner': {'interval': 0, "duration": 0},
'type': 'some_type'
}
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ def _assert_defaults__worker_run_one_iteration(self):
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
@mock.patch.object(os, 'getpid')
@mock.patch.object(multiprocessing, 'Process')
def test__run_benchmark_called_with(self, mock_multiprocessing_process,
@@ -37,3 +70,246 @@ class DurationRunnerTest(unittest.TestCase):
target=duration._worker_process,
args=(runner.result_queue, benchmark_cls, 'my_method',
self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ def test__worker_process_called_with_cfg(self):
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_called_with_cfg_loop(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.call_count, 2)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ def test__worker_process_called_without_cfg(self):
+ scenario_cfg = {'runner': {}}
+ aborted = multiprocessing.Event()
+ aborted.set()
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ scenario_cfg, {}, aborted, mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(1)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(1)
+ self.benchmark.teardown.assert_called_once()
+
+ def test__worker_process_output_queue(self):
+ self.benchmark.my_method = mock.Mock(return_value='my_result')
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+ self.assertEqual(output_queue.get(), 'my_result')
+
+ def test__worker_process_output_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 101
+ while not output_queue.empty():
+ count += 1
+ self.assertEqual(output_queue.get(), count)
+
+ def test__worker_process_queue(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.5
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+
+ def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
+ self.scenario_cfg['sla'] = {}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_queue_on_sla_validation_error_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], ('My Case SLA validation failed. '
+ 'Error: my error message',))
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_broad_exception(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_queue_on_broad_exception(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertNotEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_benchmark_teardown_on_broad_exception(self):
+ self.benchmark.teardown = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ with self.assertRaises(SystemExit) as raised:
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.assertEqual(raised.exception.code, 1)
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 5844746ab..2ba53cb93 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -11,18 +11,18 @@
from __future__ import absolute_import
+import json
import unittest
import mock
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios.storage import storperf
# pylint: disable=unused-argument
# disable this for now because I keep forgetting mock patch arg ordering
-
-
def mocked_requests_config_post(*args, **kwargs):
class MockResponseConfigPost(object):
@@ -32,10 +32,24 @@ def mocked_requests_config_post(*args, **kwargs):
return MockResponseConfigPost(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "false"}',
+ '"stack_created": false}',
200)
+def mocked_requests_config_post_fail(*args, **kwargs):
+ class MockResponseConfigPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigPost(
+ '{"message": "ERROR: Parameter \'public_network\' is invalid: ' +
+ 'Error validating value \'foo\': Unable to find network with ' +
+ 'name or id \'foo\'"}',
+ 400)
+
+
def mocked_requests_config_get(*args, **kwargs):
class MockResponseConfigGet(object):
@@ -45,10 +59,47 @@ def mocked_requests_config_get(*args, **kwargs):
return MockResponseConfigGet(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "true"}',
+ '"stack_created": true}',
200)
+def mocked_requests_config_get_not_created(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{"stack_id": "",'
+ '"stack_created": false}',
+ 200)
+
+
+def mocked_requests_config_get_no_payload(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{}',
+ 200)
+
+
+def mocked_requests_initialize_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_get(*args, **kwargs):
class MockResponseJobGet(object):
@@ -73,6 +124,18 @@ def mocked_requests_job_post(*args, **kwargs):
"d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
+def mocked_requests_job_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_delete(*args, **kwargs):
class MockResponseJobDelete(object):
@@ -100,10 +163,7 @@ def mocked_requests_delete_failed(*args, **kwargs):
self.json_data = json_data
self.status_code = status_code
- if args[0] == "http://172.16.0.137:5000/api/v1.0/configurations":
- return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
-
- return MockResponseDeleteFailed('{}', 404)
+ return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
class StorPerfTestCase(unittest.TestCase):
@@ -119,11 +179,14 @@ class StorPerfTestCase(unittest.TestCase):
self.result = {}
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_config_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_config_get)
- def test_successful_setup(self, mock_post, mock_get):
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup(self, mock_get, mock_post):
+ mock_post.side_effect = [mocked_requests_config_post(),
+ mocked_requests_job_post()]
+ mock_get.side_effect = [mocked_requests_config_get(),
+ mocked_requests_job_get()]
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -146,14 +209,47 @@ class StorPerfTestCase(unittest.TestCase):
self.assertTrue(s.setup_done)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_job_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_job_get)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_job_delete)
- def test_successful_run(self, mock_post, mock_get, mock_delete):
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_unsuccessful(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_not_created
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_no_payload(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_no_payload
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup_config_post_failed(self, mock_get, mock_post):
+ mock_post.side_effect = mocked_requests_config_post_fail
+
+ args = {
+ "options": {
+ "public_network": "foo"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.setup)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v1_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -165,6 +261,74 @@ class StorPerfTestCase(unittest.TestCase):
"query_interval": 0,
"timeout": 60
}
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ "workload": "rs",
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ sample_output = '{"Status": "Completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+
+ expected_result = jsonutils.loads(sample_output)
+
+ s.run(self.result)
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/jobs',
+ json=jsonutils.loads(json.dumps(expected_post)))
+
+ self.assertEqual(self.result, expected_result)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v2_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
args = {
"options": options
@@ -179,13 +343,126 @@ class StorPerfTestCase(unittest.TestCase):
expected_result = jsonutils.loads(sample_output)
s.run(self.result)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
self.assertEqual(self.result, expected_result)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete)
- def test_successful_teardown(self, mock_delete):
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ self.assertRaises(RuntimeError, s.run, self.ctx)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(storperf.StorPerf, 'setup')
+ def test_run_calls_setup(self, mock_setup, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ 'timeout': 60,
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.run(self.result)
+
+ mock_setup.assert_called_once()
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.initialize_disks()
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks_post_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_initialize_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.initialize_disks)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch.object(requests, 'delete')
+ def test_teardown(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_job_delete
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -207,11 +484,12 @@ class StorPerfTestCase(unittest.TestCase):
s.teardown()
self.assertFalse(s.setup_done)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete_failed)
- def test_failed_teardown(self, mock_delete):
+ @mock.patch.object(requests, 'delete')
+ def test_teardown_request_delete_failed(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_delete_failed
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -230,4 +508,6 @@ class StorPerfTestCase(unittest.TestCase):
s = storperf.StorPerf(args, self.ctx)
- self.assertRaises(AssertionError, s.teardown(), self.result)
+ self.assertRaises(RuntimeError, s.teardown)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index 5970ecae6..e078d70ad 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -16,6 +16,8 @@ import mock
import IxNetwork
import unittest
+from copy import deepcopy
+
from yardstick.common import exceptions
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -31,7 +33,8 @@ TRAFFIC_PARAMETERS = {
'rate': 10000.5,
'rate_unit': 'fps',
'outer_l2': {
- 'framesize': {'64B': '25', '256B': '75'}
+ 'framesize': {'64B': '25', '256B': '75'},
+ 'QinQ': None
},
'outer_l3': {
'count': 512,
@@ -61,7 +64,8 @@ TRAFFIC_PARAMETERS = {
'rate': 75.2,
'rate_unit': '%',
'outer_l2': {
- 'framesize': {'128B': '35', '1024B': '65'}
+ 'framesize': {'128B': '35', '1024B': '65'},
+ 'QinQ': None
},
'outer_l3': {
'count': 1024,
@@ -344,6 +348,41 @@ class TestIxNextgen(unittest.TestCase):
'-type', 'continuous', '-duration', 50)
])
+ def test_update_frame_qinq(self):
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='cfg_element'), \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac'),\
+ mock.patch.object(self.ixnet_gen, '_get_stack_item',
+ return_value='item'), \
+ mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field'):
+
+ traffic_parameters = deepcopy(TRAFFIC_PARAMETERS)
+ traffic_parameters[UPLINK]['outer_l2']['QinQ'] = {
+ 'S-VLAN': {'id': 128,
+ 'priority': 1,
+ 'cfi': 0},
+ 'C-VLAN': {'id': 512,
+ 'priority': 0,
+ 'cfi': 2}
+ }
+
+ self.ixnet_gen.update_frame(traffic_parameters, 50)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('field', '-auto', 'false', '-singleValue', '0x88a8',
+ '-fieldValue', '0x88a8', '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 1,
+ '-fieldValue', 1, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 128,
+ '-fieldValue', 128, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 512,
+ '-fieldValue', 512, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 2,
+ '-fieldValue', 2, '-valueType', 'singleValue')
+ ], any_order=True)
+
def test_update_frame_flow_not_present(self):
with mock.patch.object(
self.ixnet_gen, '_get_config_element_by_flow_group_name',
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index a4fdc8d04..2e0331e8e 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -238,15 +238,17 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
'in_packets': 4040,
'latency': 'Latency2'}}
]
- output = rfc2544_profile.get_drop_percentage(samples, 0, 0, False)
+ completed, output = rfc2544_profile.get_drop_percentage(
+ samples, 0, 0, False)
expected = {'DropPercentage': 0.3963,
'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
'RxThroughput': 312.5,
'TxThroughput': 304.5,
'CurrentDropPercentage': 0.3963,
- 'Rate': 100,
+ 'Rate': 100.0,
'Throughput': 312.5}
self.assertEqual(expected, output)
+ self.assertFalse(completed)
class PortPgIDMapTestCase(base.BaseUnitTestCase):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index c35d2db35..4a1d8c30e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1091,7 +1091,8 @@ class TestClientResourceHelper(unittest.TestCase):
self.assertIs(client_resource_helper._connect(client), client)
@mock.patch.object(ClientResourceHelper, '_build_ports')
- @mock.patch.object(ClientResourceHelper, '_run_traffic_once')
+ @mock.patch.object(ClientResourceHelper, '_run_traffic_once',
+ return_value=(True, mock.ANY))
def test_run_traffic(self, mock_run_traffic_once, mock_build_ports):
client_resource_helper = ClientResourceHelper(mock.Mock())
client = mock.Mock()
@@ -1103,7 +1104,7 @@ class TestClientResourceHelper(unittest.TestCase):
as mock_terminated:
mock_connect.return_value = client
type(mock_terminated).value = mock.PropertyMock(
- side_effect=[0, 1, lambda x: x])
+ side_effect=[0, 1, 1, lambda x: x])
client_resource_helper.run_traffic(traffic_profile, mq_producer)
mock_build_ports.assert_called_once()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 6aba41006..a5b9f258e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -30,13 +30,14 @@ class TestTrexRfcResouceHelper(unittest.TestCase):
mock_traffic_profile.config.duration = 3
mock_traffic_profile.execute_traffic.return_value = ('fake_ports',
'port_pg_id_map')
- mock_traffic_profile.get_drop_percentage.return_value = 'percentage'
+ mock_traffic_profile.get_drop_percentage.return_value = (True,
+ 'percentage')
rfc_rh = tg_rfc2544_trex.TrexRfcResourceHelper(mock_setup_helper)
rfc_rh.TRANSIENT_PERIOD = 0
rfc_rh.rfc2544_helper = mock.Mock()
with mock.patch.object(rfc_rh, '_get_samples') as mock_get_samples:
- rfc_rh._run_traffic_once(mock_traffic_profile)
+ self.assertTrue(rfc_rh._run_traffic_once(mock_traffic_profile))
mock_traffic_profile.execute_traffic.assert_called_once_with(rfc_rh)
mock_traffic_profile.stop_traffic.assert_called_once_with(rfc_rh)