-rw-r--r--  docker/Dockerfile.aarch64.patch  26
-rwxr-xr-x  docs/testing/user/userguide/01-introduction.rst  2
-rwxr-xr-x  docs/testing/user/userguide/03-architecture.rst  2
-rw-r--r--  docs/testing/user/userguide/11-vtc-overview.rst  128
-rw-r--r--  docs/testing/user/userguide/15-list-of-tcs.rst  3
-rw-r--r--  docs/testing/user/userguide/glossary.rst  3
-rw-r--r--  docs/testing/user/userguide/index.rst  1
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc074.rst  72
-rw-r--r--  docs/testing/user/userguide/references.rst  1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml  8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml  8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml  9
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml  18
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py  32
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py  1
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py  6
-rw-r--r--  yardstick/benchmark/scenarios/lib/check_value.py  17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py  1
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py  92
-rw-r--r--  yardstick/common/exceptions.py  8
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py  121
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py  31
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py  16
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py  15
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py  3
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py  46
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py  3
-rw-r--r--  yardstick/orchestrator/heat.py  10
-rw-r--r--  yardstick/ssh.py  2
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_arithmetic.py  220
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_duration.py  276
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py  4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py  36
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py  17
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py  64
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py  340
-rw-r--r--  yardstick/tests/unit/common/test_template_format.py  13
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py  113
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py  66
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py  6
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py  5
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py  5
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py  5
-rw-r--r--  yardstick/tests/unit/test_ssh.py  23
44 files changed, 1475 insertions, 403 deletions
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index 472310f96..2e2808ed1 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -1,14 +1,14 @@
-From: ting wu <ting.wu@enea.com>
-Date: Tue, 8 May 2018 14:02:52 +0200
-Subject: [PATCH][PATCH] Patch for Yardstick AARCH64 Docker file
+From: Cristina Pauna <cristina.pauna@enea.com>
+Date: Mon, 23 Jul 2018 15:16:59 +0300
+Subject: [PATCH] Patch for Yardstick AARCH64 Docker file
-Signed-off-by: ting wu <ting.wu@enea.com>
+Signed-off-by: Cristina Pauna <cristina.pauna@enea.com>
---
- docker/Dockerfile | 11 ++++++-----
- 1 file changed, 6 insertions(+), 5 deletions(-)
+ docker/Dockerfile | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 71ce6b58..952d0f78 100644
+index 71ce6b58..fce7c116 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,9 +7,9 @@
@@ -33,7 +33,17 @@ index 71ce6b58..952d0f78 100644
RUN easy_install -U setuptools==30.0.0
RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5
-@@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+@@ -40,7 +41,8 @@ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${Y
+ RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
+ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
+
+-RUN ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
++RUN sed -i -e '/- configure_gui/d' ${YARDSTICK_REPO_DIR}/ansible/install.yaml && \
++ ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+
+ RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
+
+@@ -48,8 +50,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
# nginx=5000, rabbitmq=5672
EXPOSE 5000 5672
diff --git a/docs/testing/user/userguide/01-introduction.rst b/docs/testing/user/userguide/01-introduction.rst
index d846e759c..494b1ef3d 100755
--- a/docs/testing/user/userguide/01-introduction.rst
+++ b/docs/testing/user/userguide/01-introduction.rst
@@ -66,8 +66,6 @@ This document consists of the following chapters:
yardstick report CLI to view the test result in table format and also values
pinned on to a graph
-* Chapter :doc:`11-vtc-overview` provides information on the :term:`VTC`.
-
* Chapter :doc:`12-nsb-overview` describes the methodology implemented by the
Yardstick - Network service benchmarking to test real world usecase for a
given VNF.
diff --git a/docs/testing/user/userguide/03-architecture.rst b/docs/testing/user/userguide/03-architecture.rst
index 622002ee4..886631510 100755
--- a/docs/testing/user/userguide/03-architecture.rst
+++ b/docs/testing/user/userguide/03-architecture.rst
@@ -262,8 +262,6 @@ Yardstick Directory structure
*plugin/* - Plug-in configuration files are stored here.
-*vTC/* - Contains the files for running the virtual Traffic Classifier tests.
-
*yardstick/* - Contains the internals of Yardstick: Runners, Scenario, Contexts,
CLI parsing, keys, plotting tools, dispatcher, plugin
install/remove scripts and so on.
diff --git a/docs/testing/user/userguide/11-vtc-overview.rst b/docs/testing/user/userguide/11-vtc-overview.rst
deleted file mode 100644
index 47582358c..000000000
--- a/docs/testing/user/userguide/11-vtc-overview.rst
+++ /dev/null
@@ -1,128 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, National Center of Scientific Research "Demokritos" and others.
-
-==========================
-Virtual Traffic Classifier
-==========================
-
-Abstract
-========
-
-.. _TNOVA: http://www.t-nova.eu/
-.. _TNOVAresults: http://www.t-nova.eu/results/
-.. _Yardstick: https://wiki.opnfv.org/yardstick
-
-This chapter provides an overview of the virtual Traffic Classifier, a
-contribution to OPNFV Yardstick_ from the EU Project TNOVA_.
-Additional documentation is available in TNOVAresults_.
-
-Overview
-========
-
-The virtual Traffic Classifier (:term:`VTC`) :term:`VNF`, comprises of a
-Virtual Network Function Component (:term:`VNFC`). The :term:`VNFC` contains
-both the Traffic Inspection module, and the Traffic forwarding module, needed
-to run the :term:`VNF`. The exploitation of Deep Packet Inspection
-(:term:`DPI`) methods for traffic classification is built around two basic
-assumptions:
-
-* third parties unaffiliated with either source or recipient are able to
- inspect each IP packet's payload
-
-* the classifier knows the relevant syntax of each application's packet
- payloads (protocol signatures, data patterns, etc.).
-
-The proposed :term:`DPI` based approach will only use an indicative, small
-number of the initial packets from each flow in order to identify the content
-and not inspect each packet.
-
-In this respect it follows the Packet Based per Flow State (term:`PBFS`). This
-method uses a table to track each session based on the 5-tuples (src address,
-dest address, src port,dest port, transport protocol) that is maintained for
-each flow.
-
-Concepts
-========
-
-* *Traffic Inspection*: The process of packet analysis and application
- identification of network traffic that passes through the :term:`VTC`.
-
-* *Traffic Forwarding*: The process of packet forwarding from an incoming
- network interface to a pre-defined outgoing network interface.
-
-* *Traffic Rule Application*: The process of packet tagging, based on a
- predefined set of rules. Packet tagging may include e.g. Type of Service
- (:term:`ToS`) field modification.
-
-Architecture
-============
-
-The Traffic Inspection module is the most computationally intensive component
-of the :term:`VNF`. It implements filtering and packet matching algorithms in
-order to support the enhanced traffic forwarding capability of the :term:`VNF`.
-The component supports a flow table (exploiting hashing algorithms for fast
-indexing of flows) and an inspection engine for traffic classification.
-
-The implementation used for these experiments exploits the nDPI library.
-The packet capturing mechanism is implemented using libpcap. When the
-:term:`DPI` engine identifies a new flow, the flow register is updated with the
-appropriate information and transmitted across the Traffic Forwarding module,
-which then applies any required policy updates.
-
-The Traffic Forwarding moudle is responsible for routing and packet forwarding.
-It accepts incoming network traffic, consults the flow table for classification
-information for each incoming flow and then applies pre-defined policies
-marking e.g. :term:`ToS`/Differentiated Services Code Point (:term:`DSCP`)
-multimedia traffic for Quality of Service (:term:`QoS`) enablement on the
-forwarded traffic.
-It is assumed that the traffic is forwarded using the default policy until it
-is identified and new policies are enforced.
-
-The expected response delay is considered to be negligible, as only a small
-number of packets are required to identify each flow.
-
-Graphical Overview
-==================
-
-.. code-block:: console
-
- +----------------------------+
- | |
- | Virtual Traffic Classifier |
- | |
- | Analysing/Forwarding |
- | ------------> |
- | ethA ethB |
- | |
- +----------------------------+
- | ^
- | |
- v |
- +----------------------------+
- | |
- | Virtual Switch |
- | |
- +----------------------------+
-
-Install
-=======
-
-run the vTC/build.sh with root privileges
-
-Run
-===
-
-::
-
- sudo ./pfbridge -a eth1 -b eth2
-
-
-.. note:: Virtual Traffic Classifier is not support in OPNFV Danube release.
-
-
-Development Environment
-=======================
-
-Ubuntu 14.04 Ubuntu 16.04
diff --git a/docs/testing/user/userguide/15-list-of-tcs.rst b/docs/testing/user/userguide/15-list-of-tcs.rst
index 37ce819f1..0efecebd1 100644
--- a/docs/testing/user/userguide/15-list-of-tcs.rst
+++ b/docs/testing/user/userguide/15-list-of-tcs.rst
@@ -17,8 +17,7 @@ Yardstick test cases are divided in two main categories:
described in :doc:`02-methodology`
* *OPNFV Feature Test Cases* - Test Cases developed to verify one or more
- aspect of a feature delivered by an OPNFV Project, including the test cases
- developed for the :term:`VTC`.
+ aspect of a feature delivered by an OPNFV Project.
Generic NFVI Test Case Descriptions
===================================
diff --git a/docs/testing/user/userguide/glossary.rst b/docs/testing/user/userguide/glossary.rst
index f8ff41887..be98aa6c0 100644
--- a/docs/testing/user/userguide/glossary.rst
+++ b/docs/testing/user/userguide/glossary.rst
@@ -60,6 +60,3 @@ Glossary
ToS
Type of Service
-
- VTC
- Virtual Traffic Classifier
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index b936e723d..1cbd0858f 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -23,7 +23,6 @@ Yardstick User Guide
08-grafana
09-api
10-yardstick-user-interface
- 11-vtc-overview
12-nsb-overview
13-nsb-installation
14-nsb-operation
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
index 92cd51439..261a8bd95 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
@@ -19,16 +19,27 @@ Yardstick Test Case Description TC074
|metric | Storage performance |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Storperf integration with yardstick. The purpose of StorPerf |
-| | is to provide a tool to measure block and object storage |
-| | performance in an NFVI. When complemented with a |
-| | characterization of typical VF storage performance |
-| | requirements, it can provide pass/fail thresholds for test, |
-| | staging, and production NFVI environments. |
-| | |
-| | The benchmarks developed for block and object storage will |
-| | be sufficiently varied to provide a good preview of expected |
-| | storage performance behavior for any type of VNF workload. |
+|test purpose | To evaluate and report on the Cinder volume performance. |
+| | |
+| | This testcase integrates with OPNFV StorPerf to measure |
+| | block performance of the underlying Cinder drivers. Many |
+| | options are supported, and even the root disk (Glance |
+| | ephemeral storage can be profiled. |
+| | |
+| | The fundamental concept of the test case is to first fill |
+| | the volumes with random data to ensure reported metrics |
+| | are indicative of continued usage and not skewed by |
+| | transitional performance while the underlying storage |
+| | driver allocates blocks. |
+| | The metrics for filling the volumes with random data |
+| | are not reported in the final results. The test also |
+| | ensures the volumes are performing at a consistent level |
+| | of performance by measuring metrics every minute, and |
+| | comparing the trend of the metrics over the run. By |
+| | evaluating the min and max values, as well as the slope of |
+| | the trend, it can make the determination that the metrics |
+| | are stable, and not fluctuating beyond industry standard |
+| | norms. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc074.yaml |
@@ -38,7 +49,8 @@ Yardstick Test Case Description TC074
| | * public_network: "ext-net" - name of public network |
| | * volume_size: 2 - cinder volume size |
| | * block_sizes: "4096" - data block size |
-| | * queue_depths: "4" |
+| | * queue_depths: "4" - the number of simultaneous I/Os |
+| | to perform at all times |
| | * StorPerf_ip: "192.168.200.2" |
| | * query_interval: 10 - state query interval |
| | * timeout: 600 - maximum allowed job time |
@@ -50,7 +62,11 @@ Yardstick Test Case Description TC074
| | performance in an NFVI. |
| | |
| | StorPerf is delivered as a Docker container from |
-| | https://hub.docker.com/r/opnfv/storperf/tags/. |
+| | https://hub.docker.com/r/opnfv/storperf-master/tags/. |
+| | |
+| | The underlying tool used is FIO, and StorPerf supports |
+| | any FIO option in order to tailor the test to the exact |
+| | workload needed. |
| | |
+--------------+--------------------------------------------------------------+
|references | Storperf_ |
@@ -80,9 +96,17 @@ Yardstick Test Case Description TC074
| | - rr: 100% Read, random access |
| | - wr: 100% Write, random access |
| | - rw: 70% Read / 30% write, random access |
-| | * nossd: Do not perform SSD style preconditioning. |
-| | * nowarm: Do not perform a warmup prior to |
| | measurements. |
+| | * workloads={json maps} |
+| | This parameter supercedes the workload and calls the V2.0 |
+| | API in StorPerf. It allows for greater control of the |
+| | parameters to be passed to FIO. For example, running a |
+| | random read/write with a mix of 90% read and 10% write |
+| | would be expressed as follows: |
+| | {"9010randrw": {"rw":"randrw","rwmixread": "90"}} |
+| | Note: This must be passed in as a string, so don't forget |
+| | to escape or otherwise properly deal with the quotes. |
+| | |
| | * report= [job_id] |
| | Query the status of the supplied job_id and report on |
| | metrics. If a workload is supplied, will report on only |
@@ -92,8 +116,7 @@ Yardstick Test Case Description TC074
| | |
+--------------+--------------------------------------------------------------+
|pre-test | If you do not have an Ubuntu 14.04 image in Glance, you will |
-|conditions | need to add one. A key pair for launching agents is also |
-| | required. |
+|conditions | need to add one. |
| | |
| | Storperf is required to be installed in the environment. |
| | There are two possible methods for Storperf installation: |
@@ -126,10 +149,21 @@ Yardstick Test Case Description TC074
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The Storperf is installed and Ubuntu 14.04 image is stored |
-| | in glance. TC is invoked and logs are produced and stored. |
+|step 1 | Yardstick calls StorPerf to create the heat stack with the |
+| | number of VMs and size of Cinder volumes specified. The |
+| | VMs will be on their own private subnet, and take floating |
+| | IP addresses from the specified public network. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick calls StorPerf to fill all the volumes with |
+| | random data. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Yardstick calls StorPerf to perform the series of tests |
+| | specified by the workload, queue depths and block sizes. |
| | |
-| | Result: Logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 4 | Yardstick calls StorPerf to delete the stack it created. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | None. Storage performance results are fetched and stored. |
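The "workloads={json maps}" option documented above must reach StorPerf as a string. As an illustrative sketch only (the 90/10 mix is the example from the table; using json.dumps is simply one way to obtain the escaped string form)::

    import json

    # Example workloads map from the table above: random read/write, 90% read.
    workloads = {"9010randrw": {"rw": "randrw", "rwmixread": "90"}}

    # StorPerf expects the map as a string, so serialize it before passing it
    # through as a task argument.
    workloads_arg = json.dumps(workloads)
    # -> '{"9010randrw": {"rw": "randrw", "rwmixread": "90"}}'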
diff --git a/docs/testing/user/userguide/references.rst b/docs/testing/user/userguide/references.rst
index 05729ba75..3e18c96e9 100644
--- a/docs/testing/user/userguide/references.rst
+++ b/docs/testing/user/userguide/references.rst
@@ -13,7 +13,6 @@ OPNFV
* Parser wiki: https://wiki.opnfv.org/parser
* Pharos wiki: https://wiki.opnfv.org/pharos
-* VTC: https://wiki.opnfv.org/vtc
* Yardstick CI: https://build.opnfv.org/ci/view/yardstick/
* Yardstick and ETSI TST001 presentation: https://wiki.opnfv.org/display/yardstick/Yardstick?preview=%2F2925202%2F2925205%2Fopnfv_summit_-_bridging_opnfv_and_etsi.pdf
* Yardstick Project presentation: https://wiki.opnfv.org/display/yardstick/Yardstick?preview=%2F2925202%2F2925208%2Fopnfv_summit_-_yardstick_project.pdf
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index b34672907..507491446 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -52,12 +52,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -83,12 +85,14 @@ downlink_0:
dstip4: "{{get(flow, 'flow.public_ip_0', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -111,12 +115,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -142,9 +148,11 @@ downlink_1:
dstip4: "{{get(flow, 'flow.public_ip_1', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 513aefb40..3cbd7cd62 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -50,12 +50,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -76,12 +78,14 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_0', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -102,12 +106,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -128,9 +134,11 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_1', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index aad751549..edff3612e 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -72,6 +72,7 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -79,6 +80,7 @@ uplink_0:
srcport: "{{get(flow, 'flow.src_port_0', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
id: 2
ipv4:
@@ -97,6 +99,7 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -104,6 +107,7 @@ downlink_0:
srcport: "{{get(flow, 'flow.dst_port_0', '0') }}"
dstport: "{{get(flow, 'flow.src_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
id: 3
ipv4:
@@ -131,6 +135,8 @@ uplink_1:
proto: "tcp"
srcip4: "{{get(flow, 'flow.srcip_1', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dstip_1', '192.16.0.0-192.16.0.31') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -138,6 +144,7 @@ uplink_1:
srcport: "{{get(flow, 'flow.src_port_1', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
id: 4
ipv4:
@@ -156,6 +163,7 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_1', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -163,3 +171,4 @@ downlink_1:
srcport: "{{get(flow, 'flow.dst_port_1', '0') }}"
dstport: "{{get(flow, 'flow.src_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
index fe8423d25..d08dbaa6e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -15,20 +15,30 @@ description: >
{% set public_network = public_network or "ext-net" %}
{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
+{% set workload = workload or "" %}
+{% set workloads = workloads or "" %}
+{% set agent_count = agent_count or 1 %}
+{% set block_sizes = block_sizes or "4096" %}
+{% set queue_depths = queue_depths or "4" %}
+{% set steady_state_samples = steady_state_samples or 10 %}
+{% set volume_size = volume_size or 4 %}
scenarios:
-
type: StorPerf
options:
- agent_count: 1
+ agent_count: {{agent_count}}
agent_image: "Ubuntu-16.04"
agent_flavor: "storperf"
public_network: {{public_network}}
- volume_size: 4
- block_sizes: "4096"
- queue_depths: "4"
+ volume_size: {{volume_size}}
+ block_sizes: {{block_sizes}}
+ queue_depths: {{queue_depths}}
StorPerf_ip: {{StorPerf_ip}}
query_interval: 10
timeout: 300
+ workload: {{workload}}
+ workloads: {{workloads}}
+ steady_state_samples: {{steady_state_samples}}
runner:
type: Iteration
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 979e3ab14..4c79a4931 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -23,7 +23,7 @@ def _execute_shell_command(command, stdin=None):
output = []
try:
output = subprocess.check_output(command, stdin=stdin, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -34,6 +34,8 @@ class BaremetalAttacker(BaseAttacker):
__attacker_type__ = 'bare-metal-down'
def setup(self):
+ # baremetal down need to recover even sla pass
+ self.mandatory = True
LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
@@ -49,8 +51,7 @@ class BaremetalAttacker(BaseAttacker):
LOG.debug("jump_host ip:%s user:%s", jump_host['ip'], jump_host['user'])
self.jump_connection = ssh.SSH.from_node(
jump_host,
- # why do we allow pwd for password?
- defaults={"user": "root", "password": jump_host.get("pwd")}
+ defaults={"user": "root", "password": jump_host.get("password")}
)
self.jump_connection.wait(timeout=600)
LOG.debug("ssh jump host success!")
@@ -59,7 +60,7 @@ class BaremetalAttacker(BaseAttacker):
self.ipmi_ip = host.get("ipmi_ip", None)
self.ipmi_user = host.get("ipmi_user", "root")
- self.ipmi_pwd = host.get("ipmi_pwd", None)
+ self.ipmi_pwd = host.get("ipmi_password", None)
self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
self.check_script = self.get_script_fullpath(
@@ -107,26 +108,3 @@ class BaremetalAttacker(BaseAttacker):
else:
_execute_shell_command(cmd, stdin=stdin_file)
LOG.info("Recover fault END")
-
-
-def _test(): # pragma: no cover
- host = {
- "ipmi_ip": "10.20.0.5",
- "ipmi_user": "root",
- "ipmi_pwd": "123456",
- "ip": "10.20.0.5",
- "user": "root",
- "key_filename": "/root/.ssh/id_rsa"
- }
- context = {"node1": host}
- attacker_cfg = {
- 'fault_type': 'bear-metal-down',
- 'host': 'node1',
- }
- ins = BaremetalAttacker(attacker_cfg, context)
- ins.setup()
- ins.inject_fault()
-
-
-if __name__ == '__main__': # pragma: no cover
- _test()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d67a16b98..7871cc918 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -63,6 +63,7 @@ class BaseAttacker(object):
self.data = {}
self.setup_done = False
self.intermediate_variables = {}
+ self.mandatory = False
@staticmethod
def get_attacker_cls(attacker_cfg):
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 7f976fdbc..fdfe7cbbe 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -88,9 +88,9 @@ class ServiceHA(base.Scenario):
def teardown(self):
"""scenario teardown"""
- # only recover when sla not pass
- if not self.sla_pass:
- for attacker in self.attackers:
+ # recover when mandatory or sla not pass
+ for attacker in self.attackers:
+ if attacker.mandatory or not self.sla_pass:
attacker.recover()
diff --git a/yardstick/benchmark/scenarios/lib/check_value.py b/yardstick/benchmark/scenarios/lib/check_value.py
index 759076068..4c9b27df4 100644
--- a/yardstick/benchmark/scenarios/lib/check_value.py
+++ b/yardstick/benchmark/scenarios/lib/check_value.py
@@ -13,6 +13,7 @@ from __future__ import absolute_import
import logging
from yardstick.benchmark.scenarios import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -34,24 +35,18 @@ class CheckValue(base.Scenario):
self.context_cfg = context_cfg
self.options = self.scenario_cfg['options']
- def run(self, result):
+ def run(self, _):
"""execute the test"""
op = self.options.get("operator")
LOG.debug("options=%s", self.options)
value1 = str(self.options.get("value1"))
value2 = str(self.options.get("value2"))
+ if (op == "eq" and value1 != value2) or (op == "ne" and
+ value1 == value2):
+ raise y_exc.ValueCheckError(
+ value1=value1, operator=op, value2=value2)
check_result = "PASS"
- if op == "eq" and value1 != value2:
- LOG.info("value1=%s, value2=%s, error: should equal!!!", value1,
- value2)
- check_result = "FAIL"
- assert value1 == value2, "Error %s!=%s" % (value1, value2)
- elif op == "ne" and value1 == value2:
- LOG.info("value1=%s, value2=%s, error: should not equal!!!",
- value1, value2)
- check_result = "FAIL"
- assert value1 != value2, "Error %s==%s" % (value1, value2)
LOG.info("Check result is %s", check_result)
keys = self.scenario_cfg.get('output', '').split()
values = [check_result]
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index a7557733b..10f10d4e6 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -118,6 +118,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
flow["dst_port_{}".format(index)] = dst_port
flow["count"] = fflow["count"]
+ flow["seed"] = fflow["seed"]
except KeyError:
flow = {}
return {"flow": flow}
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index f0b2361d6..8093cd2d2 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -8,15 +8,16 @@
##############################################################################
from __future__ import absolute_import
-import os
import logging
+import os
import time
-import requests
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios import base
+
LOG = logging.getLogger(__name__)
@@ -43,12 +44,6 @@ class StorPerf(base.Scenario):
wr: 100% Write, random access
rw: 70% Read / 30% write, random access
- nossd (Optional):
- Do not perform SSD style preconditioning.
-
- nowarm (Optional):
- Do not perform a warmup prior to measurements.
-
report = [job_id] (Optional):
Query the status of the supplied job_id and report on metrics.
If a workload is supplied, will report on only that subset.
@@ -79,10 +74,13 @@ class StorPerf(base.Scenario):
setup_query_content = jsonutils.loads(
setup_query.content)
- if setup_query_content["stack_created"]:
- self.setup_done = True
+ if ("stack_created" in setup_query_content and
+ setup_query_content["stack_created"]):
LOG.debug("stack_created: %s",
setup_query_content["stack_created"])
+ return True
+
+ return False
def setup(self):
"""Set the configuration."""
@@ -111,9 +109,13 @@ class StorPerf(base.Scenario):
elif setup_res.status_code == 200:
LOG.info("stack_id: %s", setup_res_content["stack_id"])
- while not self.setup_done:
- self._query_setup_state()
- time.sleep(self.query_interval)
+ while not self._query_setup_state():
+ time.sleep(self.query_interval)
+
+ # We do not want to load the results of the disk initialization,
+ # so it is not added to the results here.
+ self.initialize_disks()
+ self.setup_done = True
def _query_job_state(self, job_id):
"""Query the status of the supplied job_id and report on metrics"""
@@ -149,7 +151,8 @@ class StorPerf(base.Scenario):
if not self.setup_done:
self.setup()
- metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+ metadata = {"build_tag": "latest",
+ "test_case": "opnfv_yardstick_tc074"}
metadata_payload_dict = {"pod_name": "NODE_NAME",
"scenario_name": "DEPLOY_SCENARIO",
"version": "YARDSTICK_BRANCH"}
@@ -162,7 +165,9 @@ class StorPerf(base.Scenario):
job_args = {"metadata": metadata}
job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
- "target", "nossd", "nowarm", "workload"]
+ "target", "workload", "workloads",
+ "agent_count", "steady_state_samples"]
+ job_args["deadline"] = self.options["timeout"]
for job_argument in job_args_payload_list:
try:
@@ -170,8 +175,16 @@ class StorPerf(base.Scenario):
except KeyError:
pass
+ api_version = "v1.0"
+
+ if ("workloads" in job_args and
+ job_args["workloads"] is not None and
+ len(job_args["workloads"])) > 0:
+ api_version = "v2.0"
+
LOG.info("Starting a job with parameters %s", job_args)
- job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
+ job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+ api_version),
json=job_args)
job_res_content = jsonutils.loads(job_res.content)
@@ -187,15 +200,6 @@ class StorPerf(base.Scenario):
self._query_job_state(job_id)
time.sleep(self.query_interval)
- terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
- self.target)
-
- if terminate_res.status_code != 200:
- terminate_res_content = jsonutils.loads(
- terminate_res.content)
- raise RuntimeError("Failed to start a job, error message:",
- terminate_res_content["message"])
-
# TODO: Support using ETA to polls for completion.
# Read ETA, next poll in 1/2 ETA time slot.
# If ETA is greater than the maximum allowed job time,
@@ -216,14 +220,46 @@ class StorPerf(base.Scenario):
result.update(result_res_content)
+ def initialize_disks(self):
+ """Fills the target with random data prior to executing workloads"""
+
+ job_args = {}
+ job_args_payload_list = ["target"]
+
+ for job_argument in job_args_payload_list:
+ try:
+ job_args[job_argument] = self.options[job_argument]
+ except KeyError:
+ pass
+
+ LOG.info("Starting initialization with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+ self.target, json=job_args)
+
+ job_res_content = jsonutils.loads(job_res.content)
+
+ if job_res.status_code != 200:
+ raise RuntimeError(
+ "Failed to start initialization job, error message:",
+ job_res_content["message"])
+ elif job_res.status_code == 200:
+ job_id = job_res_content["job_id"]
+ LOG.info("Started initialization as job id: %s...", job_id)
+
+ while not self.job_completed:
+ self._query_job_state(job_id)
+ time.sleep(self.query_interval)
+
+ self.job_completed = False
+
def teardown(self):
"""Deletes the agent configuration and the stack"""
- teardown_res = requests.delete('http://%s:5000/api/v1.0/\
- configurations' % self.target)
+ teardown_res = requests.delete(
+ 'http://%s:5000/api/v1.0/configurations' % self.target)
if teardown_res.status_code == 400:
teardown_res_content = jsonutils.loads(
- teardown_res.content)
+ teardown_res.json_data)
raise RuntimeError("Failed to reset environment, error message:",
teardown_res_content['message'])
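Taken together, the changes above make StorPerf.run() assemble the job arguments, pick the jobs API version based on whether a "workloads" map is present, and POST the job. A condensed, illustrative sketch of that flow (endpoint and argument names as they appear in the diff; polling and metric collection omitted)::

    import requests
    from oslo_serialization import jsonutils

    def start_storperf_job(target, options):
        """Illustrative sketch of the job submission flow in StorPerf.run()."""
        job_args = {"metadata": {"build_tag": "latest",
                                 "test_case": "opnfv_yardstick_tc074"}}
        for key in ("block_sizes", "queue_depths", "deadline", "target",
                    "workload", "workloads", "agent_count",
                    "steady_state_samples"):
            if key in options:
                job_args[key] = options[key]
        job_args["deadline"] = options["timeout"]

        # The v2.0 jobs API is only used when a non-empty "workloads" map is given.
        api_version = "v2.0" if job_args.get("workloads") else "v1.0"

        job_res = requests.post("http://%s:5000/api/%s/jobs" % (target, api_version),
                                json=job_args)
        job_res_content = jsonutils.loads(job_res.content)
        if job_res.status_code != 200:
            raise RuntimeError("Failed to start a job, error message:",
                               job_res_content["message"])
        return job_res_content["job_id"]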
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 48f15c059..b39a0af9c 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -88,6 +88,10 @@ class YardstickBannedModuleImported(YardstickException):
message = 'Module "%(module)s" cannnot be imported. Reason: "%(reason)s"'
+class IXIAUnsupportedProtocol(YardstickException):
+ message = 'Protocol "%(protocol)" is not supported in IXIA'
+
+
class PayloadMissingAttributes(YardstickException):
message = ('Error instantiating a Payload class, missing attributes: '
'%(missing_attributes)s')
@@ -408,3 +412,7 @@ class AclUnknownActionTemplate(YardstickException):
class InvalidMacAddress(YardstickException):
message = 'Mac address "%(mac_address)s" is invalid'
+
+
+class ValueCheckError(YardstickException):
+ message = 'Constraint "%(value1)s %(operator)s %(value2)s" does not hold'
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index ba27d4d05..8274ff9ce 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -34,12 +34,21 @@ PROTO_UDP = 'udp'
PROTO_TCP = 'tcp'
PROTO_VLAN = 'vlan'
+SINGLE_VALUE = "singleValue"
+
+S_VLAN = 0
+C_VLAN = 1
+
+ETHER_TYPE_802_1ad = '0x88a8'
+
IP_VERSION_4_MASK = 24
IP_VERSION_6_MASK = 64
TRAFFIC_STATUS_STARTED = 'started'
TRAFFIC_STATUS_STOPPED = 'stopped'
+SUPPORTED_PROTO = [PROTO_UDP]
+
# NOTE(ralonsoh): this pragma will be removed in the last patch of this series
class IxNextgen(object): # pragma: no cover
@@ -357,7 +366,6 @@ class IxNextgen(object): # pragma: no cover
raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
type = traffic_param.get('traffic_type', 'fixedDuration')
- duration = traffic_param.get('duration', 30)
rate = traffic_param['rate']
rate_unit = (
'framesPerSecond' if traffic_param['rate_unit'] ==
@@ -366,10 +374,28 @@ class IxNextgen(object): # pragma: no cover
traffic_param['outer_l2']['framesize'])
srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01'))
dstmac = str(traffic_param.get('dstmac', '00:00:00:00:00:02'))
- # NOTE(ralonsoh): add QinQ tagging when
- # traffic_param['outer_l2']['QinQ'] exists.
- # s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
- # c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ if traffic_param['outer_l2']['QinQ']:
+ s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
+ c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'etherType')
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', ETHER_TYPE_802_1ad,
+ '-fieldValue', ETHER_TYPE_802_1ad,
+ '-valueType', SINGLE_VALUE)
+
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+
+ self._update_vlan_tag(fg_id, s_vlan, S_VLAN)
+ self._update_vlan_tag(fg_id, c_vlan, C_VLAN)
self.ixnet.setMultiAttribute(
config_element + '/transmissionControl',
@@ -390,6 +416,27 @@ class IxNextgen(object): # pragma: no cover
self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
'sourceAddress', srcmac)
+ def _update_vlan_tag(self, fg_id, params, vlan=0):
+ field_to_param_map = {
+ 'vlanUserPriority': 'priority',
+ 'cfi': 'cfi',
+ 'vlanID': 'id'
+ }
+ for field, param in field_to_param_map.items():
+ value = params.get(param)
+ if value:
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_VLAN)[vlan],
+ field)
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', value,
+ '-fieldValue', value,
+ '-valueType', SINGLE_VALUE)
+
+ self.ixnet.commit()
+
def _update_ipv4_address(self, ip_descriptor, field, ip_address, seed,
mask, count):
"""Set the IPv4 address in a config element stack IP field
@@ -430,15 +477,75 @@ class IxNextgen(object): # pragma: no cover
count = traffic_param['outer_l3']['count']
srcip = str(traffic_param['outer_l3']['srcip'])
dstip = str(traffic_param['outer_l3']['dstip'])
+ seed = traffic_param['outer_l3']['seed']
srcmask = traffic_param['outer_l3']['srcmask'] or IP_VERSION_4_MASK
dstmask = traffic_param['outer_l3']['dstmask'] or IP_VERSION_4_MASK
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'srcIp', srcip, 1, srcmask, count)
+ 'srcIp', srcip, seed, srcmask, count)
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'dstIp', dstip, 1, dstmask, count)
+ 'dstIp', dstip, seed, dstmask, count)
+
+ def update_l4(self, traffic):
+ """Update the L4 headers
+
+ NOTE: Only UDP is currently supported
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ proto = traffic_param['outer_l3']['proto']
+ if proto not in SUPPORTED_PROTO:
+ raise exceptions.IXIAUnsupportedProtocol(protocol=proto)
+
+ count = traffic_param['outer_l4']['count']
+ seed = traffic_param['outer_l4']['seed']
+
+ srcport = traffic_param['outer_l4']['srcport']
+ srcmask = traffic_param['outer_l4']['srcportmask']
+
+ dstport = traffic_param['outer_l4']['dstport']
+ dstmask = traffic_param['outer_l4']['dstportmask']
+
+ if proto in SUPPORTED_PROTO:
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'srcPort', srcport, seed, srcmask, count)
+
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'dstPort', dstport, seed, dstmask, count)
+
+ def _update_udp_port(self, descriptor, field, value,
+ seed=1, mask=0, count=1):
+ """Set the UDP port in a config element stack UDP field
+
+ :param udp_descriptor: (str) UDP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"udp-3"
+ :param field: (str) field name, e.g.: scrPort, dstPort
+ :param value: (int) UDP port fixed bits
+ :param seed: (int) seed length
+ :param mask: (int) UDP port mask
+ :param count: (int) number of random ports to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(descriptor, field)
+
+ if mask == 0:
+ seed = count = 1
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-seed', seed,
+ '-fixedBits', value,
+ '-randomMask', mask,
+ '-valueType', 'random',
+ '-countValue', count)
+
+ self.ixnet.commit()
def _build_stats_map(self, view_obj, name_map):
return {data_yardstick: self.ixnet.execute(
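The _update_udp_port() helper above drives IxNetwork's "random" value type for UDP ports. The relationship between the fixed port, mask, seed and count can be summarised with a small hypothetical helper (attribute names taken from the setMultiAttribute call in the diff; this is not part of the IxNextgen API)::

    def udp_port_random_attrs(value, seed=1, mask=0, count=1):
        """Hypothetical summary of the attributes _update_udp_port() sets.

        A mask of 0 means the fixed port alone is used, so seed and count
        collapse to 1; otherwise 'count' random ports are generated from
        'value' masked by 'mask', seeded with 'seed'.
        """
        if mask == 0:
            seed = count = 1
        return {'auto': 'false', 'seed': seed, 'fixedBits': value,
                'randomMask': mask, 'valueType': 'random', 'countValue': count}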
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 43455330f..760b1e8d3 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -42,6 +42,13 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
return _ip_range[0], mask
+ def _get_fixed_and_mask(self, port_range):
+ _port_range = str(port_range).split('-')
+ if len(_port_range) == 1:
+ return int(_port_range[0]), 0
+
+ return int(_port_range[0]), int(_port_range[1])
+
def _get_ixia_traffic_profile(self, profile_data, mac=None):
mac = {} if mac is None else mac
result = {}
@@ -70,6 +77,9 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
srcip, srcmask = self._get_ip_and_mask(ip[src_key])
dstip, dstmask = self._get_ip_and_mask(ip[dst_key])
+ outer_l4 = value.get('outer_l4')
+ src_port, src_port_mask = self._get_fixed_and_mask(outer_l4['srcport'])
+ dst_port, dst_port_mask = self._get_fixed_and_mask(outer_l4['dstport'])
result[traffickey] = {
'bidir': False,
'id': port_id,
@@ -78,6 +88,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'outer_l2': {
'framesize': value['outer_l2']['framesize'],
'framesPerSecond': True,
+ 'QinQ': value['outer_l2'].get('QinQ'),
'srcmac': mac['src_mac_{}'.format(port_index)],
'dstmac': mac['dst_mac_{}'.format(port_index)],
},
@@ -85,6 +96,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'count': ip['count'],
'dscp': ip['dscp'],
'ttl': ip['ttl'],
+ 'seed': ip['seed'],
'srcip': srcip,
'dstip': dstip,
'srcmask': srcmask,
@@ -92,7 +104,15 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'type': key,
'proto': ip['proto'],
},
- 'outer_l4': value['outer_l4'],
+ 'outer_l4': {
+ 'srcport': src_port,
+ 'dstport': dst_port,
+ 'srcportmask': src_port_mask,
+ 'dstportmask': dst_port_mask,
+ 'count': outer_l4['count'],
+ 'seed': outer_l4['seed'],
+ }
+
}
except KeyError:
continue
@@ -102,6 +122,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
def _ixia_traffic_generate(self, traffic, ixia_obj):
ixia_obj.update_frame(traffic, self.config.duration)
ixia_obj.update_ip_packet(traffic)
+ ixia_obj.update_l4(traffic)
ixia_obj.start_traffic()
def update_traffic_profile(self, traffic_generator):
@@ -147,12 +168,8 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
[samples[iface]['in_packets'] for iface in samples])
out_packets_sum = sum(
[samples[iface]['out_packets'] for iface in samples])
- rx_throughput = sum(
- [samples[iface]['RxThroughput'] for iface in samples])
- rx_throughput = round(float(rx_throughput), 2)
- tx_throughput = sum(
- [samples[iface]['TxThroughput'] for iface in samples])
- tx_throughput = round(float(tx_throughput), 2)
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
packet_drop = abs(out_packets_sum - in_packets_sum)
try:
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 9457096c8..506a880e0 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -88,11 +88,6 @@ class ProxBinSearchProfile(ProxProfile):
theor_max_thruput = actual_max_thruput = 0
result_samples = {}
- rate_samples = {}
- pos_retry = 0
- neg_retry = 0
- total_retry = 0
- ok_retry = 0
# Store one time only value in influxdb
single_samples = {
@@ -110,15 +105,11 @@ class ProxBinSearchProfile(ProxProfile):
"interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
- for test_value in self.bounds_iterator(LOG):
+ for step_id, test_value in enumerate(self.bounds_iterator(LOG)):
pos_retry = 0
neg_retry = 0
total_retry = 0
- rate_samples["MAX_Rate"] = self.current_upper
- rate_samples["MIN_Rate"] = self.current_lower
- rate_samples["Test_Rate"] = test_value
- self.queue.put(rate_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info("Checking MAX %s MIN %s TEST %s",
self.current_upper, self.lower_bound, test_value)
while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
@@ -188,6 +179,11 @@ class ProxBinSearchProfile(ProxProfile):
self.queue.put({'theor_max_throughput': theor_max_thruput})
LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+ samples["MAX_Rate"] = self.current_upper
+ samples["MIN_Rate"] = self.current_lower
+ samples["Test_Rate"] = test_value
+ samples["Step_Id"] = step_id
+ samples["Confirmation_Retry"] = total_retry
self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info(">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 0e1dbd592..b54fc575f 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -70,7 +70,7 @@ class PortPgIDMap(object):
class RFC2544Profile(trex_traffic_profile.TrexProfile):
"""TRex RFC2544 traffic profile"""
- TOLERANCE_LIMIT = 0.05
+ TOLERANCE_LIMIT = 0.01
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
@@ -246,6 +246,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
def get_drop_percentage(self, samples, tol_low, tol_high,
correlated_traffic):
"""Calculate the drop percentage and run the traffic"""
+ completed = False
tx_rate_fps = 0
rx_rate_fps = 0
for sample in samples:
@@ -266,15 +267,15 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
drop_percent = round(
(float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- tol_high = tol_high if tol_high > self.TOLERANCE_LIMIT else tol_high
- tol_low = tol_low if tol_low > self.TOLERANCE_LIMIT else tol_low
+ tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+ tol_low = min(tol_low, self.TOLERANCE_LIMIT)
if drop_percent > tol_high:
self.max_rate = self.rate
elif drop_percent < tol_low:
self.min_rate = self.rate
- # else:
- # NOTE(ralonsoh): the test should finish here
- # pass
+ else:
+ completed = True
+
last_rate = self.rate
self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
@@ -295,4 +296,4 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
'Rate': last_rate,
'Latency': latency
}
- return output
+ return completed, output
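The rate-adjustment logic above is the RFC 2544 binary search: the measured drop percentage moves either the upper or the lower bound, and the next offered rate is their midpoint; the search completes once the drop percentage lands inside the tolerance window. A minimal standalone sketch of that convergence loop (illustrative only; measure_drop_percent is a hypothetical stand-in for an actual traffic run)::

    def rfc2544_search(measure_drop_percent, tol_low=0.0, tol_high=0.0,
                       tolerance_limit=0.01, max_rate=100.0, min_rate=0.0,
                       max_iterations=20):
        """Binary search for the highest offered rate whose drop percentage
        falls inside [tol_low, tol_high]."""
        # Tolerances are clamped the same way RFC2544Profile.TOLERANCE_LIMIT is.
        tol_high = max(tol_high, tolerance_limit)
        tol_low = min(tol_low, tolerance_limit)
        rate = last_rate = round((max_rate + min_rate) / 2.0, 5)
        for _ in range(max_iterations):
            drop_percent = measure_drop_percent(rate)  # hypothetical hook
            if drop_percent > tol_high:
                max_rate = rate       # too much loss: lower the upper bound
            elif drop_percent < tol_low:
                min_rate = rate       # clean run with margin: raise the lower bound
            else:
                return rate           # converged: drop percentage within tolerance
            last_rate = rate
            rate = round((max_rate + min_rate) / 2.0, 5)
        return last_rate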
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 3ef7c33c5..a09f2a7a9 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -426,7 +426,8 @@ class ClientResourceHelper(ResourceHelper):
iteration_index = 0
while self._terminated.value == 0:
iteration_index += 1
- self._run_traffic_once(traffic_profile)
+ if self._run_traffic_once(traffic_profile):
+ self._terminated.value = 1
mq_producer.tg_method_iteration(iteration_index)
self.client.stop(self.all_ports)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 4d3bc2ce5..8927ce156 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -60,7 +60,7 @@ class IxiaResourceHelper(ClientResourceHelper):
def stop_collect(self):
self._terminated.value = 1
- def generate_samples(self, ports, key=None):
+ def generate_samples(self, ports, duration):
stats = self.get_stats()
samples = {}
@@ -70,27 +70,23 @@ class IxiaResourceHelper(ClientResourceHelper):
try:
# reverse lookup port name from port_num so the stats dict is descriptive
intf = self.vnfd_helper.find_interface_by_port(port_num)
- port_name = intf["name"]
+ port_name = intf['name']
+ avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num]
+ min_latency = stats['Store-Forward_Min_latency_ns'][port_num]
+ max_latency = stats['Store-Forward_Max_latency_ns'][port_num]
samples[port_name] = {
- "rx_throughput_kps": float(stats["Rx_Rate_Kbps"][port_num]),
- "tx_throughput_kps": float(stats["Tx_Rate_Kbps"][port_num]),
- "rx_throughput_mbps": float(stats["Rx_Rate_Mbps"][port_num]),
- "tx_throughput_mbps": float(stats["Tx_Rate_Mbps"][port_num]),
- "in_packets": int(stats["Valid_Frames_Rx"][port_num]),
- "out_packets": int(stats["Frames_Tx"][port_num]),
- # NOTE(ralonsoh): we need to make the traffic injection
- # time variable.
- "RxThroughput": int(stats["Valid_Frames_Rx"][port_num]) / 30,
- "TxThroughput": int(stats["Frames_Tx"][port_num]) / 30,
+ 'rx_throughput_kps': float(stats['Rx_Rate_Kbps'][port_num]),
+ 'tx_throughput_kps': float(stats['Tx_Rate_Kbps'][port_num]),
+ 'rx_throughput_mbps': float(stats['Rx_Rate_Mbps'][port_num]),
+ 'tx_throughput_mbps': float(stats['Tx_Rate_Mbps'][port_num]),
+ 'in_packets': int(stats['Valid_Frames_Rx'][port_num]),
+ 'out_packets': int(stats['Frames_Tx'][port_num]),
+ 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration,
+ 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration,
+ 'Store-Forward_Avg_latency_ns': avg_latency,
+ 'Store-Forward_Min_latency_ns': min_latency,
+ 'Store-Forward_Max_latency_ns': max_latency
}
- if key:
- avg_latency = stats["Store-Forward_Avg_latency_ns"][port_num]
- min_latency = stats["Store-Forward_Min_latency_ns"][port_num]
- max_latency = stats["Store-Forward_Max_latency_ns"][port_num]
- samples[port_name][key] = \
- {"Store-Forward_Avg_latency_ns": avg_latency,
- "Store-Forward_Min_latency_ns": min_latency,
- "Store-Forward_Max_latency_ns": max_latency}
except IndexError:
pass
@@ -129,13 +125,11 @@ class IxiaResourceHelper(ClientResourceHelper):
self, self.client, mac)
self.client_started.value = 1
# pylint: disable=unnecessary-lambda
- utils.wait_until_true(lambda: self.client.is_traffic_stopped())
- samples = self.generate_samples(traffic_profile.ports)
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped(),
+ timeout=traffic_profile.config.duration * 2)
+ samples = self.generate_samples(traffic_profile.ports,
+ traffic_profile.config.duration)
- # NOTE(ralonsoh): the traffic injection duration is fixed to 30
- # seconds. This parameter is configurable and must be retrieved
- # from the traffic_profile.full_profile information.
- # Every flow must have the same duration.
completed, samples = traffic_profile.get_drop_percentage(
samples, min_tol, max_tol, first_run=first_run)
self._queue.put(samples)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index cdbb41485..7ecb12478 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -45,11 +45,12 @@ class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
time.sleep(self.SAMPLING_PERIOD)
traffic_profile.stop_traffic(self)
- output = traffic_profile.get_drop_percentage(
+ completed, output = traffic_profile.get_drop_percentage(
samples, self.rfc2544_helper.tolerance_low,
self.rfc2544_helper.tolerance_high,
self.rfc2544_helper.correlated_traffic)
self._queue.put(output)
+ return completed
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index e0c0db262..99a5760a3 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -227,14 +227,10 @@ name (i.e. %s).
def add_volume_attachment(self, server_name, volume_name, mountpoint=None):
"""add to the template an association of volume to instance"""
- log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ", server_name,
- volume_name)
-
+ log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ",
+ server_name, volume_name)
name = "%s-%s" % (server_name, volume_name)
-
- volume_id = op_utils.get_volume_id(volume_name)
- if not volume_id:
- volume_id = {'get_resource': volume_name}
+ volume_id = {'get_resource': volume_name}
self.resources[name] = {
'type': 'OS::Cinder::VolumeAttachment',
'properties': {'instance_uuid': {'get_resource': server_name},
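
After this change the attachment always references the Heat-managed volume resource instead of looking up a pre-existing Cinder volume ID. A small sketch of the resulting resource definition (written as a standalone function for illustration; the real code builds `self.resources[name]` in place):

def volume_attachment_resource(server_name, volume_name, mountpoint=None):
    properties = {'instance_uuid': {'get_resource': server_name},
                  'volume_id': {'get_resource': volume_name}}
    if mountpoint:
        properties['mountpoint'] = mountpoint
    return {'type': 'OS::Cinder::VolumeAttachment',
            'properties': properties}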
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 69428f3af..8bdc32c7c 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -499,7 +499,7 @@ class AutoConnectSSH(SSH):
""" Don't close anything, just force creation of a new client """
self._client = False
- def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=True):
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
self._connect()
return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
raise_on_error)
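
Changing the `AutoConnectSSH.execute()` default to `raise_on_error=False` means failed commands come back as a non-zero exit code rather than an `SSHError`. A usage sketch under that assumption (`conn` is an `AutoConnectSSH` instance; the commands are made up):

def remote_path_exists(conn, path):
    # New default: no exception on failure, just inspect the exit code.
    exit_code, _, _ = conn.execute('test -e %s' % path)
    return exit_code == 0

# Callers that still want fail-fast behaviour opt back in explicitly:
# conn.execute('systemctl restart nova-api', raise_on_error=True)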
diff --git a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
new file mode 100644
index 000000000..7b1e1e976
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
@@ -0,0 +1,220 @@
+##############################################################################
+# Copyright (c) 2018 Nokia and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+import os
+import time
+
+from yardstick.benchmark.runners import arithmetic
+
+
+class ArithmeticRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ def __init__(self):
+ self.count = 101
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ return self.count
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {
+ 'interval': 0,
+ 'iter_type': 'nested_for_loops',
+ 'iterators': [
+ {
+ 'name': 'stride',
+ 'start': 64,
+ 'stop': 128,
+ 'step': 64
+ },
+ {
+ 'name': 'size',
+ 'start': 500,
+ 'stop': 2000,
+ 'step': 500
+ }
+ ]
+ },
+ 'type': 'some_type'
+ }
+
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_process_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ @mock.patch.object(os, 'getpid')
+ @mock.patch.object(multiprocessing, 'Process')
+ def test__run_benchmark_called_with(self, mock_multiprocessing_process,
+ mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ runner = arithmetic.ArithmeticRunner({})
+ benchmark_cls = mock.Mock()
+ runner._run_benchmark(benchmark_cls, 'my_method', self.scenario_cfg,
+ {})
+ mock_multiprocessing_process.assert_called_once_with(
+ name='Arithmetic-some_type-101',
+ target=arithmetic._worker_process,
+ args=(runner.result_queue, benchmark_cls, 'my_method',
+ self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_nested_for_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 8)
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 8)
+ self.assertEqual(mock_time_sleep.call_count, 8)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_tuple_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 2)
+ self.assertEqual(self.benchmark.my_method.call_count, 2)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 2)
+ self.assertEqual(mock_time_sleep.call_count, 2)
+
+ def test__worker_process_stored_options_nested_for_loops(self):
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_stored_options_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 1000})
+
+ def test__worker_process_aborted_set_early(self):
+ aborted = multiprocessing.Event()
+ aborted.set()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ aborted, mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.scenario_cfg['options'], {})
+ self.benchmark.my_method.assert_not_called()
+
+ def test__worker_process_output_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103, 104, 105, 106, 107, 108, 109])
+
+ def test__worker_process_output_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103])
+
+ def test__worker_process_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
+
+ def test__worker_process_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
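
The expected call counts in these tests follow directly from the two iterators defined in `setUp()`: `nested_for_loops` walks the Cartesian product of both value lists, while `tuple_loops` pairs them index by index. A quick check of that arithmetic (assuming the runner treats `stop` as inclusive, which the asserted `options` values imply):

import itertools

stride_values = list(range(64, 128 + 1, 64))     # [64, 128]
size_values = list(range(500, 2000 + 1, 500))    # [500, 1000, 1500, 2000]

nested = list(itertools.product(stride_values, size_values))
paired = list(zip(stride_values, size_values))

assert len(nested) == 8 and nested[-1] == (128, 2000)   # nested_for_loops cases
assert len(paired) == 2 and paired[-1] == (128, 1000)   # tuple_loops cases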
diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py
index 21e3874b8..d4801ef2c 100644
--- a/yardstick/tests/unit/benchmark/runner/test_duration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_duration.py
@@ -11,17 +11,50 @@ import mock
import unittest
import multiprocessing
import os
+import time
from yardstick.benchmark.runners import duration
+from yardstick.common import exceptions as y_exc
class DurationRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
+ BROAD_EXCEPTION_SIDE_EFFECT = 2
+
+ def __init__(self, side_effect=0):
+ self.count = 101
+ self.side_effect = side_effect
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ if self.side_effect == self.SLA_VALIDATION_ERROR_SIDE_EFFECT:
+ raise y_exc.SLAValidationError(case_name='My Case',
+ error_msg='my error message')
+ elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT:
+ raise y_exc.YardstickException
+ return self.count
+
def setUp(self):
self.scenario_cfg = {
'runner': {'interval': 0, "duration": 0},
'type': 'some_type'
}
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ def _assert_defaults__worker_run_one_iteration(self):
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
@mock.patch.object(os, 'getpid')
@mock.patch.object(multiprocessing, 'Process')
def test__run_benchmark_called_with(self, mock_multiprocessing_process,
@@ -37,3 +70,246 @@ class DurationRunnerTest(unittest.TestCase):
target=duration._worker_process,
args=(runner.result_queue, benchmark_cls, 'my_method',
self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ def test__worker_process_called_with_cfg(self):
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_called_with_cfg_loop(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.call_count, 2)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ def test__worker_process_called_without_cfg(self):
+ scenario_cfg = {'runner': {}}
+ aborted = multiprocessing.Event()
+ aborted.set()
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ scenario_cfg, {}, aborted, mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(1)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(1)
+ self.benchmark.teardown.assert_called_once()
+
+ def test__worker_process_output_queue(self):
+ self.benchmark.my_method = mock.Mock(return_value='my_result')
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+ self.assertEquals(output_queue.get(), 'my_result')
+
+ def test__worker_process_output_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 101
+ while not output_queue.empty():
+ count += 1
+ self.assertEquals(output_queue.get(), count)
+
+ def test__worker_process_queue(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.5
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+
+ def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
+ self.scenario_cfg['sla'] = {}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_queue_on_sla_validation_error_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], ('My Case SLA validation failed. '
+ 'Error: my error message',))
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_broad_exception(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_queue_on_broad_exception(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertNotEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_benchmark_teardown_on_broad_exception(self):
+ self.benchmark.teardown = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ with self.assertRaises(SystemExit) as raised:
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.assertEqual(raised.exception.code, 1)
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
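
The new SLA tests pin down three behaviours: a missing `sla` section swallows the error, `action: monitor` records it in the result, and anything else (including an empty `sla` dict) re-raises. A compressed sketch of that decision, not the actual `_worker_process` body:

from yardstick.common import exceptions as y_exc

def run_iteration(method, data, scenario_cfg):
    sla_action = None
    if 'sla' in scenario_cfg:
        sla_action = scenario_cfg['sla'].get('action', 'assert')
    errors = ''
    try:
        method(data)
    except y_exc.SLAValidationError as error:
        if sla_action == 'assert':
            raise                      # bubbles up, as the 'assert' tests expect
        if sla_action == 'monitor':
            errors = error.args        # recorded alongside the benchmark data
    except Exception as error:         # pylint: disable=broad-except
        errors = str(error)            # broad failures never abort the run
    return errors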
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index d5c95a086..0f68753fd 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -48,7 +48,7 @@ class AttackerBaremetalTestCase(unittest.TestCase):
host = {
"ipmi_ip": "10.20.0.5",
"ipmi_user": "root",
- "ipmi_pwd": "123456",
+ "ipmi_password": "123456",
"ip": "10.20.0.5",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
@@ -77,7 +77,7 @@ class AttackerBaremetalTestCase(unittest.TestCase):
def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess):
self.attacker_cfg["jump_host"] = 'node1'
- self.context["node1"]["pwd"] = "123456"
+ self.context["node1"]["password"] = "123456"
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
new file mode 100644
index 000000000..74f86983b
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+
+class BaseAttackerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.attacker_cfg = {
+ 'fault_type': 'test-attacker',
+ 'action_parameter': {'process_name': 'nova_api'},
+ 'rollback_parameter': {'process_name': 'nova_api'},
+ 'key': 'stop-service',
+ 'attack_key': 'stop-service',
+ 'host': 'node1',
+ }
+ self.base_attacker = baseattacker.BaseAttacker({}, {})
+
+ def test__init__(self):
+ self.assertEqual(self.base_attacker.data, {})
+ self.assertFalse(self.base_attacker.mandatory)
+ self.assertEqual(self.base_attacker.intermediate_variables, {})
+ self.assertFalse(self.base_attacker.mandatory)
+
+ def test_get_attacker_cls(self):
+ with self.assertRaises(RuntimeError):
+ baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
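
`get_attacker_cls()` ends in `RuntimeError` here because no attacker class registers the made-up `test-attacker` fault type. A minimal sketch of that lookup pattern (the attribute and registry names are assumptions, not the exact implementation):

def get_attacker_cls(attacker_cfg, attacker_classes):
    fault_type = attacker_cfg['fault_type']
    for cls in attacker_classes:                    # e.g. BaseAttacker subclasses
        if getattr(cls, '__attacker_type__', None) == fault_type:
            return cls
    raise RuntimeError('No attacker registered for fault type %r' % fault_type)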
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index ec0e5973c..d61fa67c7 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -109,6 +109,23 @@ class ServicehaTestCase(unittest.TestCase):
ret = {}
p.run(ret)
attacker = mock.Mock()
+ attacker.mandatory = False
p.attackers = [attacker]
p.teardown()
attacker.recover.assert_not_called()
+
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_teardown_when_mandatory(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ p.setup()
+ self.assertTrue(p.setup_done)
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+ ret = {}
+ p.run(ret)
+ attacker = mock.Mock()
+ attacker.mandatory = True
+ p.attackers = [attacker]
+ p.teardown()
+ attacker.recover.assert_called_once()
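
Together with the existing teardown test, this new case fixes the contract: recovery runs in teardown only for attackers flagged as mandatory. The behaviour the two tests pin down, in sketch form:

def teardown(attackers):
    for attacker in attackers:
        if attacker.mandatory:
            attacker.recover()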
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
index 7a2324b3d..b0488bacd 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
@@ -8,28 +8,56 @@
##############################################################################
import unittest
-from yardstick.benchmark.scenarios.lib.check_value import CheckValue
+from yardstick.benchmark.scenarios.lib import check_value
+from yardstick.common import exceptions as y_exc
+
class CheckValueTestCase(unittest.TestCase):
- def setUp(self):
- self.result = {}
+ def test_eq_pass(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 1}}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertEqual({}, result)
+
+ def test_ne_pass(self):
+ scenario_cfg = {'options': {'operator': 'ne',
+ 'value1': 1,
+ 'value2': 2}}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertEqual({}, result)
+
+ def test_result(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 1},
+ 'output': 'foo'}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertDictEqual(result, {'foo': 'PASS'})
- def test_check_value_eq(self):
- scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 2}}
- obj = CheckValue(scenario_cfg, {})
- self.assertRaises(AssertionError, obj.run, self.result)
- self.assertEqual({}, self.result)
+ def test_eq(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 2}}
+ obj = check_value.CheckValue(scenario_cfg, {})
- def test_check_value_eq_pass(self):
- scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 1}}
- obj = CheckValue(scenario_cfg, {})
+ with self.assertRaises(y_exc.ValueCheckError):
+ result = obj.run({})
+ self.assertEqual({}, result)
- obj.run(self.result)
- self.assertEqual({}, self.result)
+ def test_ne(self):
+ scenario_cfg = {'options': {'operator': 'ne',
+ 'value1': 1,
+ 'value2': 1}}
+ obj = check_value.CheckValue(scenario_cfg, {})
- def test_check_value_ne(self):
- scenario_cfg = {'options': {'operator': 'ne', 'value1': 1, 'value2': 1}}
- obj = CheckValue(scenario_cfg, {})
- self.assertRaises(AssertionError, obj.run, self.result)
- self.assertEqual({}, self.result)
+ with self.assertRaises(y_exc.ValueCheckError):
+ result = obj.run({})
+ self.assertEqual({}, result)
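
The rewritten tests switch the failure signal from `AssertionError` to `y_exc.ValueCheckError` and add an optional `output` key that maps to `'PASS'`. A minimal sketch of the comparison, assuming the scenario dispatches on the operator name; the exception's constructor arguments are an assumption:

import operator

from yardstick.common import exceptions as y_exc

OPERATORS = {'eq': operator.eq, 'ne': operator.ne}

def check(options):
    op_name = options['operator']
    if not OPERATORS[op_name](options['value1'], options['value2']):
        raise y_exc.ValueCheckError(value1=options['value1'],
                                    operator=op_name,
                                    value2=options['value2'])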
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 5844746ab..2ba53cb93 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -11,18 +11,18 @@
from __future__ import absolute_import
+import json
import unittest
import mock
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios.storage import storperf
# pylint: disable=unused-argument
# disable this for now because I keep forgetting mock patch arg ordering
-
-
def mocked_requests_config_post(*args, **kwargs):
class MockResponseConfigPost(object):
@@ -32,10 +32,24 @@ def mocked_requests_config_post(*args, **kwargs):
return MockResponseConfigPost(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "false"}',
+ '"stack_created": false}',
200)
+def mocked_requests_config_post_fail(*args, **kwargs):
+ class MockResponseConfigPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigPost(
+ '{"message": "ERROR: Parameter \'public_network\' is invalid: ' +
+ 'Error validating value \'foo\': Unable to find network with ' +
+ 'name or id \'foo\'"}',
+ 400)
+
+
def mocked_requests_config_get(*args, **kwargs):
class MockResponseConfigGet(object):
@@ -45,10 +59,47 @@ def mocked_requests_config_get(*args, **kwargs):
return MockResponseConfigGet(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "true"}',
+ '"stack_created": true}',
200)
+def mocked_requests_config_get_not_created(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{"stack_id": "",'
+ '"stack_created": false}',
+ 200)
+
+
+def mocked_requests_config_get_no_payload(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{}',
+ 200)
+
+
+def mocked_requests_initialize_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_get(*args, **kwargs):
class MockResponseJobGet(object):
@@ -73,6 +124,18 @@ def mocked_requests_job_post(*args, **kwargs):
"d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
+def mocked_requests_job_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_delete(*args, **kwargs):
class MockResponseJobDelete(object):
@@ -100,10 +163,7 @@ def mocked_requests_delete_failed(*args, **kwargs):
self.json_data = json_data
self.status_code = status_code
- if args[0] == "http://172.16.0.137:5000/api/v1.0/configurations":
- return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
-
- return MockResponseDeleteFailed('{}', 404)
+ return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
class StorPerfTestCase(unittest.TestCase):
@@ -119,11 +179,14 @@ class StorPerfTestCase(unittest.TestCase):
self.result = {}
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_config_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_config_get)
- def test_successful_setup(self, mock_post, mock_get):
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup(self, mock_get, mock_post):
+ mock_post.side_effect = [mocked_requests_config_post(),
+ mocked_requests_job_post()]
+ mock_get.side_effect = [mocked_requests_config_get(),
+ mocked_requests_job_get()]
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -146,14 +209,47 @@ class StorPerfTestCase(unittest.TestCase):
self.assertTrue(s.setup_done)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_job_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_job_get)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_job_delete)
- def test_successful_run(self, mock_post, mock_get, mock_delete):
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_unsuccessful(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_not_created
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_no_payload(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_no_payload
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup_config_post_failed(self, mock_get, mock_post):
+ mock_post.side_effect = mocked_requests_config_post_fail
+
+ args = {
+ "options": {
+ "public_network": "foo"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.setup)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v1_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -165,6 +261,74 @@ class StorPerfTestCase(unittest.TestCase):
"query_interval": 0,
"timeout": 60
}
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ "workload": "rs",
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ sample_output = '{"Status": "Completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+
+ expected_result = jsonutils.loads(sample_output)
+
+ s.run(self.result)
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/jobs',
+ json=jsonutils.loads(json.dumps(expected_post)))
+
+ self.assertEqual(self.result, expected_result)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v2_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
args = {
"options": options
@@ -179,13 +343,126 @@ class StorPerfTestCase(unittest.TestCase):
expected_result = jsonutils.loads(sample_output)
s.run(self.result)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
self.assertEqual(self.result, expected_result)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete)
- def test_successful_teardown(self, mock_delete):
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ self.assertRaises(RuntimeError, s.run, self.ctx)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(storperf.StorPerf, 'setup')
+ def test_run_calls_setup(self, mock_setup, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ 'timeout': 60,
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.run(self.result)
+
+ mock_setup.assert_called_once()
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.initialize_disks()
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks_post_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_initialize_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.initialize_disks)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch.object(requests, 'delete')
+ def test_teardown(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_job_delete
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -207,11 +484,12 @@ class StorPerfTestCase(unittest.TestCase):
s.teardown()
self.assertFalse(s.setup_done)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete_failed)
- def test_failed_teardown(self, mock_delete):
+ @mock.patch.object(requests, 'delete')
+ def test_teardown_request_delete_failed(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_delete_failed
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -230,4 +508,6 @@ class StorPerfTestCase(unittest.TestCase):
s = storperf.StorPerf(args, self.ctx)
- self.assertRaises(AssertionError, s.teardown(), self.result)
+ self.assertRaises(RuntimeError, s.teardown)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
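
The v1/v2 run tests document how the job POST differs: a flat `workload` string goes to `/api/v1.0/jobs`, a `workloads` mapping to `/api/v2.0/jobs`, and both carry the same deadline and metadata. A sketch of the request construction based on the `expected_post` dicts above (URL layout taken from the test assertions; payload assembly is simplified):

def build_job_request(storperf_ip, options):
    api_version = 'v2.0' if 'workloads' in options else 'v1.0'
    url = 'http://%s:5000/api/%s/jobs' % (storperf_ip, api_version)
    payload = {key: options[key]
               for key in ('agent_count', 'block_sizes', 'queue_depths',
                           'workload', 'workloads') if key in options}
    payload['deadline'] = options['timeout']
    payload['metadata'] = {'build_tag': 'latest',
                           'test_case': 'opnfv_yardstick_tc074'}
    return url, payload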
diff --git a/yardstick/tests/unit/common/test_template_format.py b/yardstick/tests/unit/common/test_template_format.py
index 56253efbc..6e4827e16 100644
--- a/yardstick/tests/unit/common/test_template_format.py
+++ b/yardstick/tests/unit/common/test_template_format.py
@@ -22,16 +22,21 @@ from yardstick.common import template_format
class TemplateFormatTestCase(unittest.TestCase):
- def test_parse_to_value_exception(self):
+ def test_parse_scanner(self):
- # TODO(elfoley): Don't hide the error that occurs in
- # template_format.parse
- # TODO(elfoley): Separate these tests; one per error type
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = yaml.scanner.ScannerError()
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_parser(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = yaml.parser.ParserError()
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_reader(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = \
yaml.reader.ReaderError('', '', '', '', '')
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
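
Splitting the single test into three makes the contract explicit: scanner, parser, and reader failures from PyYAML all surface as `ValueError`. A minimal sketch of that wrapping (using `yaml.safe_load` for illustration; the real module installs its own loader):

import yaml

def parse(template_str):
    try:
        return yaml.safe_load(template_str)
    except (yaml.scanner.ScannerError,
            yaml.parser.ParserError,
            yaml.reader.ReaderError) as exc:
        raise ValueError(str(exc))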
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index 42ca7f769..e078d70ad 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -16,6 +16,8 @@ import mock
import IxNetwork
import unittest
+from copy import deepcopy
+
from yardstick.common import exceptions
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -31,10 +33,12 @@ TRAFFIC_PARAMETERS = {
'rate': 10000.5,
'rate_unit': 'fps',
'outer_l2': {
- 'framesize': {'64B': '25', '256B': '75'}
+ 'framesize': {'64B': '25', '256B': '75'},
+ 'QinQ': None
},
'outer_l3': {
'count': 512,
+ 'seed': 1,
'dscp': 0,
'proto': 'udp',
'ttl': 32,
@@ -44,8 +48,12 @@ TRAFFIC_PARAMETERS = {
'srcmask': 24
},
'outer_l4': {
- 'dstport': '2001',
- 'srcport': '1234'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 2001,
+ 'srcport': 1234,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
},
@@ -56,10 +64,12 @@ TRAFFIC_PARAMETERS = {
'rate': 75.2,
'rate_unit': '%',
'outer_l2': {
- 'framesize': {'128B': '35', '1024B': '65'}
+ 'framesize': {'128B': '35', '1024B': '65'},
+ 'QinQ': None
},
'outer_l3': {
'count': 1024,
+ 'seed': 1,
'dscp': 0,
'proto': 'udp',
'ttl': 32,
@@ -69,8 +79,12 @@ TRAFFIC_PARAMETERS = {
'srcmask': 64
},
'outer_l4': {
- 'dstport': '1234',
- 'srcport': '2001'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 1234,
+ 'srcport': 2001,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
}
@@ -329,6 +343,46 @@ class TestIxNextgen(unittest.TestCase):
self.assertEqual(6, len(self.ixnet_gen.ixnet.setMultiAttribute.mock_calls))
self.assertEqual(4, len(mock_update_frame.mock_calls))
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('cfg_element/transmissionControl',
+ '-type', 'continuous', '-duration', 50)
+ ])
+
+ def test_update_frame_qinq(self):
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='cfg_element'), \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac'),\
+ mock.patch.object(self.ixnet_gen, '_get_stack_item',
+ return_value='item'), \
+ mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field'):
+
+ traffic_parameters = deepcopy(TRAFFIC_PARAMETERS)
+ traffic_parameters[UPLINK]['outer_l2']['QinQ'] = {
+ 'S-VLAN': {'id': 128,
+ 'priority': 1,
+ 'cfi': 0},
+ 'C-VLAN': {'id': 512,
+ 'priority': 0,
+ 'cfi': 2}
+ }
+
+ self.ixnet_gen.update_frame(traffic_parameters, 50)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('field', '-auto', 'false', '-singleValue', '0x88a8',
+ '-fieldValue', '0x88a8', '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 1,
+ '-fieldValue', 1, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 128,
+ '-fieldValue', 128, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 512,
+ '-fieldValue', 512, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 2,
+ '-fieldValue', 2, '-valueType', 'singleValue')
+ ], any_order=True)
+
def test_update_frame_flow_not_present(self):
with mock.patch.object(
self.ixnet_gen, '_get_config_element_by_flow_group_name',
@@ -357,6 +411,21 @@ class TestIxNextgen(unittest.TestCase):
'-randomMask', '0.0.0.63', '-valueType', 'random',
'-countValue', 25)
+ def test__update_udp_port(self):
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_udp_port(mock.ANY, mock.ANY, 1234,
+ 2, 0, 2)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc',
+ '-auto', 'false',
+ '-seed', 1,
+ '-fixedBits', 1234,
+ '-randomMask', 0,
+ '-valueType', 'random',
+ '-countValue', 1)
+
def test_update_ip_packet(self):
with mock.patch.object(self.ixnet_gen, '_update_ipv4_address') as \
mock_update_add, \
@@ -374,6 +443,38 @@ class TestIxNextgen(unittest.TestCase):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ def test_update_l4(self):
+ with mock.patch.object(self.ixnet_gen, '_update_udp_port') as \
+ mock_update_udp, \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name', return_value='celm'):
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ self.assertEqual(4, len(mock_update_udp.mock_calls))
+
+ def test_update_l4_exception_no_config_element(self):
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ def test_update_l4_exception_no_supported_proto(self):
+ traffic_parameters = {
+ UPLINK: {
+ 'id': 1,
+ 'outer_l3': {
+ 'proto': 'unsupported',
+ },
+ },
+ }
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='celm'):
+ with self.assertRaises(exceptions.IXIAUnsupportedProtocol):
+ self.ixnet_gen.update_l4(traffic_parameters)
+
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic(self, mock_ixnextgen_get_traffic_state):
self.ixnet_gen._ixnet.getList.return_value = [0]
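
The new QinQ test also documents the input shape `update_frame()` now accepts in `outer_l2`: nested dicts for the service and customer tags, whose id, priority, and cfi (plus the 0x88a8 S-VLAN TPID) are written as fixed single-value IxNetwork fields. The example block, lifted from the test:

qinq_example = {
    'S-VLAN': {'id': 128, 'priority': 1, 'cfi': 0},
    'C-VLAN': {'id': 512, 'priority': 0, 'cfi': 2},
}
# e.g. traffic_parameters[<uplink key>]['outer_l2']['QinQ'] = qinq_example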
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 1a33304a5..27ab4607b 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -462,6 +462,22 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertEqual('192.168.1.10', ip)
self.assertIsNone(mask)
+ def test__get_fixed_and_mask_range(self):
+ fixed_mask = '8-48'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(8, fixed)
+ self.assertEqual(48, mask)
+
+ def test__get_fixed_and_mask_single(self):
+ fixed_mask = 1234
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(1234, fixed)
+ self.assertEqual(0, mask)
+
def test__get_ixia_traffic_profile_default_args(self):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
self.TRAFFIC_PROFILE)
@@ -559,87 +575,77 @@ class TestIXIARFC2544Profile(unittest.TestCase):
def test_get_drop_percentage_completed(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
def test_get_drop_percentage_over_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 0.05)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_under_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.min_rate)
@mock.patch.object(ixia_rfc2544.LOG, 'info')
def test_get_drop_percentage_not_flow(self, *args):
samples = {'iface_name_1':
- {'RxThroughput': 0, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 0},
+ {'in_packets': 1000, 'out_packets': 0},
'iface_name_2':
- {'RxThroughput': 0, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 0}
+ {'in_packets': 1005, 'out_packets': 0}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(0, samples['RxThroughput'])
+ self.assertEqual(0.0, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(100, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_first_run(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 1, first_run=True)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(33.45, rfc2544_profile.rate)
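
The new expected values follow from aggregating the per-port packet counters and dividing by the traffic duration (30 s in these tests), since the per-port `RxThroughput`/`TxThroughput` fields are no longer part of the input samples. The arithmetic behind 66.9, 66.833 and 0.099651:

duration = 30.0                       # traffic duration assumed by these tests
out_packets = 1000 + 1007             # 2007 frames sent
in_packets = 1000 + 1005              # 2005 frames received

assert round(out_packets / duration, 3) == 66.9        # TxThroughput
assert round(in_packets / duration, 3) == 66.833       # RxThroughput
assert round((out_packets - in_packets) * 100.0
             / out_packets, 6) == 0.099651              # DropPercentage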
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index a4fdc8d04..2e0331e8e 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -238,15 +238,17 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
'in_packets': 4040,
'latency': 'Latency2'}}
]
- output = rfc2544_profile.get_drop_percentage(samples, 0, 0, False)
+ completed, output = rfc2544_profile.get_drop_percentage(
+ samples, 0, 0, False)
expected = {'DropPercentage': 0.3963,
'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
'RxThroughput': 312.5,
'TxThroughput': 304.5,
'CurrentDropPercentage': 0.3963,
- 'Rate': 100,
+ 'Rate': 100.0,
'Throughput': 312.5}
self.assertEqual(expected, output)
+ self.assertFalse(completed)
class PortPgIDMapTestCase(base.BaseUnitTestCase):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index c35d2db35..4a1d8c30e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1091,7 +1091,8 @@ class TestClientResourceHelper(unittest.TestCase):
self.assertIs(client_resource_helper._connect(client), client)
@mock.patch.object(ClientResourceHelper, '_build_ports')
- @mock.patch.object(ClientResourceHelper, '_run_traffic_once')
+ @mock.patch.object(ClientResourceHelper, '_run_traffic_once',
+ return_value=(True, mock.ANY))
def test_run_traffic(self, mock_run_traffic_once, mock_build_ports):
client_resource_helper = ClientResourceHelper(mock.Mock())
client = mock.Mock()
@@ -1103,7 +1104,7 @@ class TestClientResourceHelper(unittest.TestCase):
as mock_terminated:
mock_connect.return_value = client
type(mock_terminated).value = mock.PropertyMock(
- side_effect=[0, 1, lambda x: x])
+ side_effect=[0, 1, 1, lambda x: x])
client_resource_helper.run_traffic(traffic_profile, mq_producer)
mock_build_ports.assert_called_once()
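
The extra `1` in the `terminated` side effect and the `(True, mock.ANY)` return value reflect that `run_traffic()` now loops on `_run_traffic_once()` until an iteration reports completion (or the helper is terminated externally). A rough sketch of that loop, not the exact implementation:

def run_traffic(helper, traffic_profile):
    while helper._terminated.value == 0:
        completed = helper._run_traffic_once(traffic_profile)
        if completed:
            helper._terminated.value = 1   # checked once more before exiting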
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index ddb63242e..ec0e6aa6d 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -18,6 +18,7 @@ import mock
import six
import unittest
+from yardstick.common import utils
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -57,6 +58,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
def test_run_traffic(self):
mock_tprofile = mock.Mock()
+ mock_tprofile.config.duration = 10
mock_tprofile.get_drop_percentage.return_value = True, 'fake_samples'
ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
ixia_rhelper.rfc_helper = mock.Mock()
@@ -64,7 +66,8 @@ class TestIxiaResourceHelper(unittest.TestCase):
ixia_rhelper.vnfd_helper.port_pairs.all_ports = []
with mock.patch.object(ixia_rhelper, 'generate_samples'), \
mock.patch.object(ixia_rhelper, '_build_ports'), \
- mock.patch.object(ixia_rhelper, '_initialize_client'):
+ mock.patch.object(ixia_rhelper, '_initialize_client'), \
+ mock.patch.object(utils, 'wait_until_true'):
ixia_rhelper.run_traffic(mock_tprofile)
self.assertEqual('fake_samples', ixia_rhelper._queue.get())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 6aba41006..a5b9f258e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -30,13 +30,14 @@ class TestTrexRfcResouceHelper(unittest.TestCase):
mock_traffic_profile.config.duration = 3
mock_traffic_profile.execute_traffic.return_value = ('fake_ports',
'port_pg_id_map')
- mock_traffic_profile.get_drop_percentage.return_value = 'percentage'
+ mock_traffic_profile.get_drop_percentage.return_value = (True,
+ 'percentage')
rfc_rh = tg_rfc2544_trex.TrexRfcResourceHelper(mock_setup_helper)
rfc_rh.TRANSIENT_PERIOD = 0
rfc_rh.rfc2544_helper = mock.Mock()
with mock.patch.object(rfc_rh, '_get_samples') as mock_get_samples:
- rfc_rh._run_traffic_once(mock_traffic_profile)
+ self.assertTrue(rfc_rh._run_traffic_once(mock_traffic_profile))
mock_traffic_profile.execute_traffic.assert_called_once_with(rfc_rh)
mock_traffic_profile.stop_traffic.assert_called_once_with(rfc_rh)
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index b727e821d..71929f1a2 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -617,3 +617,26 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.put_file('a', 'b')
mock_put_sftp.assert_called_once()
+
+ def test_execute(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(return_value=0)
+ exit_code, _, _ = auto_connect_ssh.execute('')
+ self.assertEqual(exit_code, 0)
+
+ def _mock_run(self, *args, **kwargs):
+ if args[0] == 'ls':
+ if kwargs.get('raise_on_error'):
+ raise exceptions.SSHError(error_msg='Command error')
+ return 1
+ return 0
+
+ def test_execute_command_error(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(side_effect=self._mock_run)
+ self.assertRaises(exceptions.SSHError, auto_connect_ssh.execute, 'ls',
+ raise_on_error=True)
+ exit_code, _, _ = auto_connect_ssh.execute('ls')
+ self.assertNotEqual(exit_code, 0)