-rw-r--r--  ansible/roles/install_yardstick/tasks/main.yml | 6
-rw-r--r--  dashboard/opnfv_yardstick_tc058.json | 2
-rw-r--r--  docker/Dockerfile.aarch64.patch | 26
-rw-r--r--  docker/k8s/Dockerfile | 39
-rwxr-xr-x  docs/testing/developer/devguide/devguide.rst | 26
-rwxr-xr-x  docs/testing/user/userguide/01-introduction.rst | 2
-rwxr-xr-x  docs/testing/user/userguide/03-architecture.rst | 2
-rw-r--r--  docs/testing/user/userguide/11-vtc-overview.rst | 128
-rw-r--r--  docs/testing/user/userguide/15-list-of-tcs.rst | 3
-rw-r--r--  docs/testing/user/userguide/glossary.rst | 3
-rw-r--r--  docs/testing/user/userguide/index.rst | 1
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc074.rst | 72
-rw-r--r--  docs/testing/user/userguide/references.rst | 1
-rw-r--r--  samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml | 39
-rw-r--r--  samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml | 171
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml | 8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml | 8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml | 9
-rw-r--r--  samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml | 21
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml | 47
-rwxr-xr-x  tests/ci/load_images.sh | 16
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml | 18
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py | 28
-rw-r--r--  yardstick/benchmark/scenarios/lib/check_value.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 50
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py | 34
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 92
-rw-r--r--  yardstick/common/ansible_common.py | 36
-rw-r--r--  yardstick/common/constants.py | 1
-rw-r--r--  yardstick/common/exceptions.py | 24
-rw-r--r--  yardstick/common/kubernetes_utils.py | 85
-rw-r--r--  yardstick/common/utils.py | 53
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py | 153
-rw-r--r--  yardstick/network_services/pipeline.py | 11
-rw-r--r--  yardstick/network_services/traffic_profile/__init__.py | 1
-rw-r--r--  yardstick/network_services/traffic_profile/base.py | 32
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 92
-rw-r--r--  yardstick/network_services/traffic_profile/pktgen.py | 61
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 16
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py | 17
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 3
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_pktgen.py | 103
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 46
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py | 3
-rw-r--r--  yardstick/orchestrator/heat.py | 10
-rw-r--r--  yardstick/orchestrator/kubernetes.py | 8
-rw-r--r--  yardstick/service/environment.py | 10
-rw-r--r--  yardstick/ssh.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_testcase.py | 14
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_arithmetic.py | 220
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_duration.py | 276
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py | 64
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py | 764
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 197
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 56
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py | 39
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 146
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py | 340
-rw-r--r--  yardstick/tests/unit/common/test_ansible_common.py | 205
-rw-r--r--  yardstick/tests/unit/common/test_kubernetes_utils.py | 207
-rw-r--r--  yardstick/tests/unit/common/test_template_format.py | 13
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 88
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py | 400
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_base.py | 30
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 90
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py | 63
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 7
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py | 4
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 5
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py | 79
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py | 5
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py | 5
-rw-r--r--  yardstick/tests/unit/orchestrator/test_kubernetes.py | 8
-rw-r--r--  yardstick/tests/unit/service/test_environment.py | 19
-rw-r--r--  yardstick/tests/unit/test_ssh.py | 23
80 files changed, 3341 insertions, 1701 deletions
diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml
index 973b2b027..203acc3e5 100644
--- a/ansible/roles/install_yardstick/tasks/main.yml
+++ b/ansible/roles/install_yardstick/tasks/main.yml
@@ -41,7 +41,7 @@
pip:
requirements: "{{ yardstick_dir }}/requirements.txt"
virtualenv: "{{ yardstick_dir }}/virtualenv"
- async: 300
+ async: 900
poll: 0
register: pip_installer
when: virtual_environment == True
@@ -49,7 +49,7 @@
- name: Install Yardstick requirements
pip:
requirements: "{{ yardstick_dir }}/requirements.txt"
- async: 300
+ async: 900
poll: 0
register: pip_installer
when: virtual_environment == False
@@ -59,7 +59,7 @@
jid: "{{ pip_installer.ansible_job_id }}"
register: job_result
until: job_result.finished
- retries: 100
+ retries: 180
- name: Install Yardstick code (venv)
pip:
diff --git a/dashboard/opnfv_yardstick_tc058.json b/dashboard/opnfv_yardstick_tc058.json
index 55b5a5f33..ed2a1750c 100644
--- a/dashboard/opnfv_yardstick_tc058.json
+++ b/dashboard/opnfv_yardstick_tc058.json
@@ -6,7 +6,7 @@
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
- "id": 33,
+ "id": null,
"links": [],
"refresh": "1m",
"rows": [
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index 472310f96..2e2808ed1 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -1,14 +1,14 @@
-From: ting wu <ting.wu@enea.com>
-Date: Tue, 8 May 2018 14:02:52 +0200
-Subject: [PATCH][PATCH] Patch for Yardstick AARCH64 Docker file
+From: Cristina Pauna <cristina.pauna@enea.com>
+Date: Mon, 23 Jul 2018 15:16:59 +0300
+Subject: [PATCH] Patch for Yardstick AARCH64 Docker file
-Signed-off-by: ting wu <ting.wu@enea.com>
+Signed-off-by: Cristina Pauna <cristina.pauna@enea.com>
---
- docker/Dockerfile | 11 ++++++-----
- 1 file changed, 6 insertions(+), 5 deletions(-)
+ docker/Dockerfile | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 71ce6b58..952d0f78 100644
+index 71ce6b58..fce7c116 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,9 +7,9 @@
@@ -33,7 +33,17 @@ index 71ce6b58..952d0f78 100644
RUN easy_install -U setuptools==30.0.0
RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5
-@@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+@@ -40,7 +41,8 @@ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${Y
+ RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
+ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
+
+-RUN ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
++RUN sed -i -e '/- configure_gui/d' ${YARDSTICK_REPO_DIR}/ansible/install.yaml && \
++ ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+
+ RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
+
+@@ -48,8 +50,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
# nginx=5000, rabbitmq=5672
EXPOSE 5000 5672
diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile
new file mode 100644
index 000000000..2f8d9b161
--- /dev/null
+++ b/docker/k8s/Dockerfile
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+FROM ubuntu:16.04
+
+LABEL image=opnfv/yardstick-image-k8s
+
+ARG BRANCH=master
+
+# GIT repo directory
+ENV CLONE_DEST="/opt/tempT"
+
+RUN apt-get update && apt-get install -y \
+ git bc bonnie++ fio gcc iperf3 ethtool \
+ iproute2 linux-tools-common linux-tools-generic \
+ lmbench make netperf patch perl rt-tests stress \
+ sysstat iputils-ping openssh-server sudo && \
+ apt-get -y autoremove && apt-get clean
+
+RUN rm -rf -- ${CLONE_DEST}
+RUN git clone https://github.com/kdlucas/byte-unixbench.git ${CLONE_DEST}
+RUN mkdir -p ${CLONE_DEST}/UnixBench/
+
+RUN git clone https://github.com/beefyamoeba5/ramspeed.git ${CLONE_DEST}/RAMspeed
+WORKDIR ${CLONE_DEST}/RAMspeed/ramspeed-2.6.0
+RUN mkdir -p ${CLONE_DEST}/RAMspeed/ramspeed-2.6.0/temp
+RUN bash build.sh
+
+RUN git clone https://github.com/beefyamoeba5/cachestat.git ${CLONE_DEST}/Cachestat
+
+WORKDIR /
+
+CMD /bin/bash
diff --git a/docs/testing/developer/devguide/devguide.rst b/docs/testing/developer/devguide/devguide.rst
index dbe92b846..91f2c2148 100755
--- a/docs/testing/developer/devguide/devguide.rst
+++ b/docs/testing/developer/devguide/devguide.rst
@@ -540,6 +540,32 @@ The final step consists in pushing the newly modified commit to Gerrit::
git review
+Backporting changes to stable branches
+--------------------------------------
+During the release cycle, when master and the ``stable/<release>`` branch have
+diverged, it may be necessary to backport (cherry-pick) changes to the
+``stable/<release>`` branch once they have merged to master.
+These changes should be identified by the committers reviewing the patch.
+Changes should be backported **as soon as possible** after merging of the
+original code.
+
+.. note::
+ Besides the commit and review process below, the Jira ticket must be updated to
+ add dual release versions and indicate that the change is to be backported.
+
+The process for backporting is as follows:
+
+* Committer A merges a change to master (process for normal changes).
+* Committer A cherry-picks the change to ``stable/<release>`` branch (if the
+ bug has been identified for backporting).
+* The original author should review the code and verify that it still works
+ (and give a ``+1``).
+* Committer B reviews the change, gives a ``+2`` and merges to
+ ``stable/<release>``.
+
+A backported change needs a ``+1`` and a ``+2`` from a committer who didn’t
+propose the change (i.e. minimum 3 people involved).
+
Plugins
-------
diff --git a/docs/testing/user/userguide/01-introduction.rst b/docs/testing/user/userguide/01-introduction.rst
index d846e759c..494b1ef3d 100755
--- a/docs/testing/user/userguide/01-introduction.rst
+++ b/docs/testing/user/userguide/01-introduction.rst
@@ -66,8 +66,6 @@ This document consists of the following chapters:
yardstick report CLI to view the test result in table format and also values
pinned on to a graph
-* Chapter :doc:`11-vtc-overview` provides information on the :term:`VTC`.
-
* Chapter :doc:`12-nsb-overview` describes the methodology implemented by the
Yardstick - Network service benchmarking to test real world usecase for a
given VNF.
diff --git a/docs/testing/user/userguide/03-architecture.rst b/docs/testing/user/userguide/03-architecture.rst
index 622002ee4..886631510 100755
--- a/docs/testing/user/userguide/03-architecture.rst
+++ b/docs/testing/user/userguide/03-architecture.rst
@@ -262,8 +262,6 @@ Yardstick Directory structure
*plugin/* - Plug-in configuration files are stored here.
-*vTC/* - Contains the files for running the virtual Traffic Classifier tests.
-
*yardstick/* - Contains the internals of Yardstick: Runners, Scenario, Contexts,
CLI parsing, keys, plotting tools, dispatcher, plugin
install/remove scripts and so on.
diff --git a/docs/testing/user/userguide/11-vtc-overview.rst b/docs/testing/user/userguide/11-vtc-overview.rst
deleted file mode 100644
index 47582358c..000000000
--- a/docs/testing/user/userguide/11-vtc-overview.rst
+++ /dev/null
@@ -1,128 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, National Center of Scientific Research "Demokritos" and others.
-
-==========================
-Virtual Traffic Classifier
-==========================
-
-Abstract
-========
-
-.. _TNOVA: http://www.t-nova.eu/
-.. _TNOVAresults: http://www.t-nova.eu/results/
-.. _Yardstick: https://wiki.opnfv.org/yardstick
-
-This chapter provides an overview of the virtual Traffic Classifier, a
-contribution to OPNFV Yardstick_ from the EU Project TNOVA_.
-Additional documentation is available in TNOVAresults_.
-
-Overview
-========
-
-The virtual Traffic Classifier (:term:`VTC`) :term:`VNF`, comprises of a
-Virtual Network Function Component (:term:`VNFC`). The :term:`VNFC` contains
-both the Traffic Inspection module, and the Traffic forwarding module, needed
-to run the :term:`VNF`. The exploitation of Deep Packet Inspection
-(:term:`DPI`) methods for traffic classification is built around two basic
-assumptions:
-
-* third parties unaffiliated with either source or recipient are able to
- inspect each IP packet's payload
-
-* the classifier knows the relevant syntax of each application's packet
- payloads (protocol signatures, data patterns, etc.).
-
-The proposed :term:`DPI` based approach will only use an indicative, small
-number of the initial packets from each flow in order to identify the content
-and not inspect each packet.
-
-In this respect it follows the Packet Based per Flow State (term:`PBFS`). This
-method uses a table to track each session based on the 5-tuples (src address,
-dest address, src port,dest port, transport protocol) that is maintained for
-each flow.
-
-Concepts
-========
-
-* *Traffic Inspection*: The process of packet analysis and application
- identification of network traffic that passes through the :term:`VTC`.
-
-* *Traffic Forwarding*: The process of packet forwarding from an incoming
- network interface to a pre-defined outgoing network interface.
-
-* *Traffic Rule Application*: The process of packet tagging, based on a
- predefined set of rules. Packet tagging may include e.g. Type of Service
- (:term:`ToS`) field modification.
-
-Architecture
-============
-
-The Traffic Inspection module is the most computationally intensive component
-of the :term:`VNF`. It implements filtering and packet matching algorithms in
-order to support the enhanced traffic forwarding capability of the :term:`VNF`.
-The component supports a flow table (exploiting hashing algorithms for fast
-indexing of flows) and an inspection engine for traffic classification.
-
-The implementation used for these experiments exploits the nDPI library.
-The packet capturing mechanism is implemented using libpcap. When the
-:term:`DPI` engine identifies a new flow, the flow register is updated with the
-appropriate information and transmitted across the Traffic Forwarding module,
-which then applies any required policy updates.
-
-The Traffic Forwarding moudle is responsible for routing and packet forwarding.
-It accepts incoming network traffic, consults the flow table for classification
-information for each incoming flow and then applies pre-defined policies
-marking e.g. :term:`ToS`/Differentiated Services Code Point (:term:`DSCP`)
-multimedia traffic for Quality of Service (:term:`QoS`) enablement on the
-forwarded traffic.
-It is assumed that the traffic is forwarded using the default policy until it
-is identified and new policies are enforced.
-
-The expected response delay is considered to be negligible, as only a small
-number of packets are required to identify each flow.
-
-Graphical Overview
-==================
-
-.. code-block:: console
-
- +----------------------------+
- | |
- | Virtual Traffic Classifier |
- | |
- | Analysing/Forwarding |
- | ------------> |
- | ethA ethB |
- | |
- +----------------------------+
- | ^
- | |
- v |
- +----------------------------+
- | |
- | Virtual Switch |
- | |
- +----------------------------+
-
-Install
-=======
-
-run the vTC/build.sh with root privileges
-
-Run
-===
-
-::
-
- sudo ./pfbridge -a eth1 -b eth2
-
-
-.. note:: Virtual Traffic Classifier is not support in OPNFV Danube release.
-
-
-Development Environment
-=======================
-
-Ubuntu 14.04 Ubuntu 16.04
diff --git a/docs/testing/user/userguide/15-list-of-tcs.rst b/docs/testing/user/userguide/15-list-of-tcs.rst
index 37ce819f1..0efecebd1 100644
--- a/docs/testing/user/userguide/15-list-of-tcs.rst
+++ b/docs/testing/user/userguide/15-list-of-tcs.rst
@@ -17,8 +17,7 @@ Yardstick test cases are divided in two main categories:
described in :doc:`02-methodology`
* *OPNFV Feature Test Cases* - Test Cases developed to verify one or more
- aspect of a feature delivered by an OPNFV Project, including the test cases
- developed for the :term:`VTC`.
+ aspect of a feature delivered by an OPNFV Project.
Generic NFVI Test Case Descriptions
===================================
diff --git a/docs/testing/user/userguide/glossary.rst b/docs/testing/user/userguide/glossary.rst
index f8ff41887..be98aa6c0 100644
--- a/docs/testing/user/userguide/glossary.rst
+++ b/docs/testing/user/userguide/glossary.rst
@@ -60,6 +60,3 @@ Glossary
ToS
Type of Service
-
- VTC
- Virtual Traffic Classifier
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index b936e723d..1cbd0858f 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -23,7 +23,6 @@ Yardstick User Guide
08-grafana
09-api
10-yardstick-user-interface
- 11-vtc-overview
12-nsb-overview
13-nsb-installation
14-nsb-operation
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
index 92cd51439..261a8bd95 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
@@ -19,16 +19,27 @@ Yardstick Test Case Description TC074
|metric | Storage performance |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Storperf integration with yardstick. The purpose of StorPerf |
-| | is to provide a tool to measure block and object storage |
-| | performance in an NFVI. When complemented with a |
-| | characterization of typical VF storage performance |
-| | requirements, it can provide pass/fail thresholds for test, |
-| | staging, and production NFVI environments. |
-| | |
-| | The benchmarks developed for block and object storage will |
-| | be sufficiently varied to provide a good preview of expected |
-| | storage performance behavior for any type of VNF workload. |
+|test purpose | To evaluate and report on the Cinder volume performance. |
+| | |
+| | This testcase integrates with OPNFV StorPerf to measure |
+| | block performance of the underlying Cinder drivers. Many |
+| | options are supported, and even the root disk (Glance |
+| | ephemeral storage) can be profiled. |
+| | |
+| | The fundamental concept of the test case is to first fill |
+| | the volumes with random data to ensure reported metrics |
+| | are indicative of continued usage and not skewed by |
+| | transitional performance while the underlying storage |
+| | driver allocates blocks. |
+| | The metrics for filling the volumes with random data |
+| | are not reported in the final results. The test also |
+| | ensures the volumes are performing at a consistent level |
+| | of performance by measuring metrics every minute, and |
+| | comparing the trend of the metrics over the run. By |
+| | evaluating the min and max values, as well as the slope of |
+| | the trend, it can make the determination that the metrics |
+| | are stable, and not fluctuating beyond industry standard |
+| | norms. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc074.yaml |
@@ -38,7 +49,8 @@ Yardstick Test Case Description TC074
| | * public_network: "ext-net" - name of public network |
| | * volume_size: 2 - cinder volume size |
| | * block_sizes: "4096" - data block size |
-| | * queue_depths: "4" |
+| | * queue_depths: "4" - the number of simultaneous I/Os |
+| | to perform at all times |
| | * StorPerf_ip: "192.168.200.2" |
| | * query_interval: 10 - state query interval |
| | * timeout: 600 - maximum allowed job time |
@@ -50,7 +62,11 @@ Yardstick Test Case Description TC074
| | performance in an NFVI. |
| | |
| | StorPerf is delivered as a Docker container from |
-| | https://hub.docker.com/r/opnfv/storperf/tags/. |
+| | https://hub.docker.com/r/opnfv/storperf-master/tags/. |
+| | |
+| | The underlying tool used is FIO, and StorPerf supports |
+| | any FIO option in order to tailor the test to the exact |
+| | workload needed. |
| | |
+--------------+--------------------------------------------------------------+
|references | Storperf_ |
@@ -80,9 +96,17 @@ Yardstick Test Case Description TC074
| | - rr: 100% Read, random access |
| | - wr: 100% Write, random access |
| | - rw: 70% Read / 30% write, random access |
-| | * nossd: Do not perform SSD style preconditioning. |
-| | * nowarm: Do not perform a warmup prior to |
| | measurements. |
+| | * workloads={json maps} |
+| | This parameter supersedes the workload and calls the V2.0 |
+| | API in StorPerf. It allows for greater control of the |
+| | parameters to be passed to FIO. For example, running a |
+| | random read/write with a mix of 90% read and 10% write |
+| | would be expressed as follows: |
+| | {"9010randrw": {"rw":"randrw","rwmixread": "90"}} |
+| | Note: This must be passed in as a string, so don't forget |
+| | to escape or otherwise properly deal with the quotes. |
+| | |
| | * report= [job_id] |
| | Query the status of the supplied job_id and report on |
| | metrics. If a workload is supplied, will report on only |
@@ -92,8 +116,7 @@ Yardstick Test Case Description TC074
| | |
+--------------+--------------------------------------------------------------+
|pre-test | If you do not have an Ubuntu 14.04 image in Glance, you will |
-|conditions | need to add one. A key pair for launching agents is also |
-| | required. |
+|conditions | need to add one. |
| | |
| | Storperf is required to be installed in the environment. |
| | There are two possible methods for Storperf installation: |
@@ -126,10 +149,21 @@ Yardstick Test Case Description TC074
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The Storperf is installed and Ubuntu 14.04 image is stored |
-| | in glance. TC is invoked and logs are produced and stored. |
+|step 1 | Yardstick calls StorPerf to create the heat stack with the |
+| | number of VMs and size of Cinder volumes specified. The |
+| | VMs will be on their own private subnet, and take floating |
+| | IP addresses from the specified public network. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick calls StorPerf to fill all the volumes with |
+| | random data. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Yardstick calls StorPerf to perform the series of tests |
+| | specified by the workload, queue depths and block sizes. |
| | |
-| | Result: Logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 4 | Yardstick calls StorPerf to delete the stack it created. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | None. Storage performance results are fetched and stored. |
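The ``workloads`` JSON map described above has to reach StorPerf as a quoted string. A minimal sketch of producing that string from a Python dict (the ``9010randrw`` key is just the example used in the table)::

    import json

    # 90% random read / 10% random write, as in the example above.
    workloads = {"9010randrw": {"rw": "randrw", "rwmixread": "90"}}

    # json.dumps() yields the escaped string form the scenario expects.
    print(json.dumps(workloads))
    # {"9010randrw": {"rw": "randrw", "rwmixread": "90"}}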
diff --git a/docs/testing/user/userguide/references.rst b/docs/testing/user/userguide/references.rst
index 05729ba75..3e18c96e9 100644
--- a/docs/testing/user/userguide/references.rst
+++ b/docs/testing/user/userguide/references.rst
@@ -13,7 +13,6 @@ OPNFV
* Parser wiki: https://wiki.opnfv.org/parser
* Pharos wiki: https://wiki.opnfv.org/pharos
-* VTC: https://wiki.opnfv.org/vtc
* Yardstick CI: https://build.opnfv.org/ci/view/yardstick/
* Yardstick and ETSI TST001 presentation: https://wiki.opnfv.org/display/yardstick/Yardstick?preview=%2F2925202%2F2925205%2Fopnfv_summit_-_bridging_opnfv_and_etsi.pdf
* Yardstick Project presentation: https://wiki.opnfv.org/display/yardstick/Yardstick?preview=%2F2925202%2F2925208%2Fopnfv_summit_-_yardstick_project.pdf
diff --git a/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml b/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml
new file mode 100644
index 000000000..81323e71c
--- /dev/null
+++ b/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: cmts-tg-topology
+ name: cmts-tg-topology
+ short-name: cmts-tg-topology
+ description: cmts-tg-topology
+ constituent-vnfd:
+ - member-vnf-index: '1'
+ vnfd-id-ref: tg__0
+ VNF model: ../../vnf_descriptors/tg_pktgen.yaml
+ - member-vnf-index: '2'
+ vnfd-id-ref: vnf__0
+ VNF model: ../../vnf_descriptors/tg_pktgen.yaml
+
+ vld: []
+# - id: uplink
+# name: tg__0 to vnf__0 link 1
+# type: ELAN
+# vnfd-connection-point-ref:
+# - member-vnf-index-ref: '1'
+# vnfd-connection-point-ref: sriov01
+# vnfd-id-ref: tg__0
+# - member-vnf-index-ref: '2'
+# vnfd-connection-point-ref: sriov01
+# vnfd-id-ref: vnf__0
diff --git a/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml b/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml
new file mode 100644
index 000000000..cab8bb885
--- /dev/null
+++ b/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml
@@ -0,0 +1,171 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/pktgen_throughput.yaml
+ topology: cmts-tg-topology.yaml
+ nodes:
+ tg__0: trafficgen-k8syardstick
+ vnf__0: vnf-k8syardstick
+ options: {}
+ runner:
+ type: IterationIPC
+ iterations: 10
+ interval: 15
+ timeout: 10000
+context:
+ name: k8syardstick
+ type: Kubernetes
+
+ servers:
+ vnf:
+ containers:
+ - image: si-docker.ir.intel.com/vcmts-ubuntu/vcmts-pktgen-uepi
+ args: ["/opt/bin/cmk isolate --conf-dir=/etc/cmk --socket-id=0 --pool=dataplane /vcmts/setup.sh anga_mac_1_ds.pcap ds"]
+ env:
+ - name: LUA_PATH
+ value: "/vcmts/Pktgen.lua"
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ resources:
+ requests:
+ pod.alpha.kubernetes.io/opaque-int-resource-cmk: "1"
+ ports:
+ - containerPort: 22022
+ volumeMounts:
+ - name: hugepages
+ mountPath: /dev/hugepages
+ - name: sysfs
+ mountPath: /sys
+ - name: sriov
+ mountPath: /sriov-cni
+ - name: host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+ - name: cmk-conf-dir
+ mountPath: /etc/cmk
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: true
+
+ node_ports:
+ - name: lua # Lower case alphanumeric characters or '-'
+ port: 22022
+ networks:
+ - flannel
+ - sriov01
+ volumes:
+ - name: hugepages
+ hostPath:
+ path: /dev/hugepages
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: sriov
+ hostPath:
+ path: /var/lib/cni/sriov
+ - name: cmk-install-dir
+ hostPath:
+ path: /opt/bin
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: cmk-conf-dir
+ hostPath:
+ path: /etc/cmk
+
+ trafficgen:
+ containers:
+ - image: si-docker.ir.intel.com/vcmts-ubuntu/vcmts-pktgen-uepi
+ args: ["/opt/bin/cmk isolate --conf-dir=/etc/cmk --socket-id=0 --pool=dataplane /vcmts/setup.sh anga_mac_1_ds.pcap ds"]
+ env:
+ - name: LUA_PATH
+ value: "/vcmts/Pktgen.lua"
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ resources:
+ requests:
+ pod.alpha.kubernetes.io/opaque-int-resource-cmk: "1"
+ ports:
+ - containerPort: 22022
+ volumeMounts:
+ - name: hugepages
+ mountPath: /dev/hugepages
+ - name: sysfs
+ mountPath: /sys
+ - name: sriov
+ mountPath: /sriov-cni
+ - name: host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+ - name: cmk-conf-dir
+ mountPath: /etc/cmk
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: true
+
+ node_ports:
+ - name: lua # Lower case alphanumeric characters or '-'
+ port: 22022
+ networks:
+ - flannel
+ - sriov01
+ volumes:
+ - name: hugepages
+ hostPath:
+ path: /dev/hugepages
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: sriov
+ hostPath:
+ path: /var/lib/cni/sriov
+ - name: cmk-install-dir
+ hostPath:
+ path: /opt/bin
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: cmk-conf-dir
+ hostPath:
+ path: /etc/cmk
+
+ networks:
+ flannel:
+ args: '[{ "delegate": { "isDefaultGateway": true }}]'
+ plugin: flannel
+ sriov01:
+ plugin: sriov
+ args: '[{"if0": "ens802f0",
+ "if0name": "net0",
+ "dpdk": {
+ "kernel_driver": "i40evf",
+ "dpdk_driver": "igb_uio",
+ "dpdk_tool": "/opt/nsb_bin/dpdk-devbind.py"}
+ }]'
+ sriov02:
+ plugin: sriov
+ args: '[{"if0": "ens802f0",
+ "if0name": "net0",
+ "dpdk": {
+ "kernel_driver": "i40evf",
+ "dpdk_driver": "igb_uio",
+ "dpdk_tool": "/opt/nsb_bin/dpdk-devbind.py"}
+ }]'
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index b34672907..507491446 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -52,12 +52,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -83,12 +85,14 @@ downlink_0:
dstip4: "{{get(flow, 'flow.public_ip_0', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -111,12 +115,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -142,9 +148,11 @@ downlink_1:
dstip4: "{{get(flow, 'flow.public_ip_1', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 513aefb40..3cbd7cd62 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -50,12 +50,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -76,12 +78,14 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_0', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -102,12 +106,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -128,9 +134,11 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_1', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index aad751549..edff3612e 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -72,6 +72,7 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -79,6 +80,7 @@ uplink_0:
srcport: "{{get(flow, 'flow.src_port_0', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
id: 2
ipv4:
@@ -97,6 +99,7 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -104,6 +107,7 @@ downlink_0:
srcport: "{{get(flow, 'flow.dst_port_0', '0') }}"
dstport: "{{get(flow, 'flow.src_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
id: 3
ipv4:
@@ -131,6 +135,8 @@ uplink_1:
proto: "tcp"
srcip4: "{{get(flow, 'flow.srcip_1', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dstip_1', '192.16.0.0-192.16.0.31') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -138,6 +144,7 @@ uplink_1:
srcport: "{{get(flow, 'flow.src_port_1', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
id: 4
ipv4:
@@ -156,6 +163,7 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_1', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -163,3 +171,4 @@ downlink_1:
srcport: "{{get(flow, 'flow.dst_port_1', '0') }}"
dstport: "{{get(flow, 'flow.src_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml b/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml
new file mode 100644
index 000000000..e222e1d8c
--- /dev/null
+++ b/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml
@@ -0,0 +1,21 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+schema: "nsb:traffic_profile:0.1"
+
+name: pktgen
+description: Traffic profile to run throughput tests
+traffic_profile:
+ traffic_type: PktgenTrafficProfile
+ duration: 15
diff --git a/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml b/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml
new file mode 100644
index 000000000..17e631652
--- /dev/null
+++ b/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml
@@ -0,0 +1,47 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: PktgenTrafficGen # NSB class mapping
+ name: pktgen_tg
+ short-name: pktgen_tg
+ description: Pktgen DPDK traffic generator
+ mgmt-interface:
+ vdu-id: pktgen
+ {% if ip is defined %}
+ ip: '{{ ip }}'
+ {% endif %}
+ {% if service_ports is defined and service_ports %}
+ service_ports:
+ {% for port in service_ports %}
+ - port: "{{ port['port']|int }}"
+ node_port: "{{ port['node_port']|int }}"
+ target_port: "{{ port['target_port']|int }}"
+ {% endfor %}
+ {% endif %}
+
+ vdu:
+ - id: pktgen_tg
+ name: pktgen_tg
+ description: Pktgen DPDK traffic generator
+
+ benchmark:
+ kpi:
+ - rx_throughput_fps
+ - tx_throughput_fps
+ - tx_throughput_mbps
+ - rx_throughput_mbps
+ - in_packets
+ - out_packets
diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 1e1591ce3..7a86abb4e 100755
--- a/tests/ci/load_images.sh
+++ b/tests/ci/load_images.sh
@@ -29,11 +29,6 @@ if [ "${INSTALLER_TYPE}" == 'fuel' ]; then
fi
export YARD_IMG_ARCH
-HW_FW_TYPE=""
-if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
- HW_FW_TYPE=uefi
-fi
-export HW_FW_TYPE
UCA_HOST="cloud-images.ubuntu.com"
if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
@@ -104,18 +99,12 @@ load_yardstick_image()
echo
echo "========== Loading yardstick cloud image =========="
EXTRA_PARAMS=""
- if [[ "${YARD_IMG_ARCH}" == "arm64" ]]; then
- EXTRA_PARAMS="--property hw_video_model=vga"
- fi
# VPP requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-fdio-"* ]]; then
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- if [[ -n "${HW_FW_TYPE}" ]]; then
- EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
- fi
if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
output=$(eval openstack ${SECURE} image create \
@@ -165,7 +154,7 @@ load_cirros_image()
if [[ "${YARD_IMG_ARCH}" == "arm64" ]]; then
CIRROS_IMAGE_VERSION="cirros-d161201"
CIRROS_IMAGE_PATH="/home/opnfv/images/cirros-d161201-aarch64-disk.img"
- EXTRA_PARAMS="--property hw_video_model=vga --property short_id=ubuntu16.04"
+ EXTRA_PARAMS="--property short_id=ubuntu16.04"
else
CIRROS_IMAGE_VERSION="cirros-0.3.5"
CIRROS_IMAGE_PATH="/home/opnfv/images/cirros-0.3.5-x86_64-disk.img"
@@ -184,9 +173,6 @@ load_cirros_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- if [[ -n "${HW_FW_TYPE}" ]]; then
- EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
- fi
output=$(openstack ${SECURE} image create \
--disk-format qcow2 \
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
index fe8423d25..d08dbaa6e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -15,20 +15,30 @@ description: >
{% set public_network = public_network or "ext-net" %}
{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
+{% set workload = workload or "" %}
+{% set workloads = workloads or "" %}
+{% set agent_count = agent_count or 1 %}
+{% set block_sizes = block_sizes or "4096" %}
+{% set queue_depths = queue_depths or "4" %}
+{% set steady_state_samples = steady_state_samples or 10 %}
+{% set volume_size = volume_size or 4 %}
scenarios:
-
type: StorPerf
options:
- agent_count: 1
+ agent_count: {{agent_count}}
agent_image: "Ubuntu-16.04"
agent_flavor: "storperf"
public_network: {{public_network}}
- volume_size: 4
- block_sizes: "4096"
- queue_depths: "4"
+ volume_size: {{volume_size}}
+ block_sizes: {{block_sizes}}
+ queue_depths: {{queue_depths}}
StorPerf_ip: {{StorPerf_ip}}
query_interval: 10
timeout: 300
+ workload: {{workload}}
+ workloads: {{workloads}}
+ steady_state_samples: {{steady_state_samples}}
runner:
type: Iteration
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 7534c4ea5..e1553c72b 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -110,7 +110,7 @@ class KubernetesContext(ctx_base.Context):
self._delete_rc(rc)
def _delete_rc(self, rc):
- k8s_utils.delete_replication_controller(rc)
+ k8s_utils.delete_replication_controller(rc, skip_codes=[404])
def _delete_pods(self):
for pod in self.template.pods:
@@ -159,7 +159,7 @@ class KubernetesContext(ctx_base.Context):
k8s_utils.create_config_map(self.ssh_key, {'authorized_keys': key})
def _delete_ssh_key(self):
- k8s_utils.delete_config_map(self.ssh_key)
+ k8s_utils.delete_config_map(self.ssh_key, skip_codes=[404])
utils.remove_file(self.key_path)
utils.remove_file(self.public_key_path)
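The ``skip_codes=[404]`` argument relies on the updated helpers in ``yardstick/common/kubernetes_utils.py`` (changed in this commit but not shown in this listing). A rough, hypothetical sketch of the pattern, assuming the standard Python ``kubernetes`` client underneath; the ConfigMap name is a placeholder::

    from kubernetes import client, config
    from kubernetes.client.rest import ApiException


    def delete_config_map(name, namespace='default', skip_codes=None):
        """Delete a ConfigMap, ignoring HTTP codes listed in skip_codes."""
        skip_codes = skip_codes or []
        config.load_kube_config()
        api = client.CoreV1Api()
        try:
            api.delete_namespaced_config_map(name, namespace,
                                             body=client.V1DeleteOptions())
        except ApiException as e:
            if e.status not in skip_codes:
                raise


    # A missing ConfigMap (404) no longer aborts context teardown:
    delete_config_map('yardstick-ssh-key', skip_codes=[404])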
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 77efe0dae..4c79a4931 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -51,8 +51,7 @@ class BaremetalAttacker(BaseAttacker):
LOG.debug("jump_host ip:%s user:%s", jump_host['ip'], jump_host['user'])
self.jump_connection = ssh.SSH.from_node(
jump_host,
- # why do we allow pwd for password?
- defaults={"user": "root", "password": jump_host.get("pwd")}
+ defaults={"user": "root", "password": jump_host.get("password")}
)
self.jump_connection.wait(timeout=600)
LOG.debug("ssh jump host success!")
@@ -61,7 +60,7 @@ class BaremetalAttacker(BaseAttacker):
self.ipmi_ip = host.get("ipmi_ip", None)
self.ipmi_user = host.get("ipmi_user", "root")
- self.ipmi_pwd = host.get("ipmi_pwd", None)
+ self.ipmi_pwd = host.get("ipmi_password", None)
self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
self.check_script = self.get_script_fullpath(
@@ -109,26 +108,3 @@ class BaremetalAttacker(BaseAttacker):
else:
_execute_shell_command(cmd, stdin=stdin_file)
LOG.info("Recover fault END")
-
-
-def _test(): # pragma: no cover
- host = {
- "ipmi_ip": "10.20.0.5",
- "ipmi_user": "root",
- "ipmi_pwd": "123456",
- "ip": "10.20.0.5",
- "user": "root",
- "key_filename": "/root/.ssh/id_rsa"
- }
- context = {"node1": host}
- attacker_cfg = {
- 'fault_type': 'bear-metal-down',
- 'host': 'node1',
- }
- ins = BaremetalAttacker(attacker_cfg, context)
- ins.setup()
- ins.inject_fault()
-
-
-if __name__ == '__main__': # pragma: no cover
- _test()
diff --git a/yardstick/benchmark/scenarios/lib/check_value.py b/yardstick/benchmark/scenarios/lib/check_value.py
index 759076068..4c9b27df4 100644
--- a/yardstick/benchmark/scenarios/lib/check_value.py
+++ b/yardstick/benchmark/scenarios/lib/check_value.py
@@ -13,6 +13,7 @@ from __future__ import absolute_import
import logging
from yardstick.benchmark.scenarios import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -34,24 +35,18 @@ class CheckValue(base.Scenario):
self.context_cfg = context_cfg
self.options = self.scenario_cfg['options']
- def run(self, result):
+ def run(self, _):
"""execute the test"""
op = self.options.get("operator")
LOG.debug("options=%s", self.options)
value1 = str(self.options.get("value1"))
value2 = str(self.options.get("value2"))
+ if (op == "eq" and value1 != value2) or (op == "ne" and
+ value1 == value2):
+ raise y_exc.ValueCheckError(
+ value1=value1, operator=op, value2=value2)
check_result = "PASS"
- if op == "eq" and value1 != value2:
- LOG.info("value1=%s, value2=%s, error: should equal!!!", value1,
- value2)
- check_result = "FAIL"
- assert value1 == value2, "Error %s!=%s" % (value1, value2)
- elif op == "ne" and value1 == value2:
- LOG.info("value1=%s, value2=%s, error: should not equal!!!",
- value1, value2)
- check_result = "FAIL"
- assert value1 != value2, "Error %s==%s" % (value1, value2)
LOG.info("Check result is %s", check_result)
keys = self.scenario_cfg.get('output', '').split()
values = [check_result]
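``ValueCheckError`` is added to ``yardstick/common/exceptions.py`` in this commit (that hunk is not shown in this listing); presumably it follows the keyword-formatted message pattern of the other Yardstick exceptions. A minimal sketch of that pattern, with a hypothetical message string::

    class YardstickException(Exception):
        """Base class: formats a message template with keyword arguments."""
        message = 'An unknown exception occurred.'

        def __init__(self, **kwargs):
            try:
                super(YardstickException, self).__init__(self.message % kwargs)
            except KeyError:
                # Fall back to the raw template if a placeholder is missing.
                super(YardstickException, self).__init__(self.message)


    class ValueCheckError(YardstickException):
        message = 'Value check failed: %(value1)s %(operator)s %(value2)s'


    # CheckValue.run() would raise it roughly as:
    # raise ValueCheckError(value1='1', operator='eq', value2='2')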
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 7a11d3e76..10f10d4e6 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -64,37 +64,36 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self._mq_ids = []
def _get_ip_flow_range(self, ip_start_range):
+ """Retrieve a CIDR first and last viable IPs
- # IP range is specified as 'x.x.x.x-y.y.y.y'
+ :param ip_start_range: could be the IP range itself or a dictionary
+ with the host name and the port.
+ :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
+ """
if isinstance(ip_start_range, six.string_types):
return ip_start_range
- node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
+ node_name, range_or_interface = next(iter(ip_start_range.items()),
+ (None, '0.0.0.0'))
if node_name is None:
- # we are manually specifying the range
- ip_addr_range = range_or_interface
+ return range_or_interface
+
+ node = self.context_cfg['nodes'].get(node_name, {})
+ interface = node.get('interfaces', {}).get(range_or_interface)
+ if interface:
+ ip = interface['local_ip']
+ mask = interface['netmask']
else:
- node = self.context_cfg["nodes"].get(node_name, {})
- try:
- # the ip_range is the interface name
- interface = node.get("interfaces", {})[range_or_interface]
- except KeyError:
- ip = "0.0.0.0"
- mask = "255.255.255.0"
- else:
- ip = interface["local_ip"]
- # we can't default these values, they must both exist to be valid
- mask = interface["netmask"]
-
- ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
- hosts = list(ipaddr.hosts())
- if len(hosts) > 2:
- # skip the first host in case of gateway
- ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
- else:
- LOG.warning("Only single IP in range %s", ipaddr)
- # fall back to single IP range
- ip_addr_range = ip
+ ip = '0.0.0.0'
+ mask = '255.255.255.0'
+
+ ipaddr = ipaddress.ip_network(
+ six.text_type('{}/{}'.format(ip, mask)), strict=False)
+ if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
+ ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
+ else:
+ LOG.warning('Only single IP in range %s', ipaddr)
+ ip_addr_range = ip
return ip_addr_range
def _get_traffic_flow(self):
@@ -119,6 +118,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
flow["dst_port_{}".format(index)] = dst_port
flow["count"] = fflow["count"]
+ flow["seed"] = fflow["seed"]
except KeyError:
flow = {}
return {"flow": flow}
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 2b3474070..8344b1595 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -193,22 +193,19 @@ class Vsperf(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
- reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ reader = csv.DictReader(stdout.split('\r\n'), strict=True)
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
# sla check; go through all defined SLAs and check if values measured
# by VSPERF are higher then those defined by SLAs
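The ``strict=True`` reader plus the ``StopIteration`` guard covers the case where ``result.csv`` comes back empty; a small illustration of both paths (the CSV content is simplified)::

    import csv

    output = "metric,value\r\nthroughput_rx_fps,1234"   # simplified result.csv
    reader = csv.DictReader(output.split('\r\n'), strict=True)
    print(next(reader))   # {'metric': 'throughput_rx_fps', 'value': '1234'}

    empty = csv.DictReader("".split('\r\n'), strict=True)
    try:
        next(empty)       # no data rows at all
    except StopIteration:
        print("empty result file - leave the result dict unchanged")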
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 27bf40dcb..d5c8a3bfe 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -205,22 +205,17 @@ class VsperfDPDK(base.Scenario):
self.client.send_command(cmd)
else:
cmd = "cat ~/.testpmd.macaddr.port1"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port1_mac = stdout
+
cmd = "cat ~/.testpmd.macaddr.port2"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port2_mac = stdout
cmd = "screen -d -m sudo -E bash ~/testpmd_vsperf.sh %s %s" % \
(self.moongen_port1_mac, self.moongen_port2_mac)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
time.sleep(1)
@@ -245,7 +240,7 @@ class VsperfDPDK(base.Scenario):
self.setup()
# remove results from previous tests
- self.client.execute("rm -rf /tmp/results*")
+ self.client.run("rm -rf /tmp/results*", raise_on_error=False)
# get vsperf options
options = self.scenario_cfg['options']
@@ -291,9 +286,7 @@ class VsperfDPDK(base.Scenario):
cmd = "sshpass -p yardstick ssh-copy-id -o StrictHostKeyChecking=no " \
"root@%s -p 22" % (self.moongen_host_ip)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# execute vsperf
cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
@@ -302,22 +295,19 @@ class VsperfDPDK(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
result['nrFlows'] = multistream
# sla check; go through all defined SLAs and check if values measured
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index f0b2361d6..8093cd2d2 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -8,15 +8,16 @@
##############################################################################
from __future__ import absolute_import
-import os
import logging
+import os
import time
-import requests
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios import base
+
LOG = logging.getLogger(__name__)
@@ -43,12 +44,6 @@ class StorPerf(base.Scenario):
wr: 100% Write, random access
rw: 70% Read / 30% write, random access
- nossd (Optional):
- Do not perform SSD style preconditioning.
-
- nowarm (Optional):
- Do not perform a warmup prior to measurements.
-
report = [job_id] (Optional):
Query the status of the supplied job_id and report on metrics.
If a workload is supplied, will report on only that subset.
@@ -79,10 +74,13 @@ class StorPerf(base.Scenario):
setup_query_content = jsonutils.loads(
setup_query.content)
- if setup_query_content["stack_created"]:
- self.setup_done = True
+ if ("stack_created" in setup_query_content and
+ setup_query_content["stack_created"]):
LOG.debug("stack_created: %s",
setup_query_content["stack_created"])
+ return True
+
+ return False
def setup(self):
"""Set the configuration."""
@@ -111,9 +109,13 @@ class StorPerf(base.Scenario):
elif setup_res.status_code == 200:
LOG.info("stack_id: %s", setup_res_content["stack_id"])
- while not self.setup_done:
- self._query_setup_state()
- time.sleep(self.query_interval)
+ while not self._query_setup_state():
+ time.sleep(self.query_interval)
+
+ # We do not want to load the results of the disk initialization,
+ # so it is not added to the results here.
+ self.initialize_disks()
+ self.setup_done = True
def _query_job_state(self, job_id):
"""Query the status of the supplied job_id and report on metrics"""
@@ -149,7 +151,8 @@ class StorPerf(base.Scenario):
if not self.setup_done:
self.setup()
- metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+ metadata = {"build_tag": "latest",
+ "test_case": "opnfv_yardstick_tc074"}
metadata_payload_dict = {"pod_name": "NODE_NAME",
"scenario_name": "DEPLOY_SCENARIO",
"version": "YARDSTICK_BRANCH"}
@@ -162,7 +165,9 @@ class StorPerf(base.Scenario):
job_args = {"metadata": metadata}
job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
- "target", "nossd", "nowarm", "workload"]
+ "target", "workload", "workloads",
+ "agent_count", "steady_state_samples"]
+ job_args["deadline"] = self.options["timeout"]
for job_argument in job_args_payload_list:
try:
@@ -170,8 +175,16 @@ class StorPerf(base.Scenario):
except KeyError:
pass
+ api_version = "v1.0"
+
+ if ("workloads" in job_args and
+ job_args["workloads"] is not None and
+ len(job_args["workloads"]) > 0):
+ api_version = "v2.0"
+
LOG.info("Starting a job with parameters %s", job_args)
- job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
+ job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+ api_version),
json=job_args)
job_res_content = jsonutils.loads(job_res.content)
@@ -187,15 +200,6 @@ class StorPerf(base.Scenario):
self._query_job_state(job_id)
time.sleep(self.query_interval)
- terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
- self.target)
-
- if terminate_res.status_code != 200:
- terminate_res_content = jsonutils.loads(
- terminate_res.content)
- raise RuntimeError("Failed to start a job, error message:",
- terminate_res_content["message"])
-
# TODO: Support using ETA to polls for completion.
# Read ETA, next poll in 1/2 ETA time slot.
# If ETA is greater than the maximum allowed job time,
@@ -216,14 +220,46 @@ class StorPerf(base.Scenario):
result.update(result_res_content)
+ def initialize_disks(self):
+ """Fills the target with random data prior to executing workloads"""
+
+ job_args = {}
+ job_args_payload_list = ["target"]
+
+ for job_argument in job_args_payload_list:
+ try:
+ job_args[job_argument] = self.options[job_argument]
+ except KeyError:
+ pass
+
+ LOG.info("Starting initialization with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+ self.target, json=job_args)
+
+ job_res_content = jsonutils.loads(job_res.content)
+
+ if job_res.status_code != 200:
+ raise RuntimeError(
+ "Failed to start initialization job, error message:",
+ job_res_content["message"])
+ elif job_res.status_code == 200:
+ job_id = job_res_content["job_id"]
+ LOG.info("Started initialization as job id: %s...", job_id)
+
+ while not self.job_completed:
+ self._query_job_state(job_id)
+ time.sleep(self.query_interval)
+
+ self.job_completed = False
+
def teardown(self):
"""Deletes the agent configuration and the stack"""
- teardown_res = requests.delete('http://%s:5000/api/v1.0/\
- configurations' % self.target)
+ teardown_res = requests.delete(
+ 'http://%s:5000/api/v1.0/configurations' % self.target)
if teardown_res.status_code == 400:
teardown_res_content = jsonutils.loads(
- teardown_res.content)
+ teardown_res.content)
raise RuntimeError("Failed to reset environment, error message:",
teardown_res_content['message'])
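
The job submission above now selects the StorPerf API version from the
presence of a "workloads" list. A minimal sketch of that selection rule,
with standalone names that are not part of the patch:

    def choose_api_version(job_args):
        # v2.0 is only used when a non-empty "workloads" list was supplied;
        # every other job stays on the v1.0 jobs endpoint.
        workloads = job_args.get('workloads')
        return 'v2.0' if workloads else 'v1.0'

    assert choose_api_version({}) == 'v1.0'
    assert choose_api_version({'workloads': ['rs', 'wr']}) == 'v2.0'
    assert choose_api_version({'workloads': []}) == 'v1.0'
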
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index ca5a110e2..dee7044a5 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
import cgitb
import collections
import contextlib as cl
@@ -23,11 +21,11 @@ import os
from collections import Mapping, MutableMapping, Iterable, Callable, deque
from functools import partial
from itertools import chain
-from subprocess import CalledProcessError, Popen, PIPE
-from tempfile import NamedTemporaryFile
+import subprocess
+import tempfile
import six
-import six.moves.configparser as ConfigParser
+from six.moves import configparser
import yaml
from six import StringIO
from chainmap import ChainMap
@@ -134,10 +132,9 @@ class CustomTemporaryFile(object):
else:
self.data_types = self.DEFAULT_DATA_TYPES
# must open "w+" so unicode is encoded correctly
- self.creator = partial(NamedTemporaryFile, mode="w+", delete=False,
- dir=directory,
- prefix=prefix,
- suffix=self.suffix)
+ self.creator = partial(
+ tempfile.NamedTemporaryFile, mode="w+", delete=False,
+ dir=directory, prefix=prefix, suffix=self.suffix)
def make_context(self, data, write_func, descriptor='data'):
return TempfileContext(data, write_func, descriptor, self.data_types,
@@ -191,8 +188,8 @@ class FileNameGenerator(object):
if not prefix.endswith('_'):
prefix += '_'
- temp_file = NamedTemporaryFile(delete=False, dir=directory,
- prefix=prefix, suffix=suffix)
+ temp_file = tempfile.NamedTemporaryFile(delete=False, dir=directory,
+ prefix=prefix, suffix=suffix)
with cl.closing(temp_file):
return temp_file.name
@@ -474,7 +471,7 @@ class AnsibleCommon(object):
prefix = '_'.join([self.prefix, prefix, 'inventory'])
ini_temp_file = IniMapTemporaryFile(directory=directory, prefix=prefix)
- inventory_config = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory_config = configparser.ConfigParser(allow_no_value=True)
# disable default lowercasing
inventory_config.optionxform = str
return ini_temp_file.make_context(self.inventory_dict, write_func,
@@ -510,7 +507,7 @@ class AnsibleCommon(object):
return timeout
def _generate_ansible_cfg(self, directory):
- parser = ConfigParser.ConfigParser()
+ parser = configparser.ConfigParser()
parser.add_section('defaults')
parser.set('defaults', 'host_key_checking', 'False')
@@ -541,12 +538,12 @@ class AnsibleCommon(object):
cmd = ['ansible', 'all', '-m', 'setup', '-i',
inventory_path, '--tree', sut_dir]
- proc = Popen(cmd, stdout=PIPE, cwd=directory)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=directory)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
def _gen_sut_info_dict(self, sut_dir):
sut_info = {}
@@ -617,12 +614,13 @@ class AnsibleCommon(object):
# 'timeout': timeout / 2,
})
with Timer() as timer:
- proc = Popen(cmd, stdout=PIPE, **exec_args)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ **exec_args)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
timeout -= timer.total_seconds()
cmd.remove("--syntax-check")
@@ -632,10 +630,10 @@ class AnsibleCommon(object):
# TODO: add timeout support of use subprocess32 backport
# 'timeout': timeout,
})
- proc = Popen(cmd, stdout=PIPE, **exec_args)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, **exec_args)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
return output
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 4ed40f8af..3d775d48e 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -175,3 +175,4 @@ SCOPE_CLUSTER = 'Cluster'
# VNF definition
SSH_PORT = 22
+LUA_PORT = 22022
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index cbb294989..b39a0af9c 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -85,17 +85,19 @@ class InfluxDBConfigurationMissing(YardstickException):
class YardstickBannedModuleImported(YardstickException):
- # pragma: no cover
message = 'Module "%(module)s" cannot be imported. Reason: "%(reason)s"'
+class IXIAUnsupportedProtocol(YardstickException):
+ message = 'Protocol "%(protocol)s" is not supported in IXIA'
+
+
class PayloadMissingAttributes(YardstickException):
message = ('Error instantiating a Payload class, missing attributes: '
'%(missing_attributes)s')
class HeatTemplateError(YardstickException):
- """Error in Heat during the stack deployment"""
message = ('Error in Heat during the creation of the OpenStack stack '
'"%(stack_name)s"')
@@ -108,6 +110,10 @@ class TrafficProfileNotImplemented(YardstickException):
message = 'No implementation for traffic profile %(profile_class)s.'
+class TrafficProfileRate(YardstickException):
+ message = 'Traffic profile rate must be "<number>[fps|%]"'
+
+
class DPDKSetupDriverError(YardstickException):
message = '"igb_uio" driver is not loaded'
@@ -210,6 +216,10 @@ class WaitTimeout(YardstickException):
message = 'Wait timeout while waiting for condition'
+class PktgenActionError(YardstickException):
+ message = 'Error in "%(action)s" action'
+
+
class KubernetesApiException(YardstickException):
message = ('Kubernetes API errors. Action: %(action)s, '
'resource: %(resource)s')
@@ -396,5 +406,13 @@ class AclMissingActionArguments(YardstickException):
'[action=%(action_name)s parameter=%(action_param)s]')
-class AclUknownActionTemplate(YardstickException):
+class AclUnknownActionTemplate(YardstickException):
message = 'No ACL CLI template found for "%(action_name)s" action'
+
+
+class InvalidMacAddress(YardstickException):
+ message = 'Mac address "%(mac_address)s" is invalid'
+
+
+class ValueCheckError(YardstickException):
+ message = 'Constraint "%(value1)s %(operator)s %(value2)s" does not hold'
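
The new exception classes keep the existing convention of named
%-interpolation in "message", so every placeholder needs a trailing format
character (hence "%(protocol)s" above). How such a template renders, shown
with plain string formatting:

    message = 'Constraint "%(value1)s %(operator)s %(value2)s" does not hold'
    print(message % {'value1': 10, 'operator': '<', 'value2': 5})
    # Constraint "10 < 5" does not hold
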
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index 35e590f2b..323f13abb 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -75,15 +75,18 @@ def create_service(template,
raise
-def delete_service(name,
- namespace='default',
- **kwargs): # pragma: no cover
+def delete_service(name, namespace='default', skip_codes=None, **kwargs):
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
try:
body = client.V1DeleteOptions()
core_v1_api.delete_namespaced_service(name, namespace, body, **kwargs)
- except ApiException:
- LOG.exception('Delete Service failed')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Service')
def get_service_list(namespace='default', **kwargs):
@@ -118,8 +121,10 @@ def create_replication_controller(template,
def delete_replication_controller(name,
namespace='default',
wait=False,
- **kwargs): # pragma: no cover
+ skip_codes=None,
+ **kwargs):
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -128,9 +133,12 @@ def delete_replication_controller(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete replication controller failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='ReplicationController')
def delete_pod(name,
@@ -193,8 +201,10 @@ def create_config_map(name,
def delete_config_map(name,
namespace='default',
wait=False,
- **kwargs): # pragma: no cover
+ skip_codes=None,
+ **kwargs):
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -203,9 +213,12 @@ def delete_config_map(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete config map failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='ConfigMap')
def create_custom_resource_definition(body):
@@ -223,14 +236,18 @@ def create_custom_resource_definition(body):
action='create', resource='CustomResourceDefinition')
-def delete_custom_resource_definition(name):
+def delete_custom_resource_definition(name, skip_codes=None):
+ skip_codes = [] if not skip_codes else skip_codes
api = get_extensions_v1beta_api()
body_obj = client.V1DeleteOptions()
try:
api.delete_custom_resource_definition(name, body_obj)
- except ApiException:
- raise exceptions.KubernetesApiException(
- action='delete', resource='CustomResourceDefinition')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='CustomResourceDefinition')
def get_custom_resource_definition(kind):
@@ -246,8 +263,28 @@ def get_custom_resource_definition(kind):
action='delete', resource='CustomResourceDefinition')
-def create_network(scope, group, version, plural, body, namespace='default'):
+def get_network(scope, group, version, plural, name, namespace='default'):
+ api = get_custom_objects_api()
+ try:
+ if scope == consts.SCOPE_CLUSTER:
+ network = api.get_cluster_custom_object(group, version, plural, name)
+ else:
+ network = api.get_namespaced_custom_object(
+ group, version, namespace, plural, name)
+ except ApiException as e:
+ if e.status in [404]:
+ return
+ else:
+ raise exceptions.KubernetesApiException(
+ action='get', resource='Custom Object: Network')
+ return network
+
+
+def create_network(scope, group, version, plural, body, name, namespace='default'):
api = get_custom_objects_api()
+ if get_network(scope, group, version, plural, name, namespace):
+ LOG.info('Network %s already exists', name)
+ return
try:
if scope == consts.SCOPE_CLUSTER:
api.create_cluster_custom_object(group, version, plural, body)
@@ -259,7 +296,8 @@ def create_network(scope, group, version, plural, body, namespace='default'):
action='create', resource='Custom Object: Network')
-def delete_network(scope, group, version, plural, name, namespace='default'):
+def delete_network(scope, group, version, plural, name, namespace='default', skip_codes=None):
+ skip_codes = [] if not skip_codes else skip_codes
api = get_custom_objects_api()
try:
if scope == consts.SCOPE_CLUSTER:
@@ -267,9 +305,12 @@ def delete_network(scope, group, version, plural, name, namespace='default'):
else:
api.delete_namespaced_custom_object(
group, version, namespace, plural, name, {})
- except ApiException:
- raise exceptions.KubernetesApiException(
- action='delete', resource='Custom Object: Network')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Custom Object: Network')
def get_pod_list(namespace='default'): # pragma: no cover
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 85cecc714..c019cd264 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -28,6 +28,7 @@ import socket
import subprocess
import sys
import time
+import threading
import six
from flask import jsonify
@@ -193,20 +194,16 @@ def parse_ini_file(path):
def get_port_mac(sshclient, port):
cmd = "ifconfig |grep HWaddr |grep %s |awk '{print $5}' " % port
- status, stdout, stderr = sshclient.execute(cmd)
+ _, stdout, _ = sshclient.execute(cmd, raise_on_error=True)
- if status:
- raise RuntimeError(stderr)
return stdout.rstrip()
def get_port_ip(sshclient, port):
cmd = "ifconfig %s |grep 'inet addr' |awk '{print $2}' " \
"|cut -d ':' -f2 " % port
- status, stdout, stderr = sshclient.execute(cmd)
+ _, stdout, _ = sshclient.execute(cmd, raise_on_error=True)
- if status:
- raise RuntimeError(stderr)
return stdout.rstrip()
@@ -281,11 +278,19 @@ def get_free_port(ip):
def mac_address_to_hex_list(mac):
- octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
- assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)
+ try:
+ octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
+ except ValueError:
+ raise exceptions.InvalidMacAddress(mac_address=mac)
+ if len(octets) != 6 or any(len(octet) != 4 for octet in octets):
+ raise exceptions.InvalidMacAddress(mac_address=mac)
return octets
+def make_ipv4_address(ip_addr):
+ return ipaddress.IPv4Address(six.text_type(ip_addr))
+
+
def safe_ip_address(ip_addr):
""" get ip address version v6 or v4 """
try:
@@ -335,6 +340,14 @@ def ip_to_hex(ip_addr, separator=''):
return separator.join('{:02x}'.format(octet) for octet in address.packed)
+def get_mask_from_ip_range(ip_low, ip_high):
+ _ip_low = ipaddress.ip_address(ip_low)
+ _ip_high = ipaddress.ip_address(ip_high)
+ _ip_low_int = int(_ip_low)
+ _ip_high_int = int(_ip_high)
+ return _ip_high.max_prefixlen - (_ip_high_int ^ _ip_low_int).bit_length()
+
+
def try_int(s, *args):
"""Convert to integer if possible."""
try:
@@ -467,6 +480,9 @@ class Timer(object):
def __del__(self): # pragma: no cover
signal.alarm(0)
+ def delta_time_sec(self):
+ return (datetime.datetime.now() - self.start).total_seconds()
+
def read_meminfo(ssh_client):
"""Read "/proc/meminfo" file and parse all keys and values"""
@@ -513,17 +529,30 @@ def open_relative_file(path, task_path):
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
"""Wait until callable predicate is evaluated as True
+ When called from a thread other than the main one, Timer(timeout) cannot
+ be used because its SIGALRM handling only works in the main thread. In
+ that case the timeout is enforced by polling the elapsed time instead.
+
:param predicate: (func) callable deciding whether waiting should continue
:param timeout: (int) timeout in seconds how long should function wait
:param sleep: (int) polling interval for results in seconds
:param exception: exception instance to raise on timeout. If None is passed
(default) then WaitTimeout exception is raised.
"""
- try:
- with Timer(timeout=timeout):
- while not predicate():
+ if isinstance(threading.current_thread(), threading._MainThread):
+ try:
+ with Timer(timeout=timeout):
+ while not predicate():
+ time.sleep(sleep)
+ except exceptions.TimerTimeout:
+ if exception and issubclass(exception, Exception):
+ raise exception # pylint: disable=raising-bad-type
+ raise exceptions.WaitTimeout
+ else:
+ with Timer() as timer:
+ while timer.delta_time_sec() < timeout:
+ if predicate():
+ return
time.sleep(sleep)
- except exceptions.TimerTimeout:
if exception and issubclass(exception, Exception):
raise exception # pylint: disable=raising-bad-type
raise exceptions.WaitTimeout
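
For reference, get_mask_from_ip_range() works because XOR-ing the two ends of
a range leaves only the bits in which they differ; everything above those is
the common prefix. A self-contained sketch with made-up sample addresses:

    import ipaddress

    def mask_from_ip_range(ip_low, ip_high):
        low = ipaddress.ip_address(u'%s' % ip_low)
        high = ipaddress.ip_address(u'%s' % ip_high)
        # Bits that differ between the ends are host bits; the rest is prefix.
        return high.max_prefixlen - (int(high) ^ int(low)).bit_length()

    assert mask_from_ip_range('192.168.0.1', '192.168.0.255') == 24
    assert mask_from_ip_range('2001:db8::1', '2001:db8::ff') == 120
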
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index 74deeecb5..8274ff9ce 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -12,12 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import ipaddress
import logging
import IxNetwork
from yardstick.common import exceptions
from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
log = logging.getLogger(__name__)
@@ -32,12 +34,21 @@ PROTO_UDP = 'udp'
PROTO_TCP = 'tcp'
PROTO_VLAN = 'vlan'
-IP_VERSION_4_MASK = '0.0.0.255'
-IP_VERSION_6_MASK = '0:0:0:0:0:0:0:ff'
+SINGLE_VALUE = "singleValue"
+
+S_VLAN = 0
+C_VLAN = 1
+
+ETHER_TYPE_802_1ad = '0x88a8'
+
+IP_VERSION_4_MASK = 24
+IP_VERSION_6_MASK = 64
TRAFFIC_STATUS_STARTED = 'started'
TRAFFIC_STATUS_STOPPED = 'stopped'
+SUPPORTED_PROTO = [PROTO_UDP]
+
# NOTE(ralonsoh): this pragma will be removed in the last patch of this series
class IxNextgen(object): # pragma: no cover
@@ -328,7 +339,7 @@ class IxNextgen(object): # pragma: no cover
'-valueType', 'singleValue')
self.ixnet.commit()
- def update_frame(self, traffic):
+ def update_frame(self, traffic, duration):
"""Update the L2 frame
This function updates the L2 frame options:
@@ -336,8 +347,8 @@ class IxNextgen(object): # pragma: no cover
- Duration: in case of traffic_type="fixedDuration", amount of seconds
to inject traffic.
- Rate: in frames per seconds or percentage.
- - Type of rate: "framesPerSecond" ("bitsPerSecond" and
- "percentLineRate" no used)
+ - Type of rate: "framesPerSecond" or "percentLineRate" ("bitsPerSecond"
+ not used)
- Frame size: custom IMIX [1] definition; a list of packet size in
bytes and the weight. E.g.:
[[64, 64, 10], [128, 128, 15], [512, 512, 5]]
@@ -346,6 +357,7 @@ class IxNextgen(object): # pragma: no cover
:param traffic: list of traffic elements; each traffic element contains
the injection parameter for each flow group.
+ :param duration: (int) injection time in seconds.
"""
for traffic_param in traffic.values():
fg_id = str(traffic_param['id'])
@@ -354,23 +366,43 @@ class IxNextgen(object): # pragma: no cover
raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
type = traffic_param.get('traffic_type', 'fixedDuration')
- duration = traffic_param.get('duration', 30)
- rate = traffic_param['iload']
+ rate = traffic_param['rate']
+ rate_unit = (
+ 'framesPerSecond' if traffic_param['rate_unit'] ==
+ tp_base.TrafficProfileConfig.RATE_FPS else 'percentLineRate')
weighted_range_pairs = self._parse_framesize(
traffic_param['outer_l2']['framesize'])
srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01'))
dstmac = str(traffic_param.get('dstmac', '00:00:00:00:00:02'))
- # NOTE(ralonsoh): add QinQ tagging when
- # traffic_param['outer_l2']['QinQ'] exists.
- # s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
- # c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ if traffic_param['outer_l2']['QinQ']:
+ s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
+ c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'etherType')
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', ETHER_TYPE_802_1ad,
+ '-fieldValue', ETHER_TYPE_802_1ad,
+ '-valueType', SINGLE_VALUE)
+
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+
+ self._update_vlan_tag(fg_id, s_vlan, S_VLAN)
+ self._update_vlan_tag(fg_id, c_vlan, C_VLAN)
self.ixnet.setMultiAttribute(
config_element + '/transmissionControl',
'-type', type, '-duration', duration)
self.ixnet.setMultiAttribute(
config_element + '/frameRate',
- '-rate', rate, '-type', 'framesPerSecond')
+ '-rate', rate, '-type', rate_unit)
self.ixnet.setMultiAttribute(
config_element + '/frameSize',
'-type', 'weightedPairs',
@@ -384,6 +416,27 @@ class IxNextgen(object): # pragma: no cover
self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
'sourceAddress', srcmac)
+ def _update_vlan_tag(self, fg_id, params, vlan=0):
+ field_to_param_map = {
+ 'vlanUserPriority': 'priority',
+ 'cfi': 'cfi',
+ 'vlanID': 'id'
+ }
+ for field, param in field_to_param_map.items():
+ value = params.get(param)
+ if value:
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_VLAN)[vlan],
+ field)
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', value,
+ '-fieldValue', value,
+ '-valueType', SINGLE_VALUE)
+
+ self.ixnet.commit()
+
def _update_ipv4_address(self, ip_descriptor, field, ip_address, seed,
mask, count):
"""Set the IPv4 address in a config element stack IP field
@@ -393,15 +446,17 @@ class IxNextgen(object): # pragma: no cover
:param field: (str) field name, e.g.: srcIp, dstIp
:param ip_address: (str) IP address
:param seed: (int) seed length
- :param mask: (str) IP address mask
+ :param mask: (int) IP address mask length
:param count: (int) number of random IPs to generate
"""
field_descriptor = self._get_field_in_stack_item(ip_descriptor,
field)
+ random_mask = str(ipaddress.IPv4Address(
+ 2**(ipaddress.IPV4LENGTH - mask) - 1).compressed)
self.ixnet.setMultiAttribute(field_descriptor,
'-seed', seed,
'-fixedBits', ip_address,
- '-randomMask', mask,
+ '-randomMask', random_mask,
'-valueType', 'random',
'-countValue', count)
self.ixnet.commit()
@@ -420,15 +475,77 @@ class IxNextgen(object): # pragma: no cover
raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
count = traffic_param['outer_l3']['count']
- srcip4 = str(traffic_param['outer_l3']['srcip4'])
- dstip4 = str(traffic_param['outer_l3']['dstip4'])
+ srcip = str(traffic_param['outer_l3']['srcip'])
+ dstip = str(traffic_param['outer_l3']['dstip'])
+ seed = traffic_param['outer_l3']['seed']
+ srcmask = traffic_param['outer_l3']['srcmask'] or IP_VERSION_4_MASK
+ dstmask = traffic_param['outer_l3']['dstmask'] or IP_VERSION_4_MASK
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'srcIp', srcip4, 1, IP_VERSION_4_MASK, count)
+ 'srcIp', srcip, seed, srcmask, count)
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'dstIp', dstip4, 1, IP_VERSION_4_MASK, count)
+ 'dstIp', dstip, seed, dstmask, count)
+
+ def update_l4(self, traffic):
+ """Update the L4 headers
+
+ NOTE: Only UDP is currently supported
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ proto = traffic_param['outer_l3']['proto']
+ if proto not in SUPPORTED_PROTO:
+ raise exceptions.IXIAUnsupportedProtocol(protocol=proto)
+
+ count = traffic_param['outer_l4']['count']
+ seed = traffic_param['outer_l4']['seed']
+
+ srcport = traffic_param['outer_l4']['srcport']
+ srcmask = traffic_param['outer_l4']['srcportmask']
+
+ dstport = traffic_param['outer_l4']['dstport']
+ dstmask = traffic_param['outer_l4']['dstportmask']
+
+ if proto in SUPPORTED_PROTO:
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'srcPort', srcport, seed, srcmask, count)
+
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'dstPort', dstport, seed, dstmask, count)
+
+ def _update_udp_port(self, descriptor, field, value,
+ seed=1, mask=0, count=1):
+ """Set the UDP port in a config element stack UDP field
+
+ :param descriptor: (str) UDP stack descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"udp-3"
+ :param field: (str) field name, e.g.: srcPort, dstPort
+ :param value: (int) UDP port fixed bits
+ :param seed: (int) seed length
+ :param mask: (int) UDP port mask
+ :param count: (int) number of random ports to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(descriptor, field)
+
+ if mask == 0:
+ seed = count = 1
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-seed', seed,
+ '-fixedBits', value,
+ '-randomMask', mask,
+ '-valueType', 'random',
+ '-countValue', count)
+
+ self.ixnet.commit()
def _build_stats_map(self, view_obj, name_map):
return {data_yardstick: self.ixnet.execute(
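
Note on the mask handling above: IP_VERSION_4_MASK is now a prefix length (24)
and _update_ipv4_address() turns it into the wildcard string IxNetwork expects
for "-randomMask". A standalone sketch of that conversion (the helper name is
local to this note):

    import ipaddress

    def random_mask(prefix_len):
        # Host bits set to 1, network bits to 0, rendered as a dotted quad.
        host_bits = ipaddress.IPV4LENGTH - prefix_len
        return str(ipaddress.IPv4Address(2 ** host_bits - 1))

    assert random_mask(24) == '0.0.0.255'   # the old IP_VERSION_4_MASK literal
    assert random_mask(16) == '0.0.255.255'
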
diff --git a/yardstick/network_services/pipeline.py b/yardstick/network_services/pipeline.py
index d781ba0cd..7155480d4 100644
--- a/yardstick/network_services/pipeline.py
+++ b/yardstick/network_services/pipeline.py
@@ -18,6 +18,8 @@ import itertools
from six.moves import zip
+from yardstick.common import utils
+
FIREWALL_ADD_DEFAULT = "p {0} firewall add default 1"
FIREWALL_ADD_PRIO = """\
p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 6 0xFF port 0"""
@@ -59,8 +61,7 @@ class PipelineRules(object):
self.add_rule(FIREWALL_ADD_PRIO, ip)
def add_firewall_script(self, ip):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for i in range(256):
ip_addr[-2] = str(i)
@@ -87,8 +88,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_MPLS, ip, mac_addr, index)
def add_route_script(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for index in range(0, 256, 8):
ip_addr[-2] = str(index)
@@ -101,8 +101,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_QINQ, ip, mask, mac_addr, index)
def add_route_script2(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
mask = 24
for i in range(0, 256):
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py
index 356b36bd9..a1b26a24d 100644
--- a/yardstick/network_services/traffic_profile/__init__.py
+++ b/yardstick/network_services/traffic_profile/__init__.py
@@ -27,6 +27,7 @@ def register_modules():
'yardstick.network_services.traffic_profile.prox_profile',
'yardstick.network_services.traffic_profile.prox_ramp',
'yardstick.network_services.traffic_profile.rfc2544',
+ 'yardstick.network_services.traffic_profile.pktgen',
]
for module in modules:
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index f4b5b178c..a8f950b7b 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import re
+
from yardstick.common import exceptions
from yardstick.common import utils
@@ -21,10 +23,12 @@ class TrafficProfileConfig(object):
This object will parse and validate the traffic profile information.
"""
-
DEFAULT_SCHEMA = 'nsb:traffic_profile:0.1'
- DEFAULT_FRAME_RATE = 100
+ DEFAULT_FRAME_RATE = '100'
DEFAULT_DURATION = 30
+ RATE_FPS = 'fps'
+ RATE_PERCENTAGE = '%'
+ RATE_REGEX = re.compile(r'([0-9]*\.[0-9]+|[0-9]+)\s*(fps|%)*(.*)')
def __init__(self, tp_config):
self.schema = tp_config.get('schema', self.DEFAULT_SCHEMA)
@@ -32,7 +36,8 @@ class TrafficProfileConfig(object):
self.description = tp_config.get('description')
tprofile = tp_config['traffic_profile']
self.traffic_type = tprofile.get('traffic_type')
- self.frame_rate = tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE)
+ self.frame_rate, self.rate_unit = self._parse_rate(
+ tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE))
self.test_precision = tprofile.get('test_precision')
self.packet_sizes = tprofile.get('packet_sizes')
self.duration = tprofile.get('duration', self.DEFAULT_DURATION)
@@ -40,6 +45,27 @@ class TrafficProfileConfig(object):
self.upper_bound = tprofile.get('upper_bound')
self.step_interval = tprofile.get('step_interval')
+ def _parse_rate(self, rate):
+ """Parse traffic profile rate
+
+ The line rate can be defined in fps or as a percentage of the maximum
+ line rate:
+ - frame_rate = 5000 (by default, unit is 'fps')
+ - frame_rate = 5000fps
+ - frame_rate = 25%
+
+ :param rate: (string, int) line rate in fps or %
+ :return: (tuple: int, string) line rate number and unit
+ """
+ match = self.RATE_REGEX.match(str(rate))
+ if not match:
+ raise exceptions.TrafficProfileRate()
+ rate = float(match.group(1))
+ unit = match.group(2) if match.group(2) else self.RATE_FPS
+ if match.group(3):
+ raise exceptions.TrafficProfileRate()
+ return rate, unit
+
class TrafficProfile(object):
"""
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 39336785e..760b1e8d3 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -14,25 +14,43 @@
import logging
-from yardstick.network_services.traffic_profile.trex_traffic_profile import \
- TrexProfile
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import trex_traffic_profile
+
LOG = logging.getLogger(__name__)
-class IXIARFC2544Profile(TrexProfile):
+class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
UPLINK = 'uplink'
DOWNLINK = 'downlink'
+ DROP_PERCENT_ROUND = 6
+ RATE_ROUND = 5
def __init__(self, yaml_data):
super(IXIARFC2544Profile, self).__init__(yaml_data)
self.rate = self.config.frame_rate
+ self.rate_unit = self.config.rate_unit
- def _get_ixia_traffic_profile(self, profile_data, mac=None):
- if mac is None:
- mac = {}
+ def _get_ip_and_mask(self, ip_range):
+ _ip_range = ip_range.split('-')
+ if len(_ip_range) == 1:
+ return _ip_range[0], None
+
+ mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
+ return _ip_range[0], mask
+
+ def _get_fixed_and_mask(self, port_range):
+ _port_range = str(port_range).split('-')
+ if len(_port_range) == 1:
+ return int(_port_range[0]), 0
+ return int(_port_range[0]), int(_port_range[1])
+
+ def _get_ixia_traffic_profile(self, profile_data, mac=None):
+ mac = {} if mac is None else mac
result = {}
for traffickey, values in profile_data.items():
if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
@@ -48,21 +66,29 @@ class IXIARFC2544Profile(TrexProfile):
port_id = value.get('id', 1)
port_index = port_id - 1
- try:
- ip = value['outer_l3v6']
- except KeyError:
+
+ if value.get('outer_l3v4'):
ip = value['outer_l3v4']
src_key, dst_key = 'srcip4', 'dstip4'
else:
+ ip = value['outer_l3v6']
src_key, dst_key = 'srcip6', 'dstip6'
+ srcip, srcmask = self._get_ip_and_mask(ip[src_key])
+ dstip, dstmask = self._get_ip_and_mask(ip[dst_key])
+
+ outer_l4 = value.get('outer_l4')
+ src_port, src_port_mask = self._get_fixed_and_mask(outer_l4['srcport'])
+ dst_port, dst_port_mask = self._get_fixed_and_mask(outer_l4['dstport'])
result[traffickey] = {
'bidir': False,
- 'iload': '100',
'id': port_id,
+ 'rate': self.rate,
+ 'rate_unit': self.rate_unit,
'outer_l2': {
'framesize': value['outer_l2']['framesize'],
'framesPerSecond': True,
+ 'QinQ': value['outer_l2'].get('QinQ'),
'srcmac': mac['src_mac_{}'.format(port_index)],
'dstmac': mac['dst_mac_{}'.format(port_index)],
},
@@ -70,12 +96,23 @@ class IXIARFC2544Profile(TrexProfile):
'count': ip['count'],
'dscp': ip['dscp'],
'ttl': ip['ttl'],
- src_key: ip[src_key].split("-")[0],
- dst_key: ip[dst_key].split("-")[0],
+ 'seed': ip['seed'],
+ 'srcip': srcip,
+ 'dstip': dstip,
+ 'srcmask': srcmask,
+ 'dstmask': dstmask,
'type': key,
'proto': ip['proto'],
},
- 'outer_l4': value['outer_l4'],
+ 'outer_l4': {
+ 'srcport': src_port,
+ 'dstport': dst_port,
+ 'srcportmask': src_port_mask,
+ 'dstportmask': dst_port_mask,
+ 'count': outer_l4['count'],
+ 'seed': outer_l4['seed'],
+ }
+
}
except KeyError:
continue
@@ -83,11 +120,9 @@ class IXIARFC2544Profile(TrexProfile):
return result
def _ixia_traffic_generate(self, traffic, ixia_obj):
- for key, value in traffic.items():
- if key.startswith((self.UPLINK, self.DOWNLINK)):
- value['iload'] = str(self.rate)
- ixia_obj.update_frame(traffic)
+ ixia_obj.update_frame(traffic, self.config.duration)
ixia_obj.update_ip_packet(traffic)
+ ixia_obj.update_l4(traffic)
ixia_obj.start_traffic()
def update_traffic_profile(self, traffic_generator):
@@ -114,34 +149,33 @@ class IXIARFC2544Profile(TrexProfile):
self.pg_id = 0
self.update_traffic_profile(traffic_generator)
self.max_rate = self.rate
- self.min_rate = 0
+ self.min_rate = 0.0
else:
- self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 2)
+ self.rate = round(float(self.max_rate + self.min_rate) / 2.0,
+ self.RATE_ROUND)
traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
self._ixia_traffic_generate(traffic, ixia_obj)
return first_run
- def get_drop_percentage(self, samples, tol_min, tolerance, duration=30.0,
+ def get_drop_percentage(self, samples, tol_min, tolerance,
first_run=False):
completed = False
drop_percent = 100
num_ifaces = len(samples)
+ duration = self.config.duration
in_packets_sum = sum(
[samples[iface]['in_packets'] for iface in samples])
out_packets_sum = sum(
[samples[iface]['out_packets'] for iface in samples])
- rx_throughput = sum(
- [samples[iface]['RxThroughput'] for iface in samples])
- rx_throughput = round(float(rx_throughput), 2)
- tx_throughput = sum(
- [samples[iface]['TxThroughput'] for iface in samples])
- tx_throughput = round(float(tx_throughput), 2)
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
packet_drop = abs(out_packets_sum - in_packets_sum)
try:
drop_percent = round(
- (packet_drop / float(out_packets_sum)) * 100, 2)
+ (packet_drop / float(out_packets_sum)) * 100,
+ self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
@@ -150,8 +184,10 @@ class IXIARFC2544Profile(TrexProfile):
samples['DropPercentage'] = drop_percent
if first_run:
- self.rate = out_packets_sum / duration / num_ifaces
completed = True if drop_percent <= tolerance else False
+ if (first_run and
+ self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
+ self.rate = float(out_packets_sum) / duration / num_ifaces
if drop_percent > tolerance:
self.max_rate = self.rate
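
The range handling introduced above splits "low-high" notations once and hands
the low end plus a mask or count to the IXIA helpers. The port variant in
isolation, under a hypothetical standalone name:

    def fixed_and_mask(port_range):
        # "1000-1010" -> (1000, 1010); a single port "1000" -> (1000, 0),
        # and a mask of 0 makes _update_udp_port keep the value fixed.
        ends = str(port_range).split('-')
        if len(ends) == 1:
            return int(ends[0]), 0
        return int(ends[0]), int(ends[1])

    assert fixed_and_mask(1234) == (1234, 0)
    assert fixed_and_mask('1000-1010') == (1000, 1010)
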
diff --git a/yardstick/network_services/traffic_profile/pktgen.py b/yardstick/network_services/traffic_profile/pktgen.py
new file mode 100644
index 000000000..30f81b794
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/pktgen.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+class PktgenTrafficProfile(tp_base.TrafficProfile):
+ """This class handles Pktgen Trex Traffic profile execution"""
+
+ def __init__(self, tp_config): # pragma: no cover
+ super(PktgenTrafficProfile, self).__init__(tp_config)
+ self._host = None
+ self._port = None
+
+ def init(self, host, port): # pragma: no cover
+ """Initialize control parameters
+
+ :param host: (str) ip or host name
+ :param port: (int) TCP socket port number for Lua commands
+ """
+ self._host = host
+ self._port = port
+
+ def start(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.start("0")') != 0:
+ raise exceptions.PktgenActionError(action='start')
+
+ def stop(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.stop("0")') != 0:
+ raise exceptions.PktgenActionError(action='stop')
+
+ def rate(self, rate):
+ command = 'pktgen.set("0", "rate", ' + str(rate) + ')'
+ if utils.send_socket_command(self._host, self._port, command) != 0:
+ raise exceptions.PktgenActionError(action='rate')
+
+ def clear_all_stats(self):
+ if utils.send_socket_command(self._host, self._port, 'clr') != 0:
+ raise exceptions.PktgenActionError(action='clear all stats')
+
+ def help(self):
+ if utils.send_socket_command(self._host, self._port, 'help') != 0:
+ raise exceptions.PktgenActionError(action='help')
+
+ def execute_traffic(self, *args, **kwargs): # pragma: no cover
+ pass
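
The profile drives pktgen through utils.send_socket_command(), i.e. plain Lua
statements pushed over a TCP socket to pktgen's Lua server (the LUA_PORT 22022
added to constants above). A rough, self-contained sketch of such a sender; it
assumes a newline-terminated command is enough, and the real helper in
yardstick.common.utils may differ:

    import socket

    def send_lua_command(host, port, command):
        # Returns 0 on success, 1 on socket errors, mirroring the "!= 0"
        # checks in the profile above.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((host, port))
            sock.sendall((command + '\r\n').encode('utf-8'))
            return 0
        except socket.error:
            return 1
        finally:
            sock.close()

    # e.g. send_lua_command('10.0.0.5', 22022, 'pktgen.start("0")')
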
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 9457096c8..506a880e0 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -88,11 +88,6 @@ class ProxBinSearchProfile(ProxProfile):
theor_max_thruput = actual_max_thruput = 0
result_samples = {}
- rate_samples = {}
- pos_retry = 0
- neg_retry = 0
- total_retry = 0
- ok_retry = 0
# Store one time only value in influxdb
single_samples = {
@@ -110,15 +105,11 @@ class ProxBinSearchProfile(ProxProfile):
"interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
- for test_value in self.bounds_iterator(LOG):
+ for step_id, test_value in enumerate(self.bounds_iterator(LOG)):
pos_retry = 0
neg_retry = 0
total_retry = 0
- rate_samples["MAX_Rate"] = self.current_upper
- rate_samples["MIN_Rate"] = self.current_lower
- rate_samples["Test_Rate"] = test_value
- self.queue.put(rate_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info("Checking MAX %s MIN %s TEST %s",
self.current_upper, self.lower_bound, test_value)
while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
@@ -188,6 +179,11 @@ class ProxBinSearchProfile(ProxProfile):
self.queue.put({'theor_max_throughput': theor_max_thruput})
LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+ samples["MAX_Rate"] = self.current_upper
+ samples["MIN_Rate"] = self.current_lower
+ samples["Test_Rate"] = test_value
+ samples["Step_Id"] = step_id
+ samples["Confirmation_Retry"] = total_retry
self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info(">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index c24e2f65a..b54fc575f 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -52,7 +52,7 @@ class PortPgIDMap(object):
self._port_pg_id_map[port] = []
def get_pg_ids(self, port):
- return self._port_pg_id_map.get(port)
+ return self._port_pg_id_map.get(port, [])
def increase_pg_id(self, port=None):
port = self._last_port if not port else port
@@ -70,7 +70,7 @@ class PortPgIDMap(object):
class RFC2544Profile(trex_traffic_profile.TrexProfile):
"""TRex RFC2544 traffic profile"""
- TOLERANCE_LIMIT = 0.05
+ TOLERANCE_LIMIT = 0.01
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
@@ -246,6 +246,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
def get_drop_percentage(self, samples, tol_low, tol_high,
correlated_traffic):
"""Calculate the drop percentage and run the traffic"""
+ completed = False
tx_rate_fps = 0
rx_rate_fps = 0
for sample in samples:
@@ -266,15 +267,15 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
drop_percent = round(
(float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- tol_high = tol_high if tol_high > self.TOLERANCE_LIMIT else tol_high
- tol_low = tol_low if tol_low > self.TOLERANCE_LIMIT else tol_low
+ tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+ tol_low = min(tol_low, self.TOLERANCE_LIMIT)
if drop_percent > tol_high:
self.max_rate = self.rate
elif drop_percent < tol_low:
self.min_rate = self.rate
- # else:
- # NOTE(ralonsoh): the test should finish here
- # pass
+ else:
+ completed = True
+
last_rate = self.rate
self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
@@ -295,4 +296,4 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
'Rate': last_rate,
'Latency': latency
}
- return output
+ return completed, output
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index 8e9bc87e1..11a602472 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -197,7 +197,7 @@ class AclApproxSetupEnvSetupEnvHelper(DpdkVnfSetupEnvHelper):
# e.g.: {"fwd": {"port": 0}}
# format action CLI command and add it to the list
if action_name not in _action_template_map.keys():
- raise exceptions.AclUknownActionTemplate(
+ raise exceptions.AclUnknownActionTemplate(
action_name=action_name)
template = _action_template_map[action_name]
try:
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 3ef7c33c5..a09f2a7a9 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -426,7 +426,8 @@ class ClientResourceHelper(ResourceHelper):
iteration_index = 0
while self._terminated.value == 0:
iteration_index += 1
- self._run_traffic_once(traffic_profile)
+ if self._run_traffic_once(traffic_profile):
+ self._terminated.value = 1
mq_producer.tg_method_iteration(iteration_index)
self.client.stop(self.all_ports)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
new file mode 100644
index 000000000..9d452213f
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import multiprocessing
+import time
+import uuid
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PktgenTrafficGen(vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint):
+ """DPDK Pktgen traffic generator
+
+ Website: http://pktgen-dpdk.readthedocs.io/en/latest/index.html
+ """
+
+ TIMEOUT = 30
+
+ def __init__(self, name, vnfd, task_id):
+ vnf_base.GenericTrafficGen.__init__(self, name, vnfd, task_id)
+ self.queue = multiprocessing.Queue()
+ self._id = uuid.uuid1().int
+ self._mq_producer = self._setup_mq_producer(self._id)
+ vnf_base.GenericVNFEndpoint.__init__(self, self._id, [task_id],
+ self.queue)
+ self._consumer = vnf_base.GenericVNFConsumer([task_id], self)
+ self._consumer.start_rpc_server()
+ self._traffic_profile = None
+ self._node_ip = vnfd['mgmt-interface'].get('ip')
+ self._lua_node_port = self._get_lua_node_port(
+ vnfd['mgmt-interface'].get('service_ports', []))
+ self._rate = 1
+
+ def instantiate(self, scenario_cfg, context_cfg): # pragma: no cover
+ pass
+
+ def run_traffic(self, traffic_profile):
+ self._traffic_profile = traffic_profile
+ self._traffic_profile.init(self._node_ip, self._lua_node_port)
+ utils.wait_until_true(self._is_running, timeout=self.TIMEOUT,
+ sleep=2)
+
+ def terminate(self): # pragma: no cover
+ pass
+
+ def collect_kpi(self): # pragma: no cover
+ pass
+
+ def scale(self, flavor=''): # pragma: no cover
+ pass
+
+ def wait_for_instantiate(self): # pragma: no cover
+ pass
+
+ def runner_method_start_iteration(self, ctxt, **kwargs):
+ # pragma: no cover
+ LOG.debug('Start method')
+ # NOTE(ralonsoh): 'rate' should be modified between iterations. The
+ # current implementation is just for testing.
+ self._rate += 1
+ self._traffic_profile.start()
+ self._traffic_profile.rate(self._rate)
+ time.sleep(4)
+ self._traffic_profile.stop()
+ self._mq_producer.tg_method_iteration(1, 1, {})
+
+ def runner_method_stop_iteration(self, ctxt, **kwargs): # pragma: no cover
+ # pragma: no cover
+ LOG.debug('Stop method')
+
+ @staticmethod
+ def _get_lua_node_port(service_ports):
+ for port in (port for port in service_ports if
+ int(port['port']) == constants.LUA_PORT):
+ return int(port['node_port'])
+ # NOTE(ralonsoh): in case LUA port is not present, an exception should
+ # be raised.
+
+ def _is_running(self):
+ try:
+ self._traffic_profile.help()
+ return True
+ except exceptions.PktgenActionError:
+ return False
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 4d3bc2ce5..8927ce156 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -60,7 +60,7 @@ class IxiaResourceHelper(ClientResourceHelper):
def stop_collect(self):
self._terminated.value = 1
- def generate_samples(self, ports, key=None):
+ def generate_samples(self, ports, duration):
stats = self.get_stats()
samples = {}
@@ -70,27 +70,23 @@ class IxiaResourceHelper(ClientResourceHelper):
try:
# reverse lookup port name from port_num so the stats dict is descriptive
intf = self.vnfd_helper.find_interface_by_port(port_num)
- port_name = intf["name"]
+ port_name = intf['name']
+ avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num]
+ min_latency = stats['Store-Forward_Min_latency_ns'][port_num]
+ max_latency = stats['Store-Forward_Max_latency_ns'][port_num]
samples[port_name] = {
- "rx_throughput_kps": float(stats["Rx_Rate_Kbps"][port_num]),
- "tx_throughput_kps": float(stats["Tx_Rate_Kbps"][port_num]),
- "rx_throughput_mbps": float(stats["Rx_Rate_Mbps"][port_num]),
- "tx_throughput_mbps": float(stats["Tx_Rate_Mbps"][port_num]),
- "in_packets": int(stats["Valid_Frames_Rx"][port_num]),
- "out_packets": int(stats["Frames_Tx"][port_num]),
- # NOTE(ralonsoh): we need to make the traffic injection
- # time variable.
- "RxThroughput": int(stats["Valid_Frames_Rx"][port_num]) / 30,
- "TxThroughput": int(stats["Frames_Tx"][port_num]) / 30,
+ 'rx_throughput_kps': float(stats['Rx_Rate_Kbps'][port_num]),
+ 'tx_throughput_kps': float(stats['Tx_Rate_Kbps'][port_num]),
+ 'rx_throughput_mbps': float(stats['Rx_Rate_Mbps'][port_num]),
+ 'tx_throughput_mbps': float(stats['Tx_Rate_Mbps'][port_num]),
+ 'in_packets': int(stats['Valid_Frames_Rx'][port_num]),
+ 'out_packets': int(stats['Frames_Tx'][port_num]),
+ 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration,
+ 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration,
+ 'Store-Forward_Avg_latency_ns': avg_latency,
+ 'Store-Forward_Min_latency_ns': min_latency,
+ 'Store-Forward_Max_latency_ns': max_latency
}
- if key:
- avg_latency = stats["Store-Forward_Avg_latency_ns"][port_num]
- min_latency = stats["Store-Forward_Min_latency_ns"][port_num]
- max_latency = stats["Store-Forward_Max_latency_ns"][port_num]
- samples[port_name][key] = \
- {"Store-Forward_Avg_latency_ns": avg_latency,
- "Store-Forward_Min_latency_ns": min_latency,
- "Store-Forward_Max_latency_ns": max_latency}
except IndexError:
pass
@@ -129,13 +125,11 @@ class IxiaResourceHelper(ClientResourceHelper):
self, self.client, mac)
self.client_started.value = 1
# pylint: disable=unnecessary-lambda
- utils.wait_until_true(lambda: self.client.is_traffic_stopped())
- samples = self.generate_samples(traffic_profile.ports)
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped(),
+ timeout=traffic_profile.config.duration * 2)
+ samples = self.generate_samples(traffic_profile.ports,
+ traffic_profile.config.duration)
- # NOTE(ralonsoh): the traffic injection duration is fixed to 30
- # seconds. This parameter is configurable and must be retrieved
- # from the traffic_profile.full_profile information.
- # Every flow must have the same duration.
completed, samples = traffic_profile.get_drop_percentage(
samples, min_tol, max_tol, first_run=first_run)
self._queue.put(samples)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index cdbb41485..7ecb12478 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -45,11 +45,12 @@ class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
time.sleep(self.SAMPLING_PERIOD)
traffic_profile.stop_traffic(self)
- output = traffic_profile.get_drop_percentage(
+ completed, output = traffic_profile.get_drop_percentage(
samples, self.rfc2544_helper.tolerance_low,
self.rfc2544_helper.tolerance_high,
self.rfc2544_helper.correlated_traffic)
self._queue.put(output)
+ return completed
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index e0c0db262..99a5760a3 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -227,14 +227,10 @@ name (i.e. %s).
def add_volume_attachment(self, server_name, volume_name, mountpoint=None):
"""add to the template an association of volume to instance"""
- log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ", server_name,
- volume_name)
-
+ log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ",
+ server_name, volume_name)
name = "%s-%s" % (server_name, volume_name)
-
- volume_id = op_utils.get_volume_id(volume_name)
- if not volume_id:
- volume_id = {'get_resource': volume_name}
+ volume_id = {'get_resource': volume_name}
self.resources[name] = {
'type': 'OS::Cinder::VolumeAttachment',
'properties': {'instance_uuid': {'get_resource': server_name},
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 8d9fc41c9..b0b93a3c2 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -308,7 +308,7 @@ class ServiceNodePortObject(object):
k8s_utils.create_service(self.template)
def delete(self):
- k8s_utils.delete_service(self._name)
+ k8s_utils.delete_service(self._name, skip_codes=[404])
class CustomResourceDefinitionObject(object):
@@ -349,7 +349,7 @@ class CustomResourceDefinitionObject(object):
k8s_utils.create_custom_resource_definition(self._template)
def delete(self):
- k8s_utils.delete_custom_resource_definition(self._name)
+ k8s_utils.delete_custom_resource_definition(self._name, skip_codes=[404])
class NetworkObject(object):
@@ -437,11 +437,11 @@ class NetworkObject(object):
def create(self):
k8s_utils.create_network(self.scope, self.group, self.version,
- self.plural, self.template)
+ self.plural, self.template, self._name)
def delete(self):
k8s_utils.delete_network(self.scope, self.group, self.version,
- self.plural, self._name)
+ self.plural, self._name, skip_codes=[404])
class KubernetesTemplate(object):
diff --git a/yardstick/service/environment.py b/yardstick/service/environment.py
index 324589f79..d910e31e9 100644
--- a/yardstick/service/environment.py
+++ b/yardstick/service/environment.py
@@ -36,7 +36,7 @@ class Environment(Service):
return self._format_sut_info(sut_info)
- def _load_pod_info(self):
+ def _load_pod_info(self): # pragma: no cover
if self.pod is None:
raise MissingPodInfoError
@@ -51,10 +51,10 @@ class Environment(Service):
except (ValueError, KeyError):
raise UnsupportedPodFormatError
- def _format_sut_info(self, sut_info):
+ def _format_sut_info(self, sut_info): # pragma: no cover
return {k: self._format_node_info(v) for k, v in sut_info.items()}
- def _format_node_info(self, node_info):
+ def _format_node_info(self, node_info): # pragma: no cover
info = []
facts = node_info.get('ansible_facts', {})
@@ -93,9 +93,9 @@ class Environment(Service):
return info
- def _get_interface_info(self, facts, name):
+ def _get_interface_info(self, facts, name): # pragma: no cover
mac = facts.get('ansible_{}'.format(name), {}).get('macaddress')
return [name, mac] if mac else []
- def _get_device_info(self, name, info):
+ def _get_device_info(self, name, info): # pragma: no cover
return ['disk_{}'.format(name), info.get('size')]
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index e6a26ab6b..8bdc32c7c 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -499,9 +499,10 @@ class AutoConnectSSH(SSH):
""" Don't close anything, just force creation of a new client """
self._client = False
- def execute(self, cmd, stdin=None, timeout=3600):
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
self._connect()
- return super(AutoConnectSSH, self).execute(cmd, stdin, timeout)
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600,
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 35236637d..e1414c2ae 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -9,11 +9,13 @@
import copy
import io
+import logging
import os
import sys
import mock
import six
+from six.moves import builtins
import unittest
import uuid
@@ -322,9 +324,9 @@ class TaskTestCase(unittest.TestCase):
actual_result = t._parse_options(options)
self.assertEqual(expected_result, actual_result)
- @mock.patch('six.moves.builtins.open', side_effect=mock.mock_open())
+ @mock.patch.object(builtins, 'open', side_effect=mock.mock_open())
@mock.patch.object(task, 'utils')
- @mock.patch('logging.root')
+ @mock.patch.object(logging, 'root')
def test_set_log(self, mock_logging_root, *args):
task_obj = task.Task()
task_obj.task_id = 'task_id'
@@ -561,7 +563,8 @@ key2:
mock_open.assert_has_calls([mock.call('args_file'),
mock.call('task_file')])
- def test__render_task_error_arguments(self):
+ @mock.patch.object(builtins, 'print')
+ def test__render_task_error_arguments(self, *args):
with self.assertRaises(exceptions.TaskRenderArgumentError):
task.TaskParser('task_file')._render_task('value1="var3"', None)
diff --git a/yardstick/tests/unit/benchmark/core/test_testcase.py b/yardstick/tests/unit/benchmark/core/test_testcase.py
index 119465887..077848d77 100644
--- a/yardstick/tests/unit/benchmark/core/test_testcase.py
+++ b/yardstick/tests/unit/benchmark/core/test_testcase.py
@@ -7,28 +7,28 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.testcase
-
-from __future__ import absolute_import
-import unittest
+import mock
+from six.moves import builtins
from yardstick.benchmark.core import testcase
+from yardstick.tests.unit import base as ut_base
class Arg(object):
def __init__(self):
- self.casename = ('opnfv_yardstick_tc001',)
+ self.casename = ('opnfv_yardstick_tc001', )
-class TestcaseUT(unittest.TestCase):
+class TestcaseTestCase(ut_base.BaseUnitTestCase):
def test_list_all(self):
t = testcase.Testcase()
result = t.list_all("")
self.assertIsInstance(result, list)
- def test_show(self):
+ @mock.patch.object(builtins, 'print')
+ def test_show(self, *args):
t = testcase.Testcase()
casename = Arg()
result = t.show(casename)
diff --git a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
new file mode 100644
index 000000000..7b1e1e976
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
@@ -0,0 +1,220 @@
+##############################################################################
+# Copyright (c) 2018 Nokia and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+import os
+import time
+
+from yardstick.benchmark.runners import arithmetic
+
+
+class ArithmeticRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ def __init__(self):
+ self.count = 101
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ return self.count
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {
+ 'interval': 0,
+ 'iter_type': 'nested_for_loops',
+ 'iterators': [
+ {
+ 'name': 'stride',
+ 'start': 64,
+ 'stop': 128,
+ 'step': 64
+ },
+ {
+ 'name': 'size',
+ 'start': 500,
+ 'stop': 2000,
+ 'step': 500
+ }
+ ]
+ },
+ 'type': 'some_type'
+ }
+
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_process_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ @mock.patch.object(os, 'getpid')
+ @mock.patch.object(multiprocessing, 'Process')
+ def test__run_benchmark_called_with(self, mock_multiprocessing_process,
+ mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ runner = arithmetic.ArithmeticRunner({})
+ benchmark_cls = mock.Mock()
+ runner._run_benchmark(benchmark_cls, 'my_method', self.scenario_cfg,
+ {})
+ mock_multiprocessing_process.assert_called_once_with(
+ name='Arithmetic-some_type-101',
+ target=arithmetic._worker_process,
+ args=(runner.result_queue, benchmark_cls, 'my_method',
+ self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_nested_for_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 8)
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 8)
+ self.assertEqual(mock_time_sleep.call_count, 8)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_tuple_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 2)
+ self.assertEqual(self.benchmark.my_method.call_count, 2)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 2)
+ self.assertEqual(mock_time_sleep.call_count, 2)
+
+ def test__worker_process_stored_options_nested_for_loops(self):
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_stored_options_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 1000})
+
+ def test__worker_process_aborted_set_early(self):
+ aborted = multiprocessing.Event()
+ aborted.set()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ aborted, mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.scenario_cfg['options'], {})
+ self.benchmark.my_method.assert_not_called()
+
+ def test__worker_process_output_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103, 104, 105, 106, 107, 108, 109])
+
+ def test__worker_process_output_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103])
+
+ def test__worker_process_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
+
+ def test__worker_process_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
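For readers of the new test module above: a small illustration in plain Python (not the runner code) of the two iteration modes the assertions assume, using the iterator ranges from setUp.

    import itertools

    strides = range(64, 128 + 1, 64)      # 64, 128
    sizes = range(500, 2000 + 1, 500)     # 500, 1000, 1500, 2000

    nested = list(itertools.product(strides, sizes))  # 8 runs, ends (128, 2000)
    zipped = list(zip(strides, sizes))                # 2 runs, ends (128, 1000)

This matches the call counts and the final 'options' dicts checked in the nested_for_loops and tuple_loops tests.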
diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py
index 21e3874b8..d4801ef2c 100644
--- a/yardstick/tests/unit/benchmark/runner/test_duration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_duration.py
@@ -11,17 +11,50 @@ import mock
import unittest
import multiprocessing
import os
+import time
from yardstick.benchmark.runners import duration
+from yardstick.common import exceptions as y_exc
class DurationRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
+ BROAD_EXCEPTION_SIDE_EFFECT = 2
+
+ def __init__(self, side_effect=0):
+ self.count = 101
+ self.side_effect = side_effect
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ if self.side_effect == self.SLA_VALIDATION_ERROR_SIDE_EFFECT:
+ raise y_exc.SLAValidationError(case_name='My Case',
+ error_msg='my error message')
+ elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT:
+ raise y_exc.YardstickException
+ return self.count
+
def setUp(self):
self.scenario_cfg = {
'runner': {'interval': 0, "duration": 0},
'type': 'some_type'
}
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ def _assert_defaults__worker_run_one_iteration(self):
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
@mock.patch.object(os, 'getpid')
@mock.patch.object(multiprocessing, 'Process')
def test__run_benchmark_called_with(self, mock_multiprocessing_process,
@@ -37,3 +70,246 @@ class DurationRunnerTest(unittest.TestCase):
target=duration._worker_process,
args=(runner.result_queue, benchmark_cls, 'my_method',
self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ def test__worker_process_called_with_cfg(self):
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_called_with_cfg_loop(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.call_count, 2)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ def test__worker_process_called_without_cfg(self):
+ scenario_cfg = {'runner': {}}
+ aborted = multiprocessing.Event()
+ aborted.set()
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ scenario_cfg, {}, aborted, mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(1)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(1)
+ self.benchmark.teardown.assert_called_once()
+
+ def test__worker_process_output_queue(self):
+ self.benchmark.my_method = mock.Mock(return_value='my_result')
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+ self.assertEqual(output_queue.get(), 'my_result')
+
+ def test__worker_process_output_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 101
+ while not output_queue.empty():
+ count += 1
+ self.assertEqual(output_queue.get(), count)
+
+ def test__worker_process_queue(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.5
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+
+ def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
+ self.scenario_cfg['sla'] = {}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_queue_on_sla_validation_error_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], ('My Case SLA validation failed. '
+ 'Error: my error message',))
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_broad_exception(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_queue_on_broad_exception(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertNotEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_benchmark_teardown_on_broad_exception(self):
+ self.benchmark.teardown = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ with self.assertRaises(SystemExit) as raised:
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.assertEqual(raised.exception.code, 1)
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
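The SLA-related tests above pin down the following per-iteration behaviour; here is a simplified sketch of it (an inference from the assertions, not the runner source).

    from yardstick.common import exceptions as y_exc

    def run_iteration(method, data, scenario_cfg):
        # Simplified sketch of the error handling the tests encode.
        sla_action = None
        if 'sla' in scenario_cfg:
            sla_action = scenario_cfg['sla'].get('action', 'assert')
        errors = ''
        try:
            method(data)
        except y_exc.SLAValidationError as error:
            if sla_action == 'assert':     # also the default when 'sla' exists
                raise
            if sla_action == 'monitor':
                errors = error.args        # ends up in result['errors']
        except y_exc.YardstickException as error:
            errors = str(error)            # broad errors are recorded, not raised
        return errors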
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index d5c95a086..0f68753fd 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -48,7 +48,7 @@ class AttackerBaremetalTestCase(unittest.TestCase):
host = {
"ipmi_ip": "10.20.0.5",
"ipmi_user": "root",
- "ipmi_pwd": "123456",
+ "ipmi_password": "123456",
"ip": "10.20.0.5",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
@@ -77,7 +77,7 @@ class AttackerBaremetalTestCase(unittest.TestCase):
def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess):
self.attacker_cfg["jump_host"] = 'node1'
- self.context["node1"]["pwd"] = "123456"
+ self.context["node1"]["password"] = "123456"
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
index 7a2324b3d..b0488bacd 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
@@ -8,28 +8,56 @@
##############################################################################
import unittest
-from yardstick.benchmark.scenarios.lib.check_value import CheckValue
+from yardstick.benchmark.scenarios.lib import check_value
+from yardstick.common import exceptions as y_exc
+
class CheckValueTestCase(unittest.TestCase):
- def setUp(self):
- self.result = {}
+ def test_eq_pass(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 1}}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertEqual({}, result)
+
+ def test_ne_pass(self):
+ scenario_cfg = {'options': {'operator': 'ne',
+ 'value1': 1,
+ 'value2': 2}}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertEqual({}, result)
+
+ def test_result(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 1},
+ 'output': 'foo'}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertDictEqual(result, {'foo': 'PASS'})
- def test_check_value_eq(self):
- scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 2}}
- obj = CheckValue(scenario_cfg, {})
- self.assertRaises(AssertionError, obj.run, self.result)
- self.assertEqual({}, self.result)
+ def test_eq(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 2}}
+ obj = check_value.CheckValue(scenario_cfg, {})
- def test_check_value_eq_pass(self):
- scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 1}}
- obj = CheckValue(scenario_cfg, {})
+ with self.assertRaises(y_exc.ValueCheckError):
+ result = obj.run({})
+ self.assertEqual({}, result)
- obj.run(self.result)
- self.assertEqual({}, self.result)
+ def test_ne(self):
+ scenario_cfg = {'options': {'operator': 'ne',
+ 'value1': 1,
+ 'value2': 1}}
+ obj = check_value.CheckValue(scenario_cfg, {})
- def test_check_value_ne(self):
- scenario_cfg = {'options': {'operator': 'ne', 'value1': 1, 'value2': 1}}
- obj = CheckValue(scenario_cfg, {})
- self.assertRaises(AssertionError, obj.run, self.result)
- self.assertEqual({}, self.result)
+ with self.assertRaises(y_exc.ValueCheckError):
+ result = obj.run({})
+ self.assertEqual({}, result)
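The rewritten CheckValue tests above imply the following usage; this snippet only restates what the assertions check (y_exc.ValueCheckError on a failed comparison, and an optional 'output' key mapped to 'PASS' on success — 'status' here is just an illustrative key name).

    from yardstick.benchmark.scenarios.lib import check_value
    from yardstick.common import exceptions as y_exc

    passing = check_value.CheckValue(
        {'options': {'operator': 'eq', 'value1': 1, 'value2': 1},
         'output': 'status'}, {})
    assert passing.run({}) == {'status': 'PASS'}

    failing = check_value.CheckValue(
        {'options': {'operator': 'eq', 'value1': 1, 'value2': 2}}, {})
    try:
        failing.run({})
    except y_exc.ValueCheckError:
        pass   # comparison failures now raise instead of AssertionError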
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 4016f5055..5761e2403 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -9,18 +9,22 @@
import mock
import unittest
+import logging
from oslo_serialization import jsonutils
+from yardstick import ssh
from yardstick.benchmark.scenarios.networking import pktgen
from yardstick.common import exceptions as y_exc
-@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
+logging.disable(logging.CRITICAL)
+
+
class PktgenTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
'host': {
'ip': '172.16.0.137',
'user': 'root',
@@ -33,636 +37,416 @@ class PktgenTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138'
}
}
+ self.scenario_cfg = {
+ 'options': {'packetsize': 60}
+ }
- def test_pktgen_successful_setup(self, mock_ssh):
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.setup()
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_SSH.from_node().run.return_value = 0
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.assertIsNotNone(p.server)
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ self.addCleanup(self._stop_mock)
- def test_pktgen_successful_iptables_setup(self, mock_ssh):
+ self.scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ self.scenario.setup()
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+
+ def test_setup_successful(self):
+ self.assertIsNotNone(self.scenario.server)
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
- p._iptables_setup()
+ def test_iptables_setup_successful(self):
+ self.scenario.number_of_ports = 10
+ self.scenario._iptables_setup()
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"sudo iptables -F; "
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP"
% 1010, timeout=60)
- def test_pktgen_unsuccessful_iptables_setup(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
-
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
-
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._iptables_setup)
+ def test_iptables_setup_unsuccessful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
- def test_pktgen_successful_iptables_get_result(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._iptables_setup()
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ def test_iptables_get_result_successful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().execute.return_value = (0, '150000', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '150000', '')
- result = p._iptables_get_result()
- expected_result = 150000
- self.assertEqual(result, expected_result)
+ result = self.scenario._iptables_get_result()
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(result, 150000)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo iptables -L INPUT -vnx |"
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'"
% 1010, raise_on_error=True)
- def test_pktgen_unsuccessful_iptables_get_result(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
-
- p = pktgen.Pktgen(args, self.ctx)
+ def test_iptables_get_result_unsuccessful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._iptables_get_result()
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._iptables_get_result)
+ def test_run_successful_no_sla(self):
+ self.scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110,
+ "ppm": 3179})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_successful_no_sla(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
result = {}
+ self.scenario.run(result)
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_successful_sla(self, mock_ssh):
+ def test_run_successful_sla(self):
+ self.scenario_cfg['sla'] = {'max_ppm': 10000}
+ scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110,
+ "ppm": 3179})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 10000}
- }
result = {}
+ scenario.run(result)
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_unsuccessful_sla(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 1000}
- }
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
+ def test_run_unsuccessful_sla(self):
+ self.scenario_cfg['sla'] = {'max_ppm': 1000}
+ scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SLAValidationError):
+ scenario.run({})
- p._iptables_get_result = mock.Mock(return_value=149300)
+ def test_run_ssh_error_not_caught(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(y_exc.SLAValidationError, p.run, result)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario.run({})
- def test_pktgen_unsuccessful_script_error(self, mock_ssh):
+ def test_get_vnic_driver_name(self):
+ self.mock_SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
+ vnic_driver_name = self.scenario._get_vnic_driver_name()
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 1000}
- }
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p.run, result)
-
- def test_pktgen_get_vnic_driver_name(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
-
- vnic_driver_name = p._get_vnic_driver_name()
self.assertEqual(vnic_driver_name, 'ixgbevf')
- def test_pktgen_unsuccessful_get_vnic_driver_name(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._get_vnic_driver_name)
+ def test_get_vnic_driver_name_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_get_sriov_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_vnic_driver_name()
- p.queue_number = p._get_sriov_queue_number()
- self.assertEqual(p.queue_number, 2)
+ def test_get_sriov_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '2', '')
- def test_pktgen_unsuccessful_get_sriov_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ self.scenario.queue_number = self.scenario._get_sriov_queue_number()
+ self.assertEqual(self.scenario.queue_number, 2)
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_get_sriov_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._get_sriov_queue_number)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_sriov_queue_number()
- def test_pktgen_get_available_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_get_available_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '4', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
-
- self.assertEqual(p._get_available_queue_number(), 4)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(self.scenario._get_available_queue_number(), 4)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | head -1 |"
"awk '{printf $2}'", raise_on_error=True)
- def test_pktgen_unsuccessful_get_available_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_get_available_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._get_available_queue_number)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_available_queue_number()
- def test_pktgen_get_usable_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_get_usable_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
- self.assertEqual(p._get_usable_queue_number(), 1)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(self.scenario._get_usable_queue_number(), 1)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | tail -1 |"
"awk '{printf $2}'", raise_on_error=True)
- def test_pktgen_unsuccessful_get_usable_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_get_usable_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_usable_queue_number()
- self.assertRaises(y_exc.SSHError, p._get_usable_queue_number)
+ def test_enable_ovs_multiqueue(self):
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
+ self.scenario.queue_number = self.scenario._enable_ovs_multiqueue()
- def test_pktgen_enable_ovs_multiqueue(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+ self.assertEqual(self.scenario.queue_number, 4)
+ self.mock_SSH.from_node().run.assert_has_calls(
+ (mock.call("sudo ethtool -L eth0 combined 4"),
+ mock.call("sudo ethtool -L eth0 combined 4")))
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
+ def test_enable_ovs_multiqueue_1q(self):
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=1)
+ self.scenario.queue_number = self.scenario._enable_ovs_multiqueue()
- p.queue_number = p._enable_ovs_multiqueue()
- self.assertEqual(p.queue_number, 4)
-
- def test_pktgen_enable_ovs_multiqueue_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.mock_SSH.from_node().run.assert_not_called()
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_enable_ovs_multiqueue_unsuccessful(self):
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=1)
-
- p.queue_number = p._enable_ovs_multiqueue()
- self.assertEqual(p.queue_number, 1)
-
- def test_pktgen_unsuccessful_enable_ovs_multiqueue(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._enable_ovs_multiqueue()
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
+ def test_setup_irqmapping_ovs(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_ovs(4)
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
-
- self.assertRaises(y_exc.SSHError, p._enable_ovs_multiqueue)
-
- def test_pktgen_setup_irqmapping_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
-
- p._setup_irqmapping_ovs(4)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 8 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_setup_irqmapping_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ def test_setup_irqmapping_ovs_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_ovs(1)
- p._setup_irqmapping_ovs(1)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_unsuccessful_setup_irqmapping_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 4)
+ def test_setup_irqmapping_ovs_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_unsuccessful_setup_irqmapping_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_ovs(4)
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 1)
-
- def test_pktgen_setup_irqmapping_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ def test_setup_irqmapping_ovs_1q_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_ovs(1)
- p._setup_irqmapping_sriov(2)
+ def test_setup_irqmapping_sriov(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 2 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_setup_irqmapping_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ def test_setup_irqmapping_sriov_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_sriov(1)
- p._setup_irqmapping_sriov(1)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_unsuccessful_setup_irqmapping_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 2)
+ def test_setup_irqmapping_sriov_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_unsuccessful_setup_irqmapping_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_setup_irqmapping_sriov_1q_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 1)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_sriov(1)
- def test_pktgen_is_irqbalance_disabled(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_is_irqbalance_disabled(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-
- result = p._is_irqbalance_disabled()
- self.assertFalse(result)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertFalse(self.scenario._is_irqbalance_disabled())
+ self.mock_SSH.from_node().execute.assert_called_with(
"grep ENABLED /etc/default/irqbalance", raise_on_error=True)
- def test_pktgen_unsuccessful_is_irqbalance_disabled(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._is_irqbalance_disabled)
-
- def test_pktgen_disable_irqbalance(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ def test_is_irqbalance_disabled_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().run.return_value = (0, '', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._is_irqbalance_disabled()
- p._disable_irqbalance()
+ def test_disable_irqbalance(self):
+ self.scenario._disable_irqbalance()
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"sudo service irqbalance disable")
- def test_pktgen_unsuccessful_disable_irqbalance(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._disable_irqbalance)
-
- def test_pktgen_multiqueue_setup_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+ def test_disable_irqbalance_unsuccessful(self):
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._disable_irqbalance()
- p.multiqueue_setup()
+ def test_multiqueue_setup_ovs(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '4', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(
+ return_value="virtio_net")
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
- self.assertEqual(p.queue_number, 4)
+ self.scenario.multiqueue_setup()
- def test_pktgen_multiqueue_setup_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.assertEqual(self.scenario.queue_number, 4)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_multiqueue_setup_ovs_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(
+ return_value="virtio_net")
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=1)
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=1)
+ self.scenario.multiqueue_setup()
- p.multiqueue_setup()
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- self.assertEqual(p.queue_number, 1)
+ def test_multiqueue_setup_sriov(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '2', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- def test_pktgen_multiqueue_setup_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.scenario.multiqueue_setup()
- mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+ self.assertEqual(self.scenario.queue_number, 2)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ def test_multiqueue_setup_sriov_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- p.multiqueue_setup()
+ self.scenario.multiqueue_setup()
- self.assertEqual(p.queue_number, 2)
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- def test_pktgen_multiqueue_setup_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
-
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
-
- p.multiqueue_setup()
-
- self.assertEqual(p.queue_number, 1)
-
- def test_pktgen_run_with_setup_done(self, mock_ssh):
- args = {
+ def test_run_with_setup_done(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
'sla': {
- 'max_ppm': 1}}
- result = {}
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ 'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.server = self.mock_SSH.from_node()
+ scenario.client = self.mock_SSH.from_node()
+ scenario.setup_done = True
+ scenario.multiqueue_setup_done = True
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.setup_done = True
- p.multiqueue_setup_done = True
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_run_with_ovs_multiqueque(self, mock_ssh):
- args = {
+ def test_run_with_ovs_multiqueque(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
- 'sla': {
- 'max_ppm': 1}}
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
- p._enable_ovs_multiqueue = mock.Mock(return_value=4)
- p._setup_irqmapping_ovs = mock.Mock()
- p._iptables_get_result = mock.Mock(return_value=149300)
+ 'sla': {'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ scenario._get_available_queue_number = mock.Mock(return_value=4)
+ scenario._enable_ovs_multiqueue = mock.Mock(return_value=4)
+ scenario._setup_irqmapping_ovs = mock.Mock()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_run_with_sriov_multiqueque(self, mock_ssh):
- args = {
+ def test_run_with_sriov_multiqueue(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
- 'sla': {
- 'max_ppm': 1}}
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
+ 'sla': {'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ scenario._get_sriov_queue_number = mock.Mock(return_value=2)
+ scenario._setup_irqmapping_sriov = mock.Mock()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- p._get_sriov_queue_number = mock.Mock(return_value=2)
- p._setup_irqmapping_sriov = mock.Mock()
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index bcd417830..70cd8ad40 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -9,16 +9,22 @@
import mock
import unittest
+import time
+import logging
import yardstick.common.utils as utils
+from yardstick import ssh
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
from yardstick.common import exceptions as y_exc
+logging.disable(logging.CRITICAL)
+
+
class PktgenDPDKLatencyTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
'host': {
'ip': '172.16.0.137',
'user': 'root',
@@ -31,165 +37,100 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138'
}
}
-
- self._mock_ssh = mock.patch(
- 'yardstick.benchmark.scenarios.networking.pktgen_dpdk.ssh')
- self.mock_ssh = self._mock_ssh.start()
- self._mock_time = mock.patch(
- 'yardstick.benchmark.scenarios.networking.pktgen_dpdk.time')
- self.mock_time = self._mock_time.start()
-
- self.addCleanup(self._stop_mock)
-
- def _stop_mock(self):
- self._mock_ssh.stop()
- self._mock_time.stop()
-
- def test_pktgen_dpdk_successful_setup(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.setup()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.assertIsNotNone(p.server)
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- def test_pktgen_dpdk_successful_get_port_ip(self):
-
- args = {
- 'options': {'packetsize': 60},
+ self.scenario_cfg = {
+ 'options': {'packetsize': 60}
}
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- utils.get_port_ip(p.server, "eth1")
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
- self.mock_ssh.SSH.from_node().execute.assert_called_with(
- "ifconfig eth1 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ")
-
- def test_pktgen_dpdk_unsuccessful_get_port_ip(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self._mock_utils_get_port_ip = mock.patch.object(utils, 'get_port_ip')
+ self.mock_utils_get_port_ip = self._mock_utils_get_port_ip.start()
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, utils.get_port_ip, p.server, "eth1")
+ self._mock_utils_get_port_mac = mock.patch.object(utils,
+ 'get_port_mac')
+ self.mock_utils_get_port_mac = self._mock_utils_get_port_mac.start()
- def test_pktgen_dpdk_successful_get_port_mac(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self.addCleanup(self._stop_mock)
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
+ self.scenario.server = self.mock_SSH.from_node()
+ self.scenario.client = self.mock_SSH.from_node()
- utils.get_port_mac(p.server, "eth1")
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+ self._mock_time_sleep.stop()
+ self._mock_utils_get_port_ip.stop()
+ self._mock_utils_get_port_mac.stop()
- self.mock_ssh.SSH.from_node().execute.assert_called_with(
- "ifconfig |grep HWaddr |grep eth1 |awk '{print $5}' ")
+ def test_setup(self):
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
+ scenario.setup()
- def test_pktgen_dpdk_unsuccessful_get_port_mac(self):
+ self.assertIsNotNone(scenario.server)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
- args = {
- 'options': {'packetsize': 60},
- }
+ def test_run_get_port_ip_command(self):
+ self.scenario.run({})
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self.mock_utils_get_port_ip.assert_has_calls(
+ [mock.call(self.scenario.server, 'ens4'),
+ mock.call(self.scenario.server, 'ens5')])
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, utils.get_port_mac, p.server, "eth1")
+ def test_get_port_mac_command(self):
+ self.scenario.run({})
- def test_pktgen_dpdk_successful_no_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
-
- result = {}
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ self.mock_utils_get_port_mac.assert_has_calls(
+ [mock.call(self.scenario.server, 'ens5'),
+ mock.call(self.scenario.server, 'ens4'),
+ mock.call(self.scenario.server, 'ens5')])
+ def test_run_no_sla(self):
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.run(result)
+ result = {}
+ self.scenario.run(result)
# with python 3 we get float, might be due python division changes
# AssertionError: {'avg_latency': 132.33333333333334} != {
# 'avg_latency': 132}
delta = result['avg_latency'] - 132
self.assertLessEqual(delta, 1)
- def test_pktgen_dpdk_successful_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
- result = {}
-
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ def test_run_sla(self):
+ self.scenario_cfg['sla'] = {'max_latency': 100}
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
sample_output = '100\n100\n100\n100\n100\n100\n100\n100\n100\n100\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
-
- self.assertEqual(result, {"avg_latency": 100})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_dpdk_unsuccessful_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
result = {}
+ scenario.run(result)
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ self.assertEqual(result, {"avg_latency": 100})
- p.server = self.mock_ssh.SSH.from_node()
- p.client = self.mock_ssh.SSH.from_node()
+ def test_run_sla_error(self):
+ self.scenario_cfg['sla'] = {'max_latency': 100}
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(y_exc.SLAValidationError, p.run, result)
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_dpdk_run_unsuccessful_get_port_mac(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
- result = {}
-
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
-
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, result)
-
- def test_pktgen_dpdk_run_unsuccessful_script_error(self):
- args = {
- 'options': {'packetsize': 60}
- }
+ with self.assertRaises(y_exc.SLAValidationError):
+ scenario.run({})
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ def test_run_last_command_raise_on_error(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.mock_ssh.SSH.from_node().execute.side_effect = ((0, '', ''),
- (0, '', ''),
- (0, '', ''),
- (0, '', ''),
- (0, '', ''),
- y_exc.SSHError)
- self.assertRaises(y_exc.SSHError, p.run, {})
- self.assertEqual(self.mock_ssh.SSH.from_node().execute.call_count, 6)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario.run({})
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index cdb91f66d..49578b383 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -358,35 +358,49 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.assertIsNotNone(self.topology)
def test__get_ip_flow_range_string(self):
- self.scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
result = '152.16.100.2-152.16.100.254'
self.assertEqual(result, self.s._get_ip_flow_range(
'152.16.100.2-152.16.100.254'))
- def test__get_ip_flow_range(self):
- self.scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
- result = '152.16.100.2-152.16.100.254'
- self.assertEqual(result, self.s._get_ip_flow_range({"tg__1": 'xe0'}))
+ def test__get_ip_flow_range_no_nodes(self):
+ self.assertEqual('0.0.0.0', self.s._get_ip_flow_range({}))
- @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.ipaddress')
- def test__get_ip_flow_range_no_node_data(self, mock_ipaddress):
- scenario_cfg = deepcopy(self.scenario_cfg)
- scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
+ def test__get_ip_flow_range_no_node_data(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {}
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('0.0.0.2-0.0.0.254', result)
- mock_ipaddress.ip_network.return_value = ipaddr = mock.Mock()
- ipaddr.hosts.return_value = []
+    def test__get_ip_flow_range_ipv4(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '192.168.1.15',
+ 'netmask': '255.255.255.128'}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('192.168.1.2-192.168.1.126', result)
- expected = '0.0.0.0'
- result = self.s._get_ip_flow_range({"tg__2": 'xe0'})
- self.assertEqual(result, expected)
+ def test__get_ip_flow_range_ipv4_mask_30(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '192.168.1.15', 'netmask': 30}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('192.168.1.15', result)
- def test__get_ip_flow_range_no_nodes(self):
- expected = '0.0.0.0'
- result = self.s._get_ip_flow_range({})
- self.assertEqual(result, expected)
+ def test__get_ip_flow_range_ipv6(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '2001::11', 'netmask': 64}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('2001::2-2001::ffff:ffff:ffff:fffe', result)
def test___get_traffic_flow(self):
self.scenario_cfg["traffic_options"]["flow"] = \
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index a606543e5..a1c27f5fb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -54,7 +54,8 @@ class VsperfTestCase(unittest.TestCase):
self._mock_SSH = mock.patch.object(ssh, 'SSH')
self.mock_SSH = self._mock_SSH.start()
- self.mock_SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
self.mock_subprocess_call = self._mock_subprocess_call.start()
@@ -104,40 +105,23 @@ class VsperfTestCase(unittest.TestCase):
def test_run_ok(self):
self.scenario.setup()
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
result = {}
self.scenario.run(result)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
def test_run_ok_setup_not_done(self):
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
result = {}
self.scenario.run(result)
self.assertTrue(self.scenario.setup_done)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_run_failed_vsperf_execution(self):
- self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
- (1, '', ''))
+ def test_run_ssh_command_call_counts(self):
+ self.scenario.run({})
- with self.assertRaises(RuntimeError):
- self.scenario.run({})
self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
-
- def test_run_failed_csv_report(self):
- self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
- (0, '', ''),
- (1, '', ''))
-
- with self.assertRaises(RuntimeError):
- self.scenario.run({})
- self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
+ self.mock_SSH.from_node().run.assert_called_once()
def test_run_sla_fail(self):
self.mock_SSH.from_node().execute.return_value = (
@@ -160,14 +144,21 @@ class VsperfTestCase(unittest.TestCase):
self.assertTrue('throughput_rx_fps was not collected by VSPERF'
in str(raised.exception))
+ def test_run_faulty_result_csv(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'faulty output not csv', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+ in str(raised.exception))
+
def test_run_sla_fail_metric_not_defined_in_sla(self):
del self.scenario_cfg['sla']['throughput_rx_fps']
scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
scenario.setup()
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
with self.assertRaises(y_exc.SLAValidationError) as raised:
scenario.run({})
self.assertTrue('throughput_rx_fps is not defined in SLA'
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index db6f9cc89..8bbe6911e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -20,6 +20,8 @@ import unittest
from yardstick import exceptions as y_exc
from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+from yardstick.common import exceptions as y_exc
+from yardstick import ssh
class VsperfDPDKTestCase(unittest.TestCase):
@@ -56,80 +58,51 @@ class VsperfDPDKTestCase(unittest.TestCase):
'action': 'monitor',
}
}
-
- self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- self._mock_ssh = mock.patch.object(vsperf_dpdk, 'ssh')
+ self._mock_ssh = mock.patch.object(ssh, 'SSH')
self.mock_ssh = self._mock_ssh.start()
self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
self.mock_subprocess_call = self._mock_subprocess_call.start()
+ mock_call_obj = mock.Mock()
+ mock_call_obj.execute.return_value = None
+ self.mock_subprocess_call.return_value = mock_call_obj
+
self._mock_log_info = mock.patch.object(vsperf_dpdk.LOG, 'info')
self.mock_log_info = self._mock_log_info.start()
+
self.addCleanup(self._cleanup)
+ self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
+ self.scenario.setup()
+
def _cleanup(self):
self._mock_ssh.stop()
self._mock_subprocess_call.stop()
self._mock_log_info.stop()
def test_setup(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
def test_teardown(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
self.scenario.teardown()
self.assertFalse(self.scenario.setup_done)
def test_is_dpdk_setup_no(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
+ self.mock_ssh.from_node().execute.return_value = (0, 'dummy', '')
- result = self.scenario._is_dpdk_setup()
- self.assertFalse(result)
+ self.assertFalse(self.scenario._is_dpdk_setup())
def test_is_dpdk_setup_yes(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_ssh.from_node().execute.return_value = (0, '', '')
- result = self.scenario._is_dpdk_setup()
- self.assertTrue(result)
+ self.assertTrue(self.scenario._is_dpdk_setup())
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_first(self, *args):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
+ self.mock_ssh.from_node().execute.return_value = (0, 'dummy', '')
self.scenario.dpdk_setup()
self.assertFalse(self.scenario._is_dpdk_setup())
@@ -137,89 +110,26 @@ class VsperfDPDKTestCase(unittest.TestCase):
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_next(self, *args):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
+ self.mock_ssh.from_node().execute.return_value = (0, '', '')
self.scenario.dpdk_setup()
self.assertTrue(self.scenario._is_dpdk_setup())
self.assertTrue(self.scenario.dpdk_setup_done)
- @mock.patch.object(time, 'sleep')
- def test_dpdk_setup_runtime_error(self, *args):
-
- # setup specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
- self.assertTrue(self.scenario.setup_done)
-
- self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
-
@mock.patch.object(subprocess, 'check_output')
- @mock.patch('time.sleep')
def test_run_ok(self, *args):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# run() specific mocks
- self.mock_subprocess_call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (
+ self.mock_ssh.from_node().execute.return_value = (
0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
result = {}
self.scenario.run(result)
-
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_run_failed_vsperf_execution(self):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, self.scenario.run, result)
-
- def test_run_falied_csv_report(self):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
- # run() specific mocks
- self.mock_subprocess_call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, self.scenario.run, result)
-
@mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
def test_vsperf_run_sla_fail(self, *args):
- self.scenario.setup()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (
+ self.mock_ssh.from_node().execute.return_value = (
0, 'throughput_rx_fps\r\n123456.000\r\n', '')
with self.assertRaises(y_exc.SLAValidationError) as raised:
@@ -232,10 +142,22 @@ class VsperfDPDKTestCase(unittest.TestCase):
@mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
def test_vsperf_run_sla_fail_metric_not_collected(self, *args):
+ self.mock_ssh.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps was not collected by VSPERF',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_metric_not_collected_faulty_csv(self, *args):
self.scenario.setup()
- self.mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+ self.mock_ssh.from_node().execute.return_value = (
+ 0, 'faulty output not csv', '')
with self.assertRaises(y_exc.SLAValidationError) as raised:
self.scenario.run({})
@@ -249,7 +171,7 @@ class VsperfDPDKTestCase(unittest.TestCase):
del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
self.scenario.setup()
- self.mock_ssh.SSH.from_node().execute.return_value = (
+ self.mock_ssh.from_node().execute.return_value = (
0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
with self.assertRaises(y_exc.SLAValidationError) as raised:
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 5844746ab..2ba53cb93 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -11,18 +11,18 @@
from __future__ import absolute_import
+import json
import unittest
import mock
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios.storage import storperf
# pylint: disable=unused-argument
# disable this for now because I keep forgetting mock patch arg ordering
-
-
def mocked_requests_config_post(*args, **kwargs):
class MockResponseConfigPost(object):
@@ -32,10 +32,24 @@ def mocked_requests_config_post(*args, **kwargs):
return MockResponseConfigPost(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "false"}',
+ '"stack_created": false}',
200)
+def mocked_requests_config_post_fail(*args, **kwargs):
+ class MockResponseConfigPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigPost(
+ '{"message": "ERROR: Parameter \'public_network\' is invalid: ' +
+ 'Error validating value \'foo\': Unable to find network with ' +
+ 'name or id \'foo\'"}',
+ 400)
+
+
def mocked_requests_config_get(*args, **kwargs):
class MockResponseConfigGet(object):
@@ -45,10 +59,47 @@ def mocked_requests_config_get(*args, **kwargs):
return MockResponseConfigGet(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "true"}',
+ '"stack_created": true}',
200)
+def mocked_requests_config_get_not_created(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{"stack_id": "",'
+ '"stack_created": false}',
+ 200)
+
+
+def mocked_requests_config_get_no_payload(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{}',
+ 200)
+
+
+def mocked_requests_initialize_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_get(*args, **kwargs):
class MockResponseJobGet(object):
@@ -73,6 +124,18 @@ def mocked_requests_job_post(*args, **kwargs):
"d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
+def mocked_requests_job_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_delete(*args, **kwargs):
class MockResponseJobDelete(object):
@@ -100,10 +163,7 @@ def mocked_requests_delete_failed(*args, **kwargs):
self.json_data = json_data
self.status_code = status_code
- if args[0] == "http://172.16.0.137:5000/api/v1.0/configurations":
- return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
-
- return MockResponseDeleteFailed('{}', 404)
+ return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
class StorPerfTestCase(unittest.TestCase):
@@ -119,11 +179,14 @@ class StorPerfTestCase(unittest.TestCase):
self.result = {}
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_config_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_config_get)
- def test_successful_setup(self, mock_post, mock_get):
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup(self, mock_get, mock_post):
+ mock_post.side_effect = [mocked_requests_config_post(),
+ mocked_requests_job_post()]
+ mock_get.side_effect = [mocked_requests_config_get(),
+ mocked_requests_job_get()]
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -146,14 +209,47 @@ class StorPerfTestCase(unittest.TestCase):
self.assertTrue(s.setup_done)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_job_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_job_get)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_job_delete)
- def test_successful_run(self, mock_post, mock_get, mock_delete):
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_unsuccessful(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_not_created
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_no_payload(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_no_payload
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup_config_post_failed(self, mock_get, mock_post):
+ mock_post.side_effect = mocked_requests_config_post_fail
+
+ args = {
+ "options": {
+ "public_network": "foo"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.setup)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v1_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -165,6 +261,74 @@ class StorPerfTestCase(unittest.TestCase):
"query_interval": 0,
"timeout": 60
}
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ "workload": "rs",
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ sample_output = '{"Status": "Completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+
+ expected_result = jsonutils.loads(sample_output)
+
+ s.run(self.result)
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/jobs',
+ json=jsonutils.loads(json.dumps(expected_post)))
+
+ self.assertEqual(self.result, expected_result)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v2_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
args = {
"options": options
@@ -179,13 +343,126 @@ class StorPerfTestCase(unittest.TestCase):
expected_result = jsonutils.loads(sample_output)
s.run(self.result)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
self.assertEqual(self.result, expected_result)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete)
- def test_successful_teardown(self, mock_delete):
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ self.assertRaises(RuntimeError, s.run, self.ctx)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(storperf.StorPerf, 'setup')
+ def test_run_calls_setup(self, mock_setup, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ 'timeout': 60,
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.run(self.result)
+
+ mock_setup.assert_called_once()
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.initialize_disks()
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks_post_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_initialize_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.initialize_disks)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch.object(requests, 'delete')
+ def test_teardown(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_job_delete
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -207,11 +484,12 @@ class StorPerfTestCase(unittest.TestCase):
s.teardown()
self.assertFalse(s.setup_done)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete_failed)
- def test_failed_teardown(self, mock_delete):
+ @mock.patch.object(requests, 'delete')
+ def test_teardown_request_delete_failed(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_delete_failed
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -230,4 +508,6 @@ class StorPerfTestCase(unittest.TestCase):
s = storperf.StorPerf(args, self.ctx)
- self.assertRaises(AssertionError, s.teardown(), self.result)
+ self.assertRaises(RuntimeError, s.teardown)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
diff --git a/yardstick/tests/unit/common/test_ansible_common.py b/yardstick/tests/unit/common/test_ansible_common.py
index 48d8a60c8..bf82f6288 100644
--- a/yardstick/tests/unit/common/test_ansible_common.py
+++ b/yardstick/tests/unit/common/test_ansible_common.py
@@ -12,28 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from __future__ import absolute_import
-
-import os
-import tempfile
+import collections
import shutil
-from collections import defaultdict
+import subprocess
+import tempfile
import mock
-import unittest
-
-from six.moves.configparser import ConfigParser
-from six.moves import StringIO
+from six import moves
+from six.moves import configparser
from yardstick.common import ansible_common
+from yardstick.tests.unit import base as ut_base
-PREFIX = 'yardstick.common.ansible_common'
+class OverwriteDictTestCase(ut_base.BaseUnitTestCase):
-class OverwriteDictTestCase(unittest.TestCase):
def test_overwrite_dict_cfg(self):
- c = ConfigParser(allow_no_value=True)
+ c = configparser.ConfigParser(allow_no_value=True)
d = {
"section_a": "empty_value",
"section_b": {"key_c": "Val_d", "key_d": "VAL_D"},
@@ -43,86 +38,78 @@ class OverwriteDictTestCase(unittest.TestCase):
# Python3 and Python2 convert empty values into None or ''
# we don't really care but we need to compare correctly for unittest
self.assertTrue(c.has_option("section_a", "empty_value"))
- self.assertEqual(sorted(c.items("section_b")), [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
+ self.assertEqual(sorted(c.items("section_b")),
+ [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
self.assertTrue(c.has_option("section_c", "key_c"))
self.assertTrue(c.has_option("section_c", "key_d"))
-class FilenameGeneratorTestCase(unittest.TestCase):
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
+class FilenameGeneratorTestCase(ut_base.BaseUnitTestCase):
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
def test__handle_existing_file(self, _):
- ansible_common.FileNameGenerator._handle_existing_file("/dev/null")
+ ansible_common.FileNameGenerator._handle_existing_file('/dev/null')
def test_get_generator_from_file(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "", "")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', '', '')
def test_get_generator_from_file_middle(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "",
- "null")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', '', 'null')
def test_get_generator_from_file_prefix(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "null",
- "middle")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', 'null', 'middle')
-class AnsibleNodeTestCase(unittest.TestCase):
- def test_ansible_node(self):
- ansible_common.AnsibleNode()
+class AnsibleNodeTestCase(ut_base.BaseUnitTestCase):
def test_ansible_node_len(self):
- a = ansible_common.AnsibleNode()
- len(a)
+ self.assertEqual(0, len(ansible_common.AnsibleNode()))
def test_ansible_node_repr(self):
- a = ansible_common.AnsibleNode()
- repr(a)
+ self.assertEqual('AnsibleNode<{}>', repr(ansible_common.AnsibleNode()))
def test_ansible_node_iter(self):
- a = ansible_common.AnsibleNode()
- for _ in a:
- pass
+ node = ansible_common.AnsibleNode(data={'a': 1, 'b': 2, 'c': 3})
+ for key in node:
+ self.assertIn(key, ('a', 'b', 'c'))
def test_is_role(self):
- a = ansible_common.AnsibleNode()
- self.assertFalse(a.is_role("", default="foo"))
+ node = ansible_common.AnsibleNode()
+ self.assertFalse(node.is_role('', default='foo'))
def test_ansible_node_get_tuple(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- self.assertEqual(a.get_tuple(), ('name', a))
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertEqual(node.get_tuple(), ('name', node))
def test_gen_inventory_line(self):
- a = ansible_common.AnsibleNode(defaultdict(str))
+ a = ansible_common.AnsibleNode(collections.defaultdict(str))
self.assertEqual(a.gen_inventory_line(), "")
def test_ansible_node_delitem(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- del a['name']
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertEqual(1, len(node))
+ del node['name']
+ self.assertEqual(0, len(node))
def test_ansible_node_getattr(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- self.assertIsNone(getattr(a, "nosuch", None))
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertIsNone(getattr(node, 'nosuch', None))
-class AnsibleNodeDictTestCase(unittest.TestCase):
- def test_ansible_node_dict(self):
- n = ansible_common.AnsibleNode
- ansible_common.AnsibleNodeDict(n, {})
+class AnsibleNodeDictTestCase(ut_base.BaseUnitTestCase):
def test_ansible_node_dict_len(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
- len(a)
+ self.assertEqual(0, len(a))
def test_ansible_node_dict_repr(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
- repr(a)
-
- def test_ansible_node_dict_iter(self):
- n = ansible_common.AnsibleNode
- a = ansible_common.AnsibleNodeDict(n, {})
- for _ in a:
- pass
+ self.assertEqual('{}', repr(a))
def test_ansible_node_dict_get(self):
n = ansible_common.AnsibleNode
@@ -144,12 +131,15 @@ class AnsibleNodeDictTestCase(unittest.TestCase):
["name ansible_ssh_pass=PASS ansible_user=user"])
-class AnsibleCommonTestCase(unittest.TestCase):
- def test_get_timeouts(self):
- self.assertAlmostEqual(ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
+class AnsibleCommonTestCase(ut_base.BaseUnitTestCase):
- def test__init__(self):
- ansible_common.AnsibleCommon({})
+ @staticmethod
+ def _delete_tmpdir(dir):
+ shutil.rmtree(dir)
+
+ def test_get_timeouts(self):
+ self.assertAlmostEqual(
+ ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
def test_reset(self):
a = ansible_common.AnsibleCommon({})
@@ -184,81 +174,68 @@ class AnsibleCommonTestCase(unittest.TestCase):
a.deploy_dir = "d"
self.assertEqual(a.deploy_dir, "d")
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_playbook_file_list(self, _):
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_playbook_file_list(self, *args):
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a._gen_ansible_playbook_file(["a"], d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_inventory_file(self, _, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a._gen_ansible_playbook_file(["a"], d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_inventory_file(self, *args):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon(nodes)
- a.gen_inventory_ini_dict()
- inv_context = a._gen_ansible_inventory_file(d)
- with inv_context:
- c = StringIO()
- inv_context.write_func(c)
- self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_playbook_file_list_multiple(self, _, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon(nodes)
+ a.gen_inventory_ini_dict()
+ inv_context = a._gen_ansible_inventory_file(d)
+ with inv_context:
+ c = moves.StringIO()
+ inv_context.write_func(c)
+ self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_playbook_file_list_multiple(self, *args):
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a._gen_ansible_playbook_file(["a", "b"], d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.Popen'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test_do_install_tmp_dir(self, _, mock_popen, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a._gen_ansible_playbook_file(["a", "b"], d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(subprocess, 'Popen')
+ @mock.patch.object(moves.builtins, 'open')
+ def test_do_install_tmp_dir(self, _, mock_popen, *args):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a.do_install('', d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.Popen'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test_execute_ansible_check(self, _, mock_popen, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a.do_install('', d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ @mock.patch.object(subprocess, 'Popen')
+ def test_execute_ansible_check(self, mock_popen, *args):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a.execute_ansible('', d, ansible_check=True, verbose=True)
- finally:
- os.rmdir(d)
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a.execute_ansible('', d, ansible_check=True, verbose=True)
def test_get_sut_info(self):
d = tempfile.mkdtemp()
a = ansible_common.AnsibleCommon({})
- try:
+ self.addCleanup(self._delete_tmpdir, d)
+ with mock.patch.object(a, '_exec_get_sut_info_cmd'):
a.get_sut_info(d)
- finally:
- shutil.rmtree(d)
def test_get_sut_info_not_exist(self):
a = ansible_common.AnsibleCommon({})
- try:
+ with self.assertRaises(OSError):
a.get_sut_info('/hello/world')
- except OSError:
- pass
diff --git a/yardstick/tests/unit/common/test_kubernetes_utils.py b/yardstick/tests/unit/common/test_kubernetes_utils.py
index bdc2c12d5..ba6b5f388 100644
--- a/yardstick/tests/unit/common/test_kubernetes_utils.py
+++ b/yardstick/tests/unit/common/test_kubernetes_utils.py
@@ -121,6 +121,23 @@ class DeleteCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
mock_delete_crd.delete_custom_resource_definition.\
assert_called_once_with('name', 'del_obj')
+ @mock.patch.object(client, 'V1DeleteOptions', return_value='del_obj')
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api, mock_delobj):
+ mock_delete_crd = mock.Mock()
+ mock_delete_crd.delete_custom_resource_definition.side_effect = rest.ApiException(
+ status=404)
+
+ mock_get_api.return_value = mock_delete_crd
+ kubernetes_utils.delete_custom_resource_definition('name', skip_codes=[404])
+
+ mock_delobj.assert_called_once()
+ mock_delete_crd.delete_custom_resource_definition.assert_called_once_with(
+ 'name', 'del_obj')
+
+ mock_log.info.assert_called_once()
+
class GetCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
@@ -159,7 +176,7 @@ class GetCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
kubernetes_utils.get_custom_resource_definition('kind')
-class CreateNetworkTestCase(base.BaseUnitTestCase):
+class GetNetworkTestCase(base.BaseUnitTestCase):
@mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
def test_execute_correct(self, mock_get_api):
mock_api = mock.Mock()
@@ -167,29 +184,97 @@ class CreateNetworkTestCase(base.BaseUnitTestCase):
group = 'group.com'
version = mock.Mock()
plural = 'networks'
+ name = 'net_one'
+
+ kubernetes_utils.get_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ mock_api.get_cluster_custom_object.assert_called_once_with(
+ group, version, plural, name)
+
+ mock_api.reset_mock()
+ kubernetes_utils.get_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ mock_api.get_namespaced_custom_object.assert_called_once_with(
+ group, version, 'default', plural, name)
+
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.get_cluster_custom_object.side_effect = rest.ApiException(404)
+ mock_api.get_namespaced_custom_object.side_effect = rest.ApiException(404)
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ name = 'net_one'
+
+ network_obj = kubernetes_utils.get_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ self.assertIsNone(network_obj)
+
+ mock_api.reset_mock()
+ network_obj = kubernetes_utils.get_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ self.assertIsNone(network_obj)
+
+
+class CreateNetworkTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_execute_correct(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = None
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
body = mock.Mock()
+ name = 'net_one'
kubernetes_utils.create_network(
- constants.SCOPE_CLUSTER, group, version, plural, body)
+ constants.SCOPE_CLUSTER, group, version, plural, body, name)
mock_api.create_cluster_custom_object.assert_called_once_with(
group, version, plural, body)
mock_api.reset_mock()
kubernetes_utils.create_network(
- constants.SCOPE_NAMESPACED, group, version, plural, body)
+ constants.SCOPE_NAMESPACED, group, version, plural, body, name)
mock_api.create_namespaced_custom_object.assert_called_once_with(
group, version, 'default', plural, body)
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_network_already_created(self, mock_get_net, mock_get_api):
+        mock_get_net.return_value = mock.Mock()
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ body = mock.Mock()
+ name = 'net_one'
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_CLUSTER, group, version, plural, body, name)
+ mock_api.create_cluster_custom_object.assert_not_called()
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, body, name)
+ mock_api.create_namespaced_custom_object.assert_not_called()
@mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
- def test_execute_exception(self, mock_get_api):
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_execute_exception(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = None
mock_api = mock.Mock()
mock_api.create_cluster_custom_object.side_effect = rest.ApiException
mock_get_api.return_value = mock_api
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.create_network(
constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
- mock.ANY)
+ mock.ANY, mock.ANY)
class DeleteNetworkTestCase(base.BaseUnitTestCase):
@@ -223,6 +308,19 @@ class DeleteNetworkTestCase(base.BaseUnitTestCase):
constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
mock.ANY)
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_cluster_custom_object.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_network(
+ constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
+ mock.ANY, skip_codes=[404])
+
+ mock_log.info.assert_called_once()
+
class DeletePodTestCase(base.BaseUnitTestCase):
@mock.patch.object(kubernetes_utils, 'get_core_api')
@@ -243,10 +341,107 @@ class DeletePodTestCase(base.BaseUnitTestCase):
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
+ @mock.patch.object(kubernetes_utils, 'LOG')
@mock.patch.object(kubernetes_utils, 'get_core_api')
- def test_execute_skip_exception(self, mock_get_api):
+ def test_execute_skip_exception(self, mock_get_api, *args):
mock_api = mock.Mock()
mock_api.delete_namespaced_pod.side_effect = rest.ApiException(status=404)
mock_get_api.return_value = mock_api
kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
+
+
+class DeleteServiceTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(client, "V1DeleteOptions")
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api, mock_options):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ mock_options.return_value = None
+ kubernetes_utils.delete_service("name", "default", None)
+ mock_api.delete_namespaced_service.assert_called_once_with(
+ "name", 'default', None)
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_service.side_effect = rest.ApiException(status=200)
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_service(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_skip_exception(self, mock_get_api, *args):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_service.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_service(mock.ANY, skip_codes=[404])
+
+
+class DeleteReplicationControllerTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_replication_controller(
+ "name", "default", body=None)
+
+ mock_api.delete_namespaced_replication_controller.assert_called_once_with(
+ "name", "default", None)
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_replication_controller.side_effect = (
+ rest.ApiException(status=200)
+ )
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_replication_controller(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_replication_controller.side_effect = (
+ rest.ApiException(status=404)
+ )
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_replication_controller(mock.ANY, skip_codes=[404])
+
+ mock_log.info.assert_called_once()
+
+
+class DeleteConfigMapTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_config_map("name", body=None)
+ mock_api.delete_namespaced_config_map.assert_called_once_with(
+ "name", "default", None
+ )
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_config_map.side_effect = rest.ApiException(status=200)
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_config_map(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_config_map.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_config_map(mock.ANY, skip_codes=[404])
+ mock_log.info.assert_called_once()
diff --git a/yardstick/tests/unit/common/test_template_format.py b/yardstick/tests/unit/common/test_template_format.py
index 56253efbc..6e4827e16 100644
--- a/yardstick/tests/unit/common/test_template_format.py
+++ b/yardstick/tests/unit/common/test_template_format.py
@@ -22,16 +22,21 @@ from yardstick.common import template_format
class TemplateFormatTestCase(unittest.TestCase):
- def test_parse_to_value_exception(self):
+ def test_parse_scanner(self):
- # TODO(elfoley): Don't hide the error that occurs in
- # template_format.parse
- # TODO(elfoley): Separate these tests; one per error type
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = yaml.scanner.ScannerError()
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_parser(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = yaml.parser.ParserError()
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_reader(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = \
yaml.reader.ReaderError('', '', '', '', '')
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 446afdd38..ef4142148 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -12,12 +12,14 @@ import errno
import importlib
import ipaddress
from itertools import product, chain
-import mock
import os
-import six
-from six.moves import configparser
import socket
import time
+import threading
+
+import mock
+import six
+from six.moves import configparser
import unittest
import yardstick
@@ -194,6 +196,14 @@ class TestMacAddressToHex(ut_base.BaseUnitTestCase):
self.assertEqual(utils.mac_address_to_hex_list("ea:3e:e1:9a:99:e8"),
['0xea', '0x3e', '0xe1', '0x9a', '0x99', '0xe8'])
+ def test_mac_address_to_hex_list_too_short_mac(self):
+ with self.assertRaises(exceptions.InvalidMacAddress):
+ utils.mac_address_to_hex_list("ea:3e:e1:9a")
+
+ def test_mac_address_to_hex_list_no_int_mac(self):
+ with self.assertRaises(exceptions.InvalidMacAddress):
+ utils.mac_address_to_hex_list("invalid_mac")
+
class TranslateToStrTestCase(ut_base.BaseUnitTestCase):
@@ -1113,6 +1123,19 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
u'123:4567:89ab:cdef:123:4567:89ab:cdef/129',
]
+ def test_make_ipv4_address(self):
+ for addr in self.GOOD_IP_V4_ADDRESS_STR_LIST:
+ # test with no mask
+ expected = ipaddress.IPv4Address(addr)
+ self.assertEqual(utils.make_ipv4_address(addr), expected, addr)
+
+ def test_make_ipv4_address_error(self):
+ addr_list = self.INVALID_IP_ADDRESS_STR_LIST +\
+ self.GOOD_IP_V6_ADDRESS_STR_LIST
+ for addr in addr_list:
+ self.assertRaises(Exception, utils.make_ipv4_address, addr)
+
+
def test_safe_ip_address(self):
addr_list = self.GOOD_IP_V4_ADDRESS_STR_LIST
for addr in addr_list:
@@ -1196,6 +1219,20 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
for value in chain(value_iter, self.INVALID_IP_ADDRESS_STR_LIST):
self.assertEqual(utils.ip_to_hex(value), value)
+ def test_get_mask_from_ip_range_ipv4(self):
+ ip_str = '1.1.1.1'
+ for mask in range(8, 30):
+ ip = ipaddress.ip_network(ip_str + '/' + str(mask), strict=False)
+ result = utils.get_mask_from_ip_range(ip[2], ip[-2])
+ self.assertEqual(mask, result)
+
+ def test_get_mask_from_ip_range_ipv6(self):
+ ip_str = '2001::1'
+ for mask in range(8, 120):
+ ip = ipaddress.ip_network(ip_str + '/' + str(mask), strict=False)
+ result = utils.get_mask_from_ip_range(ip[2], ip[-2])
+ self.assertEqual(mask, result)
+
class SafeDecodeUtf8TestCase(ut_base.BaseUnitTestCase):
@@ -1263,6 +1300,10 @@ class TimerTestCase(ut_base.BaseUnitTestCase):
time.sleep(1.1)
self.assertEqual(2, len(iterations))
+ def test_delta_time_sec(self):
+ with utils.Timer() as timer:
+ self.assertIsInstance(timer.delta_time_sec(), float)
+
class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
@@ -1284,6 +1325,15 @@ class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
utils.wait_until_true(lambda: False, timeout=1, sleep=1,
exception=MyTimeoutException))
+ def _run_thread(self):
+ with self.assertRaises(exceptions.WaitTimeout):
+ utils.wait_until_true(lambda: False, timeout=1, sleep=1)
+
+ def test_timeout_no_main_thread(self):
+ new_thread = threading.Thread(target=self._run_thread)
+ new_thread.start()
+ new_thread.join(timeout=3)
+
class SendSocketCommandTestCase(unittest.TestCase):
@@ -1309,3 +1359,35 @@ class SendSocketCommandTestCase(unittest.TestCase):
mock_socket_obj.connect_ex.assert_called_once_with(('host', 22))
mock_socket_obj.sendall.assert_called_once_with(six.b('command'))
mock_socket_obj.close.assert_called_once()
+
+
+class GetPortMacTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ssh_client = mock.Mock()
+ self.ssh_client.execute.return_value = (0, 'foo ', '')
+
+ def test_ssh_client_execute_called(self):
+ utils.get_port_mac(self.ssh_client, 99)
+ self.ssh_client.execute.assert_called_once_with(
+ "ifconfig |grep HWaddr |grep 99 |awk '{print $5}' ",
+ raise_on_error=True)
+
+ def test_return_value(self):
+ self.assertEqual('foo', utils.get_port_mac(self.ssh_client, 99))
+
+
+class GetPortIPTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ssh_client = mock.Mock()
+ self.ssh_client.execute.return_value = (0, 'foo ', '')
+
+ def test_ssh_client_execute_called(self):
+ utils.get_port_ip(self.ssh_client, 99)
+ self.ssh_client.execute.assert_called_once_with(
+ "ifconfig 99 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ",
+ raise_on_error=True)
+
+ def test_return_value(self):
+ self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
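
The new get_mask_from_ip_range tests above pick two addresses from inside a network and expect the original prefix length back. A minimal stand-alone sketch of how such a helper can recover the prefix from the range endpoints (an assumption about the approach, not a copy of the in-tree utils code):

    import ipaddress

    def get_mask_from_ip_range(begin_ip, end_ip):
        # Prefix length = address width minus the number of bits that differ
        # between the first and last address of the range.
        begin = ipaddress.ip_address(begin_ip)
        end = ipaddress.ip_address(end_ip)
        return begin.max_prefixlen - (int(begin) ^ int(end)).bit_length()

    net = ipaddress.ip_network(u'1.1.1.1/24', strict=False)
    assert get_mask_from_ip_range(net[2], net[-2]) == 24
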
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index 541855aa8..e078d70ad 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -16,6 +16,8 @@ import mock
import IxNetwork
import unittest
+from copy import deepcopy
+
from yardstick.common import exceptions
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -28,36 +30,30 @@ TRAFFIC_PARAMETERS = {
'id': 1,
'bidir': 'False',
'duration': 60,
- 'iload': '100',
+ 'rate': 10000.5,
+ 'rate_unit': 'fps',
'outer_l2': {
- 'framesize': {'64B': '25', '256B': '75'}
+ 'framesize': {'64B': '25', '256B': '75'},
+ 'QinQ': None
},
'outer_l3': {
'count': 512,
+ 'seed': 1,
'dscp': 0,
- 'dstip4': '152.16.40.20',
'proto': 'udp',
- 'srcip4': '152.16.100.20',
- 'ttl': 32
- },
- 'outer_l3v4': {
- 'dscp': 0,
- 'dstip4': '152.16.40.20',
- 'proto': 'udp',
- 'srcip4': '152.16.100.20',
- 'ttl': 32
- },
- 'outer_l3v6': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
+ 'ttl': 32,
+ 'dstip': '152.16.40.20',
+ 'srcip': '152.16.100.20',
+ 'dstmask': 24,
+ 'srcmask': 24
},
'outer_l4': {
- 'dstport': '2001',
- 'srcport': '1234'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 2001,
+ 'srcport': 1234,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
},
@@ -65,37 +61,30 @@ TRAFFIC_PARAMETERS = {
'id': 2,
'bidir': 'False',
'duration': 60,
- 'iload': '100',
+ 'rate': 75.2,
+ 'rate_unit': '%',
'outer_l2': {
- 'framesize': {'128B': '35', '1024B': '65'}
+ 'framesize': {'128B': '35', '1024B': '65'},
+ 'QinQ': None
},
'outer_l3': {
'count': 1024,
+ 'seed': 1,
'dscp': 0,
- 'dstip4': '152.16.100.20',
'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
- },
- 'outer_l3v4': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
- },
- 'outer_l3v6': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
+ 'ttl': 32,
+ 'dstip': '2001::10',
+ 'srcip': '2021::10',
+ 'dstmask': 64,
+ 'srcmask': 64
},
'outer_l4': {
- 'dstport': '1234',
- 'srcport': '2001'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 1234,
+ 'srcport': 2001,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
}
@@ -108,6 +97,8 @@ class TestIxNextgen(unittest.TestCase):
self.ixnet = mock.Mock()
self.ixnet.execute = mock.Mock()
self.ixnet.getRoot.return_value = 'my_root'
+ self.ixnet_gen = ixnet_api.IxNextgen()
+ self.ixnet_gen._ixnet = self.ixnet
def test_get_config(self):
tg_cfg = {
@@ -145,64 +136,50 @@ class TestIxNextgen(unittest.TestCase):
self.assertEqual(result, expected)
def test__get_config_element_by_flow_group_name(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
- ['fg_01']]
- ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
- output = ixnet_gen._get_config_element_by_flow_group_name(
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ self.ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
+ output = self.ixnet_gen._get_config_element_by_flow_group_name(
'flow_group_01')
self.assertEqual('traffic_item/configElement:flow_group_01', output)
def test__get_config_element_by_flow_group_name_no_match(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
- ['fg_01']]
- ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
- output = ixnet_gen._get_config_element_by_flow_group_name(
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ self.ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
+ output = self.ixnet_gen._get_config_element_by_flow_group_name(
'flow_group_01')
self.assertIsNone(output)
def test__get_stack_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
+ self.ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name') as \
+ self.ixnet_gen, '_get_config_element_by_flow_group_name') as \
mock_get_cfg_element:
mock_get_cfg_element.return_value = 'cfg_element'
- output = ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
+ output = self.ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
self.assertEqual(['tcp1', 'tcp2'], output)
def test__get_stack_item_no_config_element(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
+ self.ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
def test__get_field_in_stack_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
- output = ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
+ self.ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ output = self.ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
self.assertEqual('field2', output)
def test__get_field_in_stack_item_no_field_present(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ self.ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
with self.assertRaises(exceptions.IxNetworkFieldNotPresentInStackItem):
- ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
+ self.ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
def test__parse_framesize(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
framesize = {'64B': '75', '512b': '25'}
- output = ixnet_gen._parse_framesize(framesize)
+ output = self.ixnet_gen._parse_framesize(framesize)
self.assertEqual(2, len(output))
self.assertIn([64, 64, 75], output)
self.assertIn([512, 512, 25], output)
@@ -210,55 +187,44 @@ class TestIxNextgen(unittest.TestCase):
@mock.patch.object(IxNetwork, 'IxNet')
def test_connect(self, mock_ixnet):
mock_ixnet.return_value = self.ixnet
- ixnet_gen = ixnet_api.IxNextgen()
- with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+ with mock.patch.object(self.ixnet_gen, 'get_config') as mock_config:
mock_config.return_value = {'machine': 'machine_fake',
'port': 'port_fake',
'version': 12345}
- ixnet_gen.connect(mock.ANY)
+ self.ixnet_gen.connect(mock.ANY)
self.ixnet.connect.assert_called_once_with(
'machine_fake', '-port', 'port_fake', '-version', '12345')
mock_config.assert_called_once()
def test_connect_invalid_config_no_machine(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'port': 'port_fake',
'version': '12345'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_invalid_config_no_port(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'machine': 'machine_fake',
'version': '12345'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_invalid_config_no_version(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'machine': 'machine_fake',
'port': 'port_fake'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_no_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet_gen.get_config = mock.Mock(return_value={})
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_clear_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.clear_config()
+ self.ixnet_gen.clear_config()
self.ixnet.execute.assert_called_once_with('newConfig')
@mock.patch.object(ixnet_api, 'log')
@@ -268,11 +234,9 @@ class TestIxNextgen(unittest.TestCase):
'chassis': '1.1.1.1',
'cards': ['1', '2'],
'ports': ['2', '2']}
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = config
+ self.ixnet_gen._cfg = config
- self.assertIsNone(ixnet_gen.assign_ports())
+ self.assertIsNone(self.ixnet_gen.assign_ports())
self.assertEqual(self.ixnet.execute.call_count, 2)
self.assertEqual(self.ixnet.commit.call_count, 4)
self.assertEqual(self.ixnet.getAttribute.call_count, 2)
@@ -284,25 +248,19 @@ class TestIxNextgen(unittest.TestCase):
'chassis': '1.1.1.1',
'cards': ['1', '2'],
'ports': ['3', '4']}
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = config
- ixnet_gen.assign_ports()
+ self.ixnet_gen._cfg = config
+ self.ixnet_gen.assign_ports()
mock_log.warning.assert_called()
def test_assign_ports_no_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = {}
- self.assertRaises(KeyError, ixnet_gen.assign_ports)
+ self.ixnet_gen._cfg = {}
+ self.assertRaises(KeyError, self.ixnet_gen.assign_ports)
def test__create_traffic_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
self.ixnet.add.return_value = 'my_new_traffic_item'
self.ixnet.remapIds.return_value = ['my_traffic_item_id']
- ixnet_gen._create_traffic_item()
+ self.ixnet_gen._create_traffic_item()
self.ixnet.add.assert_called_once_with(
'my_root/traffic', 'trafficItem')
self.ixnet.setMultiAttribute.assert_called_once_with(
@@ -313,41 +271,35 @@ class TestIxNextgen(unittest.TestCase):
'-trackBy', 'trafficGroupId0')
def test__create_flow_groups(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
- ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
- ixnet_gen._create_flow_groups()
- ixnet_gen.ixnet.add.assert_has_calls([
+ self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
+ self.ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
+ self.ixnet_gen._create_flow_groups()
+ self.ixnet_gen.ixnet.add.assert_has_calls([
mock.call('traffic_item', 'endpointSet'),
mock.call('traffic_item', 'endpointSet')])
- ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
mock.call('endp1', '-name', '1', '-sources', ['1/protocols'],
'-destinations', ['2/protocols']),
mock.call('endp2', '-name', '2', '-sources', ['2/protocols'],
'-destinations', ['1/protocols'])])
def test__append_protocol_to_stack(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
+ self.ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
self.ixnet.execute.assert_called_with(
'append', 'prev_element',
'my_root/traffic/protocolTemplate:"my_protocol"')
def test__setup_config_elements(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
+ self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
['cfg_element']]
- with mock.patch.object(ixnet_gen, '_append_procotol_to_stack') as \
+ with mock.patch.object(self.ixnet_gen, '_append_procotol_to_stack') as \
mock_append_proto:
- ixnet_gen._setup_config_elements()
+ self.ixnet_gen._setup_config_elements()
mock_append_proto.assert_has_calls([
mock.call(ixnet_api.PROTO_UDP, 'cfg_element/stack:"ethernet-1"'),
mock.call(ixnet_api.PROTO_IPV4, 'cfg_element/stack:"ethernet-1"')])
- ixnet_gen.ixnet.setAttribute.assert_has_calls([
+ self.ixnet_gen.ixnet.setAttribute.assert_has_calls([
mock.call('cfg_element/frameRateDistribution', '-portDistribution',
'splitRateEvenly'),
mock.call('cfg_element/frameRateDistribution',
@@ -359,150 +311,214 @@ class TestIxNextgen(unittest.TestCase):
def test_create_traffic_model(self, mock__setup_config_elements,
mock__create_flow_groups,
mock__create_traffic_item):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.create_traffic_model()
+ self.ixnet_gen.create_traffic_model()
mock__create_traffic_item.assert_called_once()
mock__create_flow_groups.assert_called_once()
mock__setup_config_elements.assert_called_once()
def test__update_frame_mac(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_get_field_in_stack_item') as \
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item') as \
mock_get_field:
mock_get_field.return_value = 'field_descriptor'
- ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
+ self.ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
mock_get_field.assert_called_once_with('ethernet_descriptor', 'field')
- ixnet_gen.ixnet.setMultiAttribute(
+ self.ixnet_gen.ixnet.setMultiAttribute(
'field_descriptor', '-singleValue', 'mac', '-fieldValue', 'mac',
'-valueType', 'singleValue')
- ixnet_gen.ixnet.commit.assert_called_once()
+ self.ixnet_gen.ixnet.commit.assert_called_once()
def test_update_frame(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value='cfg_element'), \
- mock.patch.object(ixnet_gen, '_update_frame_mac') as \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac') as \
mock_update_frame, \
- mock.patch.object(ixnet_gen, '_get_stack_item') as \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item') as \
mock_get_stack_item:
mock_get_stack_item.side_effect = [['item1'], ['item2'],
['item3'], ['item4']]
- ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 50)
- self.assertEqual(6, len(ixnet_gen.ixnet.setMultiAttribute.mock_calls))
+ self.assertEqual(6, len(self.ixnet_gen.ixnet.setMultiAttribute.mock_calls))
self.assertEqual(4, len(mock_update_frame.mock_calls))
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('cfg_element/transmissionControl',
+ '-type', 'continuous', '-duration', 50)
+ ])
+
+ def test_update_frame_qinq(self):
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='cfg_element'), \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac'),\
+ mock.patch.object(self.ixnet_gen, '_get_stack_item',
+ return_value='item'), \
+ mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field'):
+
+ traffic_parameters = deepcopy(TRAFFIC_PARAMETERS)
+ traffic_parameters[UPLINK]['outer_l2']['QinQ'] = {
+ 'S-VLAN': {'id': 128,
+ 'priority': 1,
+ 'cfi': 0},
+ 'C-VLAN': {'id': 512,
+ 'priority': 0,
+ 'cfi': 2}
+ }
+
+ self.ixnet_gen.update_frame(traffic_parameters, 50)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('field', '-auto', 'false', '-singleValue', '0x88a8',
+ '-fieldValue', '0x88a8', '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 1,
+ '-fieldValue', 1, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 128,
+ '-fieldValue', 128, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 512,
+ '-fieldValue', 512, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 2,
+ '-fieldValue', 2, '-valueType', 'singleValue')
+ ], any_order=True)
+
def test_update_frame_flow_not_present(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 40)
def test_get_statistics(self):
- ixnet_gen = ixnet_api.IxNextgen()
port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
- with mock.patch.object(ixnet_gen, '_build_stats_map') as \
+ with mock.patch.object(self.ixnet_gen, '_build_stats_map') as \
mock_build_stats:
- ixnet_gen.get_statistics()
+ self.ixnet_gen.get_statistics()
mock_build_stats.assert_has_calls([
- mock.call(port_statistics, ixnet_gen.PORT_STATS_NAME_MAP),
- mock.call(flow_statistics, ixnet_gen.LATENCY_NAME_MAP)])
+ mock.call(port_statistics, self.ixnet_gen.PORT_STATS_NAME_MAP),
+ mock.call(flow_statistics, self.ixnet_gen.LATENCY_NAME_MAP)])
def test__update_ipv4_address(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_get_field_in_stack_item',
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
return_value='field_desc'):
- ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
- 100, '255.255.255.0', 25)
- ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ self.ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
+ 100, 26, 25)
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
'field_desc', '-seed', 100, '-fixedBits', '192.168.1.1',
- '-randomMask', '255.255.255.0', '-valueType', 'random',
+ '-randomMask', '0.0.0.63', '-valueType', 'random',
'-countValue', 25)
+ def test__update_udp_port(self):
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_udp_port(mock.ANY, mock.ANY, 1234,
+ 2, 0, 2)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc',
+ '-auto', 'false',
+ '-seed', 1,
+ '-fixedBits', 1234,
+ '-randomMask', 0,
+ '-valueType', 'random',
+ '-countValue', 1)
+
def test_update_ip_packet(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_update_ipv4_address') as \
+ with mock.patch.object(self.ixnet_gen, '_update_ipv4_address') as \
mock_update_add, \
- mock.patch.object(ixnet_gen, '_get_stack_item'), \
- mock.patch.object(ixnet_gen,
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
'_get_config_element_by_flow_group_name', return_value='celm'):
- ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
self.assertEqual(4, len(mock_update_add.mock_calls))
def test_update_ip_packet_exception_no_config_element(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen,
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+
+ def test_update_l4(self):
+ with mock.patch.object(self.ixnet_gen, '_update_udp_port') as \
+ mock_update_udp, \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name', return_value='celm'):
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ self.assertEqual(4, len(mock_update_udp.mock_calls))
+
+ def test_update_l4_exception_no_config_element(self):
+ with mock.patch.object(self.ixnet_gen,
'_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ def test_update_l4_exception_no_supported_proto(self):
+ traffic_parameters = {
+ UPLINK: {
+ 'id': 1,
+ 'outer_l3': {
+ 'proto': 'unsupported',
+ },
+ },
+ }
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='celm'):
+ with self.assertRaises(exceptions.IXIAUnsupportedProtocol):
+ self.ixnet_gen.update_l4(traffic_parameters)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic(self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'stopped', 'started', 'started', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_traffic_running(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'started', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(4, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_wait_for_traffic_to_stop(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'started', 'started', 'started', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(4, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_wait_for_traffic_start(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'stopped', 'stopped', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
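
In the reworked test__update_ipv4_address above, a /26 prefix now maps to the IxNetwork '-randomMask' value '0.0.0.63'. The mask appears to be the host (wildcard) portion of the prefix; a small sketch under that assumption (the helper name is hypothetical):

    import ipaddress

    def prefix_to_random_mask(prefixlen, width=32):
        # Expose the host bits of a prefix as a dotted quad,
        # e.g. /26 -> 6 host bits -> 0.0.0.63.
        host_bits = width - prefixlen
        return str(ipaddress.IPv4Address((1 << host_bits) - 1))

    assert prefix_to_random_mask(26) == '0.0.0.63'
    assert prefix_to_random_mask(24) == '0.0.0.255'
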
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
index 55276af58..0dc3e0579 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_base.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
@@ -80,9 +80,33 @@ class TrafficProfileConfigTestCase(unittest.TestCase):
tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
tp_config_obj = base.TrafficProfileConfig(tp_config)
self.assertEqual({'64B': 100}, tp_config_obj.packet_sizes)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
+ tp_config_obj.duration)
+
+ def test__init_set_duration(self):
+ tp_config = {'traffic_profile': {'duration': 15}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
self.assertEqual(base.TrafficProfileConfig.DEFAULT_SCHEMA,
tp_config_obj.schema)
- self.assertEqual(base.TrafficProfileConfig.DEFAULT_FRAME_RATE,
+ self.assertEqual(float(base.TrafficProfileConfig.DEFAULT_FRAME_RATE),
tp_config_obj.frame_rate)
- self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
- tp_config_obj.duration)
+ self.assertEqual(15, tp_config_obj.duration)
+
+ def test__parse_rate(self):
+ tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
+ self.assertEqual((100.0, 'fps'), tp_config_obj._parse_rate('100 '))
+ self.assertEqual((200.5, 'fps'), tp_config_obj._parse_rate('200.5'))
+ self.assertEqual((300.8, 'fps'), tp_config_obj._parse_rate('300.8fps'))
+ self.assertEqual((400.2, 'fps'),
+ tp_config_obj._parse_rate('400.2 fps'))
+ self.assertEqual((500.3, '%'), tp_config_obj._parse_rate('500.3%'))
+ self.assertEqual((600.1, '%'), tp_config_obj._parse_rate('600.1 %'))
+
+ def test__parse_rate_exception(self):
+ tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
+ with self.assertRaises(exceptions.TrafficProfileRate):
+ tp_config_obj._parse_rate('100Fps')
+ with self.assertRaises(exceptions.TrafficProfileRate):
+ tp_config_obj._parse_rate('100 kbps')
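
The new _parse_rate tests accept bare numbers plus 'fps' or '%' units, with or without a space, and reject anything else such as '100Fps' or '100 kbps'. A self-contained sketch of a parser with that behaviour (the regex and the local exception class are assumptions, not the actual TrafficProfileConfig code):

    import re

    class TrafficProfileRate(Exception):
        """Stand-in for yardstick.common.exceptions.TrafficProfileRate."""

    _RATE_RE = re.compile(r'^\s*(\d+(?:\.\d+)?)\s*(fps|%)?\s*$')

    def parse_rate(rate):
        match = _RATE_RE.match(str(rate))
        if not match:
            raise TrafficProfileRate(rate)
        return float(match.group(1)), match.group(2) or 'fps'

    assert parse_rate('100 ') == (100.0, 'fps')
    assert parse_rate('300.8fps') == (300.8, 'fps')
    assert parse_rate('600.1 %') == (600.1, '%')
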
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 3bb8b9192..27ab4607b 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -446,6 +446,38 @@ class TestIXIARFC2544Profile(unittest.TestCase):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(t_profile_data)
self.assertEqual(12345678, r_f_c2544_profile.rate)
+ def test__get_ip_and_mask_range(self):
+ ip_range = '1.2.0.2-1.2.255.254'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ ip, mask = r_f_c2544_profile._get_ip_and_mask(ip_range)
+ self.assertEqual('1.2.0.2', ip)
+ self.assertEqual(16, mask)
+
+ def test__get_ip_and_mask_single(self):
+ ip_range = '192.168.1.10'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ ip, mask = r_f_c2544_profile._get_ip_and_mask(ip_range)
+ self.assertEqual('192.168.1.10', ip)
+ self.assertIsNone(mask)
+
+ def test__get_fixed_and_mask_range(self):
+ fixed_mask = '8-48'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(8, fixed)
+ self.assertEqual(48, mask)
+
+ def test__get_fixed_and_mask_single(self):
+ fixed_mask = 1234
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(1234, fixed)
+ self.assertEqual(0, mask)
+
def test__get_ixia_traffic_profile_default_args(self):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
self.TRAFFIC_PROFILE)
@@ -543,87 +575,77 @@ class TestIXIARFC2544Profile(unittest.TestCase):
def test_get_drop_percentage_completed(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
def test_get_drop_percentage_over_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 0.05)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_under_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.min_rate)
@mock.patch.object(ixia_rfc2544.LOG, 'info')
def test_get_drop_percentage_not_flow(self, *args):
samples = {'iface_name_1':
- {'RxThroughput': 0, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 0},
+ {'in_packets': 1000, 'out_packets': 0},
'iface_name_2':
- {'RxThroughput': 0, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 0}
+ {'in_packets': 1005, 'out_packets': 0}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(0, samples['RxThroughput'])
+ self.assertEqual(0.0, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(100, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_first_run(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 1, first_run=True)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(33.45, rfc2544_profile.rate)
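
The updated expectations in the get_drop_percentage tests above can be re-derived from the packet counters alone, assuming the default 30 second duration is used to turn counts into per-second throughput:

    out_packets = 1000 + 1007                 # 2007 frames sent
    in_packets = 1000 + 1005                  # 2005 frames received
    duration = 30                             # assumed default duration [s]

    tx_throughput = round(out_packets / float(duration), 3)            # 66.9
    rx_throughput = round(in_packets / float(duration), 3)             # 66.833
    drop_pct = round((out_packets - in_packets) * 100.0 / out_packets, 6)

    assert (tx_throughput, rx_throughput, drop_pct) == (66.9, 66.833, 0.099651)

This would also account for the 33.45 first-run rate asserted above, i.e. half of the 66.9 TxThroughput.
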
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py b/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py
new file mode 100644
index 000000000..08542b4f1
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import pktgen
+from yardstick.tests.unit import base as ut_base
+
+
+class TestPktgenTrafficProfile(ut_base.BaseUnitTestCase):
+
+ def setUp(self):
+ self._tp_config = {'traffic_profile': {}}
+ self._host = 'localhost'
+ self._port = '12345'
+ self.tp = pktgen.PktgenTrafficProfile(self._tp_config)
+ self.tp.init(self._host, self._port)
+ self._mock_send_socket_command = mock.patch.object(
+ utils, 'send_socket_command', return_value=0)
+ self.mock_send_socket_command = self._mock_send_socket_command.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_send_socket_command.stop()
+
+ def test_start(self):
+ self.tp.start()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'pktgen.start("0")')
+
+ def test_stop(self):
+ self.tp.stop()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'pktgen.stop("0")')
+
+ def test_rate(self):
+ rate = 75
+ self.tp.rate(rate)
+ command = 'pktgen.set("0", "rate", 75)'
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, command)
+
+ def test_clear_all_stats(self):
+ self.tp.clear_all_stats()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'clr')
+
+ def test_help(self):
+ self.tp.help()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'help')
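
The mocked calls in this new test module assume the profile drives DPDK pktgen through its Lua socket with plain text commands. A rough usage sketch reusing utils.send_socket_command as mocked above; the host and port values are placeholders:

    from yardstick.common import utils

    host, port = 'localhost', 12345

    utils.send_socket_command(host, port, 'pktgen.set("0", "rate", 75)')
    utils.send_socket_command(host, port, 'pktgen.start("0")')
    # ... run for the configured duration ...
    utils.send_socket_command(host, port, 'pktgen.stop("0")')
    utils.send_socket_command(host, port, 'clr')
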
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index 0cf93f9ae..2e0331e8e 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -238,15 +238,17 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
'in_packets': 4040,
'latency': 'Latency2'}}
]
- output = rfc2544_profile.get_drop_percentage(samples, 0, 0, False)
+ completed, output = rfc2544_profile.get_drop_percentage(
+ samples, 0, 0, False)
expected = {'DropPercentage': 0.3963,
'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
'RxThroughput': 312.5,
'TxThroughput': 304.5,
'CurrentDropPercentage': 0.3963,
- 'Rate': 100,
+ 'Rate': 100.0,
'Throughput': 312.5}
self.assertEqual(expected, output)
+ self.assertFalse(completed)
class PortPgIDMapTestCase(base.BaseUnitTestCase):
@@ -266,6 +268,7 @@ class PortPgIDMapTestCase(base.BaseUnitTestCase):
port_pg_id_map.increase_pg_id()
self.assertEqual([1, 2], port_pg_id_map.get_pg_ids(10))
self.assertEqual([3], port_pg_id_map.get_pg_ids(20))
+ self.assertEqual([], port_pg_id_map.get_pg_ids(30))
def test_increase_pg_id_no_port(self):
port_pg_id_map = rfc2544.PortPgIDMap()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index 01fc19aa0..69a5fb484 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -450,8 +450,8 @@ class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase):
# duplicate config and add invalid action
acl_config = copy.deepcopy(self.ACL_CONFIG)
acl_config['access-list-entries'][0]["actions"].append({"xnat": {}})
- self.assertRaises(exceptions.AclUknownActionTemplate,
- setup_helper.get_flows_config, acl_config)
+ self.assertRaises(exceptions.AclUnknownActionTemplate,
+ setup_helper.get_flows_config, acl_config)
@mock.patch.object(AclApproxSetupEnvSetupEnvHelper, 'get_default_flows')
def test_get_flows_config_invalid_action_param(self, get_default_flows):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index c35d2db35..4a1d8c30e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1091,7 +1091,8 @@ class TestClientResourceHelper(unittest.TestCase):
self.assertIs(client_resource_helper._connect(client), client)
@mock.patch.object(ClientResourceHelper, '_build_ports')
- @mock.patch.object(ClientResourceHelper, '_run_traffic_once')
+ @mock.patch.object(ClientResourceHelper, '_run_traffic_once',
+ return_value=(True, mock.ANY))
def test_run_traffic(self, mock_run_traffic_once, mock_build_ports):
client_resource_helper = ClientResourceHelper(mock.Mock())
client = mock.Mock()
@@ -1103,7 +1104,7 @@ class TestClientResourceHelper(unittest.TestCase):
as mock_terminated:
mock_connect.return_value = client
type(mock_terminated).value = mock.PropertyMock(
- side_effect=[0, 1, lambda x: x])
+ side_effect=[0, 1, 1, lambda x: x])
client_resource_helper.run_traffic(traffic_profile, mq_producer)
mock_build_ports.assert_called_once()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py
new file mode 100644
index 000000000..d341b970b
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+
+import mock
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+from yardstick.network_services.vnf_generic.vnf import tg_pktgen
+from yardstick.tests.unit import base as ut_base
+
+
+class PktgenTrafficGenTestCase(ut_base.BaseUnitTestCase):
+
+ SERVICE_PORTS = [{'port': constants.LUA_PORT,
+ 'node_port': '34501'}]
+ VNFD = {'mgmt-interface': {'ip': '1.2.3.4',
+ 'service_ports': SERVICE_PORTS},
+ 'vdu': [{'external-interface': 'interface'}],
+ 'benchmark': {'kpi': 'fake_kpi'}
+ }
+
+ def setUp(self):
+ self._id = uuid.uuid1().int
+ self._mock_vnf_consumer = mock.patch.object(vnf_base,
+ 'GenericVNFConsumer')
+ self.mock_vnf_consumer = self._mock_vnf_consumer.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_vnf_consumer.stop()
+
+ def test__init(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ self.assertTrue(isinstance(tg, (vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint)))
+
+ def test_run_traffic(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ mock_tp = mock.Mock()
+ with mock.patch.object(tg, '_is_running', return_value=True):
+ tg.run_traffic(mock_tp)
+
+ mock_tp.init.assert_called_once_with(tg._node_ip, tg._lua_node_port)
+
+ def test__get_lua_node_port(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ service_ports = [{'port': constants.LUA_PORT,
+ 'node_port': '12345'}]
+ self.assertEqual(12345, tg._get_lua_node_port(service_ports))
+
+ def test__get_lua_node_port_no_lua_port(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ service_ports = [{'port': '333'}]
+ self.assertIsNone(tg._get_lua_node_port(service_ports))
+
+ def test__is_running(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ with mock.patch.object(tg, '_traffic_profile'):
+ self.assertTrue(tg._is_running())
+
+ def test__is_running_exception(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ with mock.patch.object(tg, '_traffic_profile') as mock_tp:
+ mock_tp.help.side_effect = exceptions.PktgenActionError()
+ self.assertFalse(tg._is_running())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index ddb63242e..ec0e6aa6d 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -18,6 +18,7 @@ import mock
import six
import unittest
+from yardstick.common import utils
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -57,6 +58,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
def test_run_traffic(self):
mock_tprofile = mock.Mock()
+ mock_tprofile.config.duration = 10
mock_tprofile.get_drop_percentage.return_value = True, 'fake_samples'
ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
ixia_rhelper.rfc_helper = mock.Mock()
@@ -64,7 +66,8 @@ class TestIxiaResourceHelper(unittest.TestCase):
ixia_rhelper.vnfd_helper.port_pairs.all_ports = []
with mock.patch.object(ixia_rhelper, 'generate_samples'), \
mock.patch.object(ixia_rhelper, '_build_ports'), \
- mock.patch.object(ixia_rhelper, '_initialize_client'):
+ mock.patch.object(ixia_rhelper, '_initialize_client'), \
+ mock.patch.object(utils, 'wait_until_true'):
ixia_rhelper.run_traffic(mock_tprofile)
self.assertEqual('fake_samples', ixia_rhelper._queue.get())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 6aba41006..a5b9f258e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -30,13 +30,14 @@ class TestTrexRfcResouceHelper(unittest.TestCase):
mock_traffic_profile.config.duration = 3
mock_traffic_profile.execute_traffic.return_value = ('fake_ports',
'port_pg_id_map')
- mock_traffic_profile.get_drop_percentage.return_value = 'percentage'
+ mock_traffic_profile.get_drop_percentage.return_value = (True,
+ 'percentage')
rfc_rh = tg_rfc2544_trex.TrexRfcResourceHelper(mock_setup_helper)
rfc_rh.TRANSIENT_PERIOD = 0
rfc_rh.rfc2544_helper = mock.Mock()
with mock.patch.object(rfc_rh, '_get_samples') as mock_get_samples:
- rfc_rh._run_traffic_once(mock_traffic_profile)
+ self.assertTrue(rfc_rh._run_traffic_once(mock_traffic_profile))
mock_traffic_profile.execute_traffic.assert_called_once_with(rfc_rh)
mock_traffic_profile.stop_traffic.assert_called_once_with(rfc_rh)
diff --git a/yardstick/tests/unit/orchestrator/test_kubernetes.py b/yardstick/tests/unit/orchestrator/test_kubernetes.py
index a73a4a132..2d5c4a26f 100644
--- a/yardstick/tests/unit/orchestrator/test_kubernetes.py
+++ b/yardstick/tests/unit/orchestrator/test_kubernetes.py
@@ -529,9 +529,10 @@ class NetworkObjectTestCase(base.BaseUnitTestCase):
net_obj._version = 'version'
net_obj._plural = 'plural'
net_obj._template = 'template'
+ net_obj._name = 'fake_name'
net_obj.create()
mock_create_network.assert_called_once_with(
- 'scope', 'group', 'version', 'plural', 'template')
+ 'scope', 'group', 'version', 'plural', 'template', 'fake_name')
@mock.patch.object(kubernetes_utils, 'delete_network')
def test_delete(self, mock_delete_network):
@@ -543,7 +544,7 @@ class NetworkObjectTestCase(base.BaseUnitTestCase):
net_obj._name = 'name'
net_obj.delete()
mock_delete_network.assert_called_once_with(
- 'scope', 'group', 'version', 'plural', 'name')
+ 'scope', 'group', 'version', 'plural', 'name', skip_codes=[404])
class ServiceNodePortObjectTestCase(base.BaseUnitTestCase):
@@ -611,7 +612,8 @@ class ServiceNodePortObjectTestCase(base.BaseUnitTestCase):
def test_delete(self, mock_delete_service):
nodeport_object = kubernetes.ServiceNodePortObject('fake_name')
nodeport_object.delete()
- mock_delete_service.assert_called_once_with('fake_name-service')
+ mock_delete_service.assert_called_once_with('fake_name-service',
+ skip_codes=[404])
class KubernetesTemplate(base.BaseUnitTestCase):
diff --git a/yardstick/tests/unit/service/test_environment.py b/yardstick/tests/unit/service/test_environment.py
index be4882e30..779e6eaa0 100644
--- a/yardstick/tests/unit/service/test_environment.py
+++ b/yardstick/tests/unit/service/test_environment.py
@@ -9,9 +9,8 @@
import mock
-from yardstick.common.exceptions import UnsupportedPodFormatError
-from yardstick.service.environment import Environment
-from yardstick.service.environment import AnsibleCommon
+from yardstick.common import exceptions
+from yardstick.service import environment
from yardstick.tests.unit import base as ut_base
@@ -31,15 +30,17 @@ class EnvironmentTestCase(ut_base.BaseUnitTestCase):
]
}
- with mock.patch.object(AnsibleCommon, 'gen_inventory_ini_dict'), \
- mock.patch.object(AnsibleCommon, 'get_sut_info',
- return_value={'node1': {}}):
- env = Environment(pod=pod_info)
+ with mock.patch.object(environment.AnsibleCommon,
+ 'gen_inventory_ini_dict'), \
+ mock.patch.object(environment.AnsibleCommon, 'get_sut_info',
+ return_value={'node1': {}}), \
+ mock.patch.object(environment.Environment, '_format_sut_info'):
+ env = environment.Environment(pod=pod_info)
env.get_sut_info()
def test_get_sut_info_pod_str(self):
pod_info = 'nodes'
- env = Environment(pod=pod_info)
- with self.assertRaises(UnsupportedPodFormatError):
+ env = environment.Environment(pod=pod_info)
+ with self.assertRaises(exceptions.UnsupportedPodFormatError):
env.get_sut_info()
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index b727e821d..71929f1a2 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -617,3 +617,26 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.put_file('a', 'b')
mock_put_sftp.assert_called_once()
+
+ def test_execute(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(return_value=0)
+ exit_code, _, _ = auto_connect_ssh.execute('')
+ self.assertEqual(exit_code, 0)
+
+ def _mock_run(self, *args, **kwargs):
+ if args[0] == 'ls':
+ if kwargs.get('raise_on_error'):
+ raise exceptions.SSHError(error_msg='Command error')
+ return 1
+ return 0
+
+ def test_execute_command_error(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(side_effect=self._mock_run)
+ self.assertRaises(exceptions.SSHError, auto_connect_ssh.execute, 'ls',
+ raise_on_error=True)
+ exit_code, _, _ = auto_connect_ssh.execute('ls')
+ self.assertNotEqual(exit_code, 0)