-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc074.rst | 72
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua | 99
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml | 2
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml | 1
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml | 18
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py | 1
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 92
-rw-r--r--  yardstick/common/exceptions.py | 4
-rw-r--r--  yardstick/common/utils.py | 22
-rw-r--r--  yardstick/network_services/traffic_profile/base.py | 1
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 8
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py | 13
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 46
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py | 36
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py | 17
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py | 340
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 16
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 50
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 15
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py | 5
36 files changed, 722 insertions, 169 deletions
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
index 92cd51439..261a8bd95 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
@@ -19,16 +19,27 @@ Yardstick Test Case Description TC074
|metric | Storage performance |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Storperf integration with yardstick. The purpose of StorPerf |
-| | is to provide a tool to measure block and object storage |
-| | performance in an NFVI. When complemented with a |
-| | characterization of typical VF storage performance |
-| | requirements, it can provide pass/fail thresholds for test, |
-| | staging, and production NFVI environments. |
-| | |
-| | The benchmarks developed for block and object storage will |
-| | be sufficiently varied to provide a good preview of expected |
-| | storage performance behavior for any type of VNF workload. |
+|test purpose | To evaluate and report on the Cinder volume performance. |
+| | |
+| | This testcase integrates with OPNFV StorPerf to measure |
+| | block performance of the underlying Cinder drivers. Many |
+| | options are supported, and even the root disk (Glance |
+| | ephemeral storage) can be profiled. |
+| | |
+| | The fundamental concept of the test case is to first fill |
+| | the volumes with random data to ensure reported metrics |
+| | are indicative of continued usage and not skewed by |
+| | transitional performance while the underlying storage |
+| | driver allocates blocks. |
+| | The metrics for filling the volumes with random data |
+| | are not reported in the final results. The test also |
+| | ensures the volumes are performing at a consistent level |
+| | of performance by measuring metrics every minute, and |
+| | comparing the trend of the metrics over the run. By |
+| | evaluating the min and max values, as well as the slope of |
+| | the trend, it can make the determination that the metrics |
+| | are stable, and not fluctuating beyond industry standard |
+| | norms. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc074.yaml |
@@ -38,7 +49,8 @@ Yardstick Test Case Description TC074
| | * public_network: "ext-net" - name of public network |
| | * volume_size: 2 - cinder volume size |
| | * block_sizes: "4096" - data block size |
-| | * queue_depths: "4" |
+| | * queue_depths: "4" - the number of simultaneous I/Os |
+| | to perform at all times |
| | * StorPerf_ip: "192.168.200.2" |
| | * query_interval: 10 - state query interval |
| | * timeout: 600 - maximum allowed job time |
@@ -50,7 +62,11 @@ Yardstick Test Case Description TC074
| | performance in an NFVI. |
| | |
| | StorPerf is delivered as a Docker container from |
-| | https://hub.docker.com/r/opnfv/storperf/tags/. |
+| | https://hub.docker.com/r/opnfv/storperf-master/tags/. |
+| | |
+| | The underlying tool used is FIO, and StorPerf supports |
+| | any FIO option in order to tailor the test to the exact |
+| | workload needed. |
| | |
+--------------+--------------------------------------------------------------+
|references | Storperf_ |
@@ -80,9 +96,17 @@ Yardstick Test Case Description TC074
| | - rr: 100% Read, random access |
| | - wr: 100% Write, random access |
| | - rw: 70% Read / 30% write, random access |
-| | * nossd: Do not perform SSD style preconditioning. |
-| | * nowarm: Do not perform a warmup prior to |
| | measurements. |
+| | * workloads={json maps} |
+| | This parameter supersedes the workload and calls the V2.0 |
+| | API in StorPerf. It allows for greater control of the |
+| | parameters to be passed to FIO. For example, running a |
+| | random read/write with a mix of 90% read and 10% write |
+| | would be expressed as follows: |
+| | {"9010randrw": {"rw":"randrw","rwmixread": "90"}} |
+| | Note: This must be passed in as a string, so don't forget |
+| | to escape or otherwise properly deal with the quotes. |
+| | |
| | * report= [job_id] |
| | Query the status of the supplied job_id and report on |
| | metrics. If a workload is supplied, will report on only |
@@ -92,8 +116,7 @@ Yardstick Test Case Description TC074
| | |
+--------------+--------------------------------------------------------------+
|pre-test | If you do not have an Ubuntu 14.04 image in Glance, you will |
-|conditions | need to add one. A key pair for launching agents is also |
-| | required. |
+|conditions | need to add one. |
| | |
| | Storperf is required to be installed in the environment. |
| | There are two possible methods for Storperf installation: |
@@ -126,10 +149,21 @@ Yardstick Test Case Description TC074
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The Storperf is installed and Ubuntu 14.04 image is stored |
-| | in glance. TC is invoked and logs are produced and stored. |
+|step 1 | Yardstick calls StorPerf to create the heat stack with the |
+| | number of VMs and size of Cinder volumes specified. The |
+| | VMs will be on their own private subnet, and take floating |
+| | IP addresses from the specified public network. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick calls StorPerf to fill all the volumes with |
+| | random data. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Yardstick calls StorPerf to perform the series of tests |
+| | specified by the workload, queue depths and block sizes. |
| | |
-| | Result: Logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 4 | Yardstick calls StorPerf to delete the stack it created. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | None. Storage performance results are fetched and stored. |
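
The steady-state criterion described in the test purpose above (per-minute samples whose min/max spread and trend slope must stay within bounds) can be sketched as follows; this is only an illustration of the idea with an assumed 20% tolerance, not StorPerf's actual implementation:

    # Illustrative steady-state check over per-minute samples (e.g. IOPS).
    # Not StorPerf's code; the 20% range/slope tolerance is an assumed value.
    def is_steady_state(samples, tolerance=0.20):
        """Return True when the sample window is stable within the tolerance."""
        n = len(samples)
        if n < 2:
            return False
        mean = sum(samples) / float(n)
        # Range check: every sample must stay within +/- tolerance of the mean.
        if (max(samples) > mean * (1 + tolerance) or
                min(samples) < mean * (1 - tolerance)):
            return False
        # Slope check: least-squares slope over the window, expressed as the
        # total drift across the window relative to the mean.
        xs = list(range(n))
        x_mean = sum(xs) / float(n)
        slope = (sum((x - x_mean) * (y - mean) for x, y in zip(xs, samples)) /
                 sum((x - x_mean) ** 2 for x in xs))
        return abs(slope * (n - 1)) <= mean * tolerance

    print(is_steady_state([410.0, 402.5, 405.1, 398.9, 404.3,
                           401.7, 399.8, 403.2, 400.5, 402.0]))  # True
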
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
index a70ea658b..60f21bd70 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
@@ -94,7 +94,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
@@ -113,7 +113,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
index a70ea658b..60f21bd70 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
@@ -94,7 +94,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
@@ -113,7 +113,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
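
The random=/rand_offset pair in the two generator configs above encodes the destination IP as a 32-bit template in which literal bits are fixed and X bits are randomized per packet. A small decoder sketch (illustrative only, not part of the patch) showing which values each octet of such a template can take:

    # Illustrative decoder for a PROX 32-bit "random=" destination-IP template;
    # '0'/'1' are fixed bits, 'X' bits are randomized. Not part of the patch.
    from itertools import product

    def octet_values(template_byte):
        """Enumerate all values an 8-bit template such as '0000101X' can take."""
        positions = [i for i, c in enumerate(template_byte) if c == 'X']
        values = set()
        for bits in product('01', repeat=len(positions)):
            byte = list(template_byte)
            for pos, bit in zip(positions, bits):
                byte[pos] = bit
            values.add(int(''.join(byte), 2))
        return sorted(values)

    template = "0000101XXXXXXXX11XXX0000XXXXXXX1"
    for i in range(4):
        print(octet_values(template[i * 8:(i + 1) * 8]))
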
diff --git a/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg b/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
index 7d350bd91..c191d29d5 100644
--- a/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
@@ -14,7 +14,7 @@
#
[lua]
-lpm4 = dofile("ipv4.lua")
+lpm4 = dofile("ipv4_bng.lua")
user_table = dofile("gre_table.lua")
[eal options]
diff --git a/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg b/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
index f65b7cbf9..b873fb9af 100644
--- a/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
@@ -14,7 +14,7 @@
#
[lua]
-lpm4 = dofile("ipv4.lua")
+lpm4 = dofile("ipv4_bng.lua")
user_table = dofile("gre_table.lua")
dscp_table = dofile("dscp.lua")
diff --git a/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua b/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua
new file mode 100644
index 000000000..22697b06a
--- /dev/null
+++ b/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua
@@ -0,0 +1,99 @@
+-- Copyright (c) 2016-2017 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+require("parameters")
+
+local lpm4 = {}
+lpm4.next_hops = {
+ {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac(tester_mac1), mpls = 0x112},
+ {id = 1, port_id = 1, ip = ip("2.1.1.1"), mac = mac(tester_mac1), mpls = 0x212},
+ {id = 2, port_id = 0, ip = ip("3.1.1.1"), mac = mac(tester_mac3), mpls = 0x312},
+ {id = 3, port_id = 1, ip = ip("4.1.1.1"), mac = mac(tester_mac3), mpls = 0x412},
+ {id = 4, port_id = 0, ip = ip("5.1.1.1"), mac = mac(tester_mac1), mpls = 0x512},
+ {id = 5, port_id = 1, ip = ip("6.1.1.1"), mac = mac(tester_mac1), mpls = 0x612},
+ {id = 6, port_id = 0, ip = ip("7.1.1.1"), mac = mac(tester_mac3), mpls = 0x712},
+ {id = 7, port_id = 1, ip = ip("8.1.1.1"), mac = mac(tester_mac3), mpls = 0x812},
+ {id = 8, port_id = 0, ip = ip("9.1.1.1"), mac = mac(tester_mac1), mpls = 0x912},
+ {id = 9, port_id = 1, ip = ip("10.1.1.1"), mac = mac(tester_mac1), mpls = 0x1012},
+ {id = 10, port_id = 0, ip = ip("11.1.1.1"), mac = mac(tester_mac3), mpls = 0x1112},
+ {id = 11, port_id = 1, ip = ip("12.1.1.1"), mac = mac(tester_mac3), mpls = 0x1212},
+ {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac(tester_mac1), mpls = 0x1312},
+ {id = 13, port_id = 1, ip = ip("14.1.1.1"), mac = mac(tester_mac1), mpls = 0x1412},
+ {id = 14, port_id = 0, ip = ip("15.1.1.1"), mac = mac(tester_mac3), mpls = 0x1512},
+ {id = 15, port_id = 1, ip = ip("16.1.1.1"), mac = mac(tester_mac3), mpls = 0x1612},
+ {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac(tester_mac1), mpls = 0x1712},
+ {id = 17, port_id = 1, ip = ip("18.1.1.1"), mac = mac(tester_mac1), mpls = 0x1812},
+ {id = 18, port_id = 0, ip = ip("19.1.1.1"), mac = mac(tester_mac3), mpls = 0x1912},
+ {id = 19, port_id = 1, ip = ip("20.1.1.1"), mac = mac(tester_mac3), mpls = 0x2012},
+ {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac(tester_mac1), mpls = 0x2112},
+ {id = 21, port_id = 1, ip = ip("22.1.1.1"), mac = mac(tester_mac1), mpls = 0x2212},
+ {id = 22, port_id = 0, ip = ip("23.1.1.1"), mac = mac(tester_mac3), mpls = 0x2312},
+ {id = 23, port_id = 1, ip = ip("24.1.1.1"), mac = mac(tester_mac3), mpls = 0x2412},
+ {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac(tester_mac1), mpls = 0x2512},
+ {id = 25, port_id = 1, ip = ip("26.1.1.1"), mac = mac(tester_mac1), mpls = 0x2612},
+ {id = 26, port_id = 0, ip = ip("27.1.1.1"), mac = mac(tester_mac3), mpls = 0x2712},
+ {id = 27, port_id = 1, ip = ip("28.1.1.1"), mac = mac(tester_mac3), mpls = 0x2812},
+ {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac(tester_mac1), mpls = 0x2912},
+ {id = 29, port_id = 1, ip = ip("30.1.1.1"), mac = mac(tester_mac1), mpls = 0x3012},
+ {id = 30, port_id = 0, ip = ip("31.1.1.1"), mac = mac(tester_mac3), mpls = 0x3112},
+ {id = 31, port_id = 1, ip = ip("32.1.1.1"), mac = mac(tester_mac3), mpls = 0x3212},
+ {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac(tester_mac1), mpls = 0x3312},
+ {id = 33, port_id = 1, ip = ip("34.1.1.1"), mac = mac(tester_mac1), mpls = 0x3412},
+ {id = 34, port_id = 0, ip = ip("35.1.1.1"), mac = mac(tester_mac3), mpls = 0x3512},
+ {id = 35, port_id = 1, ip = ip("36.1.1.1"), mac = mac(tester_mac3), mpls = 0x3612},
+ {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac(tester_mac1), mpls = 0x3712},
+ {id = 37, port_id = 1, ip = ip("38.1.1.1"), mac = mac(tester_mac1), mpls = 0x3812},
+ {id = 38, port_id = 0, ip = ip("39.1.1.1"), mac = mac(tester_mac3), mpls = 0x3912},
+ {id = 39, port_id = 1, ip = ip("40.1.1.1"), mac = mac(tester_mac3), mpls = 0x4012},
+ {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac(tester_mac1), mpls = 0x4112},
+ {id = 41, port_id = 1, ip = ip("42.1.1.1"), mac = mac(tester_mac1), mpls = 0x4212},
+ {id = 42, port_id = 0, ip = ip("43.1.1.1"), mac = mac(tester_mac3), mpls = 0x4312},
+ {id = 43, port_id = 1, ip = ip("44.1.1.1"), mac = mac(tester_mac3), mpls = 0x4412},
+ {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac(tester_mac1), mpls = 0x4512},
+ {id = 45, port_id = 1, ip = ip("46.1.1.1"), mac = mac(tester_mac1), mpls = 0x4612},
+ {id = 46, port_id = 0, ip = ip("47.1.1.1"), mac = mac(tester_mac3), mpls = 0x4712},
+ {id = 47, port_id = 1, ip = ip("48.1.1.1"), mac = mac(tester_mac3), mpls = 0x4812},
+ {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac(tester_mac1), mpls = 0x4912},
+ {id = 49, port_id = 1, ip = ip("50.1.1.1"), mac = mac(tester_mac1), mpls = 0x5012},
+ {id = 50, port_id = 0, ip = ip("51.1.1.1"), mac = mac(tester_mac3), mpls = 0x5112},
+ {id = 51, port_id = 1, ip = ip("52.1.1.1"), mac = mac(tester_mac3), mpls = 0x5212},
+ {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac(tester_mac1), mpls = 0x5312},
+ {id = 53, port_id = 1, ip = ip("54.1.1.1"), mac = mac(tester_mac1), mpls = 0x5412},
+ {id = 54, port_id = 0, ip = ip("55.1.1.1"), mac = mac(tester_mac3), mpls = 0x5512},
+ {id = 55, port_id = 1, ip = ip("56.1.1.1"), mac = mac(tester_mac3), mpls = 0x5612},
+ {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac(tester_mac1), mpls = 0x5712},
+ {id = 57, port_id = 1, ip = ip("58.1.1.1"), mac = mac(tester_mac1), mpls = 0x5812},
+ {id = 58, port_id = 0, ip = ip("59.1.1.1"), mac = mac(tester_mac3), mpls = 0x5912},
+ {id = 59, port_id = 1, ip = ip("60.1.1.1"), mac = mac(tester_mac3), mpls = 0x6012},
+ {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac(tester_mac1), mpls = 0x6112},
+ {id = 61, port_id = 1, ip = ip("62.1.1.1"), mac = mac(tester_mac1), mpls = 0x6212},
+ {id = 62, port_id = 0, ip = ip("63.1.1.1"), mac = mac(tester_mac3), mpls = 0x6312},
+ {id = 63, port_id = 1, ip = ip("64.1.1.1"), mac = mac(tester_mac3), mpls = 0x6412},
+}
+
+lpm4.routes = {};
+
+base_ip = 10 * 2^24;
+
+for i = 1,2^13 do
+ res = ip(base_ip + (1 * 2^12) * (i - 1));
+
+ lpm4.routes[i] = {
+ cidr = {ip = res, depth = 24},
+ next_hop_id = (i - 1) % 64,
+ }
+end
+
+return lpm4
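
The loop at the end of ipv4_bng.lua above builds 2^13 routes of depth /24, starting at 10.0.0.0, spaced 4096 addresses apart and cycled over the 64 next hops. A short Python equivalent (illustrative only) of what that table contains:

    # Illustrative Python equivalent of the ipv4_bng.lua route generation:
    # 8192 /24 routes starting at 10.0.0.0, spaced 4096 addresses apart,
    # cycled over the 64 next hops. Not part of the patch.
    import ipaddress

    BASE_IP = 10 * 2 ** 24          # 10.0.0.0
    STEP = 2 ** 12                  # 4096 addresses -> third-octet steps of 16
    NUM_ROUTES = 2 ** 13            # 8192 routes
    NUM_NEXT_HOPS = 64

    routes = []
    for i in range(NUM_ROUTES):
        prefix = ipaddress.ip_address(BASE_IP + STEP * i)
        routes.append({'cidr': '%s/24' % prefix,
                       'next_hop_id': i % NUM_NEXT_HOPS})

    print(routes[0])    # {'cidr': '10.0.0.0/24', 'next_hop_id': 0}
    print(routes[-1])   # {'cidr': '11.255.240.0/24', 'next_hop_id': 63}
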
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
index 1711c561a..f86913941 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
prox_generate_parameter: True
tg__0:
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
index a7d2d3846..707fc1d30 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
"configs/dscp.lua" : ""
prox_generate_parameter: True
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
index e4cd546bc..d580bd8cc 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
prox_generate_parameter: True
tg__0:
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
index 60002f0b1..7f447b164 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
"configs/dscp.lua" : ""
prox_generate_parameter: True
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
index 98b1bf96d..c1acb69a4 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
index ee0415371..54f42b2bc 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
index 19f083646..06fb220da 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
index 95fa0b6d8..f6a12eb31 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
index c267e7677..194bcd978 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
@@ -43,6 +43,7 @@ traffic_profile:
traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index 507491446..906793740 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -29,6 +29,7 @@ traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: True
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 3cbd7cd62..6e2f8ec7c 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -29,6 +29,7 @@ traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: True
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index edff3612e..cfc5f1ea3 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -43,6 +43,7 @@ traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
injection_time: {{ injection_time }}
+ enable_latency: True
uplink_0:
ipv4:
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
index fe8423d25..d08dbaa6e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -15,20 +15,30 @@ description: >
{% set public_network = public_network or "ext-net" %}
{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
+{% set workload = workload or "" %}
+{% set workloads = workloads or "" %}
+{% set agent_count = agent_count or 1 %}
+{% set block_sizes = block_sizes or "4096" %}
+{% set queue_depths = queue_depths or "4" %}
+{% set steady_state_samples = steady_state_samples or 10 %}
+{% set volume_size = volume_size or 4 %}
scenarios:
-
type: StorPerf
options:
- agent_count: 1
+ agent_count: {{agent_count}}
agent_image: "Ubuntu-16.04"
agent_flavor: "storperf"
public_network: {{public_network}}
- volume_size: 4
- block_sizes: "4096"
- queue_depths: "4"
+ volume_size: {{volume_size}}
+ block_sizes: {{block_sizes}}
+ queue_depths: {{queue_depths}}
StorPerf_ip: {{StorPerf_ip}}
query_interval: 10
timeout: 300
+ workload: {{workload}}
+ workloads: {{workloads}}
+ steady_state_samples: {{steady_state_samples}}
runner:
type: Iteration
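
The Jinja defaults above can be overridden per run; as the userguide notes, a v2.0-style workloads map has to be supplied as a properly escaped string. A minimal sketch of serializing such an override (the command in the trailing comment assumes yardstick's --task-args option and is not taken from this change):

    # Illustrative only: serializing task arguments for opnfv_yardstick_tc074,
    # including a v2.0-style "workloads" map. The values are examples.
    import json

    workloads = {"9010randrw": {"rw": "randrw", "rwmixread": "90"}}
    task_args = json.dumps({"workloads": workloads,
                            "queue_depths": "1,4",
                            "block_sizes": "4096,16384"})
    print(task_args)
    # Assumed invocation (not part of this change):
    #   yardstick task start tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml \
    #       --task-args '<the JSON printed above>'
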
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 53abd586b..4c79a4931 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -34,6 +34,8 @@ class BaremetalAttacker(BaseAttacker):
__attacker_type__ = 'bare-metal-down'
def setup(self):
+ # baremetal down needs to be recovered even when the SLA passes
+ self.mandatory = True
LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d67a16b98..7871cc918 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -63,6 +63,7 @@ class BaseAttacker(object):
self.data = {}
self.setup_done = False
self.intermediate_variables = {}
+ self.mandatory = False
@staticmethod
def get_attacker_cls(attacker_cfg):
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 7f976fdbc..fdfe7cbbe 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -88,9 +88,9 @@ class ServiceHA(base.Scenario):
def teardown(self):
"""scenario teardown"""
- # only recover when sla not pass
- if not self.sla_pass:
- for attacker in self.attackers:
+ # recover when mandatory or when the SLA did not pass
+ for attacker in self.attackers:
+ if attacker.mandatory or not self.sla_pass:
attacker.recover()
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index f0b2361d6..8093cd2d2 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -8,15 +8,16 @@
##############################################################################
from __future__ import absolute_import
-import os
import logging
+import os
import time
-import requests
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios import base
+
LOG = logging.getLogger(__name__)
@@ -43,12 +44,6 @@ class StorPerf(base.Scenario):
wr: 100% Write, random access
rw: 70% Read / 30% write, random access
- nossd (Optional):
- Do not perform SSD style preconditioning.
-
- nowarm (Optional):
- Do not perform a warmup prior to measurements.
-
report = [job_id] (Optional):
Query the status of the supplied job_id and report on metrics.
If a workload is supplied, will report on only that subset.
@@ -79,10 +74,13 @@ class StorPerf(base.Scenario):
setup_query_content = jsonutils.loads(
setup_query.content)
- if setup_query_content["stack_created"]:
- self.setup_done = True
+ if ("stack_created" in setup_query_content and
+ setup_query_content["stack_created"]):
LOG.debug("stack_created: %s",
setup_query_content["stack_created"])
+ return True
+
+ return False
def setup(self):
"""Set the configuration."""
@@ -111,9 +109,13 @@ class StorPerf(base.Scenario):
elif setup_res.status_code == 200:
LOG.info("stack_id: %s", setup_res_content["stack_id"])
- while not self.setup_done:
- self._query_setup_state()
- time.sleep(self.query_interval)
+ while not self._query_setup_state():
+ time.sleep(self.query_interval)
+
+ # We do not want to load the results of the disk initialization,
+ # so it is not added to the results here.
+ self.initialize_disks()
+ self.setup_done = True
def _query_job_state(self, job_id):
"""Query the status of the supplied job_id and report on metrics"""
@@ -149,7 +151,8 @@ class StorPerf(base.Scenario):
if not self.setup_done:
self.setup()
- metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+ metadata = {"build_tag": "latest",
+ "test_case": "opnfv_yardstick_tc074"}
metadata_payload_dict = {"pod_name": "NODE_NAME",
"scenario_name": "DEPLOY_SCENARIO",
"version": "YARDSTICK_BRANCH"}
@@ -162,7 +165,9 @@ class StorPerf(base.Scenario):
job_args = {"metadata": metadata}
job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
- "target", "nossd", "nowarm", "workload"]
+ "target", "workload", "workloads",
+ "agent_count", "steady_state_samples"]
+ job_args["deadline"] = self.options["timeout"]
for job_argument in job_args_payload_list:
try:
@@ -170,8 +175,16 @@ class StorPerf(base.Scenario):
except KeyError:
pass
+ api_version = "v1.0"
+
+ if ("workloads" in job_args and
+ job_args["workloads"] is not None and
+ len(job_args["workloads"]) > 0):
+ api_version = "v2.0"
+
LOG.info("Starting a job with parameters %s", job_args)
- job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
+ job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+ api_version),
json=job_args)
job_res_content = jsonutils.loads(job_res.content)
@@ -187,15 +200,6 @@ class StorPerf(base.Scenario):
self._query_job_state(job_id)
time.sleep(self.query_interval)
- terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
- self.target)
-
- if terminate_res.status_code != 200:
- terminate_res_content = jsonutils.loads(
- terminate_res.content)
- raise RuntimeError("Failed to start a job, error message:",
- terminate_res_content["message"])
-
# TODO: Support using ETA to polls for completion.
# Read ETA, next poll in 1/2 ETA time slot.
# If ETA is greater than the maximum allowed job time,
@@ -216,14 +220,46 @@ class StorPerf(base.Scenario):
result.update(result_res_content)
+ def initialize_disks(self):
+ """Fills the target with random data prior to executing workloads"""
+
+ job_args = {}
+ job_args_payload_list = ["target"]
+
+ for job_argument in job_args_payload_list:
+ try:
+ job_args[job_argument] = self.options[job_argument]
+ except KeyError:
+ pass
+
+ LOG.info("Starting initialization with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+ self.target, json=job_args)
+
+ job_res_content = jsonutils.loads(job_res.content)
+
+ if job_res.status_code != 200:
+ raise RuntimeError(
+ "Failed to start initialization job, error message:",
+ job_res_content["message"])
+ elif job_res.status_code == 200:
+ job_id = job_res_content["job_id"]
+ LOG.info("Started initialization as job id: %s...", job_id)
+
+ while not self.job_completed:
+ self._query_job_state(job_id)
+ time.sleep(self.query_interval)
+
+ self.job_completed = False
+
def teardown(self):
"""Deletes the agent configuration and the stack"""
- teardown_res = requests.delete('http://%s:5000/api/v1.0/\
- configurations' % self.target)
+ teardown_res = requests.delete(
+ 'http://%s:5000/api/v1.0/configurations' % self.target)
if teardown_res.status_code == 400:
teardown_res_content = jsonutils.loads(
- teardown_res.content)
+ teardown_res.json_data)
raise RuntimeError("Failed to reset environment, error message:",
teardown_res_content['message'])
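
The run() changes in storperf.py above select the StorPerf API version from the options: a plain workload string keeps the v1.0 jobs endpoint, while a non-empty workloads map switches to v2.0. A condensed sketch of that selection (illustrative; option values and the target address are placeholders, not the scenario's actual code):

    # Condensed illustration of the job-submission logic added above: choose
    # the StorPerf API version from the presence of a "workloads" map.
    import requests
    from oslo_serialization import jsonutils

    def start_job(target, options):
        job_args = {"metadata": {"build_tag": "latest",
                                 "test_case": "opnfv_yardstick_tc074"},
                    "deadline": options["timeout"]}
        for key in ("block_sizes", "queue_depths", "workload", "workloads",
                    "agent_count", "steady_state_samples"):
            if key in options:
                job_args[key] = options[key]

        # A non-empty "workloads" map selects the v2.0 API, otherwise v1.0.
        api_version = "v2.0" if job_args.get("workloads") else "v1.0"
        job_res = requests.post('http://%s:5000/api/%s/jobs'
                                % (target, api_version), json=job_args)
        job_res_content = jsonutils.loads(job_res.content)
        if job_res.status_code != 200:
            raise RuntimeError("Failed to start a job, error message:",
                               job_res_content["message"])
        return job_res_content["job_id"]

    # Example (placeholder target IP and options):
    # job_id = start_job("192.168.200.2",
    #                    {"timeout": 600, "queue_depths": "4",
    #                     "block_sizes": "4096",
    #                     "workloads": {"rr": {"rw": "randread"}}})
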
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index b39a0af9c..10c1f3f27 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -79,6 +79,10 @@ class FunctionNotImplemented(YardstickException):
'"%(class_name)" class.')
+class InvalidType(YardstickException):
+ message = 'Type "%(type_to_convert)s" is not valid'
+
+
class InfluxDBConfigurationMissing(YardstickException):
message = ('InfluxDB configuration is not available. Add "influxdb" as '
'a dispatcher and the configuration section')
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index c019cd264..31885c073 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -21,6 +21,7 @@ import importlib
import ipaddress
import logging
import os
+import pydoc
import random
import re
import signal
@@ -578,3 +579,24 @@ def send_socket_command(host, port, command):
finally:
sock.close()
return ret
+
+
+def safe_cast(value, type_to_convert, default_value):
+ """Convert value to type, in case of error return default_value
+
+ :param value: value to convert
+ :param type_to_convert: type to convert, could be "type" or "string"
+ :param default_value: default value to return
+ :return: converted value or default_value
+ """
+ if isinstance(type_to_convert, type):
+ _type = type_to_convert
+ else:
+ _type = pydoc.locate(type_to_convert)
+ if not _type:
+ raise exceptions.InvalidType(type_to_convert=type_to_convert)
+
+ try:
+ return _type(value)
+ except ValueError:
+ return default_value
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index a8f950b7b..4fbceea9b 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -44,6 +44,7 @@ class TrafficProfileConfig(object):
self.lower_bound = tprofile.get('lower_bound')
self.upper_bound = tprofile.get('upper_bound')
self.step_interval = tprofile.get('step_interval')
+ self.enable_latency = tprofile.get('enable_latency', False)
def _parse_rate(self, rate):
"""Parse traffic profile rate
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 26dc1fe04..760b1e8d3 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -168,12 +168,8 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
[samples[iface]['in_packets'] for iface in samples])
out_packets_sum = sum(
[samples[iface]['out_packets'] for iface in samples])
- rx_throughput = sum(
- [samples[iface]['RxThroughput'] for iface in samples])
- rx_throughput = round(float(rx_throughput), 2)
- tx_throughput = sum(
- [samples[iface]['TxThroughput'] for iface in samples])
- tx_throughput = round(float(tx_throughput), 2)
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
packet_drop = abs(out_packets_sum - in_packets_sum)
try:
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 898315671..987029373 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -118,7 +118,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
ports.append(port_num)
port_pg_id.add_port(port_num)
profile = self._create_profile(profile_data,
- self.rate, port_pg_id)
+ self.rate, port_pg_id,
+ self.config.enable_latency)
self.generator.client.add_streams(profile, ports=[port_num])
self.generator.client.start(ports=ports,
@@ -126,7 +127,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
force=True)
return ports, port_pg_id
- def _create_profile(self, profile_data, rate, port_pg_id):
+ def _create_profile(self, profile_data, rate, port_pg_id, enable_latency):
"""Create a STL profile (list of streams) for a port"""
streams = []
for packet_name in profile_data:
@@ -134,7 +135,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
get('outer_l2', {}).get('framesize'))
imix_data = self._create_imix_data(imix)
self._create_vm(profile_data[packet_name])
- _streams = self._create_streams(imix_data, rate, port_pg_id)
+ _streams = self._create_streams(imix_data, rate, port_pg_id,
+ enable_latency)
streams.extend(_streams)
return trex_stl_streams.STLProfile(streams)
@@ -213,7 +215,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
return trex_stl_packet_builder_scapy.STLPktBuilder(
pkt=base_pkt / pad, vm=self.trex_vm)
- def _create_streams(self, imix_data, rate, port_pg_id):
+ def _create_streams(self, imix_data, rate, port_pg_id, enable_latency):
"""Create a list of streams per packet size
The STL TX mode speed of the generated streams will depend on the frame
@@ -237,7 +239,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
in imix_data.items() if float(weight) > 0):
packet = self._create_single_packet(size)
pg_id = port_pg_id.increase_pg_id()
- stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ stl_flow = (trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id) if
+ enable_latency else None)
mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
streams.append(trex_stl_client.STLStream(
packet=packet, flow_stats=stl_flow, mode=mode))
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 4d3bc2ce5..94ab06980 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -60,7 +60,7 @@ class IxiaResourceHelper(ClientResourceHelper):
def stop_collect(self):
self._terminated.value = 1
- def generate_samples(self, ports, key=None):
+ def generate_samples(self, ports, duration):
stats = self.get_stats()
samples = {}
@@ -70,27 +70,23 @@ class IxiaResourceHelper(ClientResourceHelper):
try:
# reverse lookup port name from port_num so the stats dict is descriptive
intf = self.vnfd_helper.find_interface_by_port(port_num)
- port_name = intf["name"]
+ port_name = intf['name']
+ avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num]
+ min_latency = stats['Store-Forward_Min_latency_ns'][port_num]
+ max_latency = stats['Store-Forward_Max_latency_ns'][port_num]
samples[port_name] = {
- "rx_throughput_kps": float(stats["Rx_Rate_Kbps"][port_num]),
- "tx_throughput_kps": float(stats["Tx_Rate_Kbps"][port_num]),
- "rx_throughput_mbps": float(stats["Rx_Rate_Mbps"][port_num]),
- "tx_throughput_mbps": float(stats["Tx_Rate_Mbps"][port_num]),
- "in_packets": int(stats["Valid_Frames_Rx"][port_num]),
- "out_packets": int(stats["Frames_Tx"][port_num]),
- # NOTE(ralonsoh): we need to make the traffic injection
- # time variable.
- "RxThroughput": int(stats["Valid_Frames_Rx"][port_num]) / 30,
- "TxThroughput": int(stats["Frames_Tx"][port_num]) / 30,
+ 'rx_throughput_kps': float(stats['Rx_Rate_Kbps'][port_num]),
+ 'tx_throughput_kps': float(stats['Tx_Rate_Kbps'][port_num]),
+ 'rx_throughput_mbps': float(stats['Rx_Rate_Mbps'][port_num]),
+ 'tx_throughput_mbps': float(stats['Tx_Rate_Mbps'][port_num]),
+ 'in_packets': int(stats['Valid_Frames_Rx'][port_num]),
+ 'out_packets': int(stats['Frames_Tx'][port_num]),
+ 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration,
+ 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration,
+ 'Store-Forward_Avg_latency_ns': utils.safe_cast(avg_latency, int, 0),
+ 'Store-Forward_Min_latency_ns': utils.safe_cast(min_latency, int, 0),
+ 'Store-Forward_Max_latency_ns': utils.safe_cast(max_latency, int, 0)
}
- if key:
- avg_latency = stats["Store-Forward_Avg_latency_ns"][port_num]
- min_latency = stats["Store-Forward_Min_latency_ns"][port_num]
- max_latency = stats["Store-Forward_Max_latency_ns"][port_num]
- samples[port_name][key] = \
- {"Store-Forward_Avg_latency_ns": avg_latency,
- "Store-Forward_Min_latency_ns": min_latency,
- "Store-Forward_Max_latency_ns": max_latency}
except IndexError:
pass
@@ -129,13 +125,11 @@ class IxiaResourceHelper(ClientResourceHelper):
self, self.client, mac)
self.client_started.value = 1
# pylint: disable=unnecessary-lambda
- utils.wait_until_true(lambda: self.client.is_traffic_stopped())
- samples = self.generate_samples(traffic_profile.ports)
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped(),
+ timeout=traffic_profile.config.duration * 2)
+ samples = self.generate_samples(traffic_profile.ports,
+ traffic_profile.config.duration)
- # NOTE(ralonsoh): the traffic injection duration is fixed to 30
- # seconds. This parameter is configurable and must be retrieved
- # from the traffic_profile.full_profile information.
- # Every flow must have the same duration.
completed, samples = traffic_profile.get_drop_percentage(
samples, min_tol, max_tol, first_run=first_run)
self._queue.put(samples)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
new file mode 100644
index 000000000..74f86983b
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+
+class BaseAttackerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.attacker_cfg = {
+ 'fault_type': 'test-attacker',
+ 'action_parameter': {'process_name': 'nova_api'},
+ 'rollback_parameter': {'process_name': 'nova_api'},
+ 'key': 'stop-service',
+ 'attack_key': 'stop-service',
+ 'host': 'node1',
+ }
+ self.base_attacker = baseattacker.BaseAttacker({}, {})
+
+ def test__init__(self):
+ self.assertEqual(self.base_attacker.data, {})
+ self.assertFalse(self.base_attacker.mandatory)
+ self.assertEqual(self.base_attacker.intermediate_variables, {})
+ self.assertFalse(self.base_attacker.mandatory)
+
+ def test_get_attacker_cls(self):
+ with self.assertRaises(RuntimeError):
+ baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index ec0e5973c..d61fa67c7 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -109,6 +109,23 @@ class ServicehaTestCase(unittest.TestCase):
ret = {}
p.run(ret)
attacker = mock.Mock()
+ attacker.mandatory = False
p.attackers = [attacker]
p.teardown()
attacker.recover.assert_not_called()
+
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_teardown_when_mandatory(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ p.setup()
+ self.assertTrue(p.setup_done)
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+ ret = {}
+ p.run(ret)
+ attacker = mock.Mock()
+ attacker.mandatory = True
+ p.attackers = [attacker]
+ p.teardown()
+ attacker.recover.assert_called_once()
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 5844746ab..2ba53cb93 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -11,18 +11,18 @@
from __future__ import absolute_import
+import json
import unittest
import mock
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios.storage import storperf
# pylint: disable=unused-argument
# disable this for now because I keep forgetting mock patch arg ordering
-
-
def mocked_requests_config_post(*args, **kwargs):
class MockResponseConfigPost(object):
@@ -32,10 +32,24 @@ def mocked_requests_config_post(*args, **kwargs):
return MockResponseConfigPost(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "false"}',
+ '"stack_created": false}',
200)
+def mocked_requests_config_post_fail(*args, **kwargs):
+ class MockResponseConfigPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigPost(
+ '{"message": "ERROR: Parameter \'public_network\' is invalid: ' +
+ 'Error validating value \'foo\': Unable to find network with ' +
+ 'name or id \'foo\'"}',
+ 400)
+
+
def mocked_requests_config_get(*args, **kwargs):
class MockResponseConfigGet(object):
@@ -45,10 +59,47 @@ def mocked_requests_config_get(*args, **kwargs):
return MockResponseConfigGet(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "true"}',
+ '"stack_created": true}',
200)
+def mocked_requests_config_get_not_created(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{"stack_id": "",'
+ '"stack_created": false}',
+ 200)
+
+
+def mocked_requests_config_get_no_payload(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{}',
+ 200)
+
+
+def mocked_requests_initialize_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_get(*args, **kwargs):
class MockResponseJobGet(object):
@@ -73,6 +124,18 @@ def mocked_requests_job_post(*args, **kwargs):
"d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
+def mocked_requests_job_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_delete(*args, **kwargs):
class MockResponseJobDelete(object):
@@ -100,10 +163,7 @@ def mocked_requests_delete_failed(*args, **kwargs):
self.json_data = json_data
self.status_code = status_code
- if args[0] == "http://172.16.0.137:5000/api/v1.0/configurations":
- return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
-
- return MockResponseDeleteFailed('{}', 404)
+ return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
class StorPerfTestCase(unittest.TestCase):
@@ -119,11 +179,14 @@ class StorPerfTestCase(unittest.TestCase):
self.result = {}
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_config_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_config_get)
- def test_successful_setup(self, mock_post, mock_get):
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup(self, mock_get, mock_post):
+ mock_post.side_effect = [mocked_requests_config_post(),
+ mocked_requests_job_post()]
+ mock_get.side_effect = [mocked_requests_config_get(),
+ mocked_requests_job_get()]
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -146,14 +209,47 @@ class StorPerfTestCase(unittest.TestCase):
self.assertTrue(s.setup_done)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_job_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_job_get)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_job_delete)
- def test_successful_run(self, mock_post, mock_get, mock_delete):
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_unsuccessful(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_not_created
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_no_payload(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_no_payload
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup_config_post_failed(self, mock_get, mock_post):
+ mock_post.side_effect = mocked_requests_config_post_fail
+
+ args = {
+ "options": {
+ "public_network": "foo"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.setup)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v1_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -165,6 +261,74 @@ class StorPerfTestCase(unittest.TestCase):
"query_interval": 0,
"timeout": 60
}
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ "workload": "rs",
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ sample_output = '{"Status": "Completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+
+ expected_result = jsonutils.loads(sample_output)
+
+ s.run(self.result)
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/jobs',
+ json=jsonutils.loads(json.dumps(expected_post)))
+
+ self.assertEqual(self.result, expected_result)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v2_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
args = {
"options": options
@@ -179,13 +343,126 @@ class StorPerfTestCase(unittest.TestCase):
expected_result = jsonutils.loads(sample_output)
s.run(self.result)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
self.assertEqual(self.result, expected_result)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete)
- def test_successful_teardown(self, mock_delete):
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ self.assertRaises(RuntimeError, s.run, self.ctx)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(storperf.StorPerf, 'setup')
+ def test_run_calls_setup(self, mock_setup, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ 'timeout': 60,
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.run(self.result)
+
+ mock_setup.assert_called_once()
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.initialize_disks()
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks_post_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_initialize_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.initialize_disks)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch.object(requests, 'delete')
+ def test_teardown(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_job_delete
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -207,11 +484,12 @@ class StorPerfTestCase(unittest.TestCase):
s.teardown()
self.assertFalse(s.setup_done)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete_failed)
- def test_failed_teardown(self, mock_delete):
+ @mock.patch.object(requests, 'delete')
+ def test_teardown_request_delete_failed(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_delete_failed
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -230,4 +508,6 @@ class StorPerfTestCase(unittest.TestCase):
s = storperf.StorPerf(args, self.ctx)
- self.assertRaises(AssertionError, s.teardown(), self.result)
+ self.assertRaises(RuntimeError, s.teardown)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index ef4142148..3cf6c4d05 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -1391,3 +1391,19 @@ class GetPortIPTestCase(unittest.TestCase):
def test_return_value(self):
self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
+
+
+class SafeCaseTestCase(unittest.TestCase):
+
+ def test_correct_type_int(self):
+ self.assertEqual(35, utils.safe_cast('35', int, 0))
+
+ def test_correct_int_as_string(self):
+ self.assertEqual(25, utils.safe_cast('25', 'int', 0))
+
+ def test_incorrect_type_as_string(self):
+ with self.assertRaises(exceptions.InvalidType):
+ utils.safe_cast('100', 'intt', 0)
+
+ def test_default_value(self):
+ self.assertEqual(0, utils.safe_cast('', 'int', 0))
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 6f76eb77c..27ab4607b 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -575,87 +575,77 @@ class TestIXIARFC2544Profile(unittest.TestCase):
def test_get_drop_percentage_completed(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
def test_get_drop_percentage_over_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 0.05)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_under_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.min_rate)
@mock.patch.object(ixia_rfc2544.LOG, 'info')
def test_get_drop_percentage_not_flow(self, *args):
samples = {'iface_name_1':
- {'RxThroughput': 0, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 0},
+ {'in_packets': 1000, 'out_packets': 0},
'iface_name_2':
- {'RxThroughput': 0, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 0}
+ {'in_packets': 1005, 'out_packets': 0}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(0, samples['RxThroughput'])
+ self.assertEqual(0.0, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(100, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_first_run(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 1, first_run=True)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(33.45, rfc2544_profile.rate)
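
The updated expectations above come from computing throughput out of the raw per-port packet counters divided by the trial duration, instead of summing the per-interface RxThroughput/TxThroughput fields that the old samples carried. A quick arithmetic check, assuming a 30-second trial duration (the duration value itself is not visible in this hunk, but it reproduces the asserted numbers exactly):

# How the asserted values are derived; round(x, 3) matches the test output.
duration = 30
out_packets = 1000 + 1007          # Tx, summed over both interfaces
in_packets = 1000 + 1005           # Rx, summed over both interfaces

tx_throughput = round(out_packets / float(duration), 3)                 # 66.9
rx_throughput = round(in_packets / float(duration), 3)                  # 66.833
drop_pct = round((out_packets - in_packets) * 100.0 / out_packets, 6)   # 0.099651

assert (tx_throughput, rx_throughput, drop_pct) == (66.9, 66.833, 0.099651)

The same arithmetic explains the not-flow case: with out_packets of 0 on both interfaces, TxThroughput becomes 0.0 and DropPercentage is forced to 100.
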
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index d0ad77110..cfeebaa3a 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -103,10 +103,10 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
mock_create_profile:
rfc2544_profile.execute_traffic(traffic_generator=mock_generator)
mock_create_profile.assert_has_calls([
- mock.call('profile1', rfc2544_profile.rate, mock.ANY),
- mock.call('profile1', rfc2544_profile.rate, mock.ANY),
- mock.call('profile2', rfc2544_profile.rate, mock.ANY),
- mock.call('profile2', rfc2544_profile.rate, mock.ANY)])
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY, False),
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY, False),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY, False),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY, False)])
mock_generator.client.add_streams.assert_has_calls([
mock.call(mock.ANY, ports=[10]),
mock.call(mock.ANY, ports=[20]),
@@ -130,13 +130,14 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
mock_create_streams:
mock_create_imix.return_value = 'imix_data'
mock_create_streams.return_value = ['stream1']
- rfc2544_profile._create_profile(profile_data, rate, port_pg_id)
+ rfc2544_profile._create_profile(profile_data, rate, port_pg_id,
+ True)
mock_create_imix.assert_called_once_with('imix_info')
mock_create_vm.assert_called_once_with(
{'outer_l2': {'framesize': 'imix_info'}})
mock_create_streams.assert_called_once_with('imix_data', 100,
- port_pg_id)
+ port_pg_id, True)
mock_stl_profile.assert_called_once_with(['stream1'])
def test__create_imix_data(self):
@@ -209,7 +210,7 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
with mock.patch.object(rfc2544_profile, '_create_single_packet'):
output = rfc2544_profile._create_streams(imix_data, rate,
- port_pg_id)
+ port_pg_id, True)
self.assertEqual(['stream1', 'stream2'], output)
mock_latency.assert_has_calls([
mock.call(pg_id=1), mock.call(pg_id=2)])
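
The extra positional argument these mocks now expect is simply threaded from _create_profile down to _create_streams. A wiring-only sketch of that shape; the flag name enable_latency is an assumption, since only the argument's position and its boolean values appear in this diff:

# Illustrative wiring only, not the real RFC2544Profile.
class ProfileWiringSketch(object):

    def _create_profile(self, profile_data, rate, port_pg_id, enable_latency):
        imix_data = self._create_imix_data(profile_data)
        return self._create_streams(imix_data, rate, port_pg_id, enable_latency)

    def _create_imix_data(self, profile_data):
        return {}  # placeholder

    def _create_streams(self, imix_data, rate, port_pg_id, enable_latency):
        # per-stream latency statistics would be attached here only when the
        # flag is set; returning an empty list keeps the sketch runnable
        return []
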
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index ddb63242e..ec0e6aa6d 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -18,6 +18,7 @@ import mock
import six
import unittest
+from yardstick.common import utils
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -57,6 +58,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
def test_run_traffic(self):
mock_tprofile = mock.Mock()
+ mock_tprofile.config.duration = 10
mock_tprofile.get_drop_percentage.return_value = True, 'fake_samples'
ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
ixia_rhelper.rfc_helper = mock.Mock()
@@ -64,7 +66,8 @@ class TestIxiaResourceHelper(unittest.TestCase):
ixia_rhelper.vnfd_helper.port_pairs.all_ports = []
with mock.patch.object(ixia_rhelper, 'generate_samples'), \
mock.patch.object(ixia_rhelper, '_build_ports'), \
- mock.patch.object(ixia_rhelper, '_initialize_client'):
+ mock.patch.object(ixia_rhelper, '_initialize_client'), \
+ mock.patch.object(utils, 'wait_until_true'):
ixia_rhelper.run_traffic(mock_tprofile)
self.assertEqual('fake_samples', ixia_rhelper._queue.get())
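
Patching utils.wait_until_true (and giving the mocked profile a config.duration) suggests run_traffic now polls via that helper, so an unpatched unit test would block for the configured duration. A small illustration of the pattern; everything here except utils.wait_until_true is hypothetical:

from unittest import mock

from yardstick.common import utils


def run_traffic_sketch(duration):
    # a real implementation would pass a meaningful predicate; the unit test
    # never executes it because wait_until_true is replaced by a MagicMock
    utils.wait_until_true(lambda: False, timeout=duration)
    return 'fake_samples'


with mock.patch.object(utils, 'wait_until_true') as mock_wait:
    assert run_traffic_sketch(10) == 'fake_samples'
    mock_wait.assert_called_once()
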