-rw-r--r-- ansible/install-inventory.ini | 30
-rw-r--r-- ansible/install.yaml | 73
-rw-r--r-- ansible/roles/configure_nginx/tasks/main.yml | 2
-rw-r--r-- ansible/roles/install_yardstick/tasks/regular_install.yml | 3
-rw-r--r-- ansible/yardstick-install-inventory.ini | 20
-rw-r--r-- docs/release/results/euphrates_fraser_comparison.rst | 602
-rw-r--r-- docs/release/results/euphrates_fraser_comparsion.rst | 8
-rw-r--r-- docs/release/results/images/tc002_pod_fraser.png | bin 0 -> 22935 bytes
-rw-r--r-- docs/release/results/images/tc002_scenario_fraser.png | bin 0 -> 25892 bytes
-rw-r--r-- docs/release/results/images/tc010_pod_fraser.png | bin 0 -> 37525 bytes
-rw-r--r-- docs/release/results/images/tc010_scenario_fraser.png | bin 0 -> 45190 bytes
-rw-r--r-- docs/release/results/images/tc011_pod_fraser.png | bin 0 -> 25593 bytes
-rw-r--r-- docs/release/results/images/tc011_scenario_fraser.png | bin 0 -> 28152 bytes
-rw-r--r-- docs/release/results/images/tc012_pod_fraser.png | bin 0 -> 36168 bytes
-rw-r--r-- docs/release/results/images/tc012_scenario_fraser.png | bin 0 -> 41289 bytes
-rw-r--r-- docs/release/results/images/tc014_pod_fraseer.png | bin 0 -> 29513 bytes
-rw-r--r-- docs/release/results/images/tc014_scenario_fraser.png | bin 0 -> 36018 bytes
-rw-r--r-- docs/release/results/images/tc069_pod_fraser.png | bin 0 -> 36041 bytes
-rw-r--r-- docs/release/results/images/tc069_scenario_fraser.png | bin 0 -> 43834 bytes
-rw-r--r-- docs/release/results/images/tc082_pod_fraser.png | bin 0 -> 25078 bytes
-rw-r--r-- docs/release/results/images/tc083_pod_fraser.png | bin 0 -> 25476 bytes
-rw-r--r-- docs/release/results/index.rst | 2
-rw-r--r-- docs/release/results/results.rst | 2
-rw-r--r-- docs/release/results/tc002-network-latency.rst | 208
-rw-r--r-- docs/release/results/tc010-memory-read-latency.rst | 211
-rw-r--r-- docs/release/results/tc011-packet-delay-variation.rst | 170
-rw-r--r-- docs/release/results/tc012-memory-read-write-bandwidth.rst | 214
-rw-r--r-- docs/release/results/tc014-cpu-processing-speed.rst | 214
-rw-r--r-- docs/release/results/tc069-memory-write-bandwidth.rst | 216
-rw-r--r-- docs/release/results/tc082-context-switches-under-load.rst | 66
-rw-r--r-- docs/release/results/tc083-network-throughput-between-vm.rst | 66
-rw-r--r-- docs/testing/user/userguide/14-nsb-operation.rst | 148
-rwxr-xr-x nsb_setup.sh | 2
-rw-r--r-- samples/vnf_samples/nsut/acl/acl_1rule.yaml | 60
-rw-r--r-- samples/vnf_samples/nsut/acl/acl_rules.yaml | 60
-rw-r--r-- samples/vnf_samples/nsut/acl/acl_worstcaserules.yaml | 64
-rw-r--r-- samples/vnf_samples/nsut/acl/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml | 8
-rw-r--r-- samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml | 24
-rw-r--r-- samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml | 53
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_baremetal_l2fwd-4.yaml | 10
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-scale-up.yaml | 11
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml | 6
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml | 113
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_pktTouch-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-scale-up.yaml | 11
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_lb-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-scale-up.yaml | 11
-rw-r--r-- samples/vnf_samples/nsut/prox/tc_prox_heat_context_vpe-4.yaml | 4
-rw-r--r-- samples/vnf_samples/nsut/vfw/acl_1rule.yaml | 60
-rw-r--r-- samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml | 114
-rw-r--r-- samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml | 53
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-ixia-scale-out.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-scale-out.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-correlated-scale-out.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-scale-out.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-out.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-10.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-2.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-4.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-scale-out.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml | 102
-rw-r--r-- samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml | 3
-rw-r--r-- samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc001.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml | 5
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc011.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc012.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc023.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc069.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc076.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc082.yaml | 2
-rwxr-xr-x tests/opnfv/test_cases/opnfv_yardstick_tc083.yaml | 2
-rw-r--r-- tests/opnfv/test_cases/opnfv_yardstick_tc084.yaml | 2
-rw-r--r-- tests/unit/__init__.py | 26
-rw-r--r-- tests/unit/network_services/__init__.py | 0
-rw-r--r-- tools/cover.sh | 11
-rwxr-xr-x tools/run_tests.sh | 11
-rw-r--r-- yardstick/benchmark/contexts/base.py | 77
-rw-r--r-- yardstick/benchmark/contexts/dummy.py | 6
-rw-r--r-- yardstick/benchmark/contexts/heat.py | 44
-rw-r--r-- yardstick/benchmark/contexts/kubernetes.py | 9
-rw-r--r-- yardstick/benchmark/contexts/node.py | 62
-rw-r--r-- yardstick/benchmark/contexts/standalone/model.py | 14
-rw-r--r-- yardstick/benchmark/contexts/standalone/ovs_dpdk.py | 17
-rw-r--r-- yardstick/benchmark/contexts/standalone/sriov.py | 18
-rw-r--r-- yardstick/benchmark/core/task.py | 33
-rwxr-xr-x yardstick/benchmark/runners/arithmetic.py | 11
-rw-r--r-- yardstick/benchmark/runners/duration.py | 7
-rwxr-xr-x yardstick/benchmark/runners/dynamictp.py | 7
-rw-r--r-- yardstick/benchmark/runners/iteration.py | 7
-rw-r--r-- yardstick/benchmark/runners/proxduration.py | 165
-rw-r--r-- yardstick/benchmark/runners/search.py | 9
-rw-r--r-- yardstick/benchmark/runners/sequence.py | 9
-rw-r--r-- yardstick/benchmark/scenarios/availability/scenario_general.py | 8
-rwxr-xr-x yardstick/benchmark/scenarios/availability/serviceha.py | 22
-rw-r--r-- yardstick/benchmark/scenarios/base.py | 6
-rw-r--r-- yardstick/benchmark/scenarios/compute/cyclictest.py | 4
-rw-r--r-- yardstick/benchmark/scenarios/compute/lmbench.py | 6
-rw-r--r-- yardstick/benchmark/scenarios/compute/perf.py | 20
-rw-r--r-- yardstick/benchmark/scenarios/compute/qemu_migrate.py | 4
-rw-r--r-- yardstick/benchmark/scenarios/compute/ramspeed.py | 6
-rw-r--r-- yardstick/benchmark/scenarios/compute/unixbench.py | 2
-rw-r--r-- yardstick/benchmark/scenarios/networking/iperf3.py | 17
-rw-r--r-- yardstick/benchmark/scenarios/networking/moongen_testpmd.py | 7
-rwxr-xr-x yardstick/benchmark/scenarios/networking/netperf.py | 6
-rwxr-xr-x yardstick/benchmark/scenarios/networking/netperf_node.py | 7
-rw-r--r-- yardstick/benchmark/scenarios/networking/nstat.py | 2
-rw-r--r-- yardstick/benchmark/scenarios/networking/ping.py | 20
-rw-r--r-- yardstick/benchmark/scenarios/networking/ping6.py | 13
-rw-r--r-- yardstick/benchmark/scenarios/networking/pktgen.py | 124
-rw-r--r-- yardstick/benchmark/scenarios/networking/pktgen_dpdk.py | 7
-rw-r--r-- yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py | 8
-rw-r--r-- yardstick/benchmark/scenarios/networking/vnf_generic.py | 20
-rw-r--r-- yardstick/benchmark/scenarios/networking/vsperf.py | 14
-rw-r--r-- yardstick/benchmark/scenarios/networking/vsperf_dpdk.py | 16
-rw-r--r-- yardstick/benchmark/scenarios/storage/fio.py | 2
-rw-r--r-- yardstick/common/constants.py | 1
-rw-r--r-- yardstick/common/exceptions.py | 19
-rw-r--r-- yardstick/common/openstack_utils.py | 9
-rw-r--r-- yardstick/common/utils.py | 48
-rw-r--r-- yardstick/network_services/helpers/samplevnf_helper.py | 121
-rw-r--r-- yardstick/network_services/traffic_profile/base.py | 26
-rw-r--r-- yardstick/network_services/traffic_profile/ixia_rfc2544.py | 1
-rw-r--r-- yardstick/network_services/traffic_profile/prox_binsearch.py | 124
-rw-r--r-- yardstick/network_services/traffic_profile/rfc2544.py | 398
-rw-r--r-- yardstick/network_services/traffic_profile/trex_traffic_profile.py | 122
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/acl_vnf.py | 207
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/prox_helpers.py | 96
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/prox_vnf.py | 28
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 51
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 18
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py | 87
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/tg_trex.py | 26
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/vfw_vnf.py | 18
-rw-r--r-- yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py | 1
-rw-r--r-- yardstick/network_services/yang_model.py | 108
-rw-r--r-- yardstick/ssh.py | 12
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/standalone/test_model.py | 9
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py | 16
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py | 18
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/test_base.py | 129
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/test_dummy.py | 8
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/test_heat.py | 108
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/test_kubernetes.py | 8
-rw-r--r-- yardstick/tests/unit/benchmark/contexts/test_node.py | 45
-rw-r--r-- yardstick/tests/unit/benchmark/core/test_report.py | 10
-rw-r--r-- yardstick/tests/unit/benchmark/core/test_task.py | 25
-rw-r--r-- yardstick/tests/unit/benchmark/runner/test_duration.py | 39
-rw-r--r-- yardstick/tests/unit/benchmark/runner/test_proxduration.py | 295
-rw-r--r-- yardstick/tests/unit/benchmark/runner/test_search.py | 50
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py | 18
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py | 11
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py | 41
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py | 7
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py | 5
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py | 7
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py | 5
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py | 5
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py | 5
-rwxr-xr-x yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py | 3
-rwxr-xr-x yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py | 3
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py | 3
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py | 3
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py | 94
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 21
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py | 3
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 3
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py | 199
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 45
-rw-r--r-- yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py | 5
-rw-r--r-- yardstick/tests/unit/common/test_openstack_utils.py | 6
-rw-r--r-- yardstick/tests/unit/common/test_utils.py | 120
-rw-r--r-- yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py | 2
-rw-r--r-- yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py | 62
-rw-r--r-- yardstick/tests/unit/network_services/test_yang_model.py | 129
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_base.py | 17
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_fixed.py | 3
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_http.py | 29
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py | 96
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py | 2
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 463
-rw-r--r-- yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py | 154
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py | 147
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py | 194
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py | 69
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py | 11
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 263
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py | 4
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py | 13
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py | 121
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py | 196
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py | 4
-rw-r--r-- yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py | 7
-rw-r--r-- yardstick/tests/unit/test_cmd/commands/test_env.py | 70
-rw-r--r-- yardstick/tests/unit/test_ssh.py | 37
222 files changed, 6373 insertions(+), 2656 deletions(-)
diff --git a/ansible/install-inventory.ini b/ansible/install-inventory.ini
new file mode 100644
index 000000000..6aa9905bd
--- /dev/null
+++ b/ansible/install-inventory.ini
@@ -0,0 +1,30 @@
+# the group of systems on which to install yardstick
+# by default just localhost
+[jumphost]
+#yardstickvm1 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=local
+localhost ansible_connection=local
+
+# The section below exists only for backward compatibility.
+# It will be removed later.
+[yardstick:children]
+jumphost
+
+[yardstick-standalone]
+#yardstickvm2 ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
+# uncomment hosts below if you would like to test yardstick-standalone/sriov scenarios
+#yardstick-standalone-node ansible_host=192.168.1.2
+#yardstick-standalone-node-2 ansible_host=192.168.1.3
+
+[yardstick-baremetal]
+#yardstickvm3 ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
+# hostname ansible_host=192.168.1.2
+
+[all:vars]
+arch_amd64=amd64
+arch_arm64=arm64
+inst_mode_container=container
+inst_mode_baremetal=baremetal
+ubuntu_archive={"amd64": "http://archive.ubuntu.com/ubuntu/", "arm64": "http://ports.ubuntu.com/ubuntu-ports/"}
+# uncomment credentials below for yardstick-standalone
+#ansible_user=root
+#ansible_pass=root
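+
+# Illustrative invocation (assumed; adjust paths to your checkout):
+#   ansible-playbook -i ansible/install-inventory.ini ansible/install.yaml
+# To override the install mode referenced by install.yaml, pass an extra var:
+#   ansible-playbook -i ansible/install-inventory.ini ansible/install.yaml \
+#     -e INSTALLATION_MODE=container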
diff --git a/ansible/install.yaml b/ansible/install.yaml
index c446b91f7..d1745798c 100644
--- a/ansible/install.yaml
+++ b/ansible/install.yaml
@@ -1,14 +1,20 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
---
-- hosts: localhost
-
+- hosts: jumphost
+ become: yes
vars:
- arch_amd64: "amd64"
- arch_arm64: "arm64"
- inst_mode_container: "container"
- inst_mode_baremetal: "baremetal"
- ubuntu_archive:
- amd64: "http://archive.ubuntu.com/ubuntu/"
- arm64: "http://ports.ubuntu.com/ubuntu-ports/"
installation_mode: "{{ INSTALLATION_MODE | default('baremetal') }}"
yardstick_dir: "{{ YARDSTICK_DIR | default('/home/opnfv/repos/yardstick') }}"
virtual_environment: "{{ VIRTUAL_ENVIRONMENT | default(False) }}"
@@ -42,3 +48,52 @@
- shell: uwsgi -i /etc/yardstick/yardstick.ini
when: installation_mode != inst_mode_container
+
+- name: Prepare baremetal and standalone server(s)
+ hosts: yardstick-baremetal,yardstick-standalone
+ become: yes
+ vars:
+ YARD_IMG_ARCH: "{{ arch_amd64 }}"
+ environment:
+ proxy_env:
+ http_proxy: "{{ lookup('env', 'http_proxy') }}"
+ https_proxy: "{{ lookup('env', 'https_proxy') }}"
+ ftp_proxy: "{{ lookup('env', 'ftp_proxy') }}"
+ no_proxy: "{{ lookup('env', 'no_proxy') }}"
+
+ roles:
+ - add_custom_repos
+ - role: set_package_installer_proxy
+ when: proxy_env is defined and proxy_env
+ # can't update grub in chroot/docker
+ - enable_hugepages_on_boot
+ # needed for collectd plugins
+ - increase_open_file_limits
+ - install_image_dependencies
+ - role: download_dpdk
+ # dpdk_version: "17.02"
+ - install_dpdk
+ - download_trex
+ - install_trex
+ - download_civetweb
+ - install_civetweb
+ - download_samplevnfs
+ - role: install_samplevnf
+ vnf_name: PROX
+ - role: install_samplevnf
+ vnf_name: UDP_Replay
+ - role: install_samplevnf
+ vnf_name: ACL
+ - role: install_samplevnf
+ vnf_name: FW
+ - role: install_samplevnf
+ vnf_name: CGNAPT
+ # build shared DPDK for collectd only, required DPDK downloaded already
+ - install_dpdk_shared
+ - install_rabbitmq
+ - download_intel_cmt_cat
+ - install_intel_cmt_cat
+ - download_pmu_tools
+ - install_pmu_tools
+ - download_collectd
+ - install_collectd
diff --git a/ansible/roles/configure_nginx/tasks/main.yml b/ansible/roles/configure_nginx/tasks/main.yml
index 37b052725..e0f7f75bb 100644
--- a/ansible/roles/configure_nginx/tasks/main.yml
+++ b/ansible/roles/configure_nginx/tasks/main.yml
@@ -30,4 +30,4 @@
shell: |
semanage port -m -t http_port_t -p tcp 5000
semanage port -m -t http_port_t -p udp 5000
- when: ansible_os_family == "RedHat" \ No newline at end of file
+ when: ansible_os_family == "RedHat"
diff --git a/ansible/roles/install_yardstick/tasks/regular_install.yml b/ansible/roles/install_yardstick/tasks/regular_install.yml
index 4a9925ab4..cd0e86fb9 100644
--- a/ansible/roles/install_yardstick/tasks/regular_install.yml
+++ b/ansible/roles/install_yardstick/tasks/regular_install.yml
@@ -18,5 +18,6 @@
- name: Install Yardstick code
pip:
- name: "{{ yardstick_dir }}/."
+ name: "."
extra_args: -e
+ chdir: "{{ yardstick_dir }}/"
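+# The task above is roughly equivalent to running by hand (illustrative only):
+#   cd <yardstick_dir> && pip install -e .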
diff --git a/ansible/yardstick-install-inventory.ini b/ansible/yardstick-install-inventory.ini
deleted file mode 100644
index e276076cc..000000000
--- a/ansible/yardstick-install-inventory.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-# the group of systems on which to install yardstick
-# by default just localhost
-[jumphost]
-localhost ansible_connection=local
-
-# section below is only due backward compatibility.
-# it will be removed later
-[yardstick:children]
-jumphost
-
-[yardstick-standalone]
-# uncomment hosts below if you would to test yardstick-standalone/sriov scenarios
-#yardstick-standalone-node ansible_host=192.168.1.2
-#yardstick-standalone-node-2 ansible_host=192.168.1.2
-
-[all:vars]
-# incomment credentials below for yardstick-standalone
-#ansible_user=root
-#ansible_pass=root
-
diff --git a/docs/release/results/euphrates_fraser_comparison.rst b/docs/release/results/euphrates_fraser_comparison.rst
new file mode 100644
index 000000000..53dfb994f
--- /dev/null
+++ b/docs/release/results/euphrates_fraser_comparison.rst
@@ -0,0 +1,602 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+=======================================================
+Test results analysis for Euphrates and Fraser releases
+=======================================================
+
+TC002
+-----
+
+The round-trip-time (RTT) between 2 VMs on different blades is measured using
+ping.
+
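+For illustration, a comparable stand-alone measurement (not the exact command
+line Yardstick runs) would be::
+
+    ping -c 10 <IP of the peer VM>
+
+where the ``rtt min/avg/max/mdev`` summary line gives the averaged RTT.
+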
+Most test run measurements result on average between 0.39 and 4.00 ms.
+Compared with the Euphrates release, the average RTT of the same pod shows a
+slight increase in the Fraser release. For example, the average RTT of
+arm-pod5 is 1.518 ms in Euphrates and 1.714 ms in Fraser, and the average RTT
+of intel-pod18 is 1.6575 ms in Euphrates and 1.856 ms in Fraser.
+
+{
+
+ "huawei-pod2:stable/euphrates": [0.3925],
+
+ "lf-pod2:stable/euphrates": [0.5315],
+
+ "lf-pod1:stable/euphrates": [0.62],
+
+ "huawei-pod2:stable/fraser": [0.677],
+
+ "lf-pod1:stable/fraser": [0.725],
+
+ "flex-pod2:stable/euphrates": [0.795],
+
+ "huawei-pod12:stable/euphrates": [0.87],
+
+ "ericsson-pod1:stable/fraser": [0.9165],
+
+ "huawei-pod12:stable/fraser": [1.0465],
+
+ "lf-pod2:stable/fraser": [1.2325],
+
+ "intel-pod5:stable/euphrates": [1.25],
+
+ "ericsson-virtual3:stable/euphrates": [1.2655],
+
+ "ericsson-pod1:stable/euphrates": [1.372],
+
+ "zte-pod2:stable/fraser": [1.395],
+
+ "arm-pod5:stable/euphrates": [1.518],
+
+ "huawei-virtual4:stable/euphrates": [1.5355],
+
+ "ericsson-virtual4:stable/fraser": [1.582],
+
+ "huawei-virtual3:stable/euphrates": [1.606],
+
+ "intel-pod18:stable/euphrates": [1.6575],
+
+ "huawei-virtual4:stable/fraser": [1.697],
+
+ "huawei-virtual8:stable/euphrates": [1.709],
+
+ "arm-pod5:stable/fraser": [1.714],
+
+ "huawei-virtual3:stable/fraser": [1.716],
+
+ "intel-pod18:stable/fraser": [1.856],
+
+ "huawei-virtual2:stable/euphrates": [1.872],
+
+ "arm-pod6:stable/euphrates": [1.895],
+
+ "huawei-virtual2:stable/fraser": [1.964],
+
+ "huawei-virtual1:stable/fraser": [1.9765],
+
+ "huawei-virtual9:stable/euphrates": [2.0745],
+
+ "arm-pod6:stable/fraser": [2.209],
+
+ "huawei-virtual1:stable/euphrates": [2.495],
+
+ "ericsson-virtual2:stable/euphrates": [2.7895],
+
+ "ericsson-virtual4:stable/euphrates": [3.768],
+
+ "ericsson-virtual1:stable/euphrates": [3.8035],
+
+ "ericsson-virtual3:stable/fraser": [3.9175],
+
+ "ericsson-virtual2:stable/fraser": [4.004]
+
+}
+
+TC010
+-----
+
+The tool we use to measure memory read latency is lmbench, a suite of
+micro-benchmarks for basic operating system and hardware metrics. Compared
+with the Euphrates release, the memory read latency of the same pod shows a
+slight increase in the Fraser release. Virtual pods tend to have a higher
+memory read latency than physical pods, and compared with x86 pods, the
+memory read latency of ARM pods is significantly higher.
+
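+For illustration, a stand-alone run of the underlying micro-benchmark could
+look like (assumed arguments, not the exact Yardstick invocation)::
+
+    lat_mem_rd 128 128
+
+which prints memory read latency in nanoseconds for array sizes up to 128 MB
+with a 128-byte stride.
+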
+{
+
+ "ericsson-pod1:stable/euphrates": [5.7785],
+
+ "flex-pod2:stable/euphrates": [5.908],
+
+ "ericsson-virtual1:stable/euphrates": [6.412],
+
+ "intel-pod18:stable/euphrates": [6.5905],
+
+ "intel-pod5:stable/euphrates": [6.6975],
+
+ "ericsson-pod1:stable/fraser": [7.0645],
+
+ "ericsson-virtual4:stable/euphrates": [7.183],
+
+ "intel-pod18:stable/fraser": [7.4465],
+
+ "zte-pod2:stable/fraser": [8.1865],
+
+ "ericsson-virtual2:stable/euphrates": [8.4985],
+
+ "huawei-pod2:stable/euphrates": [8.877],
+
+ "huawei-pod12:stable/euphrates": [9.091],
+
+ "huawei-pod2:stable/fraser": [9.236],
+
+ "huawei-pod12:stable/fraser": [9.615],
+
+ "ericsson-virtual3:stable/euphrates": [9.719],
+
+ "ericsson-virtual2:stable/fraser": [9.8925],
+
+ "huawei-virtual4:stable/euphrates": [10.1195],
+
+ "huawei-virtual3:stable/euphrates": [10.19],
+
+ "huawei-virtual2:stable/fraser": [10.22],
+
+ "huawei-virtual1:stable/euphrates": [10.3045],
+
+ "huawei-virtual9:stable/euphrates": [10.318],
+
+ "ericsson-virtual4:stable/fraser": [10.5465],
+
+ "ericsson-virtual3:stable/fraser": [10.9355],
+
+ "huawei-virtual3:stable/fraser": [10.95],
+
+ "huawei-virtual2:stable/euphrates": [11.274],
+
+ "huawei-virtual4:stable/fraser": [11.557],
+
+ "lf-pod1:stable/euphrates": [15.7025],
+
+ "lf-pod2:stable/euphrates": [15.8495],
+
+ "lf-pod2:stable/fraser": [16.5595],
+
+ "lf-pod1:stable/fraser": [16.8395],
+
+ "arm-pod5:stable/euphrates": [18.092],
+
+ "arm-pod5:stable/fraser": [18.744],
+
+ "huawei-virtual1:stable/fraser": [19.8235],
+
+ "huawei-virtual8:stable/euphrates": [33.999],
+
+ "arm-pod6:stable/euphrates": [41.5605],
+
+ "arm-pod6:stable/fraser": [55.804]
+
+}
+
+TC011
+-----
+
+Iperf3 is used to evaluate the packet delay variation between 2 VMs on
+different blades. In general, the packet delay variation results of the two
+releases look similar.
+
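+For illustration, a comparable manual measurement with an iperf3 UDP stream
+could be (assumed command lines)::
+
+    iperf3 -s                                         # on the receiving VM
+    iperf3 -c <IP of the server VM> -u -b 20M -t 30   # on the sending VM
+
+where the UDP report includes the jitter (packet delay variation) in ms.
+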
+{
+
+ "arm-pod6:stable/fraser": [1],
+
+ "ericsson-pod1:stable/fraser": [1],
+
+ "ericsson-virtual2:stable/fraser": [1],
+
+ "ericsson-virtual3:stable/fraser": [1],
+
+ "lf-pod2:stable/fraser": [1],
+
+ "huawei-virtual1:stable/fraser": [2997],
+
+ "huawei-virtual2:stable/euphrates": [2997],
+
+ "flex-pod2:stable/euphrates": [2997.5],
+
+ "huawei-virtual3:stable/euphrates": [2998],
+
+ "huawei-virtual3:stable/fraser": [2999],
+
+ "huawei-virtual9:stable/euphrates": [3000],
+
+ "huawei-virtual8:stable/euphrates": [3001],
+
+ "huawei-virtual4:stable/euphrates": [3002],
+
+ "huawei-virtual4:stable/fraser": [3002],
+
+ "ericsson-virtual3:stable/euphrates": [3006],
+
+ "huawei-virtual1:stable/euphrates": [3007],
+
+ "ericsson-virtual2:stable/euphrates": [3009],
+
+ "intel-pod18:stable/euphrates": [3010],
+
+ "ericsson-virtual4:stable/euphrates": [3017],
+
+ "lf-pod2:stable/euphrates": [3021],
+
+ "arm-pod5:stable/euphrates": [3022],
+
+ "arm-pod6:stable/euphrates": [3022],
+
+ "ericsson-pod1:stable/euphrates": [3022],
+
+ "huawei-pod12:stable/euphrates": [3022],
+
+ "huawei-pod12:stable/fraser": [3022],
+
+ "huawei-pod2:stable/euphrates": [3022],
+
+ "huawei-pod2:stable/fraser": [3022],
+
+ "intel-pod18:stable/fraser": [3022],
+
+ "intel-pod5:stable/euphrates": [3022],
+
+ "lf-pod1:stable/euphrates": [3022],
+
+ "lf-pod1:stable/fraser": [3022],
+
+ "zte-pod2:stable/fraser": [3022],
+
+ "huawei-virtual2:stable/fraser": [3025]
+
+}
+
+TC012
+-----
+
+Lmbench is also used to measure the memory read and write bandwidth.
+As in TC010, compared with the Euphrates release, the memory read and write
+bandwidth of the same pod shows a slight decline in the Fraser release.
+Compared with x86 pods, the memory read and write bandwidth of ARM pods is
+significantly lower.
+
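+For illustration, the underlying lmbench tool can also be run stand-alone
+(assumed arguments)::
+
+    bw_mem 128m rd
+    bw_mem 128m wr
+
+which report read and write bandwidth in MB/s for a 128 MB working set.
+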
+{
+
+ "lf-pod1:stable/euphrates": [22912.39],
+
+ "lf-pod2:stable/euphrates": [22637.67],
+
+ "lf-pod1:stable/fraser": [20552.9],
+
+ "flex-pod2:stable/euphrates": [20229.99],
+
+ "lf-pod2:stable/fraser": [20058.925],
+
+ "ericsson-pod1:stable/fraser": [18930.78],
+
+ "intel-pod18:stable/fraser": [18757.545],
+
+ "ericsson-virtual1:stable/euphrates": [17474.965],
+
+ "ericsson-pod1:stable/euphrates": [17127.38],
+
+ "ericsson-virtual4:stable/euphrates": [16219.97],
+
+ "ericsson-virtual2:stable/euphrates": [15652.28],
+
+ "ericsson-virtual3:stable/euphrates": [15551.26],
+
+ "ericsson-virtual4:stable/fraser": [15389.465],
+
+ "ericsson-virtual2:stable/fraser": [15343.79],
+
+ "huawei-pod2:stable/euphrates": [15017.2],
+
+ "huawei-pod2:stable/fraser": [14870.78],
+
+ "huawei-virtual4:stable/euphrates": [14266.34],
+
+ "huawei-virtual1:stable/euphrates": [14233.035],
+
+ "huawei-virtual3:stable/euphrates": [14227.63],
+
+ "zte-pod2:stable/fraser": [14157.99],
+
+ "huawei-pod12:stable/euphrates": [14147.245],
+
+ "huawei-pod12:stable/fraser": [14126.99],
+
+ "intel-pod18:stable/euphrates": [14058.33],
+
+ "huawei-virtual3:stable/fraser": [13929.67],
+
+ "huawei-virtual2:stable/euphrates": [13862.85],
+
+ "huawei-virtual4:stable/fraser": [13847.155],
+
+ "huawei-virtual2:stable/fraser": [13702.92],
+
+ "huawei-virtual1:stable/fraser": [13496.45],
+
+ "intel-pod5:stable/euphrates": [13280.32],
+
+ "ericsson-virtual3:stable/fraser": [12733.19],
+
+ "huawei-virtual9:stable/euphrates": [12559.445],
+
+ "huawei-virtual8:stable/euphrates": [8998.02],
+
+ "arm-pod5:stable/euphrates": [4388.875],
+
+ "arm-pod5:stable/fraser": [4326.11],
+
+ "arm-pod6:stable/euphrates": [4260.2],
+
+ "arm-pod6:stable/fraser": [3809.885]
+
+}
+
+TC014
+-----
+
+Unixbench is used to evaluate IaaS processing speed in terms of benchmark
+scores for single-CPU and parallel runs. Below are the single-CPU scores,
+which range from about 715 to 3737. In general, the single-CPU scores of the
+two releases look similar.
+
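+For illustration, a comparable manual Unixbench run could be (the number of
+copies per pass is an assumption)::
+
+    ./Run -c 1 -c $(nproc)
+
+which executes one single-copy pass and one pass with one copy per CPU.
+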
+{
+
+ "lf-pod2:stable/fraser": [3737.6],
+
+ "lf-pod2:stable/euphrates": [3723.95],
+
+ "lf-pod1:stable/fraser": [3702.7],
+
+ "lf-pod1:stable/euphrates": [3669],
+
+ "intel-pod5:stable/euphrates": [3388.6],
+
+ "intel-pod18:stable/euphrates": [3298.4],
+
+ "flex-pod2:stable/euphrates": [3208.6],
+
+ "ericsson-pod1:stable/fraser": [3131.6],
+
+ "intel-pod18:stable/fraser": [3098.1],
+
+ "ericsson-virtual1:stable/euphrates": [2988.9],
+
+ "zte-pod2:stable/fraser": [2831.4],
+
+ "ericsson-pod1:stable/euphrates": [2669.1],
+
+ "ericsson-virtual4:stable/euphrates": [2598.5],
+
+ "ericsson-virtual2:stable/fraser": [2559.7],
+
+ "ericsson-virtual3:stable/euphrates": [2553.15],
+
+ "huawei-pod2:stable/euphrates": [2531.2],
+
+ "huawei-pod2:stable/fraser": [2528.9],
+
+ "ericsson-virtual4:stable/fraser": [2527.8],
+
+ "ericsson-virtual2:stable/euphrates": [2526.9],
+
+ "huawei-virtual4:stable/euphrates": [2407.4],
+
+ "huawei-virtual3:stable/fraser": [2379.1],
+
+ "huawei-virtual3:stable/euphrates": [2374.6],
+
+ "huawei-virtual4:stable/fraser": [2362.1],
+
+ "huawei-virtual2:stable/euphrates": [2326.4],
+
+ "huawei-virtual9:stable/euphrates": [2324.95],
+
+ "huawei-virtual1:stable/euphrates": [2302.6],
+
+ "huawei-virtual2:stable/fraser": [2299.3],
+
+ "huawei-pod12:stable/euphrates": [2232.2],
+
+ "huawei-pod12:stable/fraser": [2229],
+
+ "huawei-virtual1:stable/fraser": [2171.3],
+
+ "ericsson-virtual3:stable/fraser": [2104.8],
+
+ "huawei-virtual8:stable/euphrates": [2085.3],
+
+ "arm-pod5:stable/fraser": [1764.2],
+
+ "arm-pod5:stable/euphrates": [1754.4],
+
+ "arm-pod6:stable/euphrates": [716.15],
+
+ "arm-pod6:stable/fraser": [715.4]
+
+}
+
+TC069
+-----
+
+As the block size changes from 1 KB to 512 KB, the memory write bandwidth
+tends to first increase and then decrease within each test run. Below are the
+scores for a 32 MB block array.
+
+{
+
+ "intel-pod18:stable/euphrates": [18871.79],
+
+ "intel-pod18:stable/fraser": [16939.24],
+
+ "intel-pod5:stable/euphrates": [16055.79],
+
+ "arm-pod6:stable/euphrates": [13327.02],
+
+ "arm-pod6:stable/fraser": [11895.71],
+
+ "flex-pod2:stable/euphrates": [9384.585],
+
+ "zte-pod2:stable/fraser": [9375.33],
+
+ "ericsson-pod1:stable/euphrates": [9331.535],
+
+ "huawei-pod12:stable/euphrates": [9164.88],
+
+ "ericsson-pod1:stable/fraser": [9140.42],
+
+ "huawei-pod2:stable/euphrates": [9026.52],
+
+ "huawei-pod12:stable/fraser": [8993.37],
+
+ "huawei-virtual9:stable/euphrates": [8825.805],
+
+ "huawei-pod2:stable/fraser": [8794.01],
+
+ "huawei-virtual2:stable/fraser": [7670.21],
+
+ "ericsson-virtual1:stable/euphrates": [7615.97],
+
+ "ericsson-virtual4:stable/euphrates": [7539.23],
+
+ "arm-pod5:stable/fraser": [7479.32],
+
+ "arm-pod5:stable/euphrates": [7403.38],
+
+ "huawei-virtual3:stable/euphrates": [7247.89],
+
+ "ericsson-virtual2:stable/fraser": [7219.21],
+
+ "huawei-virtual2:stable/euphrates": [7205.35],
+
+ "huawei-virtual1:stable/euphrates": [7196.405],
+
+ "ericsson-virtual3:stable/euphrates": [7173.72],
+
+ "huawei-virtual4:stable/euphrates": [7131.47],
+
+ "ericsson-virtual2:stable/euphrates": [7129.08],
+
+ "huawei-virtual4:stable/fraser": [7059.045],
+
+ "huawei-virtual3:stable/fraser": [7023.57],
+
+ "lf-pod1:stable/euphrates": [6928.18],
+
+ "lf-pod2:stable/euphrates": [6875.88],
+
+ "lf-pod2:stable/fraser": [6834.7],
+
+ "lf-pod1:stable/fraser": [6775.27],
+
+ "ericsson-virtual4:stable/fraser": [6522.86],
+
+ "ericsson-virtual3:stable/fraser": [5835.59],
+
+ "huawei-virtual8:stable/euphrates": [5729.705],
+
+ "huawei-virtual1:stable/fraser": [5617.12]
+
+}
+
+TC082
+-----
+
+For this test case, we use perf to measure context-switches under load.
+High context switch rates are not themselves an issue, but they may point the
+way to a more significant problem.
+
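+For illustration, context switches can be sampled system-wide with perf
+(assumed command line)::
+
+    perf stat -e context-switches -a sleep 10
+
+which counts context switches across all CPUs over a 10 second window.
+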
+{
+
+ "zte-pod2:stable/fraser": [306.5],
+
+ "huawei-pod12:stable/euphrates": [316],
+
+ "lf-pod2:stable/fraser": [337.5],
+
+ "intel-pod18:stable/euphrates": [340],
+
+ "intel-pod18:stable/fraser": [343.5],
+
+ "intel-pod5:stable/euphrates": [357.5],
+
+ "ericsson-pod1:stable/euphrates": [384],
+
+ "lf-pod2:stable/euphrates": [394.5],
+
+ "huawei-pod12:stable/fraser": [399],
+
+ "lf-pod1:stable/euphrates": [435],
+
+ "lf-pod1:stable/fraser": [454],
+
+ "flex-pod2:stable/euphrates": [476],
+
+ "huawei-pod2:stable/euphrates": [518],
+
+ "huawei-pod2:stable/fraser": [544.5],
+
+ "arm-pod5:stable/euphrates": [869.5],
+
+ "huawei-virtual9:stable/euphrates": [1002],
+
+ "huawei-virtual4:stable/fraser": [1138],
+
+ "huawei-virtual4:stable/euphrates": [1174],
+
+ "huawei-virtual3:stable/euphrates": [1239],
+
+ "ericsson-pod1:stable/fraser": [1305],
+
+ "huawei-virtual2:stable/euphrates": [1430],
+
+ "huawei-virtual3:stable/fraser": [1433],
+
+ "huawei-virtual1:stable/fraser": [1470],
+
+ "huawei-virtual1:stable/euphrates": [1489],
+
+ "arm-pod6:stable/fraser": [1738.5],
+
+ "arm-pod6:stable/euphrates": [1883.5]
+
+}
+
+TC083
+-----
+
+TC083 measures network latency and throughput between VMs using netperf.
+The test results shown below are for UDP throughput.
+
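+For illustration, a comparable manual measurement with netperf could be
+(assumed command lines)::
+
+    netserver                                  # on the target VM
+    netperf -H <IP of the target VM> -t UDP_STREAM -l 60
+
+where the UDP_STREAM test reports the achieved throughput in 10^6 bits/s.
+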
+{
+
+ "lf-pod1:stable/euphrates": [2204.42],
+
+ "lf-pod2:stable/fraser": [1893.39],
+
+ "intel-pod18:stable/euphrates": [1835.55],
+
+ "lf-pod2:stable/euphrates": [1676.705],
+
+ "intel-pod5:stable/euphrates": [1612.555],
+
+ "zte-pod2:stable/fraser": [1543.995],
+
+ "lf-pod1:stable/fraser": [1480.86],
+
+ "intel-pod18:stable/fraser": [1417.015],
+
+ "flex-pod2:stable/euphrates": [1370.23],
+
+ "huawei-pod12:stable/euphrates": [1300.12]
+
+}
diff --git a/docs/release/results/euphrates_fraser_comparsion.rst b/docs/release/results/euphrates_fraser_comparsion.rst
deleted file mode 100644
index 222dc8bb0..000000000
--- a/docs/release/results/euphrates_fraser_comparsion.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-=======================================================
-Test results analysis for Euphrates and Fraser releases
-=======================================================
-
diff --git a/docs/release/results/images/tc002_pod_fraser.png b/docs/release/results/images/tc002_pod_fraser.png
new file mode 100644
index 000000000..797dc3136
--- /dev/null
+++ b/docs/release/results/images/tc002_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc002_scenario_fraser.png b/docs/release/results/images/tc002_scenario_fraser.png
new file mode 100644
index 000000000..ff42e6516
--- /dev/null
+++ b/docs/release/results/images/tc002_scenario_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc010_pod_fraser.png b/docs/release/results/images/tc010_pod_fraser.png
new file mode 100644
index 000000000..23367d34a
--- /dev/null
+++ b/docs/release/results/images/tc010_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc010_scenario_fraser.png b/docs/release/results/images/tc010_scenario_fraser.png
new file mode 100644
index 000000000..a481a595f
--- /dev/null
+++ b/docs/release/results/images/tc010_scenario_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc011_pod_fraser.png b/docs/release/results/images/tc011_pod_fraser.png
new file mode 100644
index 000000000..82dc9c763
--- /dev/null
+++ b/docs/release/results/images/tc011_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc011_scenario_fraser.png b/docs/release/results/images/tc011_scenario_fraser.png
new file mode 100644
index 000000000..226d0b856
--- /dev/null
+++ b/docs/release/results/images/tc011_scenario_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc012_pod_fraser.png b/docs/release/results/images/tc012_pod_fraser.png
new file mode 100644
index 000000000..66e79be85
--- /dev/null
+++ b/docs/release/results/images/tc012_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc012_scenario_fraser.png b/docs/release/results/images/tc012_scenario_fraser.png
new file mode 100644
index 000000000..4ef44119a
--- /dev/null
+++ b/docs/release/results/images/tc012_scenario_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc014_pod_fraseer.png b/docs/release/results/images/tc014_pod_fraseer.png
new file mode 100644
index 000000000..697201d76
--- /dev/null
+++ b/docs/release/results/images/tc014_pod_fraseer.png
Binary files differ
diff --git a/docs/release/results/images/tc014_scenario_fraser.png b/docs/release/results/images/tc014_scenario_fraser.png
new file mode 100644
index 000000000..f7865dcdc
--- /dev/null
+++ b/docs/release/results/images/tc014_scenario_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc069_pod_fraser.png b/docs/release/results/images/tc069_pod_fraser.png
new file mode 100644
index 000000000..1cba192d7
--- /dev/null
+++ b/docs/release/results/images/tc069_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc069_scenario_fraser.png b/docs/release/results/images/tc069_scenario_fraser.png
new file mode 100644
index 000000000..f988b90c6
--- /dev/null
+++ b/docs/release/results/images/tc069_scenario_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc082_pod_fraser.png b/docs/release/results/images/tc082_pod_fraser.png
new file mode 100644
index 000000000..d54ab901a
--- /dev/null
+++ b/docs/release/results/images/tc082_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/images/tc083_pod_fraser.png b/docs/release/results/images/tc083_pod_fraser.png
new file mode 100644
index 000000000..942cc2074
--- /dev/null
+++ b/docs/release/results/images/tc083_pod_fraser.png
Binary files differ
diff --git a/docs/release/results/index.rst b/docs/release/results/index.rst
index 3ec9e1cff..63445fd1a 100644
--- a/docs/release/results/index.rst
+++ b/docs/release/results/index.rst
@@ -14,4 +14,4 @@ Yardstick test results
.. include:: ./overview.rst
.. include:: ./results.rst
-.. include:: ./euphrates_fraser_comparsion.rst
+.. include:: ./euphrates_fraser_comparison.rst
diff --git a/docs/release/results/results.rst b/docs/release/results/results.rst
index c75f5ae94..0ed92f867 100644
--- a/docs/release/results/results.rst
+++ b/docs/release/results/results.rst
@@ -23,6 +23,7 @@ Scenario Results
The following documents contain results of Yardstick test cases executed on
OPNFV labs, triggered by OPNFV CI pipeline, documented per test case.
+For hardware details of OPNFV labs, please visit: https://wiki.opnfv.org/display/pharos/Community+Labs
.. toctree::
:maxdepth: 1
@@ -38,6 +39,7 @@ OPNFV labs, triggered by OPNFV CI pipeline, documented per test case.
Test results of executed tests are avilable in Dashboard_ and logs in Jenkins_.
+Test results for the Fraser release were collected from April 10, 2018 to May 13, 2018.
Feature Test Results
====================
diff --git a/docs/release/results/tc002-network-latency.rst b/docs/release/results/tc002-network-latency.rst
index 722423473..064983bec 100644
--- a/docs/release/results/tc002-network-latency.rst
+++ b/docs/release/results/tc002-network-latency.rst
@@ -315,3 +315,211 @@ The influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [0.42],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [0.557],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [0.5765],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [0.582],
+
+ "os-odl-bgpvpn-ha:lf-pod1:apex": [0.678],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [0.7075],
+
+ "os-nosdn-calipso-noha:lf-pod1:apex": [0.713],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [0.7155],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [0.732],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [0.7415],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [0.7565],
+
+ "os-nosdn-ovs-ha:arm-pod6:fuel": [0.8015],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [0.908],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [0.9165],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [0.969],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [0.9765],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [1.0245],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [1.0495],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [1.1645],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [1.206],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [1.236],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [1.241],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [1.2805],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [1.286],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [1.299],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [1.305],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [1.309],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [1.314],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [1.431],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [1.457],
+
+ "os-odl-nofeature-ha:zte-pod2:daisy": [1.517],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [1.576],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [1.592],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [1.714],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [1.809],
+
+ "os-nosdn-bar-noha:huawei-virtual4:compass": [1.81],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [1.8505],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [1.8895],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [1.909],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [1.925],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [1.964],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [1.9755],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [1.9765],
+
+ "os-nosdn-bar-noha:huawei-virtual3:compass": [1.9915],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [1.9925],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [2.0265],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [2.106],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [2.124],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [2.185],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [2.281],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [2.432],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [2.483],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [2.524],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [3.9175],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [4.338],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [4.641]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc002_scenario_fraser.png
+ :width: 800px
+ :alt: TC002 influence of scenario
+
+{
+
+ "os-odl-bgpvpn-ha": [0.678],
+
+ "os-nosdn-calipso-noha": [0.713],
+
+ "os-nosdn-ovs-ha": [0.7245],
+
+ "os-odl_l3-nofeature-ha": [0.7435],
+
+ "os-odl-sfc-ha": [0.796],
+
+ "os-nosdn-kvm-ha": [1.059],
+
+ "os-nosdn-bar-ha": [1.083],
+
+ "os-nosdn-ovs-noha": [1.09],
+
+ "os-odl-sfc-noha": [1.196],
+
+ "os-nosdn-nofeature-noha": [1.26],
+
+ "os-nosdn-nofeature-ha": [1.291],
+
+ "os-odl_l3-nofeature-noha": [1.308],
+
+ "os-nosdn-bar-noha": [1.4125],
+
+ "os-nosdn-kvm-noha": [1.4475],
+
+ "os-odl-nofeature-ha": [1.508],
+
+ "os-odl-nofeature-noha": [1.914],
+
+ "os-nosdn-openbaton-ha": [1.9755]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc002_pod_fraser.png
+ :width: 800px
+ :alt: TC002 influence of the POD
+
+{
+
+ "huawei-pod2": [0.677],
+
+ "lf-pod1": [0.725],
+
+ "ericsson-pod1": [0.9165],
+
+ "huawei-pod12": [1.0465],
+
+ "lf-pod2": [1.2325],
+
+ "zte-pod2": [1.395],
+
+ "ericsson-virtual4": [1.582],
+
+ "huawei-virtual4": [1.697],
+
+ "arm-pod5": [1.714],
+
+ "huawei-virtual3": [1.716],
+
+ "intel-pod18": [1.856],
+
+ "huawei-virtual2": [1.964],
+
+ "huawei-virtual1": [1.9765],
+
+ "arm-pod6": [2.209],
+
+ "ericsson-virtual3": [3.9175],
+
+ "ericsson-virtual2": [4.004]
+
+}
diff --git a/docs/release/results/tc010-memory-read-latency.rst b/docs/release/results/tc010-memory-read-latency.rst
index 9a296b7a0..81559d647 100644
--- a/docs/release/results/tc010-memory-read-latency.rst
+++ b/docs/release/results/tc010-memory-read-latency.rst
@@ -297,3 +297,214 @@ The influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [6.8675],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [6.991],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [7.5535],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [7.571],
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [7.635],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [8.153],
+
+ "os-odl-nofeature-ha:zte-pod2:daisy": [8.1935],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [9.1715],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [9.1875],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [9.241],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [9.255],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [9.388],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [9.5825],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [9.5875],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [9.6345],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [9.6535],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [9.743],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [9.82],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [9.8715],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [9.982],
+
+ "os-nosdn-bar-noha:huawei-virtual4:compass": [10.0195],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [10.1285],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [10.1335],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [10.22],
+
+ "os-nosdn-bar-noha:huawei-virtual3:compass": [10.2845],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [10.4185],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [10.4555],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [10.5635],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [10.6515],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [10.9355],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [11.2015],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [12.984],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [13.306],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [13.721],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [14.133],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [14.158],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [14.375],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [14.396],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [14.9375],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [14.957],
+
+ "os-nosdn-calipso-noha:lf-pod1:apex": [16.3445],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [16.478],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [16.4895],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [16.55],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [16.5665],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [16.598],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [16.805],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [16.9095],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [17.494],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [17.4995],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [18.094],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [18.744],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [19.8235],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [20.758],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [26.5245],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [55.667],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [56.175],
+
+ "os-nosdn-ovs-ha:arm-pod6:fuel": [57.86]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc010_scenario_fraser.png
+ :width: 800px
+ :alt: TC010 influence of scenario
+
+{
+
+ "os-nosdn-openbaton-ha": [7.5535],
+
+ "os-odl-nofeature-ha": [8.2535],
+
+ "os-odl-sfc-ha": [9.251],
+
+ "os-nosdn-nofeature-ha": [9.464],
+
+ "os-odl-sfc-noha": [9.8265],
+
+ "os-odl_l3-nofeature-ha": [9.836],
+
+ "os-odl_l3-nofeature-noha": [10.0565],
+
+ "os-nosdn-nofeature-noha": [10.079],
+
+ "os-nosdn-kvm-ha": [10.418],
+
+ "os-nosdn-ovs-noha": [10.43],
+
+ "os-nosdn-kvm-noha": [10.603],
+
+ "os-nosdn-bar-noha": [11.067],
+
+ "os-nosdn-bar-ha": [13.911],
+
+ "os-odl-nofeature-noha": [14.046],
+
+ "os-nosdn-calipso-noha": [16.3445],
+
+ "os-nosdn-ovs-ha": [16.478],
+
+ "os-ovn-nofeature-noha": [16.805]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc010_pod_fraser.png
+ :width: 800px
+ :alt: TC010 influence of the POD
+
+{
+
+ "ericsson-pod1": [7.0645],
+
+ "intel-pod18": [7.4465],
+
+ "zte-pod2": [8.1865],
+
+ "huawei-pod2": [9.236],
+
+ "huawei-pod12": [9.615],
+
+ "ericsson-virtual2": [9.8925],
+
+ "huawei-virtual2": [10.22],
+
+ "ericsson-virtual4": [10.5465],
+
+ "ericsson-virtual3": [10.9355],
+
+ "huawei-virtual3": [10.95],
+
+ "huawei-virtual4": [11.557],
+
+ "lf-pod2": [16.5595],
+
+ "lf-pod1": [16.8395],
+
+ "arm-pod5": [18.744],
+
+ "huawei-virtual1": [19.8235],
+
+ "arm-pod6": [55.804]
+
+}
diff --git a/docs/release/results/tc011-packet-delay-variation.rst b/docs/release/results/tc011-packet-delay-variation.rst
index b07ea8980..f255b50ca 100644
--- a/docs/release/results/tc011-packet-delay-variation.rst
+++ b/docs/release/results/tc011-packet-delay-variation.rst
@@ -260,3 +260,173 @@ The influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [1],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [1],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [1],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [1],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [1],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [1511.5],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [2996],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [2997],
+
+ "os-nosdn-bar-noha:huawei-virtual4:compass": [2997],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [2997],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [2997],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [2997],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [2997],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [2997],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [2997],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [3000],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [3003],
+
+ "os-nosdn-bar-noha:huawei-virtual3:compass": [3011],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [3015.5],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [3019],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [3021],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [3021],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [3022],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [3022],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [3022],
+
+ "os-nosdn-calipso-noha:lf-pod1:apex": [3022],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [3022],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [3022],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [3022],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [3022],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [3022],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [3022],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [3022],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [3022],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [3022],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [3022],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [3022],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [3022],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [3022],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [3022.5],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [3023],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [3023],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [3025]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc011_scenario_fraser.png
+ :width: 800px
+ :alt: TC011 influence of scenario
+
+{
+
+ "os-ovn-nofeature-noha": [1511.5],
+
+ "os-nosdn-kvm-noha": [2997],
+
+ "os-odl-sfc-ha": [3021],
+
+ "os-nosdn-bar-ha": [3022],
+
+ "os-nosdn-bar-noha": [3022],
+
+ "os-nosdn-calipso-noha": [3022],
+
+ "os-nosdn-kvm-ha": [3022],
+
+ "os-nosdn-nofeature-ha": [3022],
+
+ "os-nosdn-nofeature-noha": [3022],
+
+ "os-nosdn-openbaton-ha": [3022],
+
+ "os-odl_l3-nofeature-ha": [3022],
+
+ "os-odl_l3-nofeature-noha": [3022],
+
+ "os-odl-sfc-noha": [3023]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc011_pod_fraser.png
+ :width: 800px
+ :alt: TC011 influence of the POD
+
+{
+
+ "arm-pod6": [1],
+
+ "ericsson-pod1": [1],
+
+ "ericsson-virtual2": [1],
+
+ "ericsson-virtual3": [1],
+
+ "lf-pod2": [1],
+
+ "huawei-virtual1": [2997],
+
+ "huawei-virtual3": [2999],
+
+ "huawei-virtual4": [3002],
+
+ "huawei-pod12": [3022],
+
+ "huawei-pod2": [3022],
+
+ "intel-pod18": [3022],
+
+ "lf-pod1": [3022],
+
+ "zte-pod2": [3022],
+
+ "huawei-virtual2": [3025]
+
+}
diff --git a/docs/release/results/tc012-memory-read-write-bandwidth.rst b/docs/release/results/tc012-memory-read-write-bandwidth.rst
index c28eb1f3c..71d69cde9 100644
--- a/docs/release/results/tc012-memory-read-write-bandwidth.rst
+++ b/docs/release/results/tc012-memory-read-write-bandwidth.rst
@@ -297,3 +297,217 @@ The influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [21421.795],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [21075],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [21017.44],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [20991.46],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [20812.405],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [20694.035],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [20672.765],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [20269.65],
+
+ "os-nosdn-calipso-noha:lf-pod1:apex": [20186.32],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [19959.915],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [19719.38],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [19654.505],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [19391.145],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [19378.64],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [19103.43],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [18688.695],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [18557.95],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [17088.61],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [17040.78],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [16057.235],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [15622.355],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [15422.235],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [15403.09],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [15141.58],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [14922.37],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [14864.195],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [14856.295],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [14796.035],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [14484.375],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [14441.955],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [14373],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [14330.44],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [14320.305],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [14253.715],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [14203.655],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [14179.93],
+
+ "os-odl-nofeature-ha:zte-pod2:daisy": [14177.135],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [14150.825],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [14100.87],
+
+ "os-nosdn-bar-noha:huawei-virtual4:compass": [14033.36],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [13963.73],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [13874.775],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [13805.65],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [13754.63],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [13702.92],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [13638.115],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [13637.83],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [13635.66],
+
+ "os-nosdn-bar-noha:huawei-virtual3:compass": [13635.58],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [13544.95],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [13514.27],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [13496.45],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [13475.38],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [12733.19],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [12682.805],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [4326.11],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [3824.13],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [3797.795],
+
+ "os-nosdn-ovs-ha:arm-pod6:fuel": [3749.91]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc012_scenario_fraser.png
+ :width: 800px
+ :alt: TC012 influence of scenario
+
+{
+
+ "os-ovn-nofeature-noha": [20694.035],
+
+ "os-nosdn-calipso-noha": [20186.32],
+
+ "os-nosdn-openbaton-ha": [18557.95],
+
+ "os-nosdn-ovs-ha": [17048.17],
+
+ "os-odl-nofeature-noha": [16191.125],
+
+ "os-nosdn-ovs-noha": [15790.32],
+
+ "os-nosdn-bar-ha": [14833.97],
+
+ "os-odl-sfc-ha": [14828.72],
+
+ "os-odl_l3-nofeature-ha": [14801.25],
+
+ "os-nosdn-kvm-ha": [14700.1],
+
+ "os-nosdn-nofeature-ha": [14610.48],
+
+ "os-nosdn-nofeature-noha": [14555.975],
+
+ "os-odl-sfc-noha": [14508.14],
+
+ "os-nosdn-bar-noha": [14395.22],
+
+ "os-odl-nofeature-ha": [14231.245],
+
+ "os-odl_l3-nofeature-noha": [14161.58],
+
+ "os-nosdn-kvm-noha": [13845.685]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc012_pod_fraser.png
+ :width: 800px
+ :alt: TC012 influence of the POD
+
+{
+
+ "lf-pod1": [20552.9],
+
+ "lf-pod2": [20058.925],
+
+ "ericsson-pod1": [18930.78],
+
+ "intel-pod18": [18757.545],
+
+ "ericsson-virtual4": [15389.465],
+
+ "ericsson-virtual2": [15343.79],
+
+ "huawei-pod2": [14870.78],
+
+ "zte-pod2": [14157.99],
+
+ "huawei-pod12": [14126.99],
+
+ "huawei-virtual3": [13929.67],
+
+ "huawei-virtual4": [13847.155],
+
+ "huawei-virtual2": [13702.92],
+
+ "huawei-virtual1": [13496.45],
+
+ "ericsson-virtual3": [12733.19],
+
+ "arm-pod5": [4326.11],
+
+ "arm-pod6": [3809.885]
+
+}
diff --git a/docs/release/results/tc014-cpu-processing-speed.rst b/docs/release/results/tc014-cpu-processing-speed.rst
index 34d4ad0f9..a2eeb6302 100644
--- a/docs/release/results/tc014-cpu-processing-speed.rst
+++ b/docs/release/results/tc014-cpu-processing-speed.rst
@@ -296,3 +296,217 @@ The influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [3747.3],
+
+ "os-nosdn-calipso-noha:lf-pod1:apex": [3727.2],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [3726.5],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [3723.8],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [3718.9],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [3717.75],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [3706.5],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [3704.9],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [3687.7],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [3635.4],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [3632.55],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [3569],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [3432.1],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [3133.85],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [3079.8],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [3074.75],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [2976.2],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [2910.95],
+
+ "os-odl-nofeature-ha:zte-pod2:daisy": [2801.1],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [2603],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [2559.7],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [2539.1],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [2530.5],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [2529.4],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [2528.9],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [2527.8],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [2527.4],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [2517.8],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [2472.4],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [2469.1],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [2452.05],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [2438.7],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [2418.4],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [2404.35],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [2391],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [2376.75],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [2376.2],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [2359.45],
+
+ "os-nosdn-bar-noha:huawei-virtual4:compass": [2353.3],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [2351.9],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [2339.4],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [2335.6],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [2328],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [2324.5],
+
+ "os-nosdn-bar-noha:huawei-virtual3:compass": [2317.3],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [2313.95],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [2308.1],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [2299.3],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [2250.4],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [2229.7],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [2228.8],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [2171.3],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [2104.8],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [1961.35],
+
+ "os-nosdn-ovs-ha:arm-pod5:fuel": [1764.2],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [1730.95],
+
+ "os-nosdn-ovs-ha:arm-pod6:fuel": [715.55],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [715.4],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [715.25]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc014_scenario_fraser.png
+ :width: 800px
+ :alt: TC014 influence of scenario
+
+{
+
+ "os-nosdn-calipso-noha": [3727.2],
+
+ "os-ovn-nofeature-noha": [3723.8],
+
+ "os-odl-nofeature-noha": [3128.05],
+
+ "os-nosdn-openbaton-ha": [2976.2],
+
+ "os-nosdn-ovs-ha": [2814.5],
+
+ "os-odl-nofeature-ha": [2801.4],
+
+ "os-nosdn-nofeature-ha": [2649.7],
+
+ "os-nosdn-ovs-noha": [2587.3],
+
+ "os-odl_l3-nofeature-ha": [2528.45],
+
+ "os-odl-sfc-ha": [2527.6],
+
+ "os-nosdn-bar-ha": [2526.55],
+
+ "os-nosdn-kvm-ha": [2516.95],
+
+ "os-odl-sfc-noha": [2453.65],
+
+ "os-nosdn-bar-noha": [2447.7],
+
+ "os-nosdn-nofeature-noha": [2443.85],
+
+ "os-odl_l3-nofeature-noha": [2394.3],
+
+ "os-nosdn-kvm-noha": [2379.7]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc014_pod_fraser.png
+ :width: 800px
+ :alt: TC014 influence of the POD
+
+{
+
+ "lf-pod2": [3737.6],
+
+ "lf-pod1": [3702.7],
+
+ "ericsson-pod1": [3131.6],
+
+ "intel-pod18": [3098.1],
+
+ "zte-pod2": [2831.4],
+
+ "ericsson-virtual2": [2559.7],
+
+ "huawei-pod2": [2528.9],
+
+ "ericsson-virtual4": [2527.8],
+
+ "huawei-virtual3": [2379.1],
+
+ "huawei-virtual4": [2362.1],
+
+ "huawei-virtual2": [2299.3],
+
+ "huawei-pod12": [2229],
+
+ "huawei-virtual1": [2171.3],
+
+ "ericsson-virtual3": [2104.8],
+
+ "arm-pod5": [1764.2],
+
+ "arm-pod6": [715.4]
+
+}
diff --git a/docs/release/results/tc069-memory-write-bandwidth.rst b/docs/release/results/tc069-memory-write-bandwidth.rst
index 06e2ec922..4cd3be3b0 100644
--- a/docs/release/results/tc069-memory-write-bandwidth.rst
+++ b/docs/release/results/tc069-memory-write-bandwidth.rst
@@ -298,3 +298,219 @@ The influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [18382.49],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [16774.52],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [16680.305],
+
+ "os-nosdn-ovs-ha:arm-pod6:fuel": [11925.22],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [11895.71],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [11880.7],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [9471.095],
+
+ "os-odl-nofeature-ha:zte-pod2:daisy": [9375.33],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [9372.95],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [9174.36],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [9051.57],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [8894.74],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [8857.23],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [8855.8],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [8840.94],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [8826.23],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [8039.48],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [7670.21],
+
+ "os-nosdn-ovs-ha:arm-pod5:fuel": [7590.9],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [7579.625],
+
+ "os-nosdn-bar-noha:huawei-virtual3:compass": [7511.775],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [7475.16],
+
+ "os-nosdn-bar-noha:huawei-virtual4:compass": [7435.08],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [7426.79],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [7362.8],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [7263.45],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [7262.72],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [7241.07],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [7219.21],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [7174.33],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [7170.795],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [7158.335],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [7122.45],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [7104.9],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [7044.37],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [7011.075],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [6950.28],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [6918.31],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [6903.11],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [6880.98],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [6863.39],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [6851.54],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [6834.75],
+
+ "os-nosdn-calipso-noha:lf-pod1:apex": [6833.92],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [6814.68],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [6809.44],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [6784.48],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [6737.64],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [6708.61],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [6697.2],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [6626.51],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [6609.57],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [6606.87],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [6547.39],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [6465.48],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [6413],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [6409.075],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [6128.79],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [5835.59],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [5617.12]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc069_scenario_fraser.png
+ :width: 800px
+ :alt: TC069 influence of scenario
+
+{
+
+ "os-nosdn-openbaton-ha": [16774.52],
+
+ "os-odl-nofeature-ha": [9363.69],
+
+ "os-nosdn-nofeature-ha": [8878.01],
+
+ "os-odl_l3-nofeature-ha": [8748.4],
+
+ "os-odl-sfc-ha": [8708.045],
+
+ "os-nosdn-nofeature-noha": [7426.79],
+
+ "os-nosdn-kvm-noha": [7230.79],
+
+ "os-odl-sfc-noha": [7224.11],
+
+ "os-odl-nofeature-noha": [7187.84],
+
+ "os-nosdn-ovs-noha": [7044.37],
+
+ "os-nosdn-bar-ha": [6947.87],
+
+ "os-odl_l3-nofeature-noha": [6895.96],
+
+ "os-nosdn-kvm-ha": [6890.92],
+
+ "os-nosdn-calipso-noha": [6833.92],
+
+ "os-nosdn-ovs-ha": [6833.495],
+
+ "os-nosdn-bar-noha": [6811.66],
+
+ "os-ovn-nofeature-noha": [6809.44]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc069_pod_fraser.png
+ :width: 800px
+ :alt: TC069 influence of the POD
+
+{
+
+ "intel-pod18": [16939.24],
+
+ "arm-pod6": [11895.71],
+
+ "zte-pod2": [9375.33],
+
+ "ericsson-pod1": [9140.42],
+
+ "huawei-pod12": [8993.37],
+
+ "huawei-pod2": [8794.01],
+
+ "huawei-virtual2": [7670.21],
+
+ "arm-pod5": [7479.32],
+
+ "ericsson-virtual2": [7219.21],
+
+ "huawei-virtual4": [7059.045],
+
+ "huawei-virtual3": [7023.57],
+
+ "lf-pod2": [6834.7],
+
+ "lf-pod1": [6775.27],
+
+ "ericsson-virtual4": [6522.86],
+
+ "ericsson-virtual3": [5835.59],
+
+ "huawei-virtual1": [5617.12]
+
+}
diff --git a/docs/release/results/tc082-context-switches-under-load.rst b/docs/release/results/tc082-context-switches-under-load.rst
index d8a9f5493..92bc69907 100644
--- a/docs/release/results/tc082-context-switches-under-load.rst
+++ b/docs/release/results/tc082-context-switches-under-load.rst
@@ -70,8 +70,6 @@ The influence of the scenario
:width: 800px
:alt: TC082 influence of scenario
-the influence of the scenario
-
{
"os-nosdn-nofeature-ha": [505],
@@ -88,8 +86,6 @@ The influence of the POD
:width: 800px
:alt: TC082 influence of the POD
-the influence of the POD
-
{
"huawei-pod12": [316],
@@ -127,3 +123,65 @@ the influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [306.5],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [337.5],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [343.5],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [399],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [454],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [544.5],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [1138],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [1305],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [1433],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [1470],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [1738.5]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc082_pod_fraser.png
+ :width: 800px
+ :alt: TC082 influence of the POD
+
+{
+
+ "zte-pod2": [306.5],
+
+ "lf-pod2": [337.5],
+
+ "intel-pod18": [343.5],
+
+ "huawei-pod12": [399],
+
+ "lf-pod1": [454],
+
+ "huawei-pod2": [544.5],
+
+ "huawei-virtual4": [1138],
+
+ "ericsson-pod1": [1305],
+
+ "huawei-virtual3": [1433],
+
+ "huawei-virtual1": [1470],
+
+ "arm-pod6": [1738.5]
+
+}
diff --git a/docs/release/results/tc083-network-throughput-between-vm.rst b/docs/release/results/tc083-network-throughput-between-vm.rst
index f846571a5..0389eaafe 100644
--- a/docs/release/results/tc083-network-throughput-between-vm.rst
+++ b/docs/release/results/tc083-network-throughput-between-vm.rst
@@ -70,8 +70,6 @@ The influence of the scenario
:width: 800px
:alt: TC083 influence of scenario
-the influence of the scenario
-
{
"os-nosdn-nofeature-ha": [1109.12],
@@ -88,8 +86,6 @@ The influence of the POD
:width: 800px
:alt: TC083 influence of the POD
-the influence of the POD
-
{
"lf-pod1": [2204.42],
@@ -127,3 +123,65 @@ the influence of the POD
Fraser release
--------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [1893.39],
+
+ "os-nosdn-nofeature-ha:zte-pod2:daisy": [1543.995],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [1480.86],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [1417.015],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [1028.55],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [1007.65],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [811.795],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [552.95],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [227.655],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [216.63],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [59.255]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc083_pod_fraser.png
+ :width: 800px
+ :alt: TC083 influence of the POD
+
+{
+
+ "lf-pod2": [1893.39],
+
+ "zte-pod2": [1543.995],
+
+ "lf-pod1": [1480.86],
+
+ "intel-pod18": [1417.015],
+
+ "huawei-pod12": [1028.55],
+
+ "huawei-pod2": [1007.65],
+
+ "ericsson-pod1": [811.795],
+
+ "huawei-virtual4": [552.95],
+
+ "arm-pod6": [227.655],
+
+ "huawei-virtual1": [216.63],
+
+ "huawei-virtual3": [59.255]
+
+}
diff --git a/docs/testing/user/userguide/14-nsb-operation.rst b/docs/testing/user/userguide/14-nsb-operation.rst
index 2e741822e..851c6528e 100644
--- a/docs/testing/user/userguide/14-nsb-operation.rst
+++ b/docs/testing/user/userguide/14-nsb-operation.rst
@@ -84,6 +84,116 @@ In this example we have ``TRex xe0 <-> xe0 VNF xe1 <-> xe0 UDP_Replay``
downlink_0:
- xe0
+
+Availability zone
+^^^^^^^^^^^^^^^^^
+
+The configuration of the availability zone is required in cases where the
+location of an exact compute host (or group of compute hosts) needs to be
+specified for the SampleVNF or the traffic generator in the heat test case.
+If this is the case, please follow the instructions below.
+
+.. _`Create a host aggregate`:
+
+1. Create a host aggregate in OpenStack and add the available compute hosts
+ to the aggregate group.
+
+ .. note:: Change the ``<AZ_NAME>`` (availability zone name), ``<AGG_NAME>``
+ (host aggregate name) and ``<HOST>`` (host name of one of the compute hosts)
+ in the commands below.
+
+ .. code-block:: bash
+
+ # create host aggregate
+ openstack aggregate create --zone <AZ_NAME> --property availability_zone=<AZ_NAME> <AGG_NAME>
+ # show available hosts
+ openstack compute service list --service nova-compute
+ # add selected host into the host aggregate
+ openstack aggregate add host <AGG_NAME> <HOST>
+
+2. To specify the OpenStack location (the exact compute host or group of hosts)
+ of the SampleVNF or the traffic generator in the heat test case, use the
+ ``availability_zone`` server configuration option. For example:
+
+ .. note:: The ``<AZ_NAME>`` (availability zone name) should be changed according
+ to the name used during the host aggregate creation steps above.
+
+ .. code-block:: yaml
+
+ context:
+ name: yardstick
+ image: yardstick-samplevnfs
+ ...
+ servers:
+ vnf__0:
+ ...
+ availability_zone: <AZ_NAME>
+ ...
+ tg__0:
+ ...
+ availability_zone: <AZ_NAME>
+ ...
+ networks:
+ ...
+
+There are two example SampleVNF scale-out test cases that use the availability
+zone feature to specify the exact location of the scaled VNFs and traffic
+generators:
+
+.. code-block:: console
+
+ <repo>/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml
+ <repo>/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml
+
+.. note:: This section describes the PROX scale-out test case, but the same
+ procedure applies to the vFW test case.
+
+1. Before running the scale-out test case, make sure the host aggregates are
+ configured in the OpenStack environment. To check this, run the following
+ command:
+
+ .. code-block:: console
+
+ # show configured host aggregates (example)
+ openstack aggregate list
+ +----+------+-------------------+
+ | ID | Name | Availability Zone |
+ +----+------+-------------------+
+ | 4 | agg0 | AZ_NAME_0 |
+ | 5 | agg1 | AZ_NAME_1 |
+ +----+------+-------------------+
+
+2. If no host aggregates are configured, follow the `steps above`__ to
+ configure them.
+
+__ `Create a host aggregate`_
+
+
+3. Run the SampleVNF PROX scale-out test case, specifying the availability
+ zone of each VNF and traffic generator as task arguments.
+
+ .. note:: The ``az_0`` and ``az_1`` values should be changed to match the host
+ aggregates created in OpenStack.
+
+ .. code-block:: console
+
+ yardstick -d task start\
+ <repo>/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml\
+ --task-args='{
+ "num_vnfs": 4, "availability_zone": {
+ "vnf_0": "az_0", "tg_0": "az_1",
+ "vnf_1": "az_0", "tg_1": "az_1",
+ "vnf_2": "az_0", "tg_2": "az_1",
+ "vnf_3": "az_0", "tg_3": "az_1"
+ }
+ }'
+
+ ``num_vnfs`` specifies how many VNFs are going to be deployed in the
+ ``heat`` contexts. The ``vnf_X`` and ``tg_X`` arguments configure the
+ availability zone where each VNF and traffic generator is going to be deployed.
+
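+ Under the hood, the scale-out test case consumes these task arguments through
+ Jinja2 templating. The sketch below is a trimmed excerpt of the scale-out test
+ case added in this change, showing how the ``availability_zone`` dictionary is
+ applied to each server definition (other server options are omitted here):
+
+ .. code-block:: yaml
+
+    {% set availability_zone = availability_zone or {} %}
+    ...
+    servers:
+    {% for vnf_num in range(num_vnfs|int) %}
+      vnf_{{ vnf_num }}:
+        {% if 'vnf_%s'|format(vnf_num) in availability_zone %}
+        availability_zone: "{{ availability_zone['vnf_%s'|format(vnf_num)] }}"
+        {% endif %}
+    {% endfor %}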
+
Collectd KPIs
-------------
@@ -242,7 +352,7 @@ Baremetal
file: /etc/yardstick/nodes/pod.yaml
Scale-Out
---------------------
+---------
VNFs performance data with scale-out helps
@@ -313,3 +423,39 @@ options section.
options:
tg_0:
queues_per_port: 2
+
+
+Standalone configuration
+------------------------
+
+NSB supports certain Standalone deployment configurations.
+Standalone supports provisioning a VM in a standalone virtualised environment using KVM/QEMU.
+There are two types of Standalone contexts available: OVS-DPDK and SRIOV.
+OVS-DPDK uses an OVS network with DPDK drivers.
+SRIOV enables network traffic to bypass the software switch layer of the virtualisation stack.
+
+Standalone with OVS-DPDK
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The SampleVNF image is spawned in a VM on a baremetal server.
+OVS with DPDK is installed on the baremetal server.
+
+.. note:: Ubuntu 17.10 requires DPDK v17.05 or higher; DPDK v17.05 requires OVS v2.8.0.
+
+Default values for OVS-DPDK:
+
+ * queues: 4
+ * lcore_mask: ""
+ * pmd_cpu_mask: "0x6"
+
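+These defaults map onto the ``ovs_properties`` section of the
+``StandaloneOvsDpdk`` context. The sketch below is a trimmed excerpt based on
+the sample OVS test case updated in this change (``version`` and hugepage
+settings are deployment specific and omitted here):
+
+.. code-block:: yaml
+
+   contexts:
+     - type: StandaloneOvsDpdk
+       name: yardstick
+       file: etc/yardstick/nodes/standalone/host_ovs.yaml
+       vm_deploy: True
+       ovs_properties:
+         queues: 4            # number of OVS-DPDK queues
+         lcore_mask: ""       # empty by default
+         pmd_cpu_mask: "0x6"  # PMD thread CPU mask
+         vpath: "/usr/local"
+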
+Sample test case file
+^^^^^^^^^^^^^^^^^^^^^
+
+ 1. Prepare the SampleVNF image and copy it to ``flavor/images``.
+ 2. Prepare context files for TREX and SampleVNF under ``contexts/file``.
+ 3. Add a bridge named ``br-int`` to the baremetal host where the SampleVNF image is deployed.
+ 4. Modify ``networks/phy_port`` according to the baremetal setup.
+ 5. Run the test from:
+
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/nsut/acl/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
+ :language: yaml
diff --git a/nsb_setup.sh b/nsb_setup.sh
index 86796c4d4..3396b82d1 100755
--- a/nsb_setup.sh
+++ b/nsb_setup.sh
@@ -79,4 +79,4 @@ ansible-playbook \
-e img_property="nsb" \
${yardstick_docker_image} \
-e YARD_IMG_ARCH='amd64' ${extra_args}\
- -i yardstick-install-inventory.ini nsb_setup.yml
+ -i install-inventory.ini nsb_setup.yml
diff --git a/samples/vnf_samples/nsut/acl/acl_1rule.yaml b/samples/vnf_samples/nsut/acl/acl_1rule.yaml
index b184a29e2..49066e924 100644
--- a/samples/vnf_samples/nsut/acl/acl_1rule.yaml
+++ b/samples/vnf_samples/nsut/acl/acl_1rule.yaml
@@ -11,37 +11,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-access-list1:
- acl:
- access-list-entries:
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 152.16.40.20/24
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 0.0.0.0/0
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1588
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 0.0.0.0/0
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 152.16.100.20/24
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1589
- acl-name: sample-ipv4-acl
- acl-type: ipv4-acl
+---
+access-list-entries:
+ -
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 152.16.40.20/24
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 0.0.0.0/0
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1588
+ -
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 0.0.0.0/0
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 152.16.100.20/24
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1589
diff --git a/samples/vnf_samples/nsut/acl/acl_rules.yaml b/samples/vnf_samples/nsut/acl/acl_rules.yaml
index b184a29e2..49066e924 100644
--- a/samples/vnf_samples/nsut/acl/acl_rules.yaml
+++ b/samples/vnf_samples/nsut/acl/acl_rules.yaml
@@ -11,37 +11,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-access-list1:
- acl:
- access-list-entries:
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 152.16.40.20/24
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 0.0.0.0/0
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1588
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 0.0.0.0/0
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 152.16.100.20/24
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1589
- acl-name: sample-ipv4-acl
- acl-type: ipv4-acl
+---
+access-list-entries:
+ -
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 152.16.40.20/24
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 0.0.0.0/0
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1588
+ -
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 0.0.0.0/0
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 152.16.100.20/24
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1589
diff --git a/samples/vnf_samples/nsut/acl/acl_worstcaserules.yaml b/samples/vnf_samples/nsut/acl/acl_worstcaserules.yaml
index b184a29e2..6f09bb848 100644
--- a/samples/vnf_samples/nsut/acl/acl_worstcaserules.yaml
+++ b/samples/vnf_samples/nsut/acl/acl_worstcaserules.yaml
@@ -11,37 +11,33 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-access-list1:
- acl:
- access-list-entries:
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 152.16.40.20/24
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 0.0.0.0/0
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1588
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 0.0.0.0/0
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 152.16.100.20/24
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1589
- acl-name: sample-ipv4-acl
- acl-type: ipv4-acl
+---
+access-list-entries:
+ -
+ ace-oper-data:
+ match-counter: 0
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 152.16.40.20/24
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 0.0.0.0/0
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1588
+ -
+ ace-oper-data:
+ match-counter: 0
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 0.0.0.0/0
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 152.16.100.20/24
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1589
diff --git a/samples/vnf_samples/nsut/acl/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/acl/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
index 134b15fb0..00bd186ee 100644
--- a/samples/vnf_samples/nsut/acl/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -42,10 +42,10 @@ scenarios:
contexts:
- name: yardstick
type: Node
- file: /etc/yardstick/nodes/standalone/pod_trex.yaml
+ file: etc/yardstick/nodes/standalone/pod_trex.yaml
- type: StandaloneOvsDpdk
name: yardstick
- file: /etc/yardstick/nodes/standalone/pod_ovs.yaml
+ file: etc/yardstick/nodes/standalone/host_ovs.yaml
vm_deploy: True
ovs_properties:
version:
@@ -56,10 +56,12 @@ contexts:
socket_0: 2048
socket_1: 2048
queues: 4
+ lcore_mask: ""
+ pmd_cpu_mask: "0x6"
vpath: "/usr/local"
flavor:
- images: "/var/lib/libvirt/images/ubuntu.qcow2"
+ images: "/var/lib/libvirt/images/yardstick-nsb-image.img"
ram: 4096
extra_specs:
hw:cpu_sockets: 1
diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
index b4b003680..4b586b4c9 100644
--- a/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
+++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
@@ -38,32 +38,34 @@ nsd:nsd-catalog:
vnfd-id-ref: vnf__0
- id: downlink_0
- name: vnf__0 to tg__0 link 1
+ name: vnf__0 to tg__0 link 2
type: ELAN
vnfd-connection-point-ref:
- - member-vnf-index-ref: '1'
+ - member-vnf-index-ref: '2'
vnfd-connection-point-ref: xe1
vnfd-id-ref: vnf__0
- - member-vnf-index-ref: '2'
+ - member-vnf-index-ref: '1'
vnfd-connection-point-ref: xe1
vnfd-id-ref: tg__0
- - id: downlink_1
- name: vnf__0 to tg__0 link 2
+
+ - id: uplink_1
+ name: tg__0 to vnf__0 link 3
type: ELAN
vnfd-connection-point-ref:
- member-vnf-index-ref: '1'
vnfd-connection-point-ref: xe2
- vnfd-id-ref: vnf__0
+ vnfd-id-ref: tg__0
- member-vnf-index-ref: '2'
vnfd-connection-point-ref: xe2
- vnfd-id-ref: tg__0
- - id: downlink_2
- name: vnf__0 to tg__0 link 3
+ vnfd-id-ref: vnf__0
+
+ - id: downlink_1
+ name: vnf__0 to tg__0 link 4
type: ELAN
vnfd-connection-point-ref:
- - member-vnf-index-ref: '1'
+ - member-vnf-index-ref: '2'
vnfd-connection-point-ref: xe3
vnfd-id-ref: vnf__0
- - member-vnf-index-ref: '2'
+ - member-vnf-index-ref: '1'
vnfd-connection-point-ref: xe3
vnfd-id-ref: tg__0
diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml
new file mode 100644
index 000000000..5f01ecb7e
--- /dev/null
+++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-scale-out.yaml
@@ -0,0 +1,53 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+{% set num_vnfs = get(extra_args, 'num_vnfs', 1) %}
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: prox-tg-topology
+ name: prox-tg-topology
+ short-name: prox-tg-topology
+ description: prox-tg-topology
+ constituent-vnfd:
+{% for vnf_num in range(num_vnfs|int) %}
+ - member-vnf-index: '{{ (vnf_num * 2) + 1 }}'
+ vnfd-id-ref: tg__{{ vnf_num }}
+ VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
+ - member-vnf-index: '{{ (vnf_num * 2) + 2 }}'
+ vnfd-id-ref: vnf__{{ vnf_num }}
+ VNF model: ../../vnf_descriptors/prox_vnf.yaml
+{% endfor %}
+ vld:
+{% for vnf_num in range(num_vnfs|int) %}
+ - id: uplink_{{ vnf_num }}
+ name: tg__{{ vnf_num }} to vnf__{{ vnf_num }} link {{ (vnf_num * 2) + 1 }}
+ type: ELAN
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}'
+ vnfd-connection-point-ref: xe0
+ vnfd-id-ref: tg__{{ vnf_num }}
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}'
+ vnfd-connection-point-ref: xe0
+ vnfd-id-ref: vnf__{{ vnf_num }}
+ - id: downlink_{{ vnf_num }}
+ name: vnf__{{ vnf_num }} to tg__{{ vnf_num }} link {{ (vnf_num * 2) + 2 }}
+ type: ELAN
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}'
+ vnfd-connection-point-ref: xe1
+ vnfd-id-ref: vnf__{{ vnf_num }}
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}'
+ vnfd-connection-point-ref: xe1
+ vnfd-id-ref: tg__{{ vnf_num }}
+{% endfor %}
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_l2fwd-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_l2fwd-4.yaml
index ab067a836..84edcd47d 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_l2fwd-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_l2fwd-4.yaml
@@ -42,9 +42,15 @@ scenarios:
"-t": ""
runner:
- type: Duration
+ type: ProxDuration
+ # sampling interval
+ interval: 1
+ # sampled : yes OR sampled: no (DEFAULT yes)
+ sampled: yes
# we kill after duration, independent of test duration, so set this high
- duration: 300
+ duration: 3100
+ # Confirmation attempts
+ confirmation: 1
context:
type: Node
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-4.yaml
index ece618fa1..eaa940185 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-4.yaml
@@ -91,12 +91,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-scale-up.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-scale-up.yaml
index 5e4ced1e5..0a079275f 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-scale-up.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_acl-scale-up.yaml
@@ -88,14 +88,15 @@ context:
networks:
mgmt:
cidr: '10.0.1.0/24'
- uplink_0:
- cidr: '10.0.2.0/24'
+{% for vport in range(1,vports,2|int) %}
+ uplink_{{ loop.index0 }}:
+ cidr: '10.0.{{ vport+1 }}.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
-{% for vport in range(vports-1|int) %}
- downlink_{{ vport }}:
- cidr: '10.0.{{ vport+3 }}.0/24'
+
+ downlink_{{ loop.index0 }}:
+ cidr: '10.0.{{ vport+2 }}.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
index 67ad5e278..e2092537b 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
@@ -52,7 +52,7 @@ context:
image: yardstick-samplevnfs
user: ubuntu
flavor:
- vcpus: 18
+ vcpus: 18
ram: 20480
disk: 6
extra_specs:
@@ -85,12 +85,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
index 0578bf5f7..b043dd08a 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
@@ -86,12 +86,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd-4.yaml
index a0bc7ef16..55f794325 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd-4.yaml
@@ -81,12 +81,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml
new file mode 100644
index 000000000..88581d2da
--- /dev/null
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-2-scale-out.yaml
@@ -0,0 +1,113 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+{% set num_vnfs = num_vnfs or 1 %}
+{% set availability_zone = availability_zone or {} %}
+---
+schema: "yardstick:task:0.1"
+scenarios:
+-
+ type: NSPerf
+ traffic_profile: ../../traffic_profiles/prox_binsearch.yaml
+ topology: prox-tg-topology-scale-out.yaml
+ extra_args:
+ num_vnfs: {{ num_vnfs }}
+
+ nodes:
+{% for vnf_num in range(num_vnfs|int) %}
+ tg__{{ vnf_num }}: tg_{{ vnf_num }}.yardstick
+ vnf__{{ vnf_num }}: vnf_{{ vnf_num }}.yardstick
+{% endfor %}
+
+ options:
+{% for vnf_num in range(num_vnfs|int) %}
+ vnf__{{ vnf_num }}:
+ prox_path: /opt/nsb_bin/prox
+ prox_config: "configs/handle_l2fwd_multiflow-2.cfg"
+ prox_args:
+ "-t": ""
+
+ tg__{{ vnf_num }}:
+ prox_path: /opt/nsb_bin/prox
+ prox_config: "configs/gen_l2fwd_multiflow-2.cfg"
+ prox_args:
+ "-e": ""
+ "-t": ""
+{% endfor %}
+
+ runner:
+ type: Duration
+ # we kill after duration, independent of test duration, so set this high
+ duration: 300
+
+context:
+ name: yardstick
+ image: yardstick-samplevnfs
+ user: ubuntu
+ flavor:
+ vcpus: 8
+ ram: 20480
+ disk: 10
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 8
+ hw:cpu_threads: 1
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+{% for vnf_num in range(num_vnfs|int) %}
+ vnf_{{ vnf_num }}:
+ floating_ip: true
+ placement: "pgrp1"
+ {% if 'vnf_%s'|format(vnf_num) in availability_zone %}
+ availability_zone: "{{ availability_zone['vnf_%s'|format(vnf_num)] }}"
+ {% endif %}
+ network_ports:
+ mgmt:
+ - mgmt
+ uplink_{{ vnf_num }}:
+ - xe0
+ downlink_{{ vnf_num }}:
+ - xe1
+ tg_{{ vnf_num }}:
+ floating_ip: true
+ placement: "pgrp1"
+ {% if 'tg_%s'|format(vnf_num) in availability_zone %}
+ availability_zone: "{{ availability_zone['tg_%s'|format(vnf_num)] }}"
+ {% endif %}
+ network_ports:
+ mgmt:
+ - mgmt
+ uplink_{{ vnf_num }}:
+ - xe0
+ downlink_{{ vnf_num }}:
+ - xe1
+{% endfor %}
+
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+{% for vnf_num in range(num_vnfs|int) %}
+ uplink_{{ vnf_num }}:
+ cidr: '10.0.{{ (vnf_num * 2) + 2 }}.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+ downlink_{{ vnf_num }}:
+ cidr: '10.0.{{ (vnf_num * 2) + 3 }}.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+{% endfor %}
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-4.yaml
index 321b17373..4a20fad84 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_multiflow-4.yaml
@@ -81,12 +81,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_pktTouch-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_pktTouch-4.yaml
index 05723cab3..a3ac21a59 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_pktTouch-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l2fwd_pktTouch-4.yaml
@@ -81,12 +81,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-4.yaml
index db18949fc..7d7d2b02a 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-4.yaml
@@ -85,12 +85,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-scale-up.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-scale-up.yaml
index 3462d288e..52fe8fcf4 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-scale-up.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_l3fwd-scale-up.yaml
@@ -81,14 +81,15 @@ context:
networks:
mgmt:
cidr: '10.0.1.0/24'
- uplink_0:
- cidr: '10.0.2.0/24'
+{% for vport in range(1,vports,2|int) %}
+ uplink_{{ loop.index0 }}:
+ cidr: '10.0.{{ vport+1 }}.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
-{% for vport in range(vports-1|int) %}
- downlink_{{ vport }}:
- cidr: '10.0.{{ vport+3 }}.0/24'
+
+ downlink_{{ loop.index0 }}:
+ cidr: '10.0.{{ vport+2 }}.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_lb-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_lb-4.yaml
index 736f1c4c1..c34b4d99a 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_lb-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_lb-4.yaml
@@ -84,12 +84,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-4.yaml
index d9cc4bc57..24f7ce4b7 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-4.yaml
@@ -82,12 +82,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-scale-up.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-scale-up.yaml
index ef2894604..6370360e4 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-scale-up.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_mpls_tagging-scale-up.yaml
@@ -79,14 +79,15 @@ context:
networks:
mgmt:
cidr: '10.0.1.0/24'
- uplink_0:
- cidr: '10.0.2.0/24'
+{% for vport in range(1,vports,2|int) %}
+ uplink_{{ loop.index0 }}:
+ cidr: '10.0.{{ vport+1 }}.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
-{% for vport in range(vports-1|int) %}
- downlink_{{ vport }}:
- cidr: '10.0.{{ vport+3 }}.0/24'
+
+ downlink_{{ loop.index0 }}:
+ cidr: '10.0.{{ vport+2 }}.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_vpe-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_vpe-4.yaml
index 4e2e70bb4..1ffd2dcc5 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_vpe-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_vpe-4.yaml
@@ -89,12 +89,12 @@ context:
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_1:
+ uplink_1:
cidr: '10.0.4.0/24'
gateway_ip: 'null'
port_security_enabled: False
enable_dhcp: 'false'
- downlink_2:
+ downlink_1:
cidr: '10.0.5.0/24'
gateway_ip: 'null'
port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/vfw/acl_1rule.yaml b/samples/vnf_samples/nsut/vfw/acl_1rule.yaml
index 6753645ba..f7569b32c 100644
--- a/samples/vnf_samples/nsut/vfw/acl_1rule.yaml
+++ b/samples/vnf_samples/nsut/vfw/acl_1rule.yaml
@@ -11,37 +11,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-access-list1:
- acl:
- access-list-entries:
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 152.16.0.0/24
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 0.0.0.0/0
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1588
- - ace:
- ace-oper-data:
- match-counter: 0
- actions: drop,count
- matches:
- destination-ipv4-network: 0.0.0.0/0
- destination-port-range:
- lower-port: 0
- upper-port: 65535
- source-ipv4-network: 152.16.0.0/24
- source-port-range:
- lower-port: 0
- upper-port: 65535
- rule-name: rule1589
- acl-name: sample-ipv4-acl
- acl-type: ipv4-acl
+---
+access-list-entries:
+ -
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 152.16.0.0/24
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 0.0.0.0/0
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1588
+ -
+ actions: [drop,count]
+ matches:
+ destination-ipv4-network: 0.0.0.0/0
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 152.16.0.0/24
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1589
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml
new file mode 100644
index 000000000..f1408931f
--- /dev/null
+++ b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_out.yaml
@@ -0,0 +1,114 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+{% set num_vnfs = num_vnfs or 1 %}
+{% set availability_zone = availability_zone or {} %}
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/ipv4_throughput_scale_out.yaml
+ topology: vfw_tg_topology_scale_out.yaml
+ extra_args:
+ num_vnfs: {{ num_vnfs }}
+ nodes:
+{% for vnf_num in range(num_vnfs|int) %}
+ tg__{{ vnf_num }}: tg_{{ vnf_num }}.yardstick
+ vnf__{{ vnf_num }}: vnf_{{ vnf_num }}.yardstick
+{% endfor %}
+ options:
+ framesize:
+ uplink: {64B: 100}
+ downlink: {64B: 100}
+ flow:
+ src_ip:
+{% for vnf_num in range(num_vnfs|int) %}
+ - {'tg__{{ vnf_num }}': 'xe0'}
+{% endfor %}
+ dst_ip:
+{% for vnf_num in range(num_vnfs|int) %}
+ - {'tg__{{ vnf_num }}': 'xe1'}
+{% endfor %}
+ count: 1
+ traffic_type: 4
+ rfc2544:
+ allowed_drop_rate: 0.0001 - 0.0001
+{% for vnf_num in range(num_vnfs|int) %}
+ vnf__{{ vnf_num }}:
+ rules: acl_1rule.yaml
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+{% endfor %}
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 35
+context:
+ # put node context first, so we don't HEAT deploy if node has errors
+ name: yardstick
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 6
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
+ user: ubuntu
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+{% for vnf_num in range(num_vnfs|int) %}
+ vnf_{{ vnf_num }}:
+ floating_ip: true
+ placement: "pgrp1"
+ {% if 'vnf_%s'|format(vnf_num) in availability_zone %}
+ availability_zone: "{{ availability_zone['vnf_%s'|format(vnf_num)] }}"
+ {% endif %}
+ network_ports:
+ mgmt:
+ - mgmt
+ uplink_{{ vnf_num }}:
+ - xe0
+ downlink_{{ vnf_num }}:
+ - xe1
+ tg_{{ vnf_num }}:
+ floating_ip: true
+ placement: "pgrp1"
+ {% if 'tg_%s'|format(vnf_num) in availability_zone %}
+ availability_zone: "{{ availability_zone['tg_%s'|format(vnf_num)] }}"
+ {% endif %}
+ network_ports:
+ mgmt:
+ - mgmt
+ uplink_{{ vnf_num }}:
+ - xe0
+ downlink_{{ vnf_num }}:
+ - xe1
+{% endfor %}
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+{% for vnf_num in range(num_vnfs|int) %}
+ uplink_{{ vnf_num }}:
+ cidr: '10.0.{{ (vnf_num * 2) + 2 }}.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+ downlink_{{ vnf_num }}:
+ cidr: '10.0.{{ (vnf_num * 2) + 3 }}.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+{% endfor %}
diff --git a/samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml b/samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml
new file mode 100644
index 000000000..8bd01b7f2
--- /dev/null
+++ b/samples/vnf_samples/nsut/vfw/vfw_tg_topology_scale_out.yaml
@@ -0,0 +1,53 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+{% set num_vnfs = get(extra_args, 'num_vnfs', 1) %}
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: 3tg-topology
+ name: 3tg-topology
+ short-name: 3tg-topology
+ description: 3tg-topology
+ constituent-vnfd:
+{% for vnf_num in range(num_vnfs|int) %}
+ - member-vnf-index: '{{ (vnf_num * 2) + 1 }}'
+ vnfd-id-ref: tg__{{ vnf_num }}
+ VNF model: ../../vnf_descriptors/tg_rfc2544_tpl.yaml
+ - member-vnf-index: '{{ (vnf_num * 2) + 2 }}'
+ vnfd-id-ref: vnf__{{ vnf_num }}
+ VNF model: ../../vnf_descriptors/vfw_vnf.yaml
+{% endfor %}
+ vld:
+{% for vnf_num in range(num_vnfs|int) %}
+ - id: uplink_{{ vnf_num }}
+ name: tg__{{ vnf_num }} to vnf__{{ vnf_num }} link {{ (vnf_num * 2) + 1 }}
+ type: ELAN
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}'
+ vnfd-connection-point-ref: xe0
+ vnfd-id-ref: tg__{{ vnf_num }}
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}'
+ vnfd-connection-point-ref: xe0
+ vnfd-id-ref: vnf__{{ vnf_num }}
+ - id: downlink_{{ vnf_num }}
+ name: vnf__{{ vnf_num }} to tg__{{ vnf_num }} link {{ (vnf_num * 2) + 2 }}
+ type: ELAN
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 2 }}'
+ vnfd-connection-point-ref: xe1
+ vnfd-id-ref: vnf__{{ vnf_num }}
+ - member-vnf-index-ref: '{{ (vnf_num * 2) + 1 }}'
+ vnfd-connection-point-ref: xe1
+ vnfd-id-ref: tg__{{ vnf_num }}
+{% endfor %}
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
index f862abdb7..98b1bf96d 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
index a3218879b..ee0415371 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
index d849ed8ab..19f083646 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
index c03b28d60..95fa0b6d8 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-ixia-scale-out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-ixia-scale-out.yaml
index ad703f291..43f52094a 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-ixia-scale-out.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-ixia-scale-out.yaml
@@ -44,8 +44,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
[% for vnf_num in range(num_vnfs|int) %]
uplink_[[ vnf_num ]]:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-scale-out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-scale-out.yaml
index 75927d40d..a025a6931 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-scale-out.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-cgnapt-scale-out.yaml
@@ -44,8 +44,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
[% for vnf_num in range(num_vnfs|int) %]
uplink_[[ vnf_num ]]:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-correlated-scale-out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-correlated-scale-out.yaml
index 500163205..081d630ac 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-correlated-scale-out.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-correlated-scale-out.yaml
@@ -44,8 +44,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
[% for vnf_num in range(num_vnfs|int) %]
uplink_[[ vnf_num ]]:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-scale-out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-scale-out.yaml
index 78e5f516a..d93bf1145 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-scale-out.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-ixia-scale-out.yaml
@@ -44,8 +44,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
[% for vnf_num in range(num_vnfs|int) %]
uplink_[[ vnf_num ]]:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-out.yaml
index 73c41099f..0e842d48a 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-out.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-out.yaml
@@ -44,8 +44,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
[% for vnf_num in range(num_vnfs|int) %]
uplink_[[ vnf_num ]]:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml
index d2cc18c15..b9e0c8cd1 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml
@@ -44,7 +44,8 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
+ duration: {{ duration }}
+
{% set count = 0 %}
{% for vport in range(vports|int) %}
uplink_{{vport}}:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
index 5b5b4473b..c267e7677 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
@@ -42,8 +42,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-10.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-10.yaml
index 80d0872d5..1ff47a5b8 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-10.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-10.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-2.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-2.yaml
index d6c9164a0..2b3e6f3d1 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-2.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-2.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-4.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-4.yaml
index 55610b048..7df0682ed 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-4.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-4.yaml
@@ -43,8 +43,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-scale-out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-scale-out.yaml
index d455bccea..82c487e1b 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-scale-out.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt-scale-out.yaml
@@ -44,8 +44,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
[% for vnf_num in range(num_vnfs|int) %]
uplink_[[ vnf_num ]]:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
index 61cbd4e4e..809415a00 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
@@ -42,8 +42,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml
new file mode 100644
index 000000000..71e9e817b
--- /dev/null
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_scale_out.yaml
@@ -0,0 +1,102 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# flow definition for ACL tests - 1K flows - ipv4 only
+#
+# the number of flows defines the widest range of parameters
+# for example, if srcip_range=1.0.0.1-1.0.0.255 and dst_ip_range=10.0.0.1-10.0.1.255,
+# it should define only 16 flows
+#
+# there is an assumption that the generated packets will carry a random sequence of the
+# following address pairs:
+# 1. src=1.x.x.x(x.x.x =random from 1..255) dst=10.x.x.x (random from 1..512)
+# 2. src=1.x.x.x(x.x.x =random from 1..255) dst=10.x.x.x (random from 1..512)
+# ...
+# 512. src=1.x.x.x(x.x.x =random from 1..255) dst=10.x.x.x (random from 1..512)
+#
+# not all combinations need to be filled
+# any other field with a random range will be added to the flow definition
+#
+# the example.yaml provides all possibilities for traffic generation
+#
+# the profile defines a public and a private side to provide limited traffic correlation
+# between the private and public sides, in the same way as the IXIA solution does.
+#
+{% set num_vnfs = get(extra_args, 'num_vnfs', 1) %}
+---
+schema: "nsb:traffic_profile:0.1"
+name: rfc2544
+description: Traffic profile to run RFC2544 latency
+traffic_profile:
+ traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
+ frame_rate : 100 # pc of linerate
+ # that specifies a range (e.g. ipv4 address, port)
+{% for vnf_num in range(num_vnfs|int) %}
+uplink_{{ vnf_num }}:
+ ipv4:
+ id: {{ (vnf_num * 2) + 1 }}
+ outer_l2:
+ framesize:
+ 64B: "{{get(imix, 'imix.uplink.64B', '0') }}"
+ 128B: "{{get(imix, 'imix.uplink.128B', '0') }}"
+ 256B: "{{get(imix, 'imix.uplink.256B', '0') }}"
+ 373B: "{{get(imix, 'imix.uplink.373B', '0') }}"
+ 512B: "{{get(imix, 'imix.uplink.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.uplink.570B', '0') }}"
+ 1024B: "{{get(imix, 'imix.uplink.1024B', '0') }}"
+ 1280B: "{{get(imix, 'imix.uplink.1280B', '0') }}"
+ 1400B: "{{get(imix, 'imix.uplink.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.uplink.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.uplink.1518B', '0') }}"
+
+ outer_l3v4:
+ proto: "udp"
+ srcip4: "{{get(flow, 'flow.src_ip_{{ vnf_num }}', '10.0.2.1-10.0.2.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip_{{ vnf_num }}', '10.0.3.1-10.0.3.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+ ttl: 32
+ dscp: 0
+ outer_l4:
+ srcport: "{{get(flow, 'flow.src_port_{{ vnf_num }}', '1234-4321') }}"
+ dstport: "{{get(flow, 'flow.dst_port_{{ vnf_num }}', '2001-4001') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+downlink_{{ vnf_num }}:
+ ipv4:
+ id: {{ (vnf_num * 2) + 2 }}
+ outer_l2:
+ framesize:
+ 64B: "{{ get(imix, 'imix.downlink.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.downlink.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.downlink.256B', '0') }}"
+      373B: "{{ get(imix, 'imix.downlink.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.downlink.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.downlink.570B', '0') }}"
+ 1024B: "{{get(imix, 'imix.downlink.1024B', '0') }}"
+ 1280B: "{{get(imix, 'imix.downlink.1280B', '0') }}"
+ 1400B: "{{get(imix, 'imix.downlink.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.downlink.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.downlink.1518B', '0') }}"
+
+ outer_l3v4:
+ proto: "udp"
+ srcip4: "{{get(flow, 'flow.dst_ip_{{ vnf_num }}', '10.0.3.1-10.0.3.255') }}"
+ dstip4: "{{get(flow, 'flow.src_ip_{{ vnf_num }}', '10.0.2.1-10.0.2.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+ ttl: 32
+ dscp: 0
+ outer_l4:
+ srcport: "{{get(flow, 'flow.dst_port_{{ vnf_num }}', '1234-4321') }}"
+ dstport: "{{get(flow, 'flow.src_port_{{ vnf_num }}', '2001-4001') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+{% endfor %}
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
index 20bc6568d..e113c9de2 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
@@ -42,8 +42,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index 7b66d663b..b34672907 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -28,8 +28,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 8fdfe9b2f..513aefb40 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -28,8 +28,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
-
+ duration: {{ duration }}
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index 4d73b8ffe..aad751549 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -42,7 +42,7 @@ description: Traffic profile to run RFC2544 latency
traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
- # that specifies a range (e.g. ipv4 address, port)
+ injection_time: {{ injection_time }}
uplink_0:
ipv4:
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc001.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc001.yaml
index 4faa0bc5a..8bfe44693 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc001.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc001.yaml
@@ -62,7 +62,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
index 7f8c22943..596db25bb 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
@@ -59,7 +59,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
index 101c4210e..756c14d86 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
@@ -60,7 +60,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
index fe244e81c..cf124360e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
@@ -64,7 +64,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
index 22e576015..c905302e2 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
@@ -78,13 +78,10 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
segmentation_id: {{segmentation_id}}
{% endif %}
{% endif %}
- #test-sriov:
- #cidr: '10.0.1.0/24'
- #provider: "sriov"
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml
index 3c5f72d5a..cefa50541 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml
@@ -64,7 +64,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml
index cf9706847..e84e1135c 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml
@@ -49,7 +49,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc011.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc011.yaml
index ee36c6c82..998002249 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc011.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc011.yaml
@@ -61,7 +61,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc012.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc012.yaml
index b8b208f12..a1058bc3b 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc012.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc012.yaml
@@ -50,7 +50,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
index bd0fe3627..78c9e850c 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
@@ -43,7 +43,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc023.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc023.yaml
index 7136a9e7e..c2c176b87 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc023.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc023.yaml
@@ -181,7 +181,7 @@ contexts:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
index 3622b40d7..a6cf6a292 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
@@ -95,7 +95,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
index 59fb95d07..2250ae589 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
@@ -95,7 +95,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc069.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc069.yaml
index 2a4082310..3bfaff961 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc069.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc069.yaml
@@ -49,7 +49,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml
index 7ea10d8a4..8cbd24747 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc070.yaml
@@ -97,7 +97,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml
index b6a944bbb..c1ce2c556 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc071.yaml
@@ -95,7 +95,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml
index 09930d442..d237fa92a 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc072.yaml
@@ -97,7 +97,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc076.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc076.yaml
index 8c0edac83..c5e799c99 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc076.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc076.yaml
@@ -58,7 +58,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml
index 9c15acc9c..04a9225ea 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml
@@ -45,7 +45,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc082.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc082.yaml
index 4b67f0f20..85b6c91db 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc082.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc082.yaml
@@ -55,7 +55,7 @@ context:
networks:
test:
cidr: "10.0.1.0/24"
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc083.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc083.yaml
index 6315fdc07..158f064cd 100755
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc083.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc083.yaml
@@ -81,7 +81,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc084.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc084.yaml
index 472aabe07..9df16578a 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc084.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc084.yaml
@@ -58,7 +58,7 @@ context:
networks:
test:
cidr: '10.0.1.0/24'
- {% if provider == "vlan" %}
+ {% if provider == "vlan" or provider == "sriov" %}
provider: {{provider}}
physical_network: {{physical_network}}
{% if segmentation_id %}
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
deleted file mode 100644
index 95b2b8a4e..000000000
--- a/tests/unit/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-import mock
-
-from yardstick import tests
-
-# NOTE(ralonsoh): to be removed. Replace all occurrences of
-# tests.unit.STL_MOCKS with yardstick.tests.STL_MOCKS
-STL_MOCKS = tests.STL_MOCKS
-
-mock_stl = mock.patch.dict(sys.modules, tests.STL_MOCKS)
-mock_stl.start()
diff --git a/tests/unit/network_services/__init__.py b/tests/unit/network_services/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/tests/unit/network_services/__init__.py
+++ /dev/null
diff --git a/tools/cover.sh b/tools/cover.sh
index c6e928d1a..4e54a64b7 100644
--- a/tools/cover.sh
+++ b/tools/cover.sh
@@ -44,14 +44,12 @@ run_coverage_test() {
baseline_report=$(mktemp -t yardstick_coverageXXXXXXX)
find . -type f -name "*.pyc" -delete
+ coverage erase
- # Temporarily run tests from two directories, until all tests have moved
- coverage run -p -m unittest discover ./tests/unit
coverage run -p -m unittest discover ./yardstick/tests/unit
coverage combine
- # Temporarily omit yardstick/tests from the report
- coverage report --omit=yardstick/tests/*/* > ${baseline_report}
+ coverage report > ${baseline_report}
coverage erase
# debug awk
@@ -72,13 +70,10 @@ run_coverage_test() {
find . -type f -name "*.pyc" -delete
- # Temporarily run tests from two directories, until all tests have moved
- coverage run -p -m unittest discover ./tests/unit
coverage run -p -m unittest discover ./yardstick/tests/unit
coverage combine
- # Temporarily omit yardstick/tests from the report
- coverage report --omit=yardstick/tests/*/* > ${current_report}
+ coverage report > ${current_report}
coverage erase
rm -rf cover-$PY_VER
diff --git a/tools/run_tests.sh b/tools/run_tests.sh
index 32c4f19e4..49f628eec 100755
--- a/tools/run_tests.sh
+++ b/tools/run_tests.sh
@@ -28,17 +28,8 @@ run_tests() {
echo "Running unittest ... "
if [ $FILE_OPTION == "f" ]; then
- python -m unittest discover -v -s tests/unit > $logfile 2>&1
- if [ $? -ne 0 ]; then
- echo "FAILED, results in $logfile"
- exit 1
- fi
- python -m unittest discover -v -s yardstick/tests/unit >> $logfile 2>&1
+ python -m unittest discover -v -s yardstick/tests/unit > $logfile 2>&1
else
- python -m unittest discover -v -s tests/unit
- if [ $? -ne 0 ]; then
- exit 1
- fi
python -m unittest discover -v -s yardstick/tests/unit
fi
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index 692c16892..022e365e4 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -8,10 +8,13 @@
##############################################################################
import abc
+import errno
import six
+import os
from yardstick.common import constants
from yardstick.common import utils
+from yardstick.common.constants import YARDSTICK_ROOT_PATH
class Flags(object):
@@ -45,20 +48,13 @@ class Context(object):
list = []
SHORT_TASK_ID_LEN = 8
- @staticmethod
- def split_name(name, sep='.'):
- try:
- name_iter = iter(name.split(sep))
- except AttributeError:
- # name is not a string
- return None, None
- return next(name_iter), next(name_iter, None)
-
- def __init__(self):
+ def __init__(self, host_name_separator='.'):
Context.list.append(self)
self._flags = Flags()
self._name = None
self._task_id = None
+ self.file_path = None
+ self._host_name_separator = host_name_separator
def init(self, attrs):
"""Initiate context"""
@@ -68,6 +64,32 @@ class Context(object):
self._name_task_id = '{}-{}'.format(
self._name, self._task_id[:self.SHORT_TASK_ID_LEN])
+ def split_host_name(self, name):
+ if (isinstance(name, six.string_types)
+ and self._host_name_separator in name):
+ return tuple(name.split(self._host_name_separator, 1))
+ return None, None
+
+ def read_pod_file(self, attrs):
+ self.file_path = file_path = attrs.get("file", "pod.yaml")
+ try:
+ cfg = utils.read_yaml_file(self.file_path)
+ except IOError as io_error:
+ if io_error.errno != errno.ENOENT:
+ raise
+
+ self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
+ cfg = utils.read_yaml_file(self.file_path)
+
+ self.nodes.extend(cfg["nodes"])
+ self.controllers.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Controller"])
+ self.computes.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Compute"])
+ self.baremetals.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Baremetal"])
+ return cfg
+
@property
def name(self):
if self._flags.no_setup or self._flags.no_teardown:
@@ -79,6 +101,10 @@ class Context(object):
def assigned_name(self):
return self._name
+ @property
+ def host_name_separator(self):
+ return self._host_name_separator
+
@staticmethod
def get_cls(context_type):
"""Return class of specified type."""
@@ -129,6 +155,25 @@ class Context(object):
attr_name)
@staticmethod
+ def get_physical_nodes():
+ """return physical node names for all contexts"""
+ physical_nodes = {}
+ for context in Context.list:
+ nodes = context._get_physical_nodes()
+ physical_nodes.update({context._name: nodes})
+
+ return physical_nodes
+
+ @staticmethod
+ def get_physical_node_from_server(server_name):
+ """return physical nodes for all contexts"""
+ context = Context.get_context_from_server(server_name)
+        if context is None:
+ return None
+
+ return context._get_physical_node_for_server(server_name)
+
+ @staticmethod
def get_context_from_server(attr_name):
"""lookup context info by name from node config
attr_name: either a name of the node created by yardstick or a dict
@@ -157,3 +202,15 @@ class Context(object):
except StopIteration:
raise ValueError("context not found for server %r" %
attr_name)
+
+ @abc.abstractmethod
+ def _get_physical_nodes(self):
+ """return the list of physical nodes in context"""
+
+ @abc.abstractmethod
+ def _get_physical_node_for_server(self, server_name):
+ """ Find physical node for given server
+
+ :param server_name: (string) Server name in scenario
+ :return string: <node_name>.<context_name>
+ """
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index a9e4564fe..36e8854e8 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -32,3 +32,9 @@ class DummyContext(Context):
def _get_network(self, attr_name):
return None
+
+ def _get_physical_nodes(self):
+ return None
+
+ def _get_physical_node_for_server(self, server_name):
+ return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 82861829e..ac85b6ffe 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -29,6 +29,7 @@ from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
+from yardstick.common import openstack_utils
LOG = logging.getLogger(__name__)
@@ -68,6 +69,12 @@ class HeatContext(Context):
self.shade_client = None
self.heat_timeout = None
self.key_filename = None
+ self.shade_client = None
+ self.operator_client = None
+ self.nodes = []
+ self.controllers = []
+ self.computes = []
+ self.baremetals = []
super(HeatContext, self).__init__()
@staticmethod
@@ -96,6 +103,14 @@ class HeatContext(Context):
self.template_file = attrs.get("heat_template")
+ self.shade_client = openstack_utils.get_shade_client()
+ self.operator_client = openstack_utils.get_shade_operator_client()
+
+ try:
+ self.read_pod_file(attrs)
+ except IOError:
+ LOG.warning("No pod file specified. NVFi metrics will be disabled")
+
self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
if self.template_file:
self.heat_parameters = attrs.get("heat_parameters")
@@ -465,7 +480,7 @@ class HeatContext(Context):
with attribute name mapping when using external heat templates
"""
if isinstance(attr_name, collections.Mapping):
- node_name, cname = self.split_name(attr_name['name'])
+ node_name, cname = self.split_host_name(attr_name['name'])
if cname is None or cname != self.name:
return None
@@ -528,3 +543,30 @@ class HeatContext(Context):
"physical_network": network.physical_network,
}
return result
+
+ def _get_physical_nodes(self):
+ return self.nodes
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s.name == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ server = openstack_utils.get_server(self.shade_client,
+ name_or_id=server_name)
+
+ if server:
+ server = server.toDict()
+ list_hypervisors = self.operator_client.list_hypervisors()
+
+ for hypervisor in list_hypervisors:
+ if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
+ for node in self.nodes:
+ if node['ip'] == hypervisor.host_ip:
+ return "{}.{}".format(node['name'], self._name)
+
+ return None
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 6b00c0374..916f4b12f 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -33,8 +33,7 @@ class KubernetesContext(Context):
self.key_path = ''
self.public_key_path = ''
self.template = None
-
- super(KubernetesContext, self).__init__()
+ super(KubernetesContext, self).__init__(host_name_separator='-')
def init(self, attrs):
super(KubernetesContext, self).init(attrs)
@@ -151,3 +150,9 @@ class KubernetesContext(Context):
def _get_network(self, attr_name):
return None
+
+ def _get_physical_nodes(self):
+ return None
+
+ def _get_physical_node_for_server(self, server_name):
+ return None
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index fa619a9aa..d3af98920 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -8,7 +8,6 @@
##############################################################################
from __future__ import absolute_import
-import errno
import subprocess
import os
import collections
@@ -22,7 +21,7 @@ from yardstick import ssh
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
from yardstick.common.ansible_common import AnsibleCommon
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common.exceptions import ContextUpdateCollectdForNodeError
LOG = logging.getLogger(__name__)
@@ -49,40 +48,11 @@ class NodeContext(Context):
}
super(NodeContext, self).__init__()
- def read_config_file(self):
- """Read from config file"""
-
- with open(self.file_path) as stream:
- LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml_load(stream)
- return cfg
-
def init(self, attrs):
"""initializes itself from the supplied arguments"""
super(NodeContext, self).init(attrs)
- self.file_path = file_path = attrs.get("file", "pod.yaml")
-
- try:
- cfg = self.read_config_file()
- except IOError as io_error:
- if io_error.errno != errno.ENOENT:
- raise
-
- self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
- cfg = self.read_config_file()
-
- self.nodes.extend(cfg["nodes"])
- self.controllers.extend([node for node in cfg["nodes"]
- if node.get("role") == "Controller"])
- self.computes.extend([node for node in cfg["nodes"]
- if node.get("role") == "Compute"])
- self.baremetals.extend([node for node in cfg["nodes"]
- if node.get("role") == "Baremetal"])
- LOG.debug("Nodes: %r", self.nodes)
- LOG.debug("Controllers: %r", self.controllers)
- LOG.debug("Computes: %r", self.computes)
- LOG.debug("BareMetals: %r", self.baremetals)
+ cfg = self.read_pod_file(attrs)
self.env = attrs.get('env', {})
self.attrs = attrs
@@ -135,11 +105,37 @@ class NodeContext(Context):
playbook = os.path.join(ANSIBLE_DIR, playbook)
return playbook
+ def _get_physical_nodes(self):
+ return self.nodes
+
+ def _get_physical_node_for_server(self, server_name):
+
+ node_name, context_name = self.split_host_name(server_name)
+
+ if context_name is None or self.name != context_name:
+ return None
+
+ for n in (n for n in self.nodes if n["name"] == node_name):
+ return "{}.{}".format(n["name"], self._name)
+
+ return None
+
+ def update_collectd_options_for_node(self, options, attr_name):
+ node_name, _ = self.split_host_name(attr_name)
+
+ matching_nodes = (n for n in self.nodes if n["name"] == node_name)
+ try:
+ node = next(matching_nodes)
+ except StopIteration:
+ raise ContextUpdateCollectdForNodeError(attr_name=attr_name)
+
+ node["collectd"] = options
+
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: a name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index 320c61c92..764cde3f7 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -26,7 +26,7 @@ import xml.etree.ElementTree as ET
from yardstick import ssh
from yardstick.common import constants
from yardstick.common import exceptions
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common.utils import read_yaml_file
from yardstick.network_services.utils import PciAddress
from yardstick.network_services.helpers.cpu import CpuSysCores
@@ -394,26 +394,18 @@ class StandaloneContextHelper(object):
return pf_vfs
- def read_config_file(self):
- """Read from config file"""
-
- with open(self.file_path) as stream:
- LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml_load(stream)
- return cfg
-
def parse_pod_file(self, file_path, nfvi_role='Sriov'):
self.file_path = file_path
nodes = []
nfvi_host = []
try:
- cfg = self.read_config_file()
+ cfg = read_yaml_file(self.file_path)
except IOError as io_error:
if io_error.errno != errno.ENOENT:
raise
self.file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
file_path)
- cfg = self.read_config_file()
+ cfg = read_yaml_file(self.file_path)
nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index b9e66a481..e6a6f99e4 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -298,13 +298,28 @@ class OvsDpdkContext(Context):
for vm in self.vm_names:
model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+        # self.nfvi_host always contains only one host
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 2f93e530b..05fac0218 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -106,13 +106,29 @@ class SriovContext(Context):
build_vfs = "echo 0 > /sys/bus/pci/devices/{0}/sriov_numvfs"
self.connection.execute(build_vfs.format(ports.get('phy_port')))
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+
+        # self.nfvi_host always contains only one host.
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 697cc007f..f050e8d0f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -619,27 +619,22 @@ class TaskParser(object): # pragma: no cover
nodes:
tg__0: tg_0.yardstick
vnf__0: vnf_0.yardstick
+
+ NOTE: in Kubernetes context, the separator character between the server
+ name and the context name is "-":
+ scenario:
+ host: host-k8s
+ target: target-k8s
"""
def qualified_name(name):
- try:
- # for openstack
- node_name, context_name = name.split('.')
- sep = '.'
- except ValueError:
- # for kubernetes, some kubernetes resources don't support
- # name format like 'xxx.xxx', so we use '-' instead
- # need unified later
- node_name, context_name = name.split('-')
- sep = '-'
-
- try:
- ctx = next((context for context in contexts
- if context.assigned_name == context_name))
- except StopIteration:
- raise y_exc.ScenarioConfigContextNameNotFound(
- context_name=context_name)
-
- return '{}{}{}'.format(node_name, sep, ctx.name)
+ for context in contexts:
+ host_name, ctx_name = context.split_host_name(name)
+ if context.assigned_name == ctx_name:
+ return '{}{}{}'.format(host_name,
+ context.host_name_separator,
+ context.name)
+
+ raise y_exc.ScenarioConfigContextNameNotFound(host_name=name)
if 'host' in scenario:
scenario['host'] = qualified_name(scenario['host'])
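The qualified_name() rewrite above stops hard-coding the '.' and '-' separators and instead lets every context split the scenario name with its own host_name_separator, so "vnf_0.yardstick" resolves against a Heat or Node context while "host-k8s" resolves against a Kubernetes one. A purely illustrative check of the two conventions:

    # Plain str.split used only to illustrate the two separator conventions.
    for name, sep in (("vnf_0.yardstick", "."), ("host-k8s", "-")):
        host_name, ctx_name = name.split(sep, 1)
        print(host_name, "->", ctx_name)
    # vnf_0 -> yardstick
    # host -> k8s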
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 6aaaed888..ecb59f960 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -37,6 +37,7 @@ import six
from six.moves import range
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -86,7 +87,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
loop_iter = six.moves.zip(*param_iters)
else:
LOG.warning("iter_type unrecognized: %s", iter_type)
- raise TypeError("iter_type unrecognized: %s", iter_type)
+ raise TypeError("iter_type unrecognized: %s" % iter_type)
# Populate options and run the requested method for each value combination
for comb_values in loop_iter:
@@ -105,14 +106,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 60b0348c3..60f1fa536 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,6 +27,7 @@ import traceback
import time
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -70,13 +71,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
# catch all exceptions because with multiprocessing we can have un-picklable exception
# problems https://bugs.python.org/issue9400
except Exception: # pylint: disable=broad-except
diff --git a/yardstick/benchmark/runners/dynamictp.py b/yardstick/benchmark/runners/dynamictp.py
index 63bfc823a..88d3c5704 100755
--- a/yardstick/benchmark/runners/dynamictp.py
+++ b/yardstick/benchmark/runners/dynamictp.py
@@ -27,6 +27,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -80,10 +81,10 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
method(data)
- except AssertionError as assertion:
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ except y_exc.SLAValidationError as error:
+ LOG.warning("SLA validation failed: %s", error.args)
too_high = True
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 20d6da054..4c88f3671 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -29,6 +29,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -75,13 +76,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
elif sla_action == "rate-control":
try:
scenario_cfg['options']['rate']
diff --git a/yardstick/benchmark/runners/proxduration.py b/yardstick/benchmark/runners/proxduration.py
new file mode 100644
index 000000000..61a468fd3
--- /dev/null
+++ b/yardstick/benchmark/runners/proxduration.py
@@ -0,0 +1,165 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# yardstick comment: this is a modified copy of
+# rally/rally/benchmark/runners/constant.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
+from yardstick.common import constants
+
+LOG = logging.getLogger(__name__)
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = scenario_cfg['runner']
+
+ requested_interval = interval = runner_cfg.get("interval", 1)
+ duration = runner_cfg.get("duration", 60)
+ sampled = runner_cfg.get("sampled", False)
+
+ LOG.info("Worker START, duration is %ds", duration)
+ LOG.debug("class is %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ benchmark.setup()
+ method = getattr(benchmark, method_name)
+
+ sla_action = None
+ if "sla" in scenario_cfg:
+ sla_action = scenario_cfg["sla"].get("action", "assert")
+
+
+ start = time.time()
+ timeout = start + duration
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ benchmark.pre_run_wait_time(interval)
+
+ if sampled:
+ try:
+ pre_adjustment = time.time()
+ result = method(data)
+ post_adjustment = time.time()
+ if requested_interval > post_adjustment - pre_adjustment:
+ interval = requested_interval - (post_adjustment - pre_adjustment)
+ else:
+ interval = 0
+
+ except y_exc.SLAValidationError as error:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ # catch all exceptions because with multiprocessing we can have un-picklable exception
+ # problems https://bugs.python.org/issue9400
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception("")
+ else:
+ if result:
+ # add timeout for put so we don't block test
+ # if we do timeout we don't care about dropping individual KPIs
+ output_queue.put(result, True, constants.QUEUE_PUT_TIMEOUT)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)
+ else:
+ LOG.debug("No sample collected ...Sequence %s", sequence)
+
+
+ sequence += 1
+
+ if (errors and sla_action is None) or time.time() > timeout or aborted.is_set():
+ LOG.info("Worker END")
+ break
+
+ try:
+ benchmark.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+ LOG.info("Exiting ProxDuration Runner...")
+
+class ProxDurationRunner(base.Runner):
+ """Run a scenario for a certain amount of time
+
+If the scenario ends before the time has elapsed, it will be started again.
+
+ Parameters
+ duration - amount of time the scenario will be run for
+ type: int
+ unit: seconds
+ default: 60 sec
+ interval - time to wait between each scenario invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ sampled - Sample data is required yes/no
+ type: boolean
+ unit: True/False
+ default: False
+ confirmation - Number of confirmation retries
+ type: int
+ unit: retry attempts
+ default: 0
+ """
+ __execution_type__ = 'ProxDuration'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
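The new ProxDuration runner only publishes results when "sampled" is enabled, and it shortens the next wait by however long the scenario call itself took, keeping samples close to the requested interval. A standalone sketch of that pacing arithmetic (the helper name is illustrative, not part of the runner):

    import time

    def next_interval(requested_interval, run_scenario):
        # subtract the scenario's own run time from the requested interval,
        # never going below zero
        start = time.time()
        run_scenario()
        elapsed = time.time() - start
        return max(requested_interval - elapsed, 0)

    print(next_interval(1.0, lambda: time.sleep(0.2)))  # roughly 0.8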
diff --git a/yardstick/benchmark/runners/search.py b/yardstick/benchmark/runners/search.py
index 8037329b5..01a4292c7 100644
--- a/yardstick/benchmark/runners/search.py
+++ b/yardstick/benchmark/runners/search.py
@@ -33,6 +33,7 @@ from collections import Mapping
from six.moves import zip
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -119,14 +120,14 @@ If the scenario ends before the time has elapsed, it will be started again.
try:
self.worker_helper(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if self.sla_action == "assert":
raise
elif self.sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index d6e3f7109..0148a45b2 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -30,6 +30,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -74,14 +75,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 1fadd2532..e2db03a70 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -58,16 +58,20 @@ class ScenarioGeneral(base.Scenario):
self.director.stopMonitors()
verify_result = self.director.verify()
+ service_not_found = False
for k, v in self.director.data.items():
if v == 0:
- result['sla_pass'] = 0
verify_result = False
+ service_not_found = True
LOG.info("\033[92m The service process (%s) not found in the host environment", k)
result['sla_pass'] = 1 if verify_result else 0
self.director.store_result(result)
- assert verify_result is True, "The HA test case NOT passed"
+ self.verify_SLA(
+ verify_result, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "Director.verify() failed"))
def teardown(self):
self.director.knockoff()
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 42941c6e7..7f976fdbc 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.data = {}
+ self.sla_pass = False
def setup(self):
"""scenario setup"""
@@ -69,23 +70,28 @@ class ServiceHA(base.Scenario):
self.monitorMgr.wait_monitors()
LOG.info("Monitor '%s' stop!", self.__scenario_type__)
- sla_pass = self.monitorMgr.verify_SLA()
+ self.sla_pass = self.monitorMgr.verify_SLA()
+ service_not_found = False
for k, v in self.data.items():
if v == 0:
- sla_pass = False
+ self.sla_pass = False
+ service_not_found = True
LOG.info("The service process (%s) not found in the host envrioment", k)
- result['sla_pass'] = 1 if sla_pass else 0
+ result['sla_pass'] = 1 if self.sla_pass else 0
self.monitorMgr.store_result(result)
- assert sla_pass is True, "The HA test case NOT pass the SLA"
-
- return
+ self.verify_SLA(
+ self.sla_pass, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "MonitorMgr.verify_SLA() failed"))
def teardown(self):
"""scenario teardown"""
- for attacker in self.attackers:
- attacker.recover()
+ # only recover when sla not pass
+ if not self.sla_pass:
+ for attacker in self.attackers:
+ attacker.recover()
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 58a02805c..30ac1bea9 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -20,6 +20,7 @@ import six
from stevedore import extension
import yardstick.common.utils as utils
+from yardstick.common import exceptions as y_exc
def _iter_scenario_classes(scenario_type=None):
@@ -61,6 +62,11 @@ class Scenario(object):
"""Time waited after executing the run method"""
time.sleep(time_seconds)
+ def verify_SLA(self, condition, error_msg):
+ if not condition:
+ raise y_exc.SLAValidationError(
+ case_name=self.__scenario_type__, error_msg=error_msg)
+
@staticmethod
def get_types():
"""return a list of known runner type (class) names"""
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index 998463ef6..413709f3b 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -100,7 +100,7 @@ class Cyclictest(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -195,7 +195,7 @@ class Cyclictest(base.Scenario):
if latency > sla_latency:
sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
(t, latency, t, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 801f7fa80..2237e49e0 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -119,8 +119,8 @@ class Lmbench(base.Scenario):
cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
(repetition, warmup)
else:
- raise RuntimeError("No such test_type: %s for Lmbench scenario",
- test_type)
+ raise RuntimeError("No such test_type: %s for Lmbench scenario"
+ % test_type)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -157,7 +157,7 @@ class Lmbench(base.Scenario):
if sla_latency < cache_latency:
sla_error += "latency %f > sla:max_latency(%f); " \
% (cache_latency, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index 0b8ed9b28..b973211f1 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -93,7 +93,7 @@ class Perf(base.Scenario):
% (load, duration, events_string)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, stdout, _ = self.client.execute(cmd)
if status:
raise RuntimeError(stdout)
@@ -105,16 +105,14 @@ class Perf(base.Scenario):
exp_val = self.scenario_cfg['sla']['expected_value']
smaller_than_exp = 'smaller_than_expected' \
in self.scenario_cfg['sla']
-
- if metric not in result:
- assert False, "Metric (%s) not found." % metric
- else:
- if smaller_than_exp:
- assert result[metric] < exp_val, "%s %d >= %d (sla); " \
- % (metric, result[metric], exp_val)
- else:
- assert result[metric] >= exp_val, "%s %d < %d (sla); " \
- % (metric, result[metric], exp_val)
+ self.verify_SLA(metric in result,
+ "Metric (%s) not found." % metric)
+ self.verify_SLA(
+ not smaller_than_exp,
+ "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+ self.verify_SLA(
+ result[metric] >= exp_val,
+ "%s %d < %d (sla); " % (metric, result[metric], exp_val))
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index 2de1270ef..975c90b22 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -56,7 +56,7 @@ class QemuMigrate(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -127,7 +127,7 @@ class QemuMigrate(base.Scenario):
if timevalue > sla_time:
sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
(t, timevalue, t, sla_time)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index ca64935dd..4daf776ff 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -121,8 +121,8 @@ class Ramspeed(base.Scenario):
(test_id, load, block_size)
# only the test_id 1-6 will be used in this scenario
else:
- raise RuntimeError("No such type_id: %s for Ramspeed scenario",
- test_id)
+ raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+ % test_id)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -140,4 +140,4 @@ class Ramspeed(base.Scenario):
if bw < sla_min_bw:
sla_error += "Bandwidth %f < " \
"sla:min_bandwidth(%f)" % (bw, sla_min_bw)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
index cdb345717..3cea31694 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench.py
+++ b/yardstick/benchmark/scenarios/compute/unixbench.py
@@ -125,7 +125,7 @@ class Unixbench(base.Scenario):
if score < sla_score:
sla_error += "%s score %f < sla:%s_score(%f); " % \
(t, score, t, sla_score)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 98c45990e..51e044e7b 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -92,7 +92,7 @@ For more info see http://software.es.net/iperf
def teardown(self):
LOG.debug("teardown")
self.host.close()
- status, stdout, stderr = self.target.execute("pkill iperf3")
+ status, _, stderr = self.target.execute("pkill iperf3")
if status:
LOG.warning(stderr)
self.target.close()
@@ -145,7 +145,7 @@ For more info see http://software.es.net/iperf
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.host.execute(cmd)
+ status, stdout, _ = self.host.execute(cmd)
if status:
# error cause in json dict on stdout
raise RuntimeError(stdout)
@@ -165,16 +165,17 @@ For more info see http://software.es.net/iperf
bit_per_second = \
int(iperf_result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
- assert bytes_per_second >= sla_bytes_per_second, \
- "bytes_per_second %d < sla:bytes_per_second (%d); " % \
- (bytes_per_second, sla_bytes_per_second)
+ self.verify_SLA(
+ bytes_per_second >= sla_bytes_per_second,
+ "bytes_per_second %d < sla:bytes_per_second (%d); "
+ % (bytes_per_second, sla_bytes_per_second))
else:
sla_jitter = float(sla_iperf["jitter"])
jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
- assert jitter_ms <= sla_jitter, \
- "jitter_ms %f > sla:jitter %f; " % \
- (jitter_ms, sla_jitter)
+ self.verify_SLA(jitter_ms <= sla_jitter,
+ "jitter_ms %f > sla:jitter %f; "
+ % (jitter_ms, sla_jitter))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
index 86173c9da..e3bd7af46 100644
--- a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
+++ b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
@@ -367,9 +367,10 @@ ports = {0,1},
throughput_rx_mpps = int(
self.scenario_cfg["sla"]["throughput_rx_mpps"])
- assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
- "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
- (throughput_rx_mpps, moongen_result["tx_mpps"])
+ self.verify_SLA(
+ throughput_rx_mpps <= moongen_result["tx_mpps"],
+ "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+ % (throughput_rx_mpps, moongen_result["tx_mpps"]))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
index 33c02d409..9f1a81413 100755
--- a/yardstick/benchmark/scenarios/networking/netperf.py
+++ b/yardstick/benchmark/scenarios/networking/netperf.py
@@ -138,9 +138,9 @@ class Netperf(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
index d52e6b9e1..0ad2ecff5 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_node.py
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -156,9 +156,10 @@ class NetperfNode(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(
+ mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def teardown(self):
"""remove netperf from nodes after test"""
diff --git a/yardstick/benchmark/scenarios/networking/nstat.py b/yardstick/benchmark/scenarios/networking/nstat.py
index 10c560769..ea067f8ab 100644
--- a/yardstick/benchmark/scenarios/networking/nstat.py
+++ b/yardstick/benchmark/scenarios/networking/nstat.py
@@ -121,4 +121,4 @@ class Nstat(base.Scenario):
if rate > sla_rate:
sla_error += "%s rate %f > sla:%s_rate(%f); " % \
(i, rate, i, sla_rate)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index e7d9beea8..6caeab5ef 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -91,9 +91,10 @@ class Ping(base.Scenario):
result.update(utils.flatten_dict_key(ping_result))
if sla_max_rtt is not None:
sla_max_rtt = float(sla_max_rtt)
- assert rtt_result[target_vm_name] <= sla_max_rtt,\
- "rtt %f > sla: max_rtt(%f); " % \
- (rtt_result[target_vm_name], sla_max_rtt)
+ self.verify_SLA(
+ rtt_result[target_vm_name] <= sla_max_rtt,
+ "rtt %f > sla: max_rtt(%f); "
+ % (rtt_result[target_vm_name], sla_max_rtt))
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
# we need to specify a result to satisfy influxdb schema
@@ -102,13 +103,12 @@ class Ping(base.Scenario):
rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
# store result before potential AssertionError
result.update(utils.flatten_dict_key(ping_result))
- if sla_max_rtt is not None:
- raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
- rtt_result[target_vm_name], sla_max_rtt))
-
- else:
- raise AssertionError(
- "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+ self.verify_SLA(sla_max_rtt is None,
+ "packet dropped rtt %f > sla: max_rtt(%f)"
+ % (rtt_result[target_vm_name], sla_max_rtt))
+ self.verify_SLA(False,
+ "packet dropped rtt %f"
+ % (rtt_result[target_vm_name]))
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
index 74855a10f..377278004 100644
--- a/yardstick/benchmark/scenarios/networking/ping6.py
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -59,8 +59,7 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.pre_setup_script, '~/pre_setup.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash pre_setup.sh")
+ self.client.execute("sudo bash pre_setup.sh")
def _get_controller_node(self, host_list):
for host_name in host_list:
@@ -122,7 +121,7 @@ class Ping6(base.Scenario): # pragma: no cover
cmd = "sudo bash %s %s %s" % \
(setup_bash_file, self.openrc, self.external_network)
LOG.debug("Executing setup command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ self.client.execute(cmd)
self.setup_done = True
@@ -171,8 +170,9 @@ class Ping6(base.Scenario): # pragma: no cover
result["rtt"] = float(stdout)
if "sla" in self.scenario_cfg:
sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
- assert result["rtt"] <= sla_max_rtt, \
- "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+ self.verify_SLA(result["rtt"] <= sla_max_rtt,
+ "rtt %f > sla:max_rtt(%f); "
+ % (result["rtt"], sla_max_rtt))
else:
LOG.error("ping6 timeout!!!")
self.run_done = True
@@ -216,5 +216,4 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.post_teardown_script, '~/post_teardown.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash post_teardown.sh")
+ self.client.execute("sudo bash post_teardown.sh")
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index b79b91539..c78108adb 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -87,7 +87,7 @@ class Pktgen(base.Scenario):
self.server.send_command(cmd)
self.client.send_command(cmd)
- """multiqueue setup"""
+ # multiqueue setup
if not self._is_irqbalance_disabled():
self._disable_irqbalance()
@@ -112,18 +112,14 @@ class Pktgen(base.Scenario):
def _get_vnic_driver_name(self):
cmd = "readlink /sys/class/net/%s/device/driver" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return os.path.basename(stdout.strip())
def _is_irqbalance_disabled(self):
"""Did we disable irqbalance already in the guest?"""
is_disabled = False
cmd = "grep ENABLED /etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
if "0" in stdout:
is_disabled = True
@@ -132,49 +128,35 @@ class Pktgen(base.Scenario):
def _disable_irqbalance(self):
cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
"/etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "sudo service irqbalance stop"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "sudo service irqbalance disable"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _setup_irqmapping_ovs(self, queue_number):
cmd = "grep 'virtio0-input.0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "grep 'virtio0-output.0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'"
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
if queue_number == 1:
return
@@ -186,44 +168,32 @@ class Pktgen(base.Scenario):
cmd = "grep 'virtio0-input.%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
cmd = "grep 'virtio0-output.%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _setup_irqmapping_sriov(self, queue_number):
cmd = "grep '%s-TxRx-0' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % self.vnic_name
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
if queue_number == 1:
return
@@ -234,24 +204,18 @@ class Pktgen(base.Scenario):
cmd = "grep '%s-TxRx-%s' /proc/interrupts |" \
"awk '{match($0,/ +[0-9]+/)} " \
"{print substr($1,RSTART,RLENGTH-1)}'" % (self.vnic_name, i)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
def _get_sriov_queue_number(self):
"""Get queue number from server as both VMs are the same"""
cmd = "grep %s-TxRx- /proc/interrupts | wc -l" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _get_available_queue_number(self):
@@ -259,9 +223,7 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -l %s | grep Combined | head -1 |" \
"awk '{printf $2}'" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _get_usable_queue_number(self):
@@ -269,9 +231,7 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -l %s | grep Combined | tail -1 |" \
"awk '{printf $2}'" % self.vnic_name
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def _enable_ovs_multiqueue(self):
@@ -282,10 +242,8 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -L %s combined %s" % \
(self.vnic_name, available_queue_number)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd)
+ self.client.run(cmd)
return available_queue_number
def _iptables_setup(self):
@@ -294,9 +252,7 @@ class Pktgen(base.Scenario):
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
- status, _, stderr = self.server.execute(cmd, timeout=SSH_TIMEOUT)
- if status:
- raise RuntimeError(stderr)
+ self.server.run(cmd, timeout=SSH_TIMEOUT)
def _iptables_get_result(self):
"""Get packet statistics from server"""
@@ -304,9 +260,7 @@ class Pktgen(base.Scenario):
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'" \
% (1000 + self.number_of_ports)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.server.execute(cmd, raise_on_error=True)
return int(stdout)
def run(self, result):
@@ -356,10 +310,8 @@ class Pktgen(base.Scenario):
duration, queue_number, pps)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd, timeout=SSH_TIMEOUT)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True,
+ timeout=SSH_TIMEOUT)
result.update(jsonutils.loads(stdout))
@@ -374,8 +326,8 @@ class Pktgen(base.Scenario):
if "sla" in self.scenario_cfg:
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm,
+ "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
def _test(): # pragma: no cover
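The pktgen changes collapse the repeated "execute, check status, raise stderr" triplets into execute(..., raise_on_error=True) and run(). The class below only illustrates what such a wrapper does, implemented with subprocess against a local shell (Python 3); the real client is the Yardstick SSH helper and its exact signatures may differ:

    import subprocess

    class ShellClient(object):
        """Illustrative stand-in for an execute()/run() command client."""

        def execute(self, cmd, raise_on_error=False, timeout=None):
            proc = subprocess.run(cmd, shell=True, capture_output=True,
                                  text=True, timeout=timeout)
            if raise_on_error and proc.returncode:
                # Equivalent to the old "if status: raise RuntimeError(stderr)"
                raise RuntimeError(proc.stderr)
            return proc.returncode, proc.stdout, proc.stderr

        def run(self, cmd, timeout=None):
            # Convenience form when the caller does not need stdout/stderr.
            return self.execute(cmd, raise_on_error=True, timeout=timeout)

    # Call sites then shrink to one line, e.g.:
    # _, stdout, _ = client.execute(cmd, raise_on_error=True)
    # client.run(cmd, timeout=SSH_TIMEOUT)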
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
index 9a7b975a2..efb7d8b5d 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
@@ -113,10 +113,7 @@ cat ~/result.log -vT \
{print substr($0,RSTART,RLENGTH)}' \
|grep -v ^$ |awk '{if ($2 != 0) print $2}'\
"""
- client_status, client_stdout, client_stderr = self.client.execute(cmd)
-
- if client_status:
- raise RuntimeError(client_stderr)
+ _, client_stdout, _ = self.client.execute(cmd, raise_on_error=True)
avg_latency = 0
if client_stdout:
@@ -135,4 +132,4 @@ cat ~/result.log -vT \
LOG.info("sla_max_latency: %d", sla_max_latency)
debug_info = "avg_latency %d > sla_max_latency %d" \
% (avg_latency, sla_max_latency)
- assert avg_latency <= sla_max_latency, debug_info
+ self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
index 497e59ee8..97b9cf73f 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
@@ -143,11 +143,11 @@ class PktgenDPDK(base.Scenario):
cmd = "ip a | grep eth1 2>/dev/null"
LOG.debug("Executing command: %s in %s", cmd, host)
if "server" in host:
- status, stdout, stderr = self.server.execute(cmd)
+ _, stdout, _ = self.server.execute(cmd)
if stdout:
is_run = False
else:
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
@@ -222,5 +222,5 @@ class PktgenDPDK(base.Scenario):
ppm += (sent - received) % sent > 0
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; "
+ % (ppm, sla_max_ppm))
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 1c3ea1f8d..4d7c4f9be 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -133,11 +133,10 @@ class NetworkServiceTestCase(scenario_base.Scenario):
with utils.open_relative_file(profile, path) as infile:
return infile.read()
- def _get_topology(self):
- topology = self.scenario_cfg["topology"]
- path = self.scenario_cfg["task_path"]
- with utils.open_relative_file(topology, path) as infile:
- return infile.read()
+ def _get_duration(self):
+ options = self.scenario_cfg.get('options', {})
+ return options.get('duration',
+ tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
def _fill_traffic_profile(self):
tprofile = self._get_traffic_profile()
@@ -147,12 +146,17 @@ class NetworkServiceTestCase(scenario_base.Scenario):
'imix': self._get_traffic_imix(),
tprofile_base.TrafficProfile.UPLINK: {},
tprofile_base.TrafficProfile.DOWNLINK: {},
- 'extra_args': extra_args
- }
-
+ 'extra_args': extra_args,
+ 'duration': self._get_duration()}
traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
+ def _get_topology(self):
+ topology = self.scenario_cfg["topology"]
+ path = self.scenario_cfg["task_path"]
+ with utils.open_relative_file(topology, path) as infile:
+ return infile.read()
+
def _render_topology(self):
topology = self._get_topology()
topology_args = self.scenario_cfg.get('extra_args', {})
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 705544c41..2b3474070 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -215,15 +215,15 @@ class Vsperf(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 454587829..27bf40dcb 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -231,7 +231,7 @@ class VsperfDPDK(base.Scenario):
is_run = True
cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
return is_run
@@ -325,15 +325,15 @@ class VsperfDPDK(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index d3ed840d8..c57c6edf2 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -223,7 +223,7 @@ class Fio(base.Scenario):
sla_error += "%s %d < " \
"sla:%s(%d); " % (k, v, k, min_v)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index f6e4ab7e9..1ebd32509 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -119,6 +119,7 @@ INFLUXDB_DB_NAME = get_param('influxdb.db_name', 'yardstick')
INFLUXDB_IMAGE = get_param('influxdb.image', 'tutum/influxdb')
INFLUXDB_TAG = get_param('influxdb.tag', '0.13')
INFLUXDB_DASHBOARD_PORT = 8083
+QUEUE_PUT_TIMEOUT = 10
# grafana
GRAFANA_IP = get_param('grafana.ip', SERVER_IP)
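QUEUE_PUT_TIMEOUT is consumed further down in prox_binsearch.py as queue.put(item, True, QUEUE_PUT_TIMEOUT), so a stalled consumer raises Full after ten seconds instead of blocking the traffic profile forever. The standard-library semantics it relies on (multiprocessing queues have the same put() signature):

    from six.moves import queue

    QUEUE_PUT_TIMEOUT = 10

    samples_queue = queue.Queue(maxsize=1)
    samples_queue.put({"Test_Rate": 50.0}, True, QUEUE_PUT_TIMEOUT)
    try:
        # Queue is already full: block for up to 10 s, then raise queue.Full
        # rather than hanging indefinitely.
        samples_queue.put({"Test_Rate": 75.0}, True, QUEUE_PUT_TIMEOUT)
    except queue.Full:
        pass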
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 5f362b356..935c77866 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -68,6 +68,10 @@ class ResourceCommandError(YardstickException):
message = 'Command: "%(command)s" Failed, stderr: "%(stderr)s"'
+class ContextUpdateCollectdForNodeError(YardstickException):
+ message = 'Cannot find node %(attr_name)s'
+
+
class FunctionNotImplemented(YardstickException):
message = ('The function "%(function_name)s" is not implemented in '
'"%(class_name)" class.')
@@ -168,7 +172,7 @@ class IncorrectNodeSetup(IncorrectSetup):
class ScenarioConfigContextNameNotFound(YardstickException):
- message = 'Context name "%(context_name)s" not found'
+ message = 'Context for host name "%(host_name)s" not found'
class StackCreationInterrupt(YardstickException):
@@ -313,3 +317,16 @@ class IxNetworkFlowNotPresent(YardstickException):
class IxNetworkFieldNotPresentInStackItem(YardstickException):
message = 'Field "%(field_name)s" not present in stack item %(stack_item)s'
+
+
+class SLAValidationError(YardstickException):
+ message = '%(case_name)s SLA validation failed. Error: %(error_msg)s'
+
+
+class AclMissingActionArguments(YardstickException):
+ message = ('Missing ACL action parameter '
+ '[action=%(action_name)s parameter=%(action_param)s]')
+
+
+class AclUknownActionTemplate(YardstickException):
+ message = 'No ACL CLI template found for "%(action_name)s" action'
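The new exception classes follow the existing YardstickException convention: a class-level message with %(name)s placeholders filled from constructor keyword arguments. The base class below paraphrases that convention for illustration (it is not copied from the tree), and the argument values are invented:

    class YardstickExceptionSketch(Exception):
        """Minimal sketch of the keyword-formatted exception pattern."""
        message = "An unknown exception occurred."

        def __init__(self, **kwargs):
            super(YardstickExceptionSketch, self).__init__(self.message % kwargs)

    class AclMissingActionArguments(YardstickExceptionSketch):
        message = ('Missing ACL action parameter '
                   '[action=%(action_name)s parameter=%(action_param)s]')

    try:
        raise AclMissingActionArguments(action_name='nat', action_param='port')
    except AclMissingActionArguments as exc:
        print(exc)
        # Missing ACL action parameter [action=nat parameter=port]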
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 6ff6617a9..541061351 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -172,6 +172,15 @@ def get_shade_client(**os_cloud_config):
params.update(os_cloud_config)
return shade.openstack_cloud(**params)
+def get_shade_operator_client(**os_cloud_config):
+ """Get Shade Operator cloud client
+
+ :return: ``shade.OperatorCloud`` object.
+ """
+ params = copy.deepcopy(constants.OS_CLOUD_DEFAULT_CONFIG)
+ params.update(os_cloud_config)
+ return shade.operator_cloud(**params)
+
# *********************************************
# NOVA
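get_shade_operator_client() mirrors get_shade_client() but returns shade's OperatorCloud, which adds operator/admin calls (for example Ironic baremetal operations) on top of the regular OpenStackCloud API. A hedged usage sketch, assuming a cloud named "mycloud" is defined in clouds.yaml:

    import shade

    # "mycloud" is an assumed clouds.yaml entry; adjust to your environment.
    operator = shade.operator_cloud(cloud='mycloud')

    # OperatorCloud exposes baremetal node listing, which the regular
    # openstack_cloud() client does not.
    for machine in operator.list_machines():
        print(machine.get('uuid'), machine.get('provision_state'))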
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 108ee17bc..251e5cc6c 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -37,6 +37,7 @@ from oslo_utils import encodeutils
import yardstick
from yardstick.common import exceptions
+from yardstick.common.yaml_loader import yaml_load
logger = logging.getLogger(__name__)
@@ -306,6 +307,19 @@ def get_ip_version(ip_addr):
return address.version
+def make_ip_addr(ip, mask):
+ """
+ :param ip[str]: ip adddress
+ :param mask[str]: /24 prefix of 255.255.255.0 netmask
+ :return: IPv4Interface object
+ """
+ try:
+ return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
+ except (TypeError, ValueError):
+ # None so we can skip later
+ return None
+
+
def ip_to_hex(ip_addr, separator=''):
try:
address = ipaddress.ip_address(six.text_type(ip_addr))
@@ -409,13 +423,18 @@ class ErrorClass(object):
class Timer(object):
- def __init__(self, timeout=None):
+ def __init__(self, timeout=None, raise_exception=True):
super(Timer, self).__init__()
self.start = self.delta = None
self._timeout = int(timeout) if timeout else None
+ self._timeout_flag = False
+ self._raise_exception = raise_exception
def _timeout_handler(self, *args):
- raise exceptions.TimerTimeout(timeout=self._timeout)
+ self._timeout_flag = True
+ if self._raise_exception:
+ raise exceptions.TimerTimeout(timeout=self._timeout)
+ self.__exit__()
def __enter__(self):
self.start = datetime.datetime.now()
@@ -432,6 +451,23 @@ class Timer(object):
def __getattr__(self, item):
return getattr(self.delta, item)
+ def __iter__(self):
+ self._raise_exception = False
+ return self.__enter__()
+
+ def next(self): # pragma: no cover
+ # NOTE(ralonsoh): Python 2 support.
+ if not self._timeout_flag:
+ return datetime.datetime.now()
+ raise StopIteration()
+
+ def __next__(self): # pragma: no cover
+ # NOTE(ralonsoh): Python 3 support.
+ return self.next()
+
+ def __del__(self): # pragma: no cover
+ signal.alarm(0)
+
def read_meminfo(ssh_client):
"""Read "/proc/meminfo" file and parse all keys and values"""
@@ -492,3 +528,11 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
if exception and issubclass(exception, Exception):
raise exception # pylint: disable=raising-bad-type
raise exceptions.WaitTimeout
+
+
+def read_yaml_file(path):
+ """Read yaml file"""
+
+ with open(path) as stream:
+ data = yaml_load(stream)
+ return data
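make_ip_addr(), moved here from MultiPortConfig, is a thin failure-tolerant wrapper over ipaddress.ip_interface(): bad input yields None so callers can simply skip that interface. Equivalent standalone behaviour, with illustrative addresses:

    import ipaddress
    import six

    def make_ip_addr(ip, mask):
        """Return an IPv4Interface/IPv6Interface, or None on invalid input."""
        try:
            return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
        except (TypeError, ValueError):
            return None

    iface = make_ip_addr('152.16.100.19', '255.255.255.0')
    print(iface.ip.exploded, iface.network.prefixlen)   # 152.16.100.19 24
    print(make_ip_addr('152.16.100.19', None))          # None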
diff --git a/yardstick/network_services/helpers/samplevnf_helper.py b/yardstick/network_services/helpers/samplevnf_helper.py
index 0ab10d7b7..8e6a3a3ea 100644
--- a/yardstick/network_services/helpers/samplevnf_helper.py
+++ b/yardstick/network_services/helpers/samplevnf_helper.py
@@ -23,8 +23,7 @@ from itertools import chain, repeat
import six
from six.moves.configparser import ConfigParser
-
-from yardstick.common.utils import ip_to_hex
+from yardstick.common import utils
LOG = logging.getLogger(__name__)
@@ -34,19 +33,6 @@ link {0} config {1} {2}
link {0} up
"""
-ACTION_TEMPLATE = """\
-p action add {0} accept
-p action add {0} fwd {0}
-p action add {0} count
-"""
-
-FW_ACTION_TEMPLATE = """\
-p action add {0} accept
-p action add {0} fwd {0}
-p action add {0} count
-p action add {0} conntrack
-"""
-
# This sets up a basic passthrough with no rules
SCRIPT_TPL = """
{link_config}
@@ -59,9 +45,7 @@ SCRIPT_TPL = """
{arp_route_tbl6}
-{actions}
-
-{rules}
+{flows}
"""
@@ -182,26 +166,9 @@ class MultiPortConfig(object):
return parser.get(section, key)
return default
- @staticmethod
- def make_ip_addr(ip, mask):
- """
- :param ip: ip adddress
- :type ip: str
- :param mask: /24 prefix of 255.255.255.0 netmask
- :type mask: str
- :return: interface
- :rtype: IPv4Interface
- """
-
- try:
- return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
- except (TypeError, ValueError):
- # None so we can skip later
- return None
-
@classmethod
def validate_ip_and_prefixlen(cls, ip_addr, prefixlen):
- ip_addr = cls.make_ip_addr(ip_addr, prefixlen)
+ ip_addr = utils.make_ip_addr(ip_addr, prefixlen)
return ip_addr.ip.exploded, ip_addr.network.prefixlen
def __init__(self, topology_file, config_tpl, tmp_file, vnfd_helper,
@@ -245,7 +212,7 @@ class MultiPortConfig(object):
self.ports_len = 0
self.prv_que_handler = None
self.vnfd = None
- self.rules = None
+ self.flows = None
self.pktq_out = []
@staticmethod
@@ -360,7 +327,7 @@ class MultiPortConfig(object):
"%s/%s" % (interface["dst_ip"], interface["netmask"])))
arp_vars = {
- "port_netmask_hex": ip_to_hex(dst_port_ip.network.netmask.exploded),
+ "port_netmask_hex": utils.ip_to_hex(dst_port_ip.network.netmask.exploded),
# this is the port num that contains port0 subnet and next_hop_ip_hex
# this is LINKID which should be based on DPDK port number
"port_num": dpdk_port_num,
@@ -542,7 +509,7 @@ class MultiPortConfig(object):
self.update_write_parser(self.loadb_tpl)
self.start_core += 1
- for i in range(self.worker_threads):
+ for _ in range(self.worker_threads):
vnf_data = self.generate_vnf_data()
if not self.vnf_tpl:
self.vnf_tpl = {}
@@ -637,65 +604,8 @@ class MultiPortConfig(object):
return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config6))
- def generate_action_config(self):
- port_list = (self.vnfd_helper.port_num(p) for p in self.all_ports)
- if self.vnf_type == "VFW":
- template = FW_ACTION_TEMPLATE
- else:
- template = ACTION_TEMPLATE
-
- return ''.join((template.format(port) for port in port_list))
-
- def get_ip_from_port(self, port):
- # we can't use gateway because in OpenStack gateways interfer with floating ip routing
- # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
- vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
- ip = vintf["local_ip"]
- netmask = vintf["netmask"]
- return self.make_ip_addr(ip, netmask)
-
- def get_network_and_prefixlen_from_ip_of_port(self, port):
- ip_addr = self.get_ip_from_port(port)
- # handle cases with no gateway
- if ip_addr:
- return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
- else:
- return None, None
-
- def generate_rule_config(self):
- cmd = 'acl' if self.vnf_type == "ACL" else "vfw"
- rules_config = self.rules if self.rules else ''
- new_rules = []
- new_ipv6_rules = []
- pattern = 'p {0} add {1} {2} {3} {4} {5} 0 65535 0 65535 0 0 {6}'
- for src_intf, dst_intf in self.port_pair_list:
- src_port = self.vnfd_helper.port_num(src_intf)
- dst_port = self.vnfd_helper.port_num(dst_intf)
-
- src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_intf)
- dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_intf)
- # ignore entires with empty values
- if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
- new_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
- dst_net, dst_prefix_len, dst_port))
- new_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
- src_net, src_prefix_len, src_port))
-
- # src_net = self.get_ports_gateway6(port_pair[0])
- # src_prefix_len = self.get_netmask_gateway6(port_pair[0])
- # dst_net = self.get_ports_gateway6(port_pair[1])
- # dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
- # # ignore entires with empty values
- # if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
- # new_ipv6_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
- # dst_net, dst_prefix_len, dst_port))
- # new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
- # src_net, src_prefix_len, src_port))
-
- acl_apply = "\np %s applyruleset" % cmd
- new_rules_config = '\n'.join(pattern.format(*values) for values
- in chain(new_rules, new_ipv6_rules))
- return ''.join([rules_config, new_rules_config, acl_apply])
+ def get_flows_config(self):
+ return self.flows if self.flows else ''
def generate_script_data(self):
self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
@@ -707,24 +617,15 @@ class MultiPortConfig(object):
# disable IPv6 for now
# 'arp_config6': self.generate_arp_config6(),
'arp_config6': "",
- 'arp_config': self.generate_arp_config(),
'arp_route_tbl': self.generate_arp_route_tbl(),
'arp_route_tbl6': "",
- 'actions': '',
- 'rules': '',
+ 'flows': self.get_flows_config()
}
-
- if self.vnf_type in ('ACL', 'VFW'):
- script_data.update({
- 'actions': self.generate_action_config(),
- 'rules': self.generate_rule_config(),
- })
-
return script_data
- def generate_script(self, vnfd, rules=None):
+ def generate_script(self, vnfd, flows=None):
self.vnfd = vnfd
- self.rules = rules
+ self.flows = flows
script_data = self.generate_script_data()
script = SCRIPT_TPL.format(**script_data)
if self.lb_config == self.HW_LB:
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index 9eba550aa..f4b5b178c 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -16,6 +16,31 @@ from yardstick.common import exceptions
from yardstick.common import utils
+class TrafficProfileConfig(object):
+ """Class to contain the TrafficProfile class information
+
+ This object will parse and validate the traffic profile information.
+ """
+
+ DEFAULT_SCHEMA = 'nsb:traffic_profile:0.1'
+ DEFAULT_FRAME_RATE = 100
+ DEFAULT_DURATION = 30
+
+ def __init__(self, tp_config):
+ self.schema = tp_config.get('schema', self.DEFAULT_SCHEMA)
+ self.name = tp_config.get('name')
+ self.description = tp_config.get('description')
+ tprofile = tp_config['traffic_profile']
+ self.traffic_type = tprofile.get('traffic_type')
+ self.frame_rate = tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE)
+ self.test_precision = tprofile.get('test_precision')
+ self.packet_sizes = tprofile.get('packet_sizes')
+ self.duration = tprofile.get('duration', self.DEFAULT_DURATION)
+ self.lower_bound = tprofile.get('lower_bound')
+ self.upper_bound = tprofile.get('upper_bound')
+ self.step_interval = tprofile.get('step_interval')
+
+
class TrafficProfile(object):
"""
This class defines the behavior
@@ -43,6 +68,7 @@ class TrafficProfile(object):
# e.g. RFC2544 start_ip, stop_ip, drop_rate,
# IMIX = {"10K": 0.1, "100M": 0.5}
self.params = tp_config
+ self.config = TrafficProfileConfig(tp_config)
def execute_traffic(self, traffic_generator, **kawrgs):
""" This methods defines the behavior of the traffic generator.
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 73806f958..e105c2f55 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -95,7 +95,6 @@ class IXIARFC2544Profile(TrexProfile):
if not profile_data:
continue
self.profile_data = profile_data
- self.get_streams(self.profile_data)
self.full_profile.update({vld_id: self.profile_data})
for intf in intfs:
yield traffic_generator.vnfd_helper.port_num(intf)
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 225ee4356..af83b8f4c 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -21,6 +21,7 @@ import time
from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
from yardstick.network_services import constants
+from yardstick.common import constants as overall_constants
LOG = logging.getLogger(__name__)
@@ -84,9 +85,14 @@ class ProxBinSearchProfile(ProxProfile):
# success, the binary search will complete on an integer multiple
# of the precision, rather than on a fraction of it.
- theor_max_thruput = 0
+ theor_max_thruput = actual_max_thruput = 0
result_samples = {}
+ rate_samples = {}
+ pos_retry = 0
+ neg_retry = 0
+ total_retry = 0
+ ok_retry = 0
# Store one time only value in influxdb
single_samples = {
@@ -102,47 +108,85 @@ class ProxBinSearchProfile(ProxProfile):
successful_pkt_loss = 0.0
line_speed = traffic_gen.scenario_helper.all_options.get(
"interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
+
+ ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
for test_value in self.bounds_iterator(LOG):
- result, port_samples = self._profile_helper.run_test(pkt_size, duration,
- test_value,
- self.tolerated_loss,
- line_speed)
- self.curr_time = time.time()
- self.prev_time = self.curr_time
-
- if result.success:
- LOG.debug("Success! Increasing lower bound")
- self.current_lower = test_value
- successful_pkt_loss = result.pkt_loss
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
-
- # store results with success tag in influxdb
- success_samples = {'Success_' + key: value for key, value in samples.items()}
-
- # Store number of packets based statistics (we already have throughput)
- success_samples["Success_rx_total"] = int(result.rx_total)
- success_samples["Success_tx_total"] = int(result.tx_total)
- success_samples["Success_can_be_lost"] = int(result.can_be_lost)
- success_samples["Success_drop_total"] = int(result.drop_total)
- self.queue.put(success_samples)
-
- # Store Actual throughput for result samples
- result_samples["Result_Actual_throughput"] = \
- success_samples["Success_RxThroughput"]
- else:
- LOG.debug("Failure... Decreasing upper bound")
- self.current_upper = test_value
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- # samples contains data such as Latency, Throughput, number of packets
- # Hence they should not be divided by the time difference
-
- if theor_max_thruput < samples["RequestedTxThroughput"]:
- theor_max_thruput = samples['RequestedTxThroughput']
- self.queue.put({'theor_max_throughput': theor_max_thruput})
-
- LOG.debug("Collect TG KPIs %s %s", datetime.datetime.now(), samples)
- self.queue.put(samples)
+ pos_retry = 0
+ neg_retry = 0
+ total_retry = 0
+
+ rate_samples["MAX_Rate"] = self.current_upper
+ rate_samples["MIN_Rate"] = self.current_lower
+ rate_samples["Test_Rate"] = test_value
+ self.queue.put(rate_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
+ while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
+
+ total_retry = total_retry + 1
+ result, port_samples = self._profile_helper.run_test(pkt_size, duration,
+ test_value,
+ self.tolerated_loss,
+ line_speed)
+ if (total_retry > (ok_retry * 3)) and (ok_retry != 0):
+ LOG.info("Failure.!! .. RETRY EXCEEDED ... decrease lower bound")
+
+ successful_pkt_loss = result.pkt_loss
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ self.current_upper = test_value
+ neg_retry = total_retry
+ elif result.success:
+ if (pos_retry < ok_retry) and (ok_retry != 0):
+ neg_retry = 0
+ LOG.info("Success! ... confirm retry")
+
+ successful_pkt_loss = result.pkt_loss
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ else:
+ LOG.info("Success! Increasing lower bound")
+ self.current_lower = test_value
+
+ successful_pkt_loss = result.pkt_loss
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ # store results with success tag in influxdb
+ success_samples = \
+ {'Success_' + key: value for key, value in samples.items()}
+
+ success_samples["Success_rx_total"] = int(result.rx_total)
+ success_samples["Success_tx_total"] = int(result.tx_total)
+ success_samples["Success_can_be_lost"] = int(result.can_be_lost)
+ success_samples["Success_drop_total"] = int(result.drop_total)
+ success_samples["Success_RxThroughput"] = samples["RxThroughput"]
+ LOG.info(">>>##>>Collect SUCCESS TG KPIs %s %s",
+ datetime.datetime.now(), success_samples)
+ self.queue.put(success_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
+
+ # Store Actual throughput for result samples
+ actual_max_thruput = success_samples["Success_RxThroughput"]
+
+ pos_retry = pos_retry + 1
+
+ else:
+ if (neg_retry < ok_retry) and (ok_retry != 0):
+
+ pos_retry = 0
+ LOG.info("failure! ... confirm retry")
+ else:
+ LOG.info("Failure... Decreasing upper bound")
+ self.current_upper = test_value
+
+ neg_retry = neg_retry + 1
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ if theor_max_thruput < samples["TxThroughput"]:
+ theor_max_thruput = samples['TxThroughput']
+ self.queue.put({'theor_max_throughput': theor_max_thruput})
+
+ LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+ self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
result_samples["Result_pktSize"] = pkt_size
result_samples["Result_theor_max_throughput"] = theor_max_thruput
+ result_samples["Result_Actual_throughput"] = actual_max_thruput
self.queue.put(result_samples)
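The reworked loop keeps the binary search but re-runs each candidate rate up to the runner's "confirmation" count (ok_retry) before trusting the verdict: only a confirmed success raises the lower bound, only a confirmed failure lowers the upper bound, and more than ok_retry * 3 attempts at one rate is treated as a failure. The skeleton below strips out the sample bookkeeping to show just that control flow; run_test stands in for ProxProfileHelper.run_test and returns a boolean here:

    def confirmed_search(run_test, lower, upper, precision, ok_retry):
        """Binary rate search requiring `ok_retry` confirmations (sketch)."""
        while upper - lower > precision:
            test_value = (lower + upper) / 2.0
            pos_retry = neg_retry = total_retry = 0
            while pos_retry <= ok_retry and neg_retry <= ok_retry:
                total_retry += 1
                success = run_test(test_value)
                if total_retry > ok_retry * 3 and ok_retry != 0:
                    upper = test_value            # retries exhausted: failure
                    neg_retry = total_retry
                elif success:
                    if pos_retry < ok_retry and ok_retry != 0:
                        neg_retry = 0             # success, confirm again
                    else:
                        lower = test_value        # confirmed success
                    pos_retry += 1
                else:
                    if neg_retry < ok_retry and ok_retry != 0:
                        pos_retry = 0             # failure, confirm again
                    else:
                        upper = test_value        # confirmed failure
                    neg_retry += 1
        return lower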
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 83020c85c..c24e2f65a 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -11,190 +11,288 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" RFC2544 Throughput implemenation """
-from __future__ import absolute_import
-from __future__ import division
import logging
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
+
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-from yardstick.network_services.traffic_profile.trex_traffic_profile \
- import TrexProfile
LOGGING = logging.getLogger(__name__)
+SRC_PORT = 'sport'
+DST_PORT = 'dport'
+
+
+class PortPgIDMap(object):
+ """Port and pg_id mapping class
+
+ "pg_id" is the identification STL library gives to each stream. In the
+ RFC2544Profile class, the traffic has a STLProfile per port, which contains
+ one or several streams, one per packet size defined in the IMIX test case
+ description.
+
+ Example of port <-> pg_id map:
+ self._port_pg_id_map = {
+ 0: [1, 2, 3, 4],
+ 1: [5, 6, 7, 8]
+ }
+ """
+
+ def __init__(self):
+ self._pg_id = 0
+ self._last_port = None
+ self._port_pg_id_map = {}
+
+ def add_port(self, port):
+ self._last_port = port
+ self._port_pg_id_map[port] = []
+
+ def get_pg_ids(self, port):
+ return self._port_pg_id_map.get(port)
+
+ def increase_pg_id(self, port=None):
+ port = self._last_port if not port else port
+ if port is None:
+ return
+ pg_id_list = self._port_pg_id_map.get(port)
+ if not pg_id_list:
+ self.add_port(port)
+ pg_id_list = self._port_pg_id_map[port]
+ self._pg_id += 1
+ pg_id_list.append(self._pg_id)
+ return self._pg_id
-class RFC2544Profile(TrexProfile):
- """ This class handles rfc2544 implemenation. """
+class RFC2544Profile(trex_traffic_profile.TrexProfile):
+ """TRex RFC2544 traffic profile"""
+
+ TOLERANCE_LIMIT = 0.05
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
self.generator = None
- self.max_rate = None
- self.min_rate = None
- self.ports = None
- self.rate = 100
- self.drop_percent_at_max_tx = None
- self.throughput_max = None
+ self.rate = self.config.frame_rate
+ self.max_rate = self.config.frame_rate
+ self.min_rate = 0
+ self.drop_percent_max = 0
def register_generator(self, generator):
self.generator = generator
- def execute_traffic(self, traffic_generator=None):
- """ Generate the stream and run traffic on the given ports """
+ def stop_traffic(self, traffic_generator=None):
+ """"Stop traffic injection, reset counters and remove streams"""
if traffic_generator is not None and self.generator is None:
self.generator = traffic_generator
- if self.ports is not None:
- return
+ self.generator.client.stop()
+ self.generator.client.reset()
+ self.generator.client.remove_all_streams()
+
+ def execute_traffic(self, traffic_generator=None):
+ """Generate the stream and run traffic on the given ports
+
+ :param traffic_generator: (TrexTrafficGenRFC) traffic generator
+ :return ports: (list of int) indexes of ports
+ port_pg_id: (dict) port indexes and pg_id [1] map
+ [1] https://trex-tgn.cisco.com/trex/doc/cp_stl_docs/api/
+ profile_code.html#stlstream-modes
+ """
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- self.ports = []
+ port_pg_id = PortPgIDMap()
+ ports = []
for vld_id, intfs in sorted(self.generator.networks.items()):
profile_data = self.params.get(vld_id)
- # no profile for this port
if not profile_data:
continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
continue
for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.max_rate = self.rate
- self.min_rate = 0
- self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
- duration=30, force=True)
- self.drop_percent_at_max_tx = 0
- self.throughput_max = 0
-
- def get_multiplier(self):
- """ Get the rate at which next iteration to run """
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def get_drop_percentage(self, generator=None):
- """ Calculate the drop percentage and run the traffic """
- if generator is None:
- generator = self.generator
- run_duration = self.generator.RUN_DURATION
- samples = self.generator.generate_samples(self.ports)
-
- in_packets = sum([value['in_packets'] for value in samples.values()])
- out_packets = sum([value['out_packets'] for value in samples.values()])
-
- packet_drop = abs(out_packets - in_packets)
- drop_percent = 100.0
- try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
- except ZeroDivisionError:
- LOGGING.info('No traffic is flowing')
+ port_num = int(self.generator.port_num(intf))
+ ports.append(port_num)
+ port_pg_id.add_port(port_num)
+ profile = self._create_profile(profile_data,
+ self.rate, port_pg_id)
+ self.generator.client.add_streams(profile, ports=[port_num])
+
+ self.generator.client.start(ports=ports,
+ duration=self.config.duration,
+ force=True)
+ return ports, port_pg_id
+
+ def _create_profile(self, profile_data, rate, port_pg_id):
+ """Create a STL profile (list of streams) for a port"""
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ imix_data = self._create_imix_data(imix)
+ self._create_vm(profile_data[packet_name])
+ _streams = self._create_streams(imix_data, rate, port_pg_id)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _create_imix_data(self, imix):
+ """Generate the IMIX distribution for a STL profile
+
+ The input information is the framesize dictionary in a test case
+ traffic profile definition. E.g.:
+ downlink_0:
+ ipv4:
+ id: 2
+ outer_l2:
+ framesize:
+ 64B: 10
+ 128B: 20
+ ...
+
+ This function normalizes the sum of framesize weights to 100 and
+ returns a dictionary of frame sizes in bytes and weight in percentage.
+ E.g.:
+ imix_count = {64: 25, 128: 75}
+
+ :param imix: (dict) IMIX size and weight
+ """
+ imix_count = {}
+ if not imix:
+ return imix_count
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ imix_count = {64: 100}
+ imix_sum = 100
+
+ weight_normalize = float(imix_sum) / 100
+ return {size: float(weight) / weight_normalize
+ for size, weight in imix_count.items()}
+
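As a standalone illustration of the normalization performed by _create_imix_data() (the helper name below is illustrative, not part of the patch):

    def normalize_imix(imix):
        # Strip the 'B' suffix ('64B' -> '64') and keep the integer weights.
        counts = {size.upper().replace('B', ''): int(weight)
                  for size, weight in imix.items()}
        total = sum(counts.values())
        if total <= 0:
            counts, total = {64: 100}, 100
        # Scale the weights so that they sum to 100.
        return {size: float(weight) * 100 / total
                for size, weight in counts.items()}

    # normalize_imix({'64B': 10, '128B': 30}) -> {'64': 25.0, '128': 75.0}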
+ def _create_vm(self, packet_definition):
+ """Create the STL Raw instructions"""
+ self.ether_packet = Pkt.Ether()
+ self.ip_packet = Pkt.IP()
+ self.ip6_packet = None
+ self.udp_packet = Pkt.UDP()
+ self.udp[DST_PORT] = 'UDP.dport'
+ self.udp[SRC_PORT] = 'UDP.sport'
+ self.qinq = False
+ self.vm_flow_vars = []
+ outer_l2 = packet_definition.get('outer_l2')
+ outer_l3v4 = packet_definition.get('outer_l3v4')
+ outer_l3v6 = packet_definition.get('outer_l3v6')
+ outer_l4 = packet_definition.get('outer_l4')
+ if outer_l2:
+ self._set_outer_l2_fields(outer_l2)
+ if outer_l3v4:
+ self._set_outer_l3v4_fields(outer_l3v4)
+ if outer_l3v6:
+ self._set_outer_l3v6_fields(outer_l3v6)
+ if outer_l4:
+ self._set_outer_l4_fields(outer_l4)
+ self.trex_vm = trex_stl_packet_builder_scapy.STLScVmRaw(
+ self.vm_flow_vars)
+
+ def _create_single_packet(self, size=64):
+ size -= 4
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ udp_packet = self.udp_packet
+ if self.qinq:
+ qinq_packet = self.qinq_packet
+ base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
+ else:
+ base_pkt = ether_packet / ip_packet / udp_packet
+ pad = max(0, size - len(base_pkt)) * 'x'
+ return trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / pad, vm=self.trex_vm)
+
+ def _create_streams(self, imix_data, rate, port_pg_id):
+ """Create a list of streams per packet size
+
+ The STL TX mode speed of the generated streams will depend on the frame
+ weight and the frame rate. Both the frame weight and the total frame
+ rate are normalized to 100. The STL TX mode speed, defined in
+        percentage, is the combination of both percentages. E.g.:
+            frame weight = 100
+            rate = 90
+            --> STLTXmode percentage = 90 (%)
+
+ frame weight = 80
+ rate = 50
+ --> STLTXmode percentage = 40 (%)
+
+ :param imix_data: (dict) IMIX size and weight
+ :param rate: (float) normalized [0..100] total weight
+        :param port_pg_id: (PortPgIDMap) port / pg_id (list) map
+ """
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ packet = self._create_single_packet(size)
+ pg_id = port_pg_id.increase_pg_id()
+ stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
+ streams.append(trex_stl_client.STLStream(
+ packet=packet, flow_stats=stl_flow, mode=mode))
+ return streams
+
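A quick worked example of the weight/rate combination used for the STLTXCont mode above (numbers are illustrative):

    imix_data = {64: 25.0, 128: 75.0}   # normalized frame-size weights
    rate = 50.0                         # normalized total rate [0..100]

    # Each stream is sent at weight * rate / 100 percent of line rate.
    percentages = {size: weight * rate / 100
                   for size, weight in imix_data.items()}
    # -> {64: 12.5, 128: 37.5}; the per-port sum equals the requested rate.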
+ def get_drop_percentage(self, samples, tol_low, tol_high,
+ correlated_traffic):
+ """Calculate the drop percentage and run the traffic"""
+ tx_rate_fps = 0
+ rx_rate_fps = 0
+ for sample in samples:
+ tx_rate_fps += sum(
+ port['tx_throughput_fps'] for port in sample.values())
+ rx_rate_fps += sum(
+ port['rx_throughput_fps'] for port in sample.values())
+ tx_rate_fps = round(float(tx_rate_fps) / len(samples), 2)
+ rx_rate_fps = round(float(rx_rate_fps) / len(samples), 2)
# TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
- tolerance_low = generator.rfc2544_helper.tolerance_low
- tolerance_high = generator.rfc2544_helper.tolerance_high
-
- tx_rate = out_packets / run_duration
- rx_rate = in_packets / run_duration
-
- throughput_max = self.throughput_max
- drop_percent_at_max_tx = self.drop_percent_at_max_tx
+ out_packets = sum(port['out_packets'] for port in samples[-1].values())
+ in_packets = sum(port['in_packets'] for port in samples[-1].values())
+ drop_percent = 100.0
- if self.drop_percent_at_max_tx is None:
- self.rate = tx_rate
- self.first_run = False
+ # https://tools.ietf.org/html/rfc2544#section-26.3
+ if out_packets:
+ drop_percent = round(
+ (float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- if drop_percent > tolerance_high:
- # TODO(esm): why don't we discard results that are out of tolerance?
+        tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+        tol_low = max(tol_low, self.TOLERANCE_LIMIT)
+ if drop_percent > tol_high:
self.max_rate = self.rate
- if throughput_max == 0:
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= tolerance_low:
- # TODO(esm): why do we update the samples dict in this case
- # and not update our tracking values?
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= self.drop_percent_at_max_tx:
- # TODO(esm): why don't we discard results that are out of tolerance?
+ elif drop_percent < tol_low:
self.min_rate = self.rate
- self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
- self.throughput_max = throughput_max = rx_rate
+ # else:
+ # NOTE(ralonsoh): the test should finish here
+ # pass
+ last_rate = self.rate
+ self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
- else:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
+ throughput = rx_rate_fps * 2 if correlated_traffic else rx_rate_fps
- generator.clear_client_stats(self.ports)
- generator.start_client(self.ports, mult=self.get_multiplier(),
- duration=run_duration, force=True)
+ if drop_percent > self.drop_percent_max:
+ self.drop_percent_max = drop_percent
- # if correlated traffic update the Throughput
- if generator.rfc2544_helper.correlated_traffic:
- throughput_max *= 2
+ latency = {port_num: value['latency']
+ for port_num, value in samples[-1].items()}
- samples.update({
- 'TxThroughput': tx_rate,
- 'RxThroughput': rx_rate,
+ output = {
+ 'TxThroughput': tx_rate_fps,
+ 'RxThroughput': rx_rate_fps,
'CurrentDropPercentage': drop_percent,
- 'Throughput': throughput_max,
- 'DropPercentage': drop_percent_at_max_tx,
- })
-
- return samples
-
- def execute_latency(self, generator=None, samples=None):
- if generator is not None and self.generator is None:
- self.generator = generator
-
- if samples is None:
- samples = self.generator.generate_samples()
-
- self.pps, multiplier = self.calculate_pps(samples)
- self.ports = []
- self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
- for vld_id, intfs in sorted(self.generator.networks.items()):
- profile_data = self.params.get(vld_id)
- if not profile_data:
- continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
- continue
- for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.generator.start_client(ports=self.ports, mult=str(multiplier),
- duration=120, force=True)
- self.first_run = False
-
- def calculate_pps(self, samples):
- pps = round(samples['Throughput'] / 2, 2)
- multiplier = round(self.rate / self.pps, 2)
- return pps, multiplier
-
- def create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if pps:
- stl_mode = STLTXCont(pps=pps)
- else:
- stl_mode = STLTXCont(pps=self.pps)
- if self.pg_id:
- LOGGING.debug("pg_id: %s", self.pg_id)
- stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
- flow_stats=stl_flow_stats)
- self.pg_id += 1
- else:
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
- return stream
+ 'Throughput': throughput,
+ 'DropPercentage': self.drop_percent_max,
+ 'Rate': last_rate,
+ 'Latency': latency
+ }
+ return output
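The min_rate/max_rate bookkeeping above amounts to a binary search over the offered rate; a condensed sketch of one iteration (function name and argument order are illustrative):

    def next_rate(rate, drop_percent, min_rate, max_rate, tol_low, tol_high):
        # Loss above the upper tolerance: the current rate is the new upper bound.
        if drop_percent > tol_high:
            max_rate = rate
        # Loss below the lower tolerance: the current rate is the new lower bound.
        elif drop_percent < tol_low:
            min_rate = rate
        # Try the midpoint of the remaining interval next, as the profile does.
        return round(float(max_rate + min_rate) / 2.0, 5), min_rate, max_rate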
diff --git a/yardstick/network_services/traffic_profile/trex_traffic_profile.py b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
index f5e3923d5..ed0355fa5 100644
--- a/yardstick/network_services/traffic_profile/trex_traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
@@ -19,21 +19,16 @@ from random import SystemRandom
import ipaddress
import six
-
-from yardstick.common import exceptions as y_exc
-from yardstick.network_services.traffic_profile import base
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
-from trex_stl_lib.trex_stl_streams import STLProfile
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
from trex_stl_lib import api as Pkt
+from yardstick.common import exceptions as y_exc
+from yardstick.network_services.traffic_profile import base
+
+
SRC = 'src'
DST = 'dst'
ETHERNET = 'Ethernet'
@@ -342,115 +337,6 @@ class TrexProfile(base.TrafficProfile):
if 'dstport' in outer_l4:
self._set_proto_addr(UDP, DST_PORT, outer_l4['dstport'], outer_l4['count'])
- def generate_imix_data(self, packet_definition):
- """ generate packet size for a given traffic profile """
- imix_count = {}
- imix_data = {}
- if not packet_definition:
- return imix_count
- imix = packet_definition.get('framesize')
- if imix:
- for size in imix:
- data = imix[size]
- imix_data[int(size[:-1])] = int(data)
- imix_sum = sum(imix_data.values())
- if imix_sum > 100:
- raise SystemExit("Error in IMIX data")
- elif imix_sum < 100:
- imix_data[64] = imix_data.get(64, 0) + (100 - imix_sum)
-
- avg_size = 0.0
- for size in imix_data:
- count = int(imix_data[size])
- if count:
- avg_size += round(size * count / 100, 2)
- pps = round(self.pps * count / 100, 0)
- imix_count[size] = pps
- self.rate = round(1342177280 / avg_size, 0) * 2
- logging.debug("Imax: %s rate: %s", imix_count, self.rate)
- return imix_count
-
- def get_streams(self, profile_data):
- """ generate trex stream
- :param profile_data:
- :type profile_data:
- """
- self.streams = []
- self.pps = self.params['traffic_profile'].get('frame_rate', 100)
- for packet_name in profile_data:
- outer_l2 = profile_data[packet_name].get('outer_l2')
- imix_data = self.generate_imix_data(outer_l2)
- if not imix_data:
- imix_data = {64: self.pps}
- self.generate_vm(profile_data[packet_name])
- for size in imix_data:
- self._generate_streams(size, imix_data[size])
- self._generate_profile()
- return self.profile
-
- def generate_vm(self, packet_definition):
- """ generate trex vm with flows setup """
- self.ether_packet = Pkt.Ether()
- self.ip_packet = Pkt.IP()
- self.ip6_packet = None
- self.udp_packet = Pkt.UDP()
- self.udp[DST_PORT] = 'UDP.dport'
- self.udp[SRC_PORT] = 'UDP.sport'
- self.qinq = False
- self.vm_flow_vars = []
- outer_l2 = packet_definition.get('outer_l2', None)
- outer_l3v4 = packet_definition.get('outer_l3v4', None)
- outer_l3v6 = packet_definition.get('outer_l3v6', None)
- outer_l4 = packet_definition.get('outer_l4', None)
- if outer_l2:
- self._set_outer_l2_fields(outer_l2)
- if outer_l3v4:
- self._set_outer_l3v4_fields(outer_l3v4)
- if outer_l3v6:
- self._set_outer_l3v6_fields(outer_l3v6)
- if outer_l4:
- self._set_outer_l4_fields(outer_l4)
- self.trex_vm = STLScVmRaw(self.vm_flow_vars)
-
- def generate_packets(self):
- """ generate packets from trex TG """
- base_pkt = self.base_pkt
- size = self.fsize - 4
- pad = max(0, size - len(base_pkt)) * 'x'
- self.packets = [STLPktBuilder(pkt=base_pkt / pad,
- vm=vm) for vm in self.vms]
-
- def _create_single_packet(self, size=64):
- size = size - 4
- ether_packet = self.ether_packet
- ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
- udp_packet = self.udp_packet
- if self.qinq:
- qinq_packet = self.qinq_packet
- base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
- else:
- base_pkt = ether_packet / ip_packet / udp_packet
- pad = max(0, size - len(base_pkt)) * 'x'
- packet = STLPktBuilder(pkt=base_pkt / pad, vm=self.trex_vm)
- return packet
-
- def _create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if self.pg_id:
- self.pg_id += 1
- stl_flow = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps),
- flow_stats=stl_flow)
- else:
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps))
- return stream
-
- def _generate_streams(self, packet_size, pps):
- self.streams.append(self._create_single_stream(packet_size, pps))
-
- def _generate_profile(self):
- self.profile = STLProfile(self.streams)
-
@classmethod
def _count_ip(cls, start_ip, end_ip):
start = ipaddress.ip_address(six.u(start_ip))
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index d9719eb4e..1357f6b26 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -13,10 +13,14 @@
# limitations under the License.
import logging
-
+import ipaddress
+import six
from yardstick.common import utils
+from yardstick.common import exceptions
+
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
-from yardstick.network_services.yang_model import YangModel
+from yardstick.network_services.helpers.samplevnf_helper import PortPairs
+from itertools import chain
LOG = logging.getLogger(__name__)
@@ -38,6 +42,196 @@ class AclApproxSetupEnvSetupEnvHelper(DpdkVnfSetupEnvHelper):
SW_DEFAULT_CORE = 5
DEFAULT_CONFIG_TPL_CFG = "acl.cfg"
VNF_TYPE = "ACL"
+ RULE_CMD = "acl"
+
+ DEFAULT_PRIORITY = 1
+ DEFAULT_PROTOCOL = 0
+ DEFAULT_PROTOCOL_MASK = 0
+ # Default actions to be applied to SampleVNF. Please note,
+ # that this list is extended with `fwd` action when default
+ # actions are generated.
+ DEFAULT_FWD_ACTIONS = ["accept", "count"]
+
+ def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
+ super(AclApproxSetupEnvSetupEnvHelper, self).__init__(vnfd_helper,
+ ssh_helper,
+ scenario_helper)
+ self._action_id = 0
+
+ def get_ip_from_port(self, port):
+ # we can't use gateway because in OpenStack gateways interfere with floating ip routing
+ # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
+ vintf = self.vnfd_helper.find_interface(name=port)["virtual-interface"]
+ return utils.make_ip_addr(vintf["local_ip"], vintf["netmask"])
+
+ def get_network_and_prefixlen_from_ip_of_port(self, port):
+ ip_addr = self.get_ip_from_port(port)
+ # handle cases with no gateway
+ if ip_addr:
+ return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
+ else:
+ return None, None
+
+ @property
+ def new_action_id(self):
+ """Get new action id"""
+ self._action_id += 1
+ return self._action_id
+
+ def get_default_flows(self):
+ """Get default actions/rules
+ Returns: (<actions>, <rules>)
+ <actions>:
+ { <action_id>: [ <list of actions> ]}
+ Example:
+ { 0 : [ "accept", "count", {"fwd" : "port": 0} ], ... }
+ <rules>:
+ [ {"src_ip": "x.x.x.x", "src_ip_mask", 24, ...}, ... ]
+ Note:
+            See `generate_rule_cmds()` for the list of possible map keys.
+ """
+ actions, rules = {}, []
+ _port_pairs = PortPairs(self.vnfd_helper.interfaces)
+ port_pair_list = _port_pairs.port_pair_list
+ for src_intf, dst_intf in port_pair_list:
+ # get port numbers of the interfaces
+ src_port = self.vnfd_helper.port_num(src_intf)
+ dst_port = self.vnfd_helper.port_num(dst_intf)
+ # get interface addresses and prefixes
+ src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_intf)
+ dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_intf)
+ # ignore entries with empty values
+ if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
+ # flow: src_net:dst_net -> dst_port
+ action_id = self.new_action_id
+ actions[action_id] = self.DEFAULT_FWD_ACTIONS[:]
+ actions[action_id].append({"fwd": {"port": dst_port}})
+ rules.append({"priority": 1, 'cmd': self.RULE_CMD,
+ "src_ip": src_net, "src_ip_mask": src_prefix_len,
+ "dst_ip": dst_net, "dst_ip_mask": dst_prefix_len,
+ "src_port_from": 0, "src_port_to": 65535,
+ "dst_port_from": 0, "dst_port_to": 65535,
+ "protocol": 0, "protocol_mask": 0,
+ "action_id": action_id})
+ # flow: dst_net:src_net -> src_port
+ action_id = self.new_action_id
+ actions[action_id] = self.DEFAULT_FWD_ACTIONS[:]
+ actions[action_id].append({"fwd": {"port": src_port}})
+ rules.append({"cmd":self.RULE_CMD, "priority": 1,
+ "src_ip": dst_net, "src_ip_mask": dst_prefix_len,
+ "dst_ip": src_net, "dst_ip_mask": src_prefix_len,
+ "src_port_from": 0, "src_port_to": 65535,
+ "dst_port_from": 0, "dst_port_to": 65535,
+ "protocol": 0, "protocol_mask": 0,
+ "action_id": action_id})
+ return actions, rules
+
+ def get_flows(self, options):
+ """Get actions/rules based on provided options.
+ The `options` is a dict representing the ACL rules configuration
+ file. Result is the same as described in `get_default_flows()`.
+ """
+ actions, rules = {}, []
+ for ace in options['access-list-entries']:
+ # Generate list of actions
+ action_id = self.new_action_id
+ actions[action_id] = ace['actions']
+            # Destination network
+ matches = ace['matches']
+ dst_ipv4_net = matches['destination-ipv4-network']
+ dst_ipv4_net_ip = ipaddress.ip_interface(six.text_type(dst_ipv4_net))
+ # Source network
+ src_ipv4_net = matches['source-ipv4-network']
+ src_ipv4_net_ip = ipaddress.ip_interface(six.text_type(src_ipv4_net))
+ # Append the rule
+ rules.append({'action_id': action_id, 'cmd': self.RULE_CMD,
+ 'dst_ip': dst_ipv4_net_ip.network.network_address.exploded,
+ 'dst_ip_mask': dst_ipv4_net_ip.network.prefixlen,
+ 'src_ip': src_ipv4_net_ip.network.network_address.exploded,
+ 'src_ip_mask': src_ipv4_net_ip.network.prefixlen,
+ 'dst_port_from': matches['destination-port-range']['lower-port'],
+ 'dst_port_to': matches['destination-port-range']['upper-port'],
+ 'src_port_from': matches['source-port-range']['lower-port'],
+ 'src_port_to': matches['source-port-range']['upper-port'],
+ 'priority': matches.get('priority', self.DEFAULT_PRIORITY),
+ 'protocol': matches.get('protocol', self.DEFAULT_PROTOCOL),
+ 'protocol_mask': matches.get('protocol_mask',
+ self.DEFAULT_PROTOCOL_MASK)
+ })
+ return actions, rules
+
+ def generate_rule_cmds(self, rules, apply_rules=False):
+ """Convert rules into list of SampleVNF CLI commands"""
+ rule_template = ("p {cmd} add {priority} {src_ip} {src_ip_mask} "
+ "{dst_ip} {dst_ip_mask} {src_port_from} {src_port_to} "
+ "{dst_port_from} {dst_port_to} {protocol} "
+ "{protocol_mask} {action_id}")
+ rule_cmd_list = []
+ for rule in rules:
+ rule_cmd_list.append(rule_template.format(**rule))
+ if apply_rules:
+ # add command to apply all rules at the end
+ rule_cmd_list.append("p {cmd} applyruleset".format(cmd=self.RULE_CMD))
+ return rule_cmd_list
+
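For reference, one rule dict in the format produced by get_default_flows() renders to a single SampleVNF CLI line through the template above (addresses are made up):

    rule = {'cmd': 'acl', 'priority': 1,
            'src_ip': '152.16.100.0', 'src_ip_mask': 24,
            'dst_ip': '152.16.40.0', 'dst_ip_mask': 24,
            'src_port_from': 0, 'src_port_to': 65535,
            'dst_port_from': 0, 'dst_port_to': 65535,
            'protocol': 0, 'protocol_mask': 0, 'action_id': 1}
    # rule_template.format(**rule) ->
    # "p acl add 1 152.16.100.0 24 152.16.40.0 24 0 65535 0 65535 0 0 1"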
+ def generate_action_cmds(self, actions):
+ """Convert actions into list of SampleVNF CLI commands.
+        This method doesn't validate the provided list of actions. The set of
+        supported actions is limited by SampleVNF, so the user is responsible
+        for specifying correct action name(s); Yardstick passes the provided
+        actions to SampleVNF as-is.
+        Note that some actions require additional parameters: the `fwd` and
+        `nat` actions must specify the `port` attribute.
+ """
+ _action_template_map = {
+ "fwd": "p action add {action_id} fwd {port}",
+ "nat": "p action add {action_id} nat {port}"
+ }
+ action_cmd_list = []
+ for action_id, actions in actions.items():
+ for action in actions:
+ if isinstance(action, dict):
+ for action_name in action.keys():
+                        # user provided an action name with additional options
+ # e.g.: {"fwd": {"port": 0}}
+ # format action CLI command and add it to the list
+ if action_name not in _action_template_map.keys():
+ raise exceptions.AclUknownActionTemplate(
+ action_name=action_name)
+ template = _action_template_map[action_name]
+ try:
+ action_cmd_list.append(template.format(
+ action_id=action_id, **action[action_name]))
+ except KeyError as exp:
+ raise exceptions.AclMissingActionArguments(
+ action_name=action_name,
+ action_param=exp.args[0])
+ else:
+                    # user provided an action name without additional options
+ # e.g.: "accept", "count"
+ action_cmd_list.append(
+ "p action add {action_id} {action}".format(
+ action_id=action_id, action=action))
+ return action_cmd_list
+
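Likewise, an actions map in the format produced by get_default_flows() expands to one CLI line per action (the port number is made up):

    actions = {1: ['accept', 'count', {'fwd': {'port': 0}}]}
    # generate_action_cmds(actions) ->
    # ['p action add 1 accept',
    #  'p action add 1 count',
    #  'p action add 1 fwd 0']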
+ def get_flows_config(self, options=None):
+ """Get action/rules configuration commands (string) to be
+ applied to SampleVNF to configure ACL rules (flows).
+ """
+ action_cmd_list, rule_cmd_list = [], []
+ if options:
+ # if file name is set, read actions/rules from the file
+ actions, rules = self.get_flows(options)
+ action_cmd_list = self.generate_action_cmds(actions)
+ rule_cmd_list = self.generate_rule_cmds(rules)
+ # default actions/rules
+ dft_actions, dft_rules = self.get_default_flows()
+ dft_action_cmd_list = self.generate_action_cmds(dft_actions)
+ dft_rule_cmd_list = self.generate_rule_cmds(dft_rules, apply_rules=True)
+ # generate multi-line commands to add actions/rules
+ return '\n'.join(chain(action_cmd_list, dft_action_cmd_list,
+ rule_cmd_list, dft_rule_cmd_list))
class AclApproxVnf(SampleVNF):
@@ -57,12 +251,3 @@ class AclApproxVnf(SampleVNF):
setup_env_helper_type = AclApproxSetupEnvSetupEnvHelper
super(AclApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
- self.acl_rules = None
-
- def _start_vnf(self):
- yang_model_path = utils.find_relative_file(
- self.scenario_helper.options['rules'],
- self.scenario_helper.task_path)
- yang_model = YangModel(yang_model_path)
- self.acl_rules = yang_model.get_rules()
- super(AclApproxVnf, self)._start_vnf()
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index 7816c6d91..7fac75a29 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -325,7 +325,7 @@ class ProxSocketHelper(object):
return ret_str, False
- def get_data(self, pkt_dump_only=False, timeout=1):
+ def get_data(self, pkt_dump_only=False, timeout=0.01):
""" read data from the socket """
# This method behaves slightly differently depending on whether it is
@@ -398,8 +398,14 @@ class ProxSocketHelper(object):
def stop(self, cores, task=''):
""" stop specific cores on the remote instance """
- LOG.debug("Stopping cores %s", cores)
- self.put_command("stop {} {}\n".format(join_non_strings(',', cores), task))
+
+ tmpcores = []
+ for core in cores:
+ if core not in tmpcores:
+ tmpcores.append(core)
+
+ LOG.debug("Stopping cores %s", tmpcores)
+ self.put_command("stop {} {}\n".format(join_non_strings(',', tmpcores), task))
time.sleep(3)
def start_all(self):
@@ -409,8 +415,14 @@ class ProxSocketHelper(object):
def start(self, cores):
""" start specific cores on the remote instance """
- LOG.debug("Starting cores %s", cores)
- self.put_command("start {}\n".format(join_non_strings(',', cores)))
+
+ tmpcores = []
+ for core in cores:
+ if core not in tmpcores:
+ tmpcores.append(core)
+
+ LOG.debug("Starting cores %s", tmpcores)
+ self.put_command("start {}\n".format(join_non_strings(',', tmpcores)))
time.sleep(3)
def reset_stats(self):
@@ -532,6 +544,51 @@ class ProxSocketHelper(object):
tsc = int(ret[3])
return rx, tx, drop, tsc
+ def multi_port_stats(self, ports):
+ """get counter values from all ports port"""
+
+ ports_str = ""
+ for port in ports:
+ ports_str = ports_str + str(port) + ","
+ ports_str = ports_str[:-1]
+
+ ports_all_data = []
+ tot_result = [0] * len(ports)
+
+ retry_counter = 0
+ port_index = 0
+        while (len(ports) != len(ports_all_data)) and (retry_counter < 10):
+ self.put_command("multi port stats {}\n".format(ports_str))
+ ports_all_data = self.get_data().split(";")
+
+            if len(ports) == len(ports_all_data):
+ for port_data_str in ports_all_data:
+
+ try:
+ tot_result[port_index] = [try_int(s, 0) for s in port_data_str.split(",")]
+ except (IndexError, TypeError):
+ LOG.error("Port Index error %d %s - retrying ", port_index, port_data_str)
+
+                    if (len(tot_result[port_index]) != 6) or \
+                            tot_result[port_index][0] != ports[port_index]:
+ ports_all_data = []
+ tot_result = [0] * len(ports)
+ port_index = 0
+ time.sleep(0.1)
+ LOG.error("Corrupted PACKET %s - retrying", port_data_str)
+ break
+ else:
+ port_index = port_index + 1
+ else:
+ LOG.error("Empty / too much data - retry -%s-", ports_all_data)
+ ports_all_data = []
+ tot_result = [0] * len(ports)
+ port_index = 0
+ time.sleep(0.1)
+
+ retry_counter = retry_counter + 1
+ return tot_result
+
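The reply consumed above is a semicolon-separated list of per-port counter sets; the layout assumed by the sanity checks (6 comma-separated integers, the first being the port id) can be pictured as follows (counter values are made up):

    raw = "0,1000,990,0,0,123456;1,2000,1980,0,0,123456"
    parsed = [[int(field) for field in chunk.split(",")]
              for chunk in raw.split(";")]
    # parsed[n][0] is the port id; parsed[n][1] and parsed[n][2] are the
    # rx/tx totals that ProxDataHelper.totals_and_pps sums further below.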
def port_stats(self, ports):
"""get counter values from a specific port"""
tot_result = [0] * 12
@@ -1012,7 +1069,11 @@ class ProxDataHelper(object):
@property
def totals_and_pps(self):
if self._totals_and_pps is None:
- rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8]
+ rx_total = tx_total = 0
+ all_ports = self.sut.multi_port_stats(range(self.port_count))
+ for port in all_ports:
+ rx_total = rx_total + port[1]
+ tx_total = tx_total + port[2]
requested_pps = self.value / 100.0 * self.line_rate_to_pps()
self._totals_and_pps = rx_total, tx_total, requested_pps
return self._totals_and_pps
@@ -1032,19 +1093,18 @@ class ProxDataHelper(object):
@property
def samples(self):
samples = {}
+ ports = []
+ port_names = []
for port_name, port_num in self.vnfd_helper.ports_iter():
- try:
- port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8]
- samples[port_name] = {
- "in_packets": port_rx_total,
- "out_packets": port_tx_total,
- }
- except (KeyError, TypeError, NameError, MemoryError, ValueError,
- SystemError, BufferError):
- samples[port_name] = {
- "in_packets": 0,
- "out_packets": 0,
- }
+ ports.append(port_num)
+ port_names.append(port_name)
+
+ results = self.sut.multi_port_stats(ports)
+ for result in results:
+ port_num = result[0]
+ samples[port_names[port_num]] = {
+ "in_packets": result[1],
+ "out_packets": result[2]}
return samples
def __enter__(self):
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index 36f1a19d0..63295c2e6 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -89,14 +89,15 @@ class ProxApproxVnf(SampleVNF):
raise RuntimeError("Failed ..Invalid no of ports .. "
"1, 2 or 4 ports only supported at this time")
- self.port_stats = self.vnf_execute('port_stats', range(port_count))
+ all_port_stats = self.vnf_execute('multi_port_stats', range(port_count))
+ rx_total = tx_total = 0
try:
- rx_total = self.port_stats[6]
- tx_total = self.port_stats[7]
- tsc = self.port_stats[10]
- except IndexError:
- LOG.debug("port_stats parse fail ")
- # return empty dict so we don't mess up existing KPIs
+ for single_port_stats in all_port_stats:
+ rx_total = rx_total + single_port_stats[1]
+ tx_total = tx_total + single_port_stats[2]
+ tsc = single_port_stats[5]
+ except (TypeError, IndexError):
+ LOG.error("Invalid data ...")
return {}
result = {
@@ -107,10 +108,19 @@ class ProxApproxVnf(SampleVNF):
# collectd KPIs here and not TG KPIs, so use a different method name
"collect_stats": self.resource_helper.collect_collectd_kpi(),
}
- curr_packets_in = int(((rx_total - self.prev_packets_in) * self.tsc_hz)
+ try:
+ curr_packets_in = int(((rx_total - self.prev_packets_in) * self.tsc_hz)
/ (tsc - self.prev_tsc) * port_count)
- curr_packets_fwd = int(((tx_total - self.prev_packets_sent) * self.tsc_hz)
+ except ZeroDivisionError:
+ LOG.error("Error.... Divide by Zero")
+ curr_packets_in = 0
+
+ try:
+ curr_packets_fwd = int(((tx_total - self.prev_packets_sent) * self.tsc_hz)
/ (tsc - self.prev_tsc) * port_count)
+ except ZeroDivisionError:
+ LOG.error("Error.... Divide by Zero")
+ curr_packets_fwd = 0
result["curr_packets_in"] = curr_packets_in
result["curr_packets_fwd"] = curr_packets_fwd
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index ef8b3f126..3976acb76 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -11,20 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class implementation for generic vnf implementation """
-from collections import Mapping
import logging
from multiprocessing import Queue, Value, Process
import os
import posixpath
import re
+import six
import subprocess
import time
-import six
-
from trex_stl_lib.trex_stl_client import LoggerApi
from trex_stl_lib.trex_stl_client import STLClient
from trex_stl_lib.trex_stl_exceptions import STLError
@@ -32,6 +29,7 @@ from yardstick.benchmark.contexts.base import Context
from yardstick.common import exceptions as y_exceptions
from yardstick.common.process import check_if_process_failed
from yardstick.common import utils
+from yardstick.common import yaml_loader
from yardstick.network_services import constants
from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
@@ -144,6 +142,13 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
'vnf_type': self.VNF_TYPE,
}
+ # read actions/rules from file
+ acl_options = None
+ acl_file_name = self.scenario_helper.options.get('rules')
+ if acl_file_name:
+ with utils.open_relative_file(acl_file_name, task_path) as infile:
+ acl_options = yaml_loader.yaml_load(infile)
+
config_tpl_cfg = utils.find_relative_file(self.DEFAULT_CONFIG_TPL_CFG,
task_path)
config_basename = posixpath.basename(self.CFG_CONFIG)
@@ -176,12 +181,17 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
new_config = self._update_packet_type(new_config, traffic_options)
self.ssh_helper.upload_config_file(config_basename, new_config)
self.ssh_helper.upload_config_file(script_basename,
- multiport.generate_script(self.vnfd_helper))
+ multiport.generate_script(self.vnfd_helper,
+ self.get_flows_config(acl_options)))
LOG.info("Provision and start the %s", self.APP_NAME)
self._build_pipeline_kwargs()
return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
+ def get_flows_config(self, options=None): # pylint: disable=unused-argument
+ """No actions/rules (flows) by default"""
+ return None
+
def _build_pipeline_kwargs(self):
tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME)
# count the number of actual ports in the list of pairs
@@ -375,39 +385,14 @@ class ClientResourceHelper(ResourceHelper):
LOG.error('TRex client not connected')
return {}
- def generate_samples(self, ports, key=None, default=None):
- # needs to be used ports
- last_result = self.get_stats(ports)
- key_value = last_result.get(key, default)
-
- if not isinstance(last_result, Mapping): # added for mock unit test
- self._terminated.value = 1
- return {}
-
- samples = {}
- # recalculate port for interface and see if it matches ports provided
- for intf in self.vnfd_helper.interfaces:
- name = intf["name"]
- port = self.vnfd_helper.port_num(name)
- if port in ports:
- xe_value = last_result.get(port, {})
- samples[name] = {
- "rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
- "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
- "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
- "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
- "in_packets": int(xe_value.get("ipackets", 0)),
- "out_packets": int(xe_value.get("opackets", 0)),
- }
- if key:
- samples[name][key] = key_value
- return samples
+ def _get_samples(self, ports, port_pg_id=False):
+ raise NotImplementedError()
def _run_traffic_once(self, traffic_profile):
traffic_profile.execute_traffic(self)
self.client_started.value = 1
time.sleep(self.RUN_DURATION)
- samples = self.generate_samples(traffic_profile.ports)
+ samples = self._get_samples(traffic_profile.ports)
time.sleep(self.QUEUE_WAIT_TIME)
self._queue.put(samples)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 2010546e7..a1f9fbeb4 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
import logging
-import sys
-from yardstick.common import exceptions
from yardstick.common import utils
+from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
@@ -27,14 +25,6 @@ LOG = logging.getLogger(__name__)
WAIT_AFTER_CFG_LOAD = 10
WAIT_FOR_TRAFFIC = 30
-IXIA_LIB = os.path.dirname(os.path.realpath(__file__))
-IXNET_LIB = os.path.join(IXIA_LIB, "../../libs/ixia_libs/IxNet")
-sys.path.append(IXNET_LIB)
-
-try:
- from IxNet import IxNextgen
-except ImportError:
- IxNextgen = exceptions.ErrorClass
class IxiaRfc2544Helper(Rfc2544ResourceHelper):
@@ -51,7 +41,7 @@ class IxiaResourceHelper(ClientResourceHelper):
super(IxiaResourceHelper, self).__init__(setup_helper)
self.scenario_helper = setup_helper.scenario_helper
- self.client = IxNextgen()
+ self.client = ixnet_api.IxNextgen()
if rfc_helper_type is None:
rfc_helper_type = IxiaRfc2544Helper
@@ -69,10 +59,8 @@ class IxiaResourceHelper(ClientResourceHelper):
def stop_collect(self):
self._terminated.value = 1
- if self.client:
- self.client.ix_stop_traffic()
- def generate_samples(self, ports, key=None, default=None):
+ def generate_samples(self, ports, key=None):
stats = self.get_stats()
samples = {}
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index 4e9f4bdc1..07cec6745 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -11,74 +11,45 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex traffic generation definitions which implements rfc2544 """
-from __future__ import absolute_import
-from __future__ import print_function
-import time
import logging
-from collections import Mapping
-
-from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexTrafficGen
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
-from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexResourceHelper
-
-LOGGING = logging.getLogger(__name__)
+import time
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_trex
-class TrexRfc2544ResourceHelper(Rfc2544ResourceHelper):
- def is_done(self):
- return self.latency and self.iteration.value > 10
+LOGGING = logging.getLogger(__name__)
-class TrexRfcResourceHelper(TrexResourceHelper):
+class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
- LATENCY_TIME_SLEEP = 120
- RUN_DURATION = 30
- WAIT_TIME = 3
+ SAMPLING_PERIOD = 2
+ TRANSIENT_PERIOD = 10
- def __init__(self, setup_helper, rfc_helper_type=None):
+ def __init__(self, setup_helper):
super(TrexRfcResourceHelper, self).__init__(setup_helper)
-
- if rfc_helper_type is None:
- rfc_helper_type = TrexRfc2544ResourceHelper
-
- self.rfc2544_helper = rfc_helper_type(self.scenario_helper)
+ self.rfc2544_helper = sample_vnf.Rfc2544ResourceHelper(
+ self.scenario_helper)
def _run_traffic_once(self, traffic_profile):
- if self._terminated.value:
- return
-
- traffic_profile.execute_traffic(self)
self.client_started.value = 1
- time.sleep(self.RUN_DURATION)
- self.client.stop(traffic_profile.ports)
- time.sleep(self.WAIT_TIME)
- samples = traffic_profile.get_drop_percentage(self)
- self._queue.put(samples)
-
- if not self.rfc2544_helper.is_done():
- return
-
- self.client.stop(traffic_profile.ports)
- self.client.reset(ports=traffic_profile.ports)
- self.client.remove_all_streams(traffic_profile.ports)
- traffic_profile.execute_traffic_latency(samples=samples)
- multiplier = traffic_profile.calculate_pps(samples)[1]
- for _ in range(5):
- time.sleep(self.LATENCY_TIME_SLEEP)
- self.client.stop(traffic_profile.ports)
- time.sleep(self.WAIT_TIME)
- last_res = self.client.get_stats(traffic_profile.ports)
- if not isinstance(last_res, Mapping):
- self._terminated.value = 1
- continue
- self.generate_samples(traffic_profile.ports, 'latency', {})
- self._queue.put(samples)
- self.client.start(mult=str(multiplier),
- ports=traffic_profile.ports,
- duration=120, force=True)
+ ports, port_pg_id = traffic_profile.execute_traffic(self)
+
+ samples = []
+ timeout = int(traffic_profile.config.duration) - self.TRANSIENT_PERIOD
+ time.sleep(self.TRANSIENT_PERIOD)
+ for _ in utils.Timer(timeout=timeout):
+ samples.append(self._get_samples(ports, port_pg_id=port_pg_id))
+ time.sleep(self.SAMPLING_PERIOD)
+
+ traffic_profile.stop_traffic(self)
+ output = traffic_profile.get_drop_percentage(
+ samples, self.rfc2544_helper.tolerance_low,
+ self.rfc2544_helper.tolerance_high,
+ self.rfc2544_helper.correlated_traffic)
+ self._queue.put(output)
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
@@ -86,12 +57,8 @@ class TrexRfcResourceHelper(TrexResourceHelper):
def clear_client_stats(self, ports):
self.client.clear_stats(ports=ports)
- def collect_kpi(self):
- self.rfc2544_helper.iteration.value += 1
- return super(TrexRfcResourceHelper, self).collect_kpi()
-
-class TrexTrafficGenRFC(TrexTrafficGen):
+class TrexTrafficGenRFC(tg_trex.TrexTrafficGen):
"""
This class handles mapping traffic profile and generating
traffic for rfc2544 testcase.
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
index 0084a124c..80b42e22d 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -13,7 +13,6 @@
# limitations under the License.
""" Trex acts as traffic generation and vnf definitions based on IETS Spec """
-from __future__ import absolute_import
import logging
import os
@@ -25,6 +24,7 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTraff
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+
LOG = logging.getLogger(__name__)
@@ -165,6 +165,30 @@ class TrexResourceHelper(ClientResourceHelper):
cmd = "sudo fuser -n tcp %s %s -k > /dev/null 2>&1"
self.ssh_helper.execute(cmd % (self.SYNC_PORT, self.ASYNC_PORT))
+ def _get_samples(self, ports, port_pg_id=None):
+ stats = self.get_stats(ports)
+ samples = {}
+ for pname in (intf['name'] for intf in self.vnfd_helper.interfaces):
+ port_num = self.vnfd_helper.port_num(pname)
+ port_stats = stats.get(port_num, {})
+ samples[pname] = {
+ 'rx_throughput_fps': float(port_stats.get('rx_pps', 0.0)),
+ 'tx_throughput_fps': float(port_stats.get('tx_pps', 0.0)),
+ 'rx_throughput_bps': float(port_stats.get('rx_bps', 0.0)),
+ 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)),
+ 'in_packets': int(port_stats.get('ipackets', 0)),
+ 'out_packets': int(port_stats.get('opackets', 0)),
+ }
+
+ pg_id_list = port_pg_id.get_pg_ids(port_num)
+ samples[pname]['latency'] = {}
+ for pg_id in pg_id_list:
+ latency_global = stats.get('latency', {})
+ pg_latency = latency_global.get(pg_id, {}).get('latency')
+ samples[pname]['latency'][pg_id] = pg_latency
+
+ return samples
+
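The per-port samples returned by _get_samples() have roughly the following shape (port name, values and the latency sub-keys are illustrative; the latency entries come straight from the TRex per-pg_id stats); this is what TrexRfcResourceHelper accumulates and RFC2544Profile.get_drop_percentage() consumes:

    sample = {
        'xe0': {'rx_throughput_fps': 50000.0, 'tx_throughput_fps': 50100.0,
                'rx_throughput_bps': 320000000.0, 'tx_throughput_bps': 321000000.0,
                'in_packets': 1500000, 'out_packets': 1503000,
                'latency': {1: {'average': 12.3, 'total_max': 45.6}}}}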
class TrexTrafficGen(SampleVNFTrafficGen):
"""
diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
index 3ba1f91b7..432f30a0c 100644
--- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
@@ -14,9 +14,8 @@
import logging
-from yardstick.common import utils
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
-from yardstick.network_services.yang_model import YangModel
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper
LOG = logging.getLogger(__name__)
@@ -27,7 +26,7 @@ FW_COLLECT_KPI = (r"""VFW TOTAL:[^p]+pkts_received"?:\s(\d+),[^p]+pkts_fw_forwar
r"""[^p]+pkts_drop_fw"?:\s(\d+),\s""")
-class FWApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
+class FWApproxSetupEnvHelper(AclApproxSetupEnvSetupEnvHelper):
APP_NAME = "vFW"
CFG_CONFIG = "/tmp/vfw_config"
@@ -37,6 +36,8 @@ class FWApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
SW_DEFAULT_CORE = 5
HW_DEFAULT_CORE = 2
VNF_TYPE = "VFW"
+ RULE_CMD = "vfw"
+ DEFAULT_FWD_ACTIONS = ["accept", "count", "conntrack"]
class FWApproxVnf(SampleVNF):
@@ -56,12 +57,3 @@ class FWApproxVnf(SampleVNF):
setup_env_helper_type = FWApproxSetupEnvHelper
super(FWApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
- self.vfw_rules = None
-
- def _start_vnf(self):
- yang_model_path = utils.find_relative_file(
- self.scenario_helper.options['rules'],
- self.scenario_helper.task_path)
- yang_model = YangModel(yang_model_path)
- self.vfw_rules = yang_model.get_rules()
- super(FWApproxVnf, self)._start_vnf()
diff --git a/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py b/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
index de6fd9329..6c5c6c833 100644
--- a/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
+++ b/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
@@ -47,6 +47,7 @@ class VnfSshHelper(AutoConnectSSH):
def upload_config_file(self, prefix, content):
cfg_file = os.path.join(constants.REMOTE_TMP, prefix)
+ LOG.debug('Config file name: %s', cfg_file)
LOG.debug(content)
file_obj = StringIO(content)
self.put_file_obj(file_obj, cfg_file)
diff --git a/yardstick/network_services/yang_model.py b/yardstick/network_services/yang_model.py
deleted file mode 100644
index ec00c4513..000000000
--- a/yardstick/network_services/yang_model.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import print_function
-import logging
-import ipaddress
-import six
-
-from yardstick.common.yaml_loader import yaml_load
-
-LOG = logging.getLogger(__name__)
-
-
-class YangModel(object):
-
- RULE_TEMPLATE = "p acl add 1 {0} {1} {2} {3} {4} {5} {6} {7} 0 0 {8}"
-
- def __init__(self, config_file):
- super(YangModel, self).__init__()
- self._config_file = config_file
- self._options = {}
- self._rules = ''
-
- @property
- def config_file(self):
- return self._config_file
-
- @config_file.setter
- def config_file(self, value):
- self._config_file = value
- self._options = {}
- self._rules = ''
-
- def _read_config(self):
- # TODO: add some error handling in case of empty or non-existing file
- try:
- with open(self._config_file) as f:
- self._options = yaml_load(f)
- except Exception as e:
- LOG.exception("Failed to load the yaml %s", e)
- raise
-
- def _get_entries(self):
- if not self._options:
- return ''
-
- rule_list = []
- for ace in self._options['access-list1']['acl']['access-list-entries']:
- # TODO: resolve ports using topology file and nodes'
- # ids: public or private.
- matches = ace['ace']['matches']
- dst_ipv4_net = matches['destination-ipv4-network']
- dst_ipv4_net_ip = ipaddress.ip_interface(six.text_type(dst_ipv4_net))
- port0_local_network = dst_ipv4_net_ip.network.network_address.exploded
- port0_prefix = dst_ipv4_net_ip.network.prefixlen
-
- src_ipv4_net = matches['source-ipv4-network']
- src_ipv4_net_ip = ipaddress.ip_interface(six.text_type(src_ipv4_net))
- port1_local_network = src_ipv4_net_ip.network.network_address.exploded
- port1_prefix = src_ipv4_net_ip.network.prefixlen
-
- lower_dport = matches['destination-port-range']['lower-port']
- upper_dport = matches['destination-port-range']['upper-port']
-
- lower_sport = matches['source-port-range']['lower-port']
- upper_sport = matches['source-port-range']['upper-port']
-
- # TODO: proto should be read from file also.
- # Now all rules in sample ACL file are TCP.
- rule_list.append('') # get an extra new line
- rule_list.append(self.RULE_TEMPLATE.format(port0_local_network,
- port0_prefix,
- port1_local_network,
- port1_prefix,
- lower_dport,
- upper_dport,
- lower_sport,
- upper_sport,
- 0))
- rule_list.append(self.RULE_TEMPLATE.format(port1_local_network,
- port1_prefix,
- port0_local_network,
- port0_prefix,
- lower_sport,
- upper_sport,
- lower_dport,
- upper_dport,
- 1))
-
- self._rules = '\n'.join(rule_list)
-
- def get_rules(self):
- if not self._rules:
- self._read_config()
- self._get_entries()
- return self._rules
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 6b5e6faf4..e6a26ab6b 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -348,12 +348,14 @@ class SSH(object):
raise exceptions.SSHError(error_msg=details)
return exit_status
- def execute(self, cmd, stdin=None, timeout=3600):
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
"""Execute the specified command on the server.
- :param cmd: Command to be executed.
- :param stdin: Open file to be sent on process stdin.
- :param timeout: Timeout for execution of the command.
+ :param cmd: (str) Command to be executed.
+ :param stdin: (StringIO) Open file to be sent on process stdin.
+ :param timeout: (int) Timeout for execution of the command.
+ :param raise_on_error: (bool) If True, then an SSHError will be raised
+ when non-zero exit code.
:returns: tuple (exit_status, stdout, stderr)
"""
@@ -362,7 +364,7 @@ class SSH(object):
exit_status = self.run(cmd, stderr=stderr,
stdout=stdout, stdin=stdin,
- timeout=timeout, raise_on_error=False)
+ timeout=timeout, raise_on_error=raise_on_error)
stdout.seek(0)
stderr.seek(0)
return exit_status, stdout.read(), stderr.read()
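A sketch of how the new raise_on_error flag changes a call site (the commands are arbitrary; `ssh` is an SSH instance and `exceptions` is yardstick.common.exceptions):

    # Default behaviour: the caller inspects the exit status itself.
    exit_status, stdout, stderr = ssh.execute("ls /tmp")

    # With raise_on_error=True a non-zero exit status raises SSHError
    # instead of being returned to the caller.
    try:
        ssh.execute("false", raise_on_error=True)
    except exceptions.SSHError:
        pass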
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
index 2cf1d92d7..8ad581918 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
@@ -371,11 +371,6 @@ class StandaloneContextHelperTestCase(unittest.TestCase):
file_path = os.path.join(curr_path, filename)
return file_path
- def test_read_config_file(self):
- self.helper.file_path = self._get_file_abspath(self.NODE_SAMPLE)
- status = self.helper.read_config_file()
- self.assertIsNotNone(status)
-
def test_parse_pod_file(self):
self.helper.file_path = self._get_file_abspath("dummy")
self.assertRaises(IOError, self.helper.parse_pod_file,
@@ -549,5 +544,5 @@ class OvsDeployTestCase(unittest.TestCase):
'ovs_version': ovs_version,
'dpdk_version': dpdk_version,
'proxy': 'test_proxy'})
- mock_execute.assert_called_with(cmd)
- mock_env_get.assert_called_with('http_proxy', '')
+ mock_execute.assert_called_once_with(cmd)
+ mock_env_get.assert_has_calls([mock.call('http_proxy', '')])
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index 6eb438cb1..5be22a034 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -290,6 +290,22 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ def test__get_physical_node_for_server(self):
+ attrs = self.attrs
+ attrs.update({'servers': {'server1': {}}})
+ self.ovs_dpdk.init(attrs)
+
+ # When server is not from this context
+ result = self.ovs_dpdk._get_physical_node_for_server('server1.another-context')
+ self.assertIsNone(result)
+
+ # When node_name is not from this context
+ result = self.ovs_dpdk._get_physical_node_for_server('fake.foo-12345678')
+ self.assertIsNone(result)
+
+ result = self.ovs_dpdk._get_physical_node_for_server('server1.foo-12345678')
+ self.assertEqual(result, 'node5.foo')
+
# TODO(elfoley): Split this test for networks that exist and networks that
# don't
def test__get_network(self):
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index de748e285..169084607 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -62,7 +62,7 @@ class SriovContextTestCase(unittest.TestCase):
self.attrs = {
'name': 'foo',
'task_id': '1234567890',
- 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE)
+ 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE),
}
self.sriov = sriov.SriovContext()
self.addCleanup(self._remove_contexts)
@@ -171,6 +171,22 @@ class SriovContextTestCase(unittest.TestCase):
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ def test__get_physical_node_for_server(self):
+ attrs = self.attrs
+ attrs.update({'servers': {'server1': {}}})
+ self.sriov.init(attrs)
+
+ # When server is not from this context
+ result = self.sriov._get_physical_node_for_server('server1.another-context')
+ self.assertIsNone(result)
+
+ # When node_name is not from this context
+ result = self.sriov._get_physical_node_for_server('fake.foo-12345678')
+ self.assertIsNone(result)
+
+ result = self.sriov._get_physical_node_for_server('server1.foo-12345678')
+ self.assertEqual(result, 'node5.foo')
+
def test__get_server_no_task_id(self):
self.attrs['flags'] = {'no_setup': True}
self.sriov.init(self.attrs)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_base.py b/yardstick/tests/unit/benchmark/contexts/test_base.py
index b19883479..8f1cbcc55 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_base.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_base.py
@@ -12,12 +12,47 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest
+import os
+import errno
+
+import mock
from yardstick.benchmark.contexts import base
+from yardstick.benchmark.contexts.base import Context
+from yardstick.tests.unit import base as ut_base
+from yardstick.common.constants import YARDSTICK_ROOT_PATH
+
+
+class DummyContextClass(Context):
+
+ def __init__(self, host_name_separator='.'):
+        super(DummyContextClass, self).__init__(
+            host_name_separator=host_name_separator)
+ self.nodes = []
+ self.controllers = []
+ self.computes = []
+ self.baremetals = []
+
+ def _get_network(self, *args):
+ pass
+
+ def _get_server(self, *args):
+ pass
+
+ def deploy(self):
+ pass
+
+ def undeploy(self):
+        pass
+
+    def _get_physical_nodes(self):
+ pass
-class FlagsTestCase(unittest.TestCase):
+ def _get_physical_node_for_server(self, server_name):
+ pass
+
+
+class FlagsTestCase(ut_base.BaseUnitTestCase):
def setUp(self):
self.flags = base.Flags()
@@ -44,3 +79,93 @@ class FlagsTestCase(unittest.TestCase):
self.flags.parse(foo=42)
with self.assertRaises(AttributeError):
_ = self.flags.foo
+
+
+class ContextTestCase(ut_base.BaseUnitTestCase):
+
+ @staticmethod
+ def _remove_ctx(ctx_obj):
+ if ctx_obj in base.Context.list:
+ base.Context.list.remove(ctx_obj)
+
+ def test_split_host_name(self):
+ ctx_obj = DummyContextClass()
+ self.addCleanup(self._remove_ctx, ctx_obj)
+ config_name = 'host_name.ctx_name'
+ self.assertEqual(('host_name', 'ctx_name'),
+ ctx_obj.split_host_name(config_name))
+
+ def test_split_host_name_wrong_separator(self):
+ ctx_obj = DummyContextClass()
+ self.addCleanup(self._remove_ctx, ctx_obj)
+ config_name = 'host_name-ctx_name'
+ self.assertEqual((None, None),
+ ctx_obj.split_host_name(config_name))
+
+ def test_split_host_name_other_separator(self):
+ ctx_obj = DummyContextClass(host_name_separator='-')
+ self.addCleanup(self._remove_ctx, ctx_obj)
+ config_name = 'host_name-ctx_name'
+ self.assertEqual(('host_name', 'ctx_name'),
+ ctx_obj.split_host_name(config_name))
+
+ def test_get_physical_nodes(self):
+ ctx_obj = DummyContextClass()
+ self.addCleanup(self._remove_ctx, ctx_obj)
+
+ result = Context.get_physical_nodes()
+
+ self.assertEqual(result, {None: None})
+
+ @mock.patch.object(Context, 'get_context_from_server')
+ def test_get_physical_node_from_server(self, mock_get_ctx):
+ ctx_obj = DummyContextClass()
+ self.addCleanup(self._remove_ctx, ctx_obj)
+
+ mock_get_ctx.return_value = ctx_obj
+
+ result = Context.get_physical_node_from_server("mock_server")
+
+ mock_get_ctx.assert_called_once()
+ self.assertIsNone(result)
+
+ @mock.patch('yardstick.common.utils.read_yaml_file')
+ def test_read_pod_file(self, mock_read_yaml_file):
+ attrs = {'name': 'foo',
+ 'task_id': '12345678',
+ 'file': 'pod.yaml'
+ }
+
+ ctx_obj = DummyContextClass()
+ cfg = {"nodes": [
+ {
+ "name": "node1",
+ "role": "Controller",
+ "ip": "10.229.47.137",
+ "user": "root",
+ "key_filename": "/root/.yardstick_key"
+ },
+ {
+ "name": "node2",
+ "role": "Compute",
+ "ip": "10.229.47.139",
+ "user": "root",
+ "key_filename": "/root/.yardstick_key"
+ }
+ ]
+ }
+
+ mock_read_yaml_file.return_value = cfg
+ result = ctx_obj.read_pod_file(attrs)
+ self.assertEqual(result, cfg)
+
+ mock_read_yaml_file.side_effect = IOError(errno.EPERM, '')
+ with self.assertRaises(IOError):
+ ctx_obj.read_pod_file(attrs)
+
+ mock_read_yaml_file.side_effect = IOError(errno.ENOENT, '')
+ with self.assertRaises(IOError):
+ ctx_obj.read_pod_file(attrs)
+
+ file_path = os.path.join(YARDSTICK_ROOT_PATH, 'pod.yaml')
+ self.assertEqual(ctx_obj.file_path, file_path)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_dummy.py b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
index c4113be41..33832375f 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_dummy.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
@@ -76,3 +76,11 @@ class DummyContextTestCase(unittest.TestCase):
self.assertEqual(result, None)
self.test_context.undeploy()
+
+ def test__get_physical_nodes(self):
+ result = self.test_context._get_physical_nodes()
+ self.assertIsNone(result)
+
+ def test__get_physical_node_for_server(self):
+ result = self.test_context._get_physical_node_for_server("fake")
+ self.assertIsNone(result)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index 9c822b3a7..7605ef29a 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -12,30 +12,53 @@ import logging
import os
import mock
+import unittest
-from yardstick import ssh
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import heat
from yardstick.benchmark.contexts import model
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
-from yardstick.tests.unit import base as ut_base
+from yardstick.common import openstack_utils
+from yardstick import ssh
LOG = logging.getLogger(__name__)
-class HeatContextTestCase(ut_base.BaseUnitTestCase):
+class HeatContextTestCase(unittest.TestCase):
+
+ HEAT_POD_SAMPLE = {
+ "nodes": [
+ {
+ "name": "node1",
+ "role": "Controller",
+ "ip": "10.229.47.137",
+ "user": "root",
+ "key_filename": "/root/.yardstick_key"
+ },
+ {
+ "name": "node2",
+ "role": "Compute",
+ "ip": "10.229.47.139",
+ "user": "root",
+ "key_filename": "/root/.yardstick_key"
+ }
+ ]
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(HeatContextTestCase, self).__init__(*args, **kwargs)
def setUp(self):
self.test_context = heat.HeatContext()
self.addCleanup(self._remove_contexts)
+ self.mock_context = mock.Mock(spec=heat.HeatContext())
- @staticmethod
- def _remove_contexts():
- for context in base.Context.list:
- context._delete_context()
- base.Context.list = []
+ def _remove_contexts(self):
+ if self.test_context in self.test_context.list:
+ self.test_context._delete_context()
def test___init__(self):
self.assertIsNone(self.test_context._name)
@@ -57,24 +80,29 @@ class HeatContextTestCase(ut_base.BaseUnitTestCase):
self.assertIsNone(self.test_context.heat_parameters)
self.assertIsNone(self.test_context.key_filename)
+ @mock.patch('yardstick.common.utils.read_yaml_file')
@mock.patch('yardstick.benchmark.contexts.heat.PlacementGroup')
@mock.patch('yardstick.benchmark.contexts.heat.ServerGroup')
@mock.patch('yardstick.benchmark.contexts.heat.Network')
@mock.patch('yardstick.benchmark.contexts.heat.Server')
- def test_init(self, mock_server, mock_network, mock_sg, mock_pg):
+ def test_init(self, mock_server, mock_network, mock_sg, mock_pg, mock_read_yaml):
+ mock_read_yaml.return_value = self.HEAT_POD_SAMPLE
pgs = {'pgrp1': {'policy': 'availability'}}
sgs = {'servergroup1': {'policy': 'affinity'}}
networks = {'bar': {'cidr': '10.0.1.0/24'}}
servers = {'baz': {'floating_ip': True, 'placement': 'pgrp1'}}
attrs = {'name': 'foo',
+ 'file': 'pod.yaml',
'task_id': '1234567890',
'placement_groups': pgs,
'server_groups': sgs,
'networks': networks,
'servers': servers}
- self.test_context.init(attrs)
+ with mock.patch.object(openstack_utils, 'get_shade_client'), \
+ mock.patch.object(openstack_utils, 'get_shade_operator_client'):
+ self.test_context.init(attrs)
self.assertFalse(self.test_context._flags.no_setup)
self.assertFalse(self.test_context._flags.no_teardown)
@@ -128,13 +156,17 @@ class HeatContextTestCase(ut_base.BaseUnitTestCase):
'server_groups': {},
'networks': {},
'servers': {},
+ 'file': "pod.yaml",
'flags': {
'no_setup': True,
'no_teardown': True,
},
}
- self.test_context.init(attrs)
+ with mock.patch.object(openstack_utils, 'get_shade_client'), \
+ mock.patch.object(openstack_utils, 'get_shade_operator_client'):
+ self.test_context.init(attrs)
+
self.assertTrue(self.test_context._flags.no_setup)
self.assertTrue(self.test_context._flags.no_teardown)
@@ -654,7 +686,6 @@ class HeatContextTestCase(ut_base.BaseUnitTestCase):
baz3_server.public_ip = None
baz3_server.context.user = 'zab'
- self.mock_context = mock.Mock(spec=heat.HeatContext())
self.mock_context._name = 'bar1'
self.test_context.stack = mock.Mock()
self.mock_context.stack.outputs = {
@@ -722,3 +753,56 @@ class HeatContextTestCase(ut_base.BaseUnitTestCase):
}
result = self.test_context._get_network(attr_name)
self.assertDictEqual(result, expected)
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ def test__get_physical_nodes(self):
+ self.test_context.nodes = {}
+ nodes = self.test_context._get_physical_nodes()
+ self.assertEqual(nodes, {})
+
+ @mock.patch('yardstick.common.utils.read_yaml_file')
+ def test__get_physical_node_for_server(self, mock_read_yaml):
+ attrs = {'name': 'foo',
+ 'task_id': '12345678',
+ 'file': "pod.yaml",
+ 'servers': {'vnf': {}},
+ 'networks': {'mgmt': {'cidr': '10.0.1.0/24'}}
+ }
+
+ with mock.patch.object(openstack_utils, 'get_shade_client'), \
+ mock.patch.object(openstack_utils, 'get_shade_operator_client'):
+ mock_read_yaml.return_value = self.HEAT_POD_SAMPLE
+ self.test_context.init(attrs)
+
+ with mock.patch('yardstick.common.openstack_utils.get_server') as mock_get_server:
+ mock_get_server.return_value = {'vnf': {}}
+
+ # When server is not from this context
+ result = self.test_context._get_physical_node_for_server('node1.foo-context')
+ self.assertIsNone(result)
+
+ # When node_name is not from this context
+ result = self.test_context._get_physical_node_for_server('fake.foo-12345678')
+ self.assertIsNone(result)
+
+ mock_munch = mock.Mock()
+ mock_munch.toDict = mock.Mock(return_value={
+ 'OS-EXT-SRV-ATTR:hypervisor_hostname': 'hypervisor_hostname'
+ })
+ mock_get_server.return_value = mock_munch
+
+ hypervisor = mock.Mock()
+ hypervisor.hypervisor_hostname = 'hypervisor_hostname'
+ hypervisor.host_ip = '10.229.47.137'
+
+ self.test_context.operator_client.list_hypervisors = mock.Mock(
+ return_value=[hypervisor])
+
+
+ result = self.test_context._get_physical_node_for_server('vnf.foo-12345678')
+ self.assertEqual(result, 'node1.foo')
diff --git a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
index 0d698c5bd..821b84a1f 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -182,3 +182,11 @@ class KubernetesTestCase(unittest.TestCase):
mock_k8stemplate.assert_called_once_with(self.k8s_context.name,
CONTEXT_CFG)
self.assertEqual('fake_template', self.k8s_context.template)
+
+ def test__get_physical_nodes(self):
+ result = self.k8s_context._get_physical_nodes()
+ self.assertIsNone(result)
+
+ def test__get_physical_node_for_server(self):
+ result = self.k8s_context._get_physical_node_for_server("fake")
+ self.assertIsNone(result)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_node.py b/yardstick/tests/unit/benchmark/contexts/test_node.py
index b67be3758..5d7b24c3d 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_node.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_node.py
@@ -15,6 +15,7 @@ import mock
from yardstick.common import constants as consts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import node
+from yardstick.common import exceptions
class NodeContextTestCase(unittest.TestCase):
@@ -55,8 +56,9 @@ class NodeContextTestCase(unittest.TestCase):
self.assertEqual(self.test_context.env, {})
self.assertEqual(self.test_context.attrs, {})
+ @mock.patch('yardstick.common.utils.read_yaml_file')
@mock.patch('{}.os.path.join'.format(PREFIX))
- def test_init_negative(self, mock_path_join):
+ def test_init_negative(self, mock_path_join, read_mock):
special_path = '/foo/bar/error_file'
error_path = self._get_file_abspath("error_file")
@@ -68,7 +70,6 @@ class NodeContextTestCase(unittest.TestCase):
# we can't count mock_path_join calls because
# it can catch join calls for .pyc files.
mock_path_join.side_effect = path_join
- self.test_context.read_config_file = read_mock = mock.Mock()
read_calls = 0
with self.assertRaises(KeyError):
@@ -86,7 +87,7 @@ class NodeContextTestCase(unittest.TestCase):
self.test_context.init(attrs)
read_calls += 1
- self.assertEqual(read_mock.called, read_calls)
+ self.assertEqual(read_mock.call_count, read_calls)
self.assertIn(attrs['file'], self.test_context.file_path)
self.assertEqual(raised.exception.errno, errno.EBUSY)
self.assertEqual(str(raised.exception), str(read_mock.side_effect))
@@ -101,11 +102,6 @@ class NodeContextTestCase(unittest.TestCase):
self.assertEqual(raised.exception.errno, errno.ENOENT)
self.assertEqual(str(raised.exception), str(read_mock.side_effect))
- def test_read_config_file(self):
- self.test_context.init(self.attrs)
-
- self.assertIsNotNone(self.test_context.read_config_file())
-
def test__dispatch_script(self):
self.test_context.init(self.attrs)
@@ -171,6 +167,39 @@ class NodeContextTestCase(unittest.TestCase):
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ def test__get_physical_nodes(self):
+ self.test_context.init(self.attrs)
+ nodes = self.test_context._get_physical_nodes()
+ self.assertEqual(nodes, self.test_context.nodes)
+
+ def test__get_physical_node_for_server(self):
+ self.test_context.init(self.attrs)
+
+ # When server is not from this context
+ result = self.test_context._get_physical_node_for_server('node1.another-context')
+ self.assertIsNone(result)
+
+ # When node_name is not from this context
+ result = self.test_context._get_physical_node_for_server('fake.foo-12345678')
+ self.assertIsNone(result)
+
+ result = self.test_context._get_physical_node_for_server('node1.foo-12345678')
+ self.assertEqual(result, 'node1.foo')
+
+ def test_update_collectd_options_for_node(self):
+ self.test_context.init(self.attrs)
+ options = {'collectd': {'interval': 5}}
+
+ with self.assertRaises(exceptions.ContextUpdateCollectdForNodeError):
+ self.test_context.update_collectd_options_for_node(options, 'fake.foo-12345678')
+
+ self.test_context.update_collectd_options_for_node(options, 'node1.foo-12345678')
+
+ node_collectd_options = [node for node in self.test_context.nodes
+ if node['name'] == 'node1'][0]['collectd']
+
+ self.assertEqual(node_collectd_options, options)
+
@mock.patch('{}.NodeContext._dispatch_script'.format(PREFIX))
def test_deploy(self, dispatch_script_mock):
obj = node.NodeContext()
diff --git a/yardstick/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py
index a684ad750..524302f92 100644
--- a/yardstick/tests/unit/benchmark/core/test_report.py
+++ b/yardstick/tests/unit/benchmark/core/test_report.py
@@ -42,16 +42,16 @@ class ReportTestCase(unittest.TestCase):
self.param.task_id = [FAKE_TASK_ID]
self.rep = report.Report()
- @mock.patch('yardstick.benchmark.core.report.Report._get_tasks')
- @mock.patch('yardstick.benchmark.core.report.Report._get_fieldkeys')
- @mock.patch('yardstick.benchmark.core.report.Report._validate')
+ @mock.patch.object(report.Report, '_get_tasks')
+ @mock.patch.object(report.Report, '_get_fieldkeys')
+ @mock.patch.object(report.Report, '_validate')
def test_generate_success(self, mock_valid, mock_keys, mock_tasks):
mock_tasks.return_value = FAKE_DB_TASK
mock_keys.return_value = FAKE_DB_FIELDKEYS
self.rep.generate(self.param)
mock_valid.assert_called_once_with(FAKE_YAML_NAME, FAKE_TASK_ID)
- self.assertEqual(1, mock_tasks.call_count)
- self.assertEqual(1, mock_keys.call_count)
+ mock_tasks.assert_called_once_with()
+ mock_keys.assert_called_once_with()
# pylint: disable=deprecated-method
def test_invalid_yaml_name(self):
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 7468368df..0424c77a3 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -156,6 +156,31 @@ class TaskTestCase(unittest.TestCase):
t._run([scenario], False, "yardstick.out")
runner.run.assert_called_once()
+ @mock.patch.object(task, 'Context')
+ @mock.patch.object(task, 'base_runner')
+ def test_run_ProxDuration(self, mock_base_runner, *args):
+ scenario = {
+ 'host': 'athena.demo',
+ 'target': 'ares.demo',
+ 'runner': {
+ 'duration': 60,
+ 'interval': 1,
+ 'sampled': 'yes',
+ 'confirmation': 1,
+ 'type': 'ProxDuration'
+ },
+ 'type': 'Ping'
+ }
+
+ t = task.Task()
+ runner = mock.Mock()
+ runner.join.return_value = 0
+ runner.get_output.return_value = {}
+ runner.get_result.return_value = []
+ mock_base_runner.Runner.get.return_value = runner
+ t._run([scenario], False, "yardstick.out")
+ runner.run.assert_called_once()
+
@mock.patch.object(os, 'environ')
def test_check_precondition(self, mock_os_environ):
cfg = {
diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py
new file mode 100644
index 000000000..21e3874b8
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_duration.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2018 Nokia and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+import os
+
+from yardstick.benchmark.runners import duration
+
+
+class DurationRunnerTest(unittest.TestCase):
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {'interval': 0, "duration": 0},
+ 'type': 'some_type'
+ }
+
+ @mock.patch.object(os, 'getpid')
+ @mock.patch.object(multiprocessing, 'Process')
+ def test__run_benchmark_called_with(self, mock_multiprocessing_process,
+ mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ runner = duration.DurationRunner({})
+ benchmark_cls = mock.Mock()
+ runner._run_benchmark(benchmark_cls, 'my_method', self.scenario_cfg,
+ {})
+ mock_multiprocessing_process.assert_called_once_with(
+ name='Duration-some_type-101',
+ target=duration._worker_process,
+ args=(runner.result_queue, benchmark_cls, 'my_method',
+ self.scenario_cfg, {}, runner.aborted, runner.output_queue))
diff --git a/yardstick/tests/unit/benchmark/runner/test_proxduration.py b/yardstick/tests/unit/benchmark/runner/test_proxduration.py
new file mode 100644
index 000000000..be1715aad
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_proxduration.py
@@ -0,0 +1,295 @@
+##############################################################################
+# Copyright (c) 2018 Nokia and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+import os
+import time
+
+from yardstick.benchmark.runners import proxduration
+from yardstick.common import exceptions as y_exc
+
+
+class ProxDurationRunnerTest(unittest.TestCase):
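+ # Callable helper that stands in for a benchmark method: each call
+ # increments a counter, stores it in the result data and, depending on
+ # side_effect, raises an SLA validation error or a broad exception.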
+ class MyMethod(object):
+ SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
+ BROAD_EXCEPTION_SIDE_EFFECT = 2
+
+ def __init__(self, side_effect=0):
+ self.count = 101
+ self.side_effect = side_effect
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ if self.side_effect == self.SLA_VALIDATION_ERROR_SIDE_EFFECT:
+ raise y_exc.SLAValidationError(case_name='My Case',
+ error_msg='my error message')
+ elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT:
+ raise y_exc.YardstickException
+ return self.count
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {'interval': 0, "duration": 0},
+ 'type': 'some_type'
+ }
+
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ @mock.patch.object(os, 'getpid')
+ @mock.patch.object(multiprocessing, 'Process')
+ def test__run_benchmark_called_with(self, mock_multiprocessing_process,
+ mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ runner = proxduration.ProxDurationRunner({})
+ benchmark_cls = mock.Mock()
+ runner._run_benchmark(benchmark_cls, 'my_method', self.scenario_cfg,
+ {})
+ mock_multiprocessing_process.assert_called_once_with(
+ name='ProxDuration-some_type-101',
+ target=proxduration._worker_process,
+ args=(runner.result_queue, benchmark_cls, 'my_method',
+ self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ def test__worker_process_called_with_cfg(self):
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ def test__worker_process_called_with_cfg_loop(self):
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.01}
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.my_method.call_count, 2)
+
+ def test__worker_process_called_without_cfg(self):
+ scenario_cfg = {'runner': {}}
+
+ aborted = multiprocessing.Event()
+ aborted.set()
+
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ scenario_cfg, {}, aborted, mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ def test__worker_process_output_queue(self):
+ self.benchmark.my_method = mock.Mock(return_value='my_result')
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ output_queue = multiprocessing.Queue()
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertEqual(output_queue.get(), 'my_result')
+
+ def test__worker_process_output_queue_multiple_iterations(self):
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.my_method.count, 103)
+
+ count = 101
+ while not output_queue.empty():
+ count += 1
+ self.assertEqual(output_queue.get(), count)
+
+ def test__worker_process_queue(self):
+ self.benchmark.my_method = self.MyMethod()
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_queue_multiple_iterations(self):
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.my_method.count, 103)
+
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+
+ def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
+ self.scenario_cfg['sla'] = {}
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_queue_on_sla_validation_error_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], ('My Case SLA validation failed. '
+ 'Error: my error message',))
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_broad_exception(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ def test__worker_process_queue_on_broad_exception(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertNotEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_benchmark_teardown_on_broad_exception(self):
+ self.benchmark.teardown = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+
+ with self.assertRaises(SystemExit) as raised:
+ proxduration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.assertEqual(raised.exception.code, 1)
+ self._assert_defaults__worker_run_setup_and_teardown()
diff --git a/yardstick/tests/unit/benchmark/runner/test_search.py b/yardstick/tests/unit/benchmark/runner/test_search.py
index 4e5b4fe77..d5d1b8ded 100644
--- a/yardstick/tests/unit/benchmark/runner/test_search.py
+++ b/yardstick/tests/unit/benchmark/runner/test_search.py
@@ -19,36 +19,33 @@ import unittest
from yardstick.benchmark.runners.search import SearchRunner
from yardstick.benchmark.runners.search import SearchRunnerHelper
+from yardstick.common import exceptions as y_exc
class TestSearchRunnerHelper(unittest.TestCase):
def test___call__(self):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
- benchmark = cls()
- method = getattr(benchmark, 'my_method')
+ benchmark = mock.Mock()
+ method = getattr(benchmark(), 'my_method')
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ benchmark, 'my_method', scenario_cfg, {}, mock.Mock())
with helper.get_benchmark_instance():
helper()
- self.assertEqual(method.call_count, 1)
+ method.assert_called_once()
def test___call___error(self):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
with self.assertRaises(RuntimeError):
helper()
@@ -56,8 +53,6 @@ class TestSearchRunnerHelper(unittest.TestCase):
@mock.patch.object(time, 'sleep')
@mock.patch.object(time, 'time')
def test_is_not_done(self, mock_time, *args):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
@@ -65,7 +60,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
mock_time.side_effect = range(1000)
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
index = -1
for index in helper.is_not_done():
@@ -76,8 +71,6 @@ class TestSearchRunnerHelper(unittest.TestCase):
@mock.patch.object(time, 'sleep')
def test_is_not_done_immediate_stop(self, *args):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {
'run_step': '',
@@ -85,7 +78,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
}
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
index = -1
for index in helper.is_not_done():
@@ -112,7 +105,7 @@ class TestSearchRunner(unittest.TestCase):
}
runner = SearchRunner({})
- runner.worker_helper = mock.MagicMock(side_effect=update)
+ runner.worker_helper = mock.Mock(side_effect=update)
self.assertFalse(runner._worker_run_once('sequence 1'))
@@ -136,51 +129,49 @@ class TestSearchRunner(unittest.TestCase):
}
runner = SearchRunner({})
- runner.worker_helper = mock.MagicMock(side_effect=update)
+ runner.worker_helper = mock.Mock(side_effect=update)
self.assertTrue(runner._worker_run_once('sequence 1'))
def test__worker_run_once_assertion_error_assert(self):
runner = SearchRunner({})
runner.sla_action = 'assert'
- runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+ runner.worker_helper = mock.Mock(side_effect=y_exc.SLAValidationError)
- with self.assertRaises(AssertionError):
+ with self.assertRaises(y_exc.SLAValidationError):
runner._worker_run_once('sequence 1')
def test__worker_run_once_assertion_error_monitor(self):
runner = SearchRunner({})
runner.sla_action = 'monitor'
- runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+ runner.worker_helper = mock.Mock(side_effect=y_exc.SLAValidationError)
self.assertFalse(runner._worker_run_once('sequence 1'))
def test__worker_run_once_non_assertion_error_none(self):
runner = SearchRunner({})
- runner.worker_helper = mock.MagicMock(side_effect=RuntimeError)
+ runner.worker_helper = mock.Mock(side_effect=RuntimeError)
self.assertTrue(runner._worker_run_once('sequence 1'))
def test__worker_run_once_non_assertion_error(self):
runner = SearchRunner({})
runner.sla_action = 'monitor'
- runner.worker_helper = mock.MagicMock(side_effect=RuntimeError)
+ runner.worker_helper = mock.Mock(side_effect=RuntimeError)
self.assertFalse(runner._worker_run_once('sequence 1'))
def test__worker_run(self):
- cls = mock.MagicMock()
scenario_cfg = {
'runner': {'interval': 0, 'timeout': 1},
}
runner = SearchRunner({})
- runner._worker_run_once = mock.MagicMock(side_effect=[0, 0, 1])
+ runner._worker_run_once = mock.Mock(side_effect=[0, 0, 1])
- runner._worker_run(cls, 'my_method', scenario_cfg, {})
+ runner._worker_run(mock.Mock(), 'my_method', scenario_cfg, {})
def test__worker_run_immediate_stop(self):
- cls = mock.MagicMock()
scenario_cfg = {
'runner': {
'run_step': '',
@@ -188,15 +179,14 @@ class TestSearchRunner(unittest.TestCase):
}
runner = SearchRunner({})
- runner._worker_run(cls, 'my_method', scenario_cfg, {})
+ runner._worker_run(mock.Mock(), 'my_method', scenario_cfg, {})
@mock.patch('yardstick.benchmark.runners.search.multiprocessing')
def test__run_benchmark(self, mock_multi_process):
- cls = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
runner = SearchRunner({})
- runner._run_benchmark(cls, 'my_method', scenario_cfg, {})
- self.assertEqual(mock_multi_process.Process.call_count, 1)
+ runner._run_benchmark(mock.Mock(), 'my_method', scenario_cfg, {})
+ mock_multi_process.Process.assert_called_once()
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index ce972779d..8d042c406 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -7,6 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import time
+
import mock
import unittest
@@ -86,13 +88,19 @@ class BaseMonitorTestCase(unittest.TestCase):
'sla': {'max_outage_time': 5}
}
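+ # Cleanup helper: give the monitor a moment to finish, then close its
+ # result queue.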
+ def _close_queue(self, instance):
+ time.sleep(0.1)
+ instance._queue.close()
+
def test__basemonitor_start_wait_successful(self):
ins = basemonitor.BaseMonitor(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.start_monitor()
ins.wait_monitor()
def test__basemonitor_all_successful(self):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.setup()
ins.run()
ins.verify_SLA()
@@ -100,16 +108,12 @@ class BaseMonitorTestCase(unittest.TestCase):
@mock.patch.object(basemonitor, 'multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.setup()
mock_multiprocess.Event().is_set.return_value = False
ins.run()
ins.verify_SLA()
- # TODO(elfoley): fix this test to not throw an error
def test__basemonitor_getmonitorcls_successfule(self):
- cls = None
- try:
- cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
- except Exception: # pylint: disable=broad-except
- pass
- self.assertIsNone(cls)
+ with self.assertRaises(RuntimeError):
+ basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index d1172d5a6..cd065c961 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -11,6 +11,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability import scenario_general
+from yardstick.common import exceptions as y_exc
class ScenarioGeneralTestCase(unittest.TestCase):
@@ -59,6 +60,14 @@ class ScenarioGeneralTestCase(unittest.TestCase):
self.instance.director.verify.return_value = False
self.instance.director.data = {}
ret = {}
- self.assertRaises(AssertionError, self.instance.run, ret)
+ self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
+ self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 0)
+
+ def test_scenario_general_case_service_not_found_fail(self):
+ self.instance.director.verify.return_value = True
+ self.instance.director.data = {"general-attacker": 0}
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
self.instance.teardown()
self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index dd656fbd5..ec0e5973c 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -11,6 +11,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability import serviceha
+from yardstick.common import exceptions as y_exc
class ServicehaTestCase(unittest.TestCase):
@@ -42,6 +43,13 @@ class ServicehaTestCase(unittest.TestCase):
}
sla = {"outage_time": 5}
self.args = {"options": options, "sla": sla}
+ self.test__serviceha = serviceha.ServiceHA(self.args, self.ctx)
+
+ def test___init__(self):
+
+ self.assertEqual(self.test__serviceha.data, {})
+ self.assertFalse(self.test__serviceha.setup_done)
+ self.assertFalse(self.test__serviceha.sla_pass)
# NOTE(elfoley): This should be split into test_setup and test_run
# NOTE(elfoley): This should explicitly test outcomes and states
@@ -71,5 +79,36 @@ class ServicehaTestCase(unittest.TestCase):
mock_monitor.MonitorMgr().verify_SLA.return_value = False
ret = {}
- self.assertRaises(AssertionError, p.run, ret)
+ self.assertRaises(y_exc.SLAValidationError, p.run, ret)
+ self.assertEqual(ret['sla_pass'], 0)
+
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_run_service_not_found_sla_error(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+
+ p.setup()
+ self.assertTrue(p.setup_done)
+ p.data["kill-process"] = 0
+
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, p.run, ret)
self.assertEqual(ret['sla_pass'], 0)
+
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_no_teardown_when_sla_pass(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ p.setup()
+ self.assertTrue(p.setup_done)
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+ ret = {}
+ p.run(ret)
+ attacker = mock.Mock()
+ p.attackers = [attacker]
+ p.teardown()
+ attacker.recover.assert_not_called()
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index f24ec24ec..4fadde4dc 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import cyclictest
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
@@ -122,7 +123,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
@@ -136,7 +137,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
@@ -150,7 +151,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 9640ce000..c4ac347f4 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import lmbench
+from yardstick.common import exceptions as y_exc
# pylint: disable=unused-argument
@@ -144,7 +145,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 37.5, "size": 0.00049}]'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, l.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
@@ -162,7 +163,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, l.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
def test_successful_latency_for_cache_run_sla(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
index 03003d01f..02040ca01 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import qemu_migrate
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.qemu_migrate.ssh')
@@ -116,7 +117,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_sla_downtime(self, mock_ssh):
@@ -129,7 +130,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_sla_setuptime(self, mock_ssh):
@@ -142,7 +143,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index dcc0e810d..9e055befe 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -18,6 +18,7 @@ from oslo_serialization import jsonutils
from yardstick.common import utils
from yardstick.benchmark.scenarios.compute import ramspeed
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.ramspeed.ssh')
@@ -146,7 +147,7 @@ class RamspeedTestCase(unittest.TestCase):
"Block_size(kb)": 16384, "Bandwidth(MBps)": 14128.94}, {"Test_type":\
"INTEGER & WRITING", "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, r.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
def test_ramspeed_unsuccessful_script_error(self, mock_ssh):
options = {
@@ -219,7 +220,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 1300.27}, {"Test_type": "INTEGER AVERAGE:",\
"Bandwidth(MBps)": 2401.58}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, r.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
def test_ramspeed_unsuccessful_unknown_type_run(self, mock_ssh):
options = {
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
index 6339a2dcd..e4a8d6e26 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import unixbench
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
@@ -122,7 +123,7 @@ class UnixbenchTestCase(unittest.TestCase):
sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, u.run, result)
+ self.assertRaises(y_exc.SLAValidationError, u.run, result)
def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
@@ -137,7 +138,7 @@ class UnixbenchTestCase(unittest.TestCase):
sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, u.run, result)
+ self.assertRaises(y_exc.SLAValidationError, u.run, result)
def test_unixbench_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 74144afd5..2190e9337 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -19,6 +19,7 @@ from oslo_serialization import jsonutils
from yardstick.common import utils
from yardstick.benchmark.scenarios.networking import iperf3
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh')
@@ -118,7 +119,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_iperf_successful_sla_jitter(self, mock_ssh):
options = {"protocol": "udp", "bandwidth": "20m"}
@@ -152,7 +153,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_iperf_successful_tcp_protocal(self, mock_ssh):
options = {"protocol": "tcp", "nodelay": "yes"}
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
index 5907562c2..a7abcd98a 100755
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
@@ -18,6 +18,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.netperf.ssh')
@@ -98,7 +99,7 @@ class NetperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_netperf_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
index 956a9c078..a577dba59 100755
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
@@ -19,6 +19,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf_node
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
@@ -98,7 +99,7 @@ class NetperfNodeTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
index 4adfab120..559e0599e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -14,6 +14,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import ping
+from yardstick.common import exceptions as y_exc
class PingTestCase(unittest.TestCase):
@@ -74,7 +75,7 @@ class PingTestCase(unittest.TestCase):
p = ping.Ping(args, self.ctx)
mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
index 4662c8537..ad5217a14 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -14,6 +14,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import ping6
+from yardstick.common import exceptions as y_exc
class PingTestCase(unittest.TestCase):
@@ -98,7 +99,7 @@ class PingTestCase(unittest.TestCase):
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH.from_node()
mock_ssh.SSH.from_node().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 6aea03aee..4016f5055 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -13,6 +13,7 @@ import unittest
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
@@ -55,11 +56,9 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.number_of_ports = args['options']['number_of_ports']
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-
p._iptables_setup()
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ mock_ssh.SSH.from_node().run.assert_called_with(
"sudo iptables -F; "
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP"
% 1010, timeout=60)
@@ -74,8 +73,8 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.number_of_ports = args['options']['number_of_ports']
- mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p._iptables_setup)
+ mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
+ self.assertRaises(y_exc.SSHError, p._iptables_setup)
def test_pktgen_successful_iptables_get_result(self, mock_ssh):
@@ -88,12 +87,14 @@ class PktgenTestCase(unittest.TestCase):
p.number_of_ports = args['options']['number_of_ports']
mock_ssh.SSH.from_node().execute.return_value = (0, '150000', '')
- p._iptables_get_result()
+ result = p._iptables_get_result()
+ expected_result = 150000
+ self.assertEqual(result, expected_result)
mock_ssh.SSH.from_node().execute.assert_called_with(
"sudo iptables -L INPUT -vnx |"
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'"
- % 1010)
+ % 1010, raise_on_error=True)
def test_pktgen_unsuccessful_iptables_get_result(self, mock_ssh):
@@ -106,8 +107,8 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.number_of_ports = args['options']['number_of_ports']
- mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p._iptables_get_result)
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ self.assertRaises(y_exc.SSHError, p._iptables_get_result)
def test_pktgen_successful_no_sla(self, mock_ssh):
@@ -176,7 +177,7 @@ class PktgenTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_unsuccessful_script_error(self, mock_ssh):
@@ -191,8 +192,8 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, result)
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ self.assertRaises(y_exc.SSHError, p.run, result)
def test_pktgen_get_vnic_driver_name(self, mock_ssh):
args = {
@@ -213,9 +214,9 @@ class PktgenTestCase(unittest.TestCase):
p = pktgen.Pktgen(args, self.ctx)
p.server = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._get_vnic_driver_name)
+ self.assertRaises(y_exc.SSHError, p._get_vnic_driver_name)
def test_pktgen_get_sriov_queue_number(self, mock_ssh):
args = {
@@ -236,9 +237,9 @@ class PktgenTestCase(unittest.TestCase):
p = pktgen.Pktgen(args, self.ctx)
p.server = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._get_sriov_queue_number)
+ self.assertRaises(y_exc.SSHError, p._get_sriov_queue_number)
def test_pktgen_get_available_queue_number(self, mock_ssh):
args = {
@@ -249,11 +250,11 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
- p._get_available_queue_number()
+ self.assertEqual(p._get_available_queue_number(), 4)
mock_ssh.SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | head -1 |"
- "awk '{printf $2}'")
+ "awk '{printf $2}'", raise_on_error=True)
def test_pktgen_unsuccessful_get_available_queue_number(self, mock_ssh):
args = {
@@ -262,9 +263,9 @@ class PktgenTestCase(unittest.TestCase):
p = pktgen.Pktgen(args, self.ctx)
p.server = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._get_available_queue_number)
+ self.assertRaises(y_exc.SSHError, p._get_available_queue_number)
def test_pktgen_get_usable_queue_number(self, mock_ssh):
args = {
@@ -275,11 +276,11 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- p._get_usable_queue_number()
+ self.assertEqual(p._get_usable_queue_number(), 1)
mock_ssh.SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | tail -1 |"
- "awk '{printf $2}'")
+ "awk '{printf $2}'", raise_on_error=True)
def test_pktgen_unsuccessful_get_usable_queue_number(self, mock_ssh):
args = {
@@ -288,9 +289,9 @@ class PktgenTestCase(unittest.TestCase):
p = pktgen.Pktgen(args, self.ctx)
p.server = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._get_usable_queue_number)
+ self.assertRaises(y_exc.SSHError, p._get_usable_queue_number)
def test_pktgen_enable_ovs_multiqueue(self, mock_ssh):
args = {
@@ -332,12 +333,12 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
p._get_usable_queue_number = mock.Mock(return_value=1)
p._get_available_queue_number = mock.Mock(return_value=4)
- self.assertRaises(RuntimeError, p._enable_ovs_multiqueue)
+ self.assertRaises(y_exc.SSHError, p._enable_ovs_multiqueue)
def test_pktgen_setup_irqmapping_ovs(self, mock_ssh):
args = {
@@ -351,7 +352,7 @@ class PktgenTestCase(unittest.TestCase):
p._setup_irqmapping_ovs(4)
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ mock_ssh.SSH.from_node().run.assert_called_with(
"echo 8 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_setup_irqmapping_ovs_1q(self, mock_ssh):
@@ -366,7 +367,7 @@ class PktgenTestCase(unittest.TestCase):
p._setup_irqmapping_ovs(1)
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ mock_ssh.SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_unsuccessful_setup_irqmapping_ovs(self, mock_ssh):
@@ -377,9 +378,9 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._setup_irqmapping_ovs, 4)
+ self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 4)
def test_pktgen_unsuccessful_setup_irqmapping_ovs_1q(self, mock_ssh):
args = {
@@ -389,9 +390,9 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._setup_irqmapping_ovs, 1)
+ self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 1)
def test_pktgen_setup_irqmapping_sriov(self, mock_ssh):
args = {
@@ -405,7 +406,7 @@ class PktgenTestCase(unittest.TestCase):
p._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ mock_ssh.SSH.from_node().run.assert_called_with(
"echo 2 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_setup_irqmapping_sriov_1q(self, mock_ssh):
@@ -420,7 +421,7 @@ class PktgenTestCase(unittest.TestCase):
p._setup_irqmapping_sriov(1)
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ mock_ssh.SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
def test_pktgen_unsuccessful_setup_irqmapping_sriov(self, mock_ssh):
@@ -431,9 +432,9 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._setup_irqmapping_sriov, 2)
+ self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 2)
def test_pktgen_unsuccessful_setup_irqmapping_sriov_1q(self, mock_ssh):
args = {
@@ -443,9 +444,9 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._setup_irqmapping_sriov, 1)
+ self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 1)
def test_pktgen_is_irqbalance_disabled(self, mock_ssh):
args = {
@@ -456,10 +457,11 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- p._is_irqbalance_disabled()
+ result = p._is_irqbalance_disabled()
+ self.assertFalse(result)
mock_ssh.SSH.from_node().execute.assert_called_with(
- "grep ENABLED /etc/default/irqbalance")
+ "grep ENABLED /etc/default/irqbalance", raise_on_error=True)
def test_pktgen_unsuccessful_is_irqbalance_disabled(self, mock_ssh):
args = {
@@ -468,9 +470,9 @@ class PktgenTestCase(unittest.TestCase):
p = pktgen.Pktgen(args, self.ctx)
p.server = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._is_irqbalance_disabled)
+ self.assertRaises(y_exc.SSHError, p._is_irqbalance_disabled)
def test_pktgen_disable_irqbalance(self, mock_ssh):
args = {
@@ -480,11 +482,11 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ mock_ssh.SSH.from_node().run.return_value = (0, '', '')
p._disable_irqbalance()
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ mock_ssh.SSH.from_node().run.assert_called_with(
"sudo service irqbalance disable")
def test_pktgen_unsuccessful_disable_irqbalance(self, mock_ssh):
@@ -495,9 +497,9 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
- self.assertRaises(RuntimeError, p._disable_irqbalance)
+ self.assertRaises(y_exc.SSHError, p._disable_irqbalance)
def test_pktgen_multiqueue_setup_ovs(self, mock_ssh):
args = {
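Throughout the PktgenTestCase hunks above, the old pattern of returning a non-zero status tuple from execute() and expecting a RuntimeError is replaced by making the stubbed SSH call raise and asserting on the exception class. A minimal, self-contained sketch of that mock pattern; the SSHError class and the scenario method below are stand-ins, not the Yardstick implementations:

    import unittest
    from unittest import mock


    class SSHError(Exception):
        """Stand-in for yardstick.common.exceptions.SSHError."""


    class Scenario(object):
        def __init__(self, server):
            self.server = server

        def disable_irqbalance(self):
            # run() is expected to raise on failure rather than return a status.
            self.server.run("sudo service irqbalance disable")


    class ScenarioTestCase(unittest.TestCase):
        def test_disable_irqbalance_failure(self):
            server = mock.Mock()
            # Assigning an exception class to side_effect makes the call raise it.
            server.run.side_effect = SSHError
            self.assertRaises(SSHError, Scenario(server).disable_irqbalance)


    if __name__ == '__main__':
        unittest.main()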
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index 976087148..bcd417830 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -12,6 +12,7 @@ import unittest
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+from yardstick.common import exceptions as y_exc
class PktgenDPDKLatencyTestCase(unittest.TestCase):
@@ -162,9 +163,9 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
- def test_pktgen_dpdk_unsuccessful_script_error(self):
+ def test_pktgen_dpdk_run_unsuccessful_get_port_mac(self):
args = {
'options': {'packetsize': 60},
@@ -176,3 +177,19 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, p.run, result)
+
+ def test_pktgen_dpdk_run_unsuccessful_script_error(self):
+ args = {
+ 'options': {'packetsize': 60}
+ }
+
+ p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+
+ self.mock_ssh.SSH.from_node().execute.side_effect = ((0, '', ''),
+ (0, '', ''),
+ (0, '', ''),
+ (0, '', ''),
+ (0, '', ''),
+ y_exc.SSHError)
+ self.assertRaises(y_exc.SSHError, p.run, {})
+ self.assertEqual(self.mock_ssh.SSH.from_node().execute.call_count, 6)
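The new test_pktgen_dpdk_run_unsuccessful_script_error relies on side_effect accepting a sequence: each call to the mock consumes the next item, plain tuples are returned, and an exception class is raised when its turn comes, which is why the call count still reaches six. A small stand-alone illustration (names are hypothetical):

    from unittest import mock


    class SSHError(Exception):
        """Stand-in for yardstick.common.exceptions.SSHError."""


    client = mock.Mock()
    # Five successful (status, stdout, stderr) tuples, then a raised exception.
    client.execute.side_effect = [(0, '', '')] * 5 + [SSHError]

    for _ in range(5):
        assert client.execute('some command') == (0, '', '')

    try:
        client.execute('failing command')
    except SSHError:
        pass

    assert client.execute.call_count == 6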
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
index e90fb07c7..39392e4bb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
@@ -16,6 +16,7 @@ from oslo_serialization import jsonutils
import mock
from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+from yardstick.common import exceptions as y_exc
# pylint: disable=unused-argument
@@ -131,7 +132,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "flows": 110}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_dpdk_throughput_unsuccessful_script_error(
self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 2885dc6fb..bb1a7aaca 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -628,7 +628,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
'extra_args': {'arg1': 'value1', 'arg2': 'value2'},
'flow': {'flow': {}},
'imix': {'imix': {'64B': 100}},
- 'uplink': {}}
+ 'uplink': {},
+ 'duration': 30}
)
mock_tprofile_get.assert_called_once_with(fake_vnfd)
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index 419605b26..a606543e5 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -12,31 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
-
-from __future__ import absolute_import
-try:
- from unittest import mock
-except ImportError:
- import mock
+import mock
import unittest
+import subprocess
+import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.networking import vsperf
+from yardstick import exceptions as y_exc
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
class VsperfTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
"host": {
"ip": "10.229.47.137",
"user": "ubuntu",
"password": "ubuntu",
},
}
- self.args = {
+ self.scenario_cfg = {
'options': {
'testname': 'p2p_rfc2544_continuous',
'traffic_type': 'continuous',
@@ -57,70 +52,154 @@ class VsperfTestCase(unittest.TestCase):
}
}
- def test_vsperf_setup(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
+
+ self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
+ self.mock_subprocess_call = self._mock_subprocess_call.start()
+ self.mock_subprocess_call.return_value = None
+
+ self.addCleanup(self._stop_mock)
+
+ self.scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+ self._mock_subprocess_call.stop()
+
+ def test_setup(self):
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ def test_setup_tg_port_not_set(self):
+ del self.scenario_cfg['options']['trafficgen_port1']
+ del self.scenario_cfg['options']['trafficgen_port2']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+
+ self.mock_subprocess_call.assert_called_once_with(
+ 'setup_yardstick.sh setup', shell=True)
+ self.assertIsNone(scenario.tg_port1)
+ self.assertIsNone(scenario.tg_port2)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
+
+ def test_setup_no_setup_script(self):
+ del self.scenario_cfg['options']['setup_script']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+
+ self.mock_subprocess_call.assert_has_calls(
+ (mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth1"',
+ shell=True),
+ mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth3"',
+ shell=True)))
+ self.assertEqual(2, self.mock_subprocess_call.call_count)
+ self.assertIsNone(scenario.setup_script)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
+
+ def test_run_ok(self):
+ self.scenario.setup()
+
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ result = {}
+ self.scenario.run(result)
- def test_vsperf_teardown(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ def test_run_ok_setup_not_done(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ result = {}
+ self.scenario.run(result)
- p.teardown()
- self.assertFalse(p.setup_done)
+ self.assertTrue(self.scenario.setup_done)
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_vsperf_run_ok(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ def test_run_failed_vsperf_execution(self):
+ self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+ (1, '', ''))
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ with self.assertRaises(RuntimeError):
+ self.scenario.run({})
+ self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+ def test_run_failed_csv_report(self):
+ self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+ (0, '', ''),
+ (1, '', ''))
- result = {}
- p.run(result)
+ with self.assertRaises(RuntimeError):
+ self.scenario.run({})
+ self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
- self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+ def test_run_sla_fail(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n123456.000\r\n', '')
- def test_vsperf_run_falied_vsperf_execution(self, mock_ssh,
- mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ self.assertTrue('VSPERF_throughput_rx_fps(123456.000000) < '
+ 'SLA_throughput_rx_fps(500000.000000)'
+ in str(raised.exception))
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ def test_run_sla_fail_metric_not_collected(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n14797660.000\r\n', '')
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
- def test_vsperf_run_falied_csv_report(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+ in str(raised.exception))
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ def test_run_sla_fail_metric_not_defined_in_sla(self):
+ del self.scenario_cfg['sla']['throughput_rx_fps']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ scenario.run({})
+ self.assertTrue('throughput_rx_fps is not defined in SLA'
+ in str(raised.exception))
+
+ def test_teardown(self):
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ self.scenario.teardown()
+ self.assertFalse(self.scenario.setup_done)
+
+ def test_teardown_tg_port_not_set(self):
+ del self.scenario_cfg['options']['trafficgen_port1']
+ del self.scenario_cfg['options']['trafficgen_port2']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.teardown()
+
+ self.mock_subprocess_call.assert_called_once_with(
+ 'setup_yardstick.sh teardown', shell=True)
+ self.assertFalse(scenario.setup_done)
+
+ def test_teardown_no_setup_script(self):
+ del self.scenario_cfg['options']['setup_script']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.teardown()
+
+ self.mock_subprocess_call.assert_has_calls(
+ (mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth1"',
+ shell=True),
+ mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth3"',
+ shell=True)))
+ self.assertEqual(2, self.mock_subprocess_call.call_count)
+ self.assertFalse(scenario.setup_done)
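The rewritten VsperfTestCase drops the class-level @mock.patch decorators in favour of patchers started in setUp and stopped via addCleanup, which exposes the mocks as instance attributes and guarantees they are undone even when a test fails midway. A condensed sketch of that fixture pattern; the patched target here (subprocess.call) is only an example:

    import subprocess
    import unittest
    from unittest import mock


    class ExampleTestCase(unittest.TestCase):

        def setUp(self):
            # Start the patcher manually so the mock is reachable as self.mock_call.
            self._patcher = mock.patch.object(subprocess, 'call')
            self.mock_call = self._patcher.start()
            self.mock_call.return_value = 0
            # addCleanup runs even if the test body raises.
            self.addCleanup(self._patcher.stop)

        def test_call_is_patched(self):
            self.assertEqual(0, subprocess.call('true', shell=True))
            self.mock_call.assert_called_once_with('true', shell=True)


    if __name__ == '__main__':
        unittest.main()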
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index 1d2278e21..c05d2ced2 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -19,6 +19,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+from yardstick import exceptions as y_exc
class VsperfDPDKTestCase(unittest.TestCase):
@@ -211,3 +212,47 @@ class VsperfDPDKTestCase(unittest.TestCase):
result = {}
self.assertRaises(RuntimeError, self.scenario.run, result)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('VSPERF_throughput_rx_fps(123456.000000) < '
+ 'SLA_throughput_rx_fps(500000.000000)',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_metric_not_collected(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps was not collected by VSPERF',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
+ del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps is not defined in SLA',
+ str(raised.exception))
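These SLA tests use assertRaises as a context manager so the exception object can be inspected afterwards, with assertIn on str(raised.exception) checking the human-readable SLA message. A minimal sketch with a stand-in SLAValidationError and a hypothetical check function:

    import unittest


    class SLAValidationError(Exception):
        """Stand-in for yardstick.common.exceptions.SLAValidationError."""


    def check_sla(measured, minimum):
        if measured < minimum:
            raise SLAValidationError(
                'throughput_rx_fps(%f) < SLA_throughput_rx_fps(%f)'
                % (measured, minimum))


    class SLATestCase(unittest.TestCase):
        def test_sla_fail(self):
            with self.assertRaises(SLAValidationError) as raised:
                check_sla(123456.0, 500000.0)
            self.assertIn('throughput_rx_fps(123456.000000)',
                          str(raised.exception))


    if __name__ == '__main__':
        unittest.main()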
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
index f149cee69..6e69ddc6d 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
@@ -18,6 +18,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import fio
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.storage.fio.ssh')
@@ -203,7 +204,7 @@ class FioTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_fio_successful_bw_iops_sla(self, mock_ssh):
@@ -252,7 +253,7 @@ class FioTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_fio_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py
index 9361a97f2..d02a34d24 100644
--- a/yardstick/tests/unit/common/test_openstack_utils.py
+++ b/yardstick/tests/unit/common/test_openstack_utils.py
@@ -59,6 +59,12 @@ class GetShadeClientTestCase(unittest.TestCase):
mock_openstack_cloud.assert_called_once_with(
**constants.OS_CLOUD_DEFAULT_CONFIG)
+ @mock.patch.object(shade, 'operator_cloud', return_value='os_client')
+ def test_get_shade_operator_client(self, mock_operator_cloud):
+ self.assertEqual('os_client', openstack_utils.get_shade_operator_client())
+ mock_operator_cloud.assert_called_once_with(
+ **constants.OS_CLOUD_DEFAULT_CONFIG)
+
class DeleteNeutronNetTestCase(unittest.TestCase):
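The added shade test uses mock.patch.object as a decorator with return_value supplied up front, so the test receives the mock as an argument and can assert both the returned client and the call arguments. A compact sketch against a hypothetical wrapper function (json.dumps is patched purely as an example target):

    import json
    import unittest
    from unittest import mock


    def dumps_config(cfg):
        # Hypothetical helper that serialises a config dict.
        return json.dumps(cfg, sort_keys=True)


    class DumpsConfigTestCase(unittest.TestCase):

        @mock.patch.object(json, 'dumps', return_value='serialised')
        def test_dumps_config(self, mock_dumps):
            self.assertEqual('serialised', dumps_config({'a': 1}))
            mock_dumps.assert_called_once_with({'a': 1}, sort_keys=True)


    if __name__ == '__main__':
        unittest.main()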
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 5fd91c87f..6247afd18 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -24,9 +24,10 @@ from yardstick import ssh
from yardstick.common import constants
from yardstick.common import utils
from yardstick.common import exceptions
+from yardstick.tests.unit import base as ut_base
-class IterSubclassesTestCase(unittest.TestCase):
+class IterSubclassesTestCase(ut_base.BaseUnitTestCase):
# Disclaimer: this class is a modified copy from
# rally/tests/unit/common/plugin/test_discover.py
# Copyright 2015: Mirantis Inc.
@@ -47,7 +48,7 @@ class IterSubclassesTestCase(unittest.TestCase):
self.assertEqual([B, C, D], list(utils.itersubclasses(A)))
-class ImportModulesFromPackageTestCase(unittest.TestCase):
+class ImportModulesFromPackageTestCase(ut_base.BaseUnitTestCase):
@mock.patch('yardstick.common.utils.os.walk')
def test_import_modules_from_package_no_mod(self, mock_walk):
@@ -72,7 +73,7 @@ class ImportModulesFromPackageTestCase(unittest.TestCase):
mock_import_module.assert_called_once_with('bar.baz')
-class GetParaFromYaml(unittest.TestCase):
+class GetParaFromYaml(ut_base.BaseUnitTestCase):
@mock.patch('yardstick.common.utils.os.environ.get')
def test_get_param_para_not_found(self, get_env):
@@ -96,7 +97,7 @@ class GetParaFromYaml(unittest.TestCase):
return file_path
-class CommonUtilTestCase(unittest.TestCase):
+class CommonUtilTestCase(ut_base.BaseUnitTestCase):
def setUp(self):
self.data = {
@@ -186,14 +187,14 @@ class CommonUtilTestCase(unittest.TestCase):
self.assertEqual(mock_open.call_count, mock_open_call_count)
-class TestMacAddressToHex(unittest.TestCase):
+class TestMacAddressToHex(ut_base.BaseUnitTestCase):
def test_mac_address_to_hex_list(self):
self.assertEqual(utils.mac_address_to_hex_list("ea:3e:e1:9a:99:e8"),
['0xea', '0x3e', '0xe1', '0x9a', '0x99', '0xe8'])
-class TranslateToStrTestCase(unittest.TestCase):
+class TranslateToStrTestCase(ut_base.BaseUnitTestCase):
def test_translate_to_str_unicode(self):
input_str = u'hello'
@@ -219,7 +220,7 @@ class TranslateToStrTestCase(unittest.TestCase):
self.assertIs(input_value, result)
-class TestParseCpuInfo(unittest.TestCase):
+class TestParseCpuInfo(ut_base.BaseUnitTestCase):
def test_single_socket_no_hyperthread(self):
cpuinfo = """\
@@ -804,7 +805,7 @@ power management:
self.assertEqual(sockets, [0, 1])
-class ChangeObjToDictTestCase(unittest.TestCase):
+class ChangeObjToDictTestCase(ut_base.BaseUnitTestCase):
def test_change_obj_to_dict(self):
class A(object):
@@ -817,7 +818,7 @@ class ChangeObjToDictTestCase(unittest.TestCase):
self.assertEqual(obj_r, obj_s)
-class SetDictValueTestCase(unittest.TestCase):
+class SetDictValueTestCase(ut_base.BaseUnitTestCase):
def test_set_dict_value(self):
input_dic = {
@@ -827,7 +828,7 @@ class SetDictValueTestCase(unittest.TestCase):
self.assertEqual(output_dic.get('welcome', {}).get('to'), 'yardstick')
-class RemoveFileTestCase(unittest.TestCase):
+class RemoveFileTestCase(ut_base.BaseUnitTestCase):
def test_remove_file(self):
try:
@@ -837,7 +838,83 @@ class RemoveFileTestCase(unittest.TestCase):
self.assertTrue(isinstance(e, OSError))
-class TestUtils(unittest.TestCase):
+class ParseIniFileTestCase(ut_base.BaseUnitTestCase):
+
+ def setUp(self):
+ self._mock_config_parser_type = mock.patch.object(configparser,
+ 'ConfigParser')
+ self.mock_config_parser_type = self._mock_config_parser_type.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_config_parser_type.stop()
+
+ def test_parse_ini_file(self):
+ defaults = {'default1': 'value1',
+ 'default2': 'value2'}
+ s1 = {'key1': 'value11',
+ 'key2': 'value22'}
+ s2 = {'key1': 'value123',
+ 'key2': 'value234'}
+
+ mock_config_parser = mock.Mock()
+ self.mock_config_parser_type.return_value = mock_config_parser
+ mock_config_parser.read.return_value = True
+ mock_config_parser.sections.return_value = ['s1', 's2']
+ mock_config_parser.items.side_effect = iter([
+ defaults.items(),
+ s1.items(),
+ s2.items(),
+ ])
+
+ expected = {'DEFAULT': defaults,
+ 's1': s1,
+ 's2': s2}
+ result = utils.parse_ini_file('my_path')
+ self.assertDictEqual(expected, result)
+
+ @mock.patch.object(utils, 'logger')
+ def test_parse_ini_file_missing_section_header(self, *args):
+ mock_config_parser = mock.Mock()
+ self.mock_config_parser_type.return_value = mock_config_parser
+ mock_config_parser.read.side_effect = (
+ configparser.MissingSectionHeaderError(
+ mock.Mock(), 321, mock.Mock()))
+
+ with self.assertRaises(configparser.MissingSectionHeaderError):
+ utils.parse_ini_file('my_path')
+
+ def test_parse_ini_file_no_file(self):
+ mock_config_parser = mock.Mock()
+ self.mock_config_parser_type.return_value = mock_config_parser
+ mock_config_parser.read.return_value = False
+ with self.assertRaises(RuntimeError):
+ utils.parse_ini_file('my_path')
+
+ def test_parse_ini_file_no_default_section_header(self):
+ s1 = {'key1': 'value11',
+ 'key2': 'value22'}
+ s2 = {'key1': 'value123',
+ 'key2': 'value234'}
+
+ mock_config_parser = mock.Mock()
+ self.mock_config_parser_type.return_value = mock_config_parser
+ mock_config_parser.read.return_value = True
+ mock_config_parser.sections.return_value = ['s1', 's2']
+ mock_config_parser.items.side_effect = iter([
+ configparser.NoSectionError(mock.Mock()),
+ s1.items(),
+ s2.items(),
+ ])
+
+ expected = {'DEFAULT': {},
+ 's1': s1,
+ 's2': s2}
+ result = utils.parse_ini_file('my_path')
+ self.assertDictEqual(expected, result)
+
+
+class TestUtils(ut_base.BaseUnitTestCase):
@mock.patch('yardstick.common.utils.os.makedirs')
def test_makedirs(self, *_):
@@ -992,7 +1069,7 @@ class TestUtils(unittest.TestCase):
utils.validate_non_string_sequence(1, raise_exc=RuntimeError)
-class TestUtilsIpAddrMethods(unittest.TestCase):
+class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
GOOD_IP_V4_ADDRESS_STR_LIST = [
u'0.0.0.0',
@@ -1119,7 +1196,7 @@ class TestUtilsIpAddrMethods(unittest.TestCase):
self.assertEqual(utils.ip_to_hex(value), value)
-class SafeDecodeUtf8TestCase(unittest.TestCase):
+class SafeDecodeUtf8TestCase(ut_base.BaseUnitTestCase):
@unittest.skipIf(six.PY2,
'This test should only be launched with Python 3.x')
@@ -1130,7 +1207,7 @@ class SafeDecodeUtf8TestCase(unittest.TestCase):
self.assertEqual('this is a byte array', out)
-class ReadMeminfoTestCase(unittest.TestCase):
+class ReadMeminfoTestCase(ut_base.BaseUnitTestCase):
MEMINFO = (b'MemTotal: 65860500 kB\n'
b'MemFree: 28690900 kB\n'
@@ -1156,7 +1233,7 @@ class ReadMeminfoTestCase(unittest.TestCase):
self.assertEqual(self.MEMINFO_DICT, output)
-class TimerTestCase(unittest.TestCase):
+class TimerTestCase(ut_base.BaseUnitTestCase):
def test__getattr(self):
with utils.Timer() as timer:
@@ -1174,8 +1251,19 @@ class TimerTestCase(unittest.TestCase):
with utils.Timer(timeout=1):
time.sleep(2)
+ def test__enter_with_timeout_no_exception(self):
+ with utils.Timer(timeout=1, raise_exception=False):
+ time.sleep(2)
+
+ def test__iter(self):
+ iterations = []
+ for i in utils.Timer(timeout=2):
+ iterations.append(i)
+ time.sleep(1.1)
+ self.assertEqual(2, len(iterations))
+
-class WaitUntilTrueTestCase(unittest.TestCase):
+class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
def test_no_timeout(self):
self.assertIsNone(utils.wait_until_true(lambda: True,
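ParseIniFileTestCase stubs out configparser.ConfigParser entirely, feeding items() a side_effect sequence with one entry per section that parse_ini_file is expected to read. A reduced sketch of the same mocking idea against a hypothetical loader function:

    import configparser
    from unittest import mock


    def load_sections(path):
        # Hypothetical loader: returns {section: {key: value}} for an INI file.
        parser = configparser.ConfigParser()
        if not parser.read(path):
            raise RuntimeError('file not found: %s' % path)
        return {section: dict(parser.items(section))
                for section in parser.sections()}


    with mock.patch.object(configparser, 'ConfigParser') as parser_cls:
        parser = parser_cls.return_value
        parser.read.return_value = True
        parser.sections.return_value = ['s1', 's2']
        # One items() result per section, consumed in order.
        parser.items.side_effect = [{'key1': 'value11'}.items(),
                                    {'key1': 'value123'}.items()]
        assert load_sections('my_path') == {'s1': {'key1': 'value11'},
                                            's2': {'key1': 'value123'}}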
diff --git a/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py b/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
index 9d94e3d0b..e19311613 100644
--- a/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
+++ b/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
@@ -289,7 +289,7 @@ class TestDpdkNode(unittest.TestCase):
dpdk_helper.force_dpdk_rebind = mock_helper_func = mock.Mock()
dpdk_node._force_rebind()
- self.assertEqual(mock_helper_func.call_count, 1)
+ mock_helper_func.assert_called_once()
class TestDpdkBindHelper(unittest.TestCase):
diff --git a/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py b/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
index 6d5e1da60..e66e7fbb8 100644
--- a/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
+++ b/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
@@ -223,7 +223,7 @@ class TestMultiPortConfig(unittest.TestCase):
mock.Mock(return_value={'link_config': 0, 'arp_config': '',
'arp_config6': '', 'actions': '',
'arp_route_tbl': '', 'arp_route_tbl6': '',
- 'rules': ''})
+ 'flows': ''})
opnfv_vnf.port_pair_list = [("xe0", "xe1")]
self.assertIsNotNone(opnfv_vnf.generate_script(self.VNFD))
opnfv_vnf.lb_config = 'HW'
@@ -249,66 +249,6 @@ class TestMultiPortConfig(unittest.TestCase):
opnfv_vnf.generate_rule_config = mock.Mock()
self.assertIsNotNone(opnfv_vnf.generate_script_data())
- def test_generate_rule_config(self):
- topology_file = mock.Mock()
- config_tpl = mock.Mock()
- tmp_file = mock.Mock()
- vnfd_mock = mock.MagicMock()
- opnfv_vnf = samplevnf_helper.MultiPortConfig(
- topology_file, config_tpl, tmp_file, vnfd_mock)
- opnfv_vnf.get_config_tpl_data = mock.MagicMock()
- opnfv_vnf.socket = 0
- opnfv_vnf.start_core = 0
- opnfv_vnf.update_write_parser = mock.MagicMock()
- opnfv_vnf.generate_script_data = \
- mock.Mock(return_value={'link_config': 0, 'arp_config': '',
- 'arp_config6': '', 'actions': '',
- 'rules': ''})
- opnfv_vnf.port_pair_list = [("xe0", "xe1")]
- opnfv_vnf.get_port_pairs = mock.Mock()
- opnfv_vnf.vnf_type = 'ACL'
- opnfv_vnf.get_ports_gateway = mock.Mock(return_value=u'1.1.1.1')
- opnfv_vnf.get_netmask_gateway = mock.Mock(
- return_value=u'255.255.255.0')
- opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
- opnfv_vnf.get_netmask_gateway6 = mock.Mock(
- return_value=u'255.255.255.0')
- opnfv_vnf.txrx_pipeline = ''
- opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
- opnfv_vnf.rules = ''
- self.assertIsNotNone(opnfv_vnf.generate_rule_config())
- opnfv_vnf.rules = 'new'
- self.assertIsNotNone(opnfv_vnf.generate_rule_config())
-
- def test_generate_action_config(self):
- topology_file = mock.Mock()
- config_tpl = mock.Mock()
- tmp_file = mock.Mock()
- vnfd_mock = mock.MagicMock()
- opnfv_vnf = samplevnf_helper.MultiPortConfig(
- topology_file, config_tpl, tmp_file, vnfd_mock)
- opnfv_vnf.get_config_tpl_data = mock.MagicMock()
- opnfv_vnf.socket = 0
- opnfv_vnf.start_core = 0
- opnfv_vnf.update_write_parser = mock.MagicMock()
- opnfv_vnf.generate_script_data = \
- mock.Mock(return_value={'link_config': 0, 'arp_config': '',
- 'arp_config6': '', 'actions': '',
- 'rules': ''})
- opnfv_vnf.port_pair_list = [("xe0", "xe1")]
- opnfv_vnf.get_port_pairs = mock.Mock()
- opnfv_vnf.vnf_type = 'VFW'
- opnfv_vnf.get_ports_gateway = mock.Mock(return_value=u'1.1.1.1')
- opnfv_vnf.get_netmask_gateway = mock.Mock(
- return_value=u'255.255.255.0')
- opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
- opnfv_vnf.get_netmask_gateway6 = mock.Mock(
- return_value=u'255.255.255.0')
- opnfv_vnf.txrx_pipeline = ''
- opnfv_vnf.rules = ''
- self.assertIsNotNone(opnfv_vnf.generate_action_config())
-
def test_generate_arp_config6(self):
topology_file = mock.Mock()
config_tpl = mock.Mock()
diff --git a/yardstick/tests/unit/network_services/test_yang_model.py b/yardstick/tests/unit/network_services/test_yang_model.py
deleted file mode 100644
index a7eb36b8a..000000000
--- a/yardstick/tests/unit/network_services/test_yang_model.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import mock
-import unittest
-
-from yardstick.network_services.yang_model import YangModel
-
-
-class YangModelTestCase(unittest.TestCase):
- """Test all Yang Model methods."""
-
- ENTRIES = {
- 'access-list1': {
- 'acl': {
- 'access-list-entries': [{
- 'ace': {
- 'ace-oper-data': {
- 'match-counter': 0},
- 'actions': 'drop,count',
- 'matches': {
- 'destination-ipv4-network':
- '152.16.40.20/24',
- 'destination-port-range': {
- 'lower-port': 0,
- 'upper-port': 65535},
- 'source-ipv4-network': '0.0.0.0/0',
- 'source-port-range': {
- 'lower-port': 0,
- 'upper-port': 65535}},
- 'rule-name': 'rule1588'}},
- {
- 'ace': {
- 'ace-oper-data': {
- 'match-counter': 0},
- 'actions': 'drop,count',
- 'matches': {
- 'destination-ipv4-network':
- '0.0.0.0/0',
- 'destination-port-range': {
- 'lower-port': 0,
- 'upper-port': 65535},
- 'source-ipv4-network':
- '152.16.100.20/24',
- 'source-port-range': {
- 'lower-port': 0,
- 'upper-port': 65535}},
- 'rule-name': 'rule1589'}}],
- 'acl-name': 'sample-ipv4-acl',
- 'acl-type': 'ipv4-acl'}
- }
- }
-
- def test__init__(self):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- self.assertEqual(y.config_file, cfg)
-
- def test_config_file_setter(self):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- self.assertEqual(y.config_file, cfg)
- cfg2 = "yang2.yaml"
- y.config_file = cfg2
- self.assertEqual(y.config_file, cfg2)
-
- def test__get_entries(self):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- y._options = self.ENTRIES
- y._get_entries()
- self.assertIn("p acl add", y._rules)
-
- def test__get_entries_no_options(self):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- y._get_entries()
- self.assertEqual(y._rules, '')
-
- @mock.patch('yardstick.network_services.yang_model.open')
- @mock.patch('yardstick.network_services.yang_model.yaml_load')
- def test__read_config(self, mock_safe_load, *args):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- mock_safe_load.return_value = expected = {'key1': 'value1', 'key2': 'value2'}
- y._read_config()
- self.assertDictEqual(y._options, expected)
-
- @mock.patch('yardstick.network_services.yang_model.open')
- def test__read_config_open_error(self, mock_open):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- mock_open.side_effect = IOError('my error')
-
- self.assertEqual(y._options, {})
- with self.assertRaises(IOError) as raised:
- y._read_config()
-
- self.assertIn('my error', str(raised.exception))
- self.assertEqual(y._options, {})
-
- def test_get_rules(self):
- cfg = "yang.yaml"
- y = YangModel(cfg)
- y._read_config = read_mock = mock.Mock()
- y._get_entries = get_mock = mock.Mock()
-
- y._rules = None
- self.assertIsNone(y.get_rules())
- self.assertEqual(read_mock.call_count, 1)
- self.assertEqual(get_mock.call_count, 1)
-
- # True value should prevent calling read and get
- y._rules = 999
- self.assertEqual(y.get_rules(), 999)
- self.assertEqual(read_mock.call_count, 1)
- self.assertEqual(get_mock.call_count, 1)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
index 641064cbe..55276af58 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_base.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
@@ -69,5 +69,20 @@ class TestTrafficProfile(unittest.TestCase):
class TestDummyProfile(unittest.TestCase):
def test_execute(self):
- dummy_profile = base.DummyProfile(base.TrafficProfile)
+ tp_config = {'traffic_profile': {'duration': 15}}
+ dummy_profile = base.DummyProfile(tp_config)
self.assertIsNone(dummy_profile.execute({}))
+
+
+class TrafficProfileConfigTestCase(unittest.TestCase):
+
+ def test__init(self):
+ tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
+ self.assertEqual({'64B': 100}, tp_config_obj.packet_sizes)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_SCHEMA,
+ tp_config_obj.schema)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_FRAME_RATE,
+ tp_config_obj.frame_rate)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
+ tp_config_obj.duration)
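TrafficProfileConfigTestCase pins down the expectation that values given under the 'traffic_profile' key override class-level defaults, while everything else falls back to DEFAULT_SCHEMA, DEFAULT_FRAME_RATE and DEFAULT_DURATION. A toy illustration of that defaulting shape (illustrative only, not the Yardstick class):

    class ToyProfileConfig(object):
        DEFAULT_SCHEMA = 'isb:traffic_profile:0.1'
        DEFAULT_FRAME_RATE = 100
        DEFAULT_DURATION = 30

        def __init__(self, tp_config):
            profile = tp_config.get('traffic_profile', {})
            self.schema = profile.get('schema', self.DEFAULT_SCHEMA)
            self.frame_rate = profile.get('frame_rate', self.DEFAULT_FRAME_RATE)
            self.duration = profile.get('duration', self.DEFAULT_DURATION)
            self.packet_sizes = profile.get('packet_sizes')


    cfg = ToyProfileConfig({'traffic_profile': {'packet_sizes': {'64B': 100}}})
    assert cfg.packet_sizes == {'64B': 100}
    assert cfg.duration == ToyProfileConfig.DEFAULT_DURATION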
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py b/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
index 39905e6b1..2f6713760 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
@@ -102,8 +102,7 @@ class TestFixedProfile(unittest.TestCase):
'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}}
def test___init__(self):
- fixed_profile = \
- FixedProfile(TrafficProfile)
+ fixed_profile = FixedProfile(self.TRAFFIC_PROFILE)
self.assertIsNotNone(fixed_profile)
def test_execute(self):
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_http.py b/yardstick/tests/unit/network_services/traffic_profile/test_http.py
index 0d1b916a7..d44fab2b5 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_http.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_http.py
@@ -11,30 +11,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import unittest
-from yardstick.network_services.traffic_profile.base import TrafficProfile
-from yardstick.network_services.traffic_profile.http import \
- TrafficProfileGenericHTTP
+from yardstick.network_services.traffic_profile import http
class TestTrafficProfileGenericHTTP(unittest.TestCase):
+
+ TP_CONFIG = {'traffic_profile': {'duration': 10}}
+
def test___init__(self):
- traffic_profile_generic_htt_p = \
- TrafficProfileGenericHTTP(TrafficProfile)
- self.assertIsNotNone(traffic_profile_generic_htt_p)
+ tp_generic_http = http.TrafficProfileGenericHTTP(
+ self.TP_CONFIG)
+ self.assertIsNotNone(tp_generic_http)
def test_execute(self):
- traffic_profile_generic_htt_p = \
- TrafficProfileGenericHTTP(TrafficProfile)
+ tp_generic_http = http.TrafficProfileGenericHTTP(
+ self.TP_CONFIG)
traffic_generator = {}
- self.assertIsNone(
- traffic_profile_generic_htt_p.execute(traffic_generator))
+ self.assertIsNone(tp_generic_http.execute(traffic_generator))
def test__send_http_request(self):
- traffic_profile_generic_htt_p = \
- TrafficProfileGenericHTTP(TrafficProfile)
- self.assertIsNone(traffic_profile_generic_htt_p._send_http_request(
- "10.1.1.1", "250", "/req"))
+ tp_generic_http = http.TrafficProfileGenericHTTP(
+ self.TP_CONFIG)
+ self.assertIsNone(tp_generic_http._send_http_request(
+ '10.1.1.1', '250', '/req'))
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index 7bfd67fe0..036746e6b 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -51,6 +51,11 @@ class TestProxBinSearchProfile(unittest.TestCase):
fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
traffic_generator = mock.MagicMock()
+ attrs1 = {'get.return_value' : 10}
+ traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
+
+ attrs2 = {'__getitem__.return_value' : 10, 'get.return_value': 10}
+ traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
profile_helper = mock.MagicMock()
profile_helper.run_test = target
@@ -60,13 +65,14 @@ class TestProxBinSearchProfile(unittest.TestCase):
profile._profile_helper = profile_helper
profile.execute_traffic(traffic_generator)
+
self.assertEqual(round(profile.current_lower, 2), 74.69)
self.assertEqual(round(profile.current_upper, 2), 76.09)
- self.assertEqual(len(runs), 7)
+ self.assertEqual(len(runs), 77)
# Result Samples inc theor_max
result_tuple = {'Result_Actual_throughput': 5e-07,
- 'Result_theor_max_throughput': 0.00012340000000000002,
+ 'Result_theor_max_throughput': 7.5e-07,
'Result_pktSize': 200}
profile.queue.put.assert_called_with(result_tuple)
@@ -123,6 +129,11 @@ class TestProxBinSearchProfile(unittest.TestCase):
fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
traffic_generator = mock.MagicMock()
+ attrs1 = {'get.return_value': 10}
+ traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
+
+ attrs2 = {'__getitem__.return_value': 0, 'get.return_value': 0}
+ traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
profile_helper = mock.MagicMock()
profile_helper.run_test = target
@@ -172,7 +183,8 @@ class TestProxBinSearchProfile(unittest.TestCase):
# Result Samples
- result_tuple = {"Result_theor_max_throughput": 0, "Result_pktSize": 200}
+ result_tuple = {'Result_Actual_throughput': 0, "Result_theor_max_throughput": 0,
+ "Result_pktSize": 200}
profile.queue.put.assert_called_with(result_tuple)
# Check for success_ tuple (None expected)
@@ -182,3 +194,81 @@ class TestProxBinSearchProfile(unittest.TestCase):
for k in call_detail:
if "Success_" in k:
self.assertRaises(AttributeError)
+
+ def test_execute_4(self):
+
+ def target(*args, **_):
+ runs.append(args[2])
+ if args[2] < 0 or args[2] > 100:
+ raise RuntimeError(' '.join([str(args), str(runs)]))
+ if args[2] > 75.0:
+ return fail_tuple, {}
+ return success_tuple, {}
+
+ tp_config = {
+ 'traffic_profile': {
+ 'packet_sizes': [200],
+ 'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
+ },
+ }
+
+ runs = []
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ traffic_generator = mock.MagicMock()
+ attrs1 = {'get.return_value': 100000}
+ traffic_generator.scenario_helper.all_options.configure_mock(**attrs1)
+
+ attrs2 = {'__getitem__.return_value': 0, 'get.return_value': 0}
+ traffic_generator.scenario_helper.scenario_cfg["runner"].configure_mock(**attrs2)
+
+ profile_helper = mock.MagicMock()
+ profile_helper.run_test = target
+
+ profile = ProxBinSearchProfile(tp_config)
+ profile.init(mock.MagicMock())
+ profile._profile_helper = profile_helper
+
+ profile.execute_traffic(traffic_generator)
+ self.assertEqual(round(profile.current_lower, 2), 74.69)
+ self.assertEqual(round(profile.current_upper, 2), 76.09)
+ self.assertEqual(len(runs), 7)
+
+ # Result Samples inc theor_max
+ result_tuple = {'Result_Actual_throughput': 5e-07,
+ 'Result_theor_max_throughput': 7.5e-07,
+ 'Result_pktSize': 200}
+
+ profile.queue.put.assert_called_with(result_tuple)
+
+ success_result_tuple = {"Success_CurrentDropPackets": 0.5,
+ "Success_DropPackets": 0.5,
+ "Success_LatencyAvg": 5.3,
+ "Success_LatencyMax": 5.2,
+ "Success_LatencyMin": 5.1,
+ "Success_PktSize": 200,
+ "Success_RxThroughput": 7.5e-07,
+ "Success_Throughput": 7.5e-07,
+ "Success_TxThroughput": 0.00012340000000000002}
+
+ calls = profile.queue.put(success_result_tuple)
+ profile.queue.put.assert_has_calls(calls)
+
+ success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
+ "Success_DropPackets": 0.5,
+ "Success_LatencyAvg": 5.3,
+ "Success_LatencyMax": 5.2,
+ "Success_LatencyMin": 5.1,
+ "Success_PktSize": 200,
+ "Success_RxThroughput": 7.5e-07,
+ "Success_Throughput": 7.5e-07,
+ "Success_TxThroughput": 123.4,
+ "Success_can_be_lost": 409600,
+ "Success_drop_total": 20480,
+ "Success_rx_total": 4075520,
+ "Success_tx_total": 4096000}
+
+ calls = profile.queue.put(success_result_tuple2)
+ profile.queue.put.assert_has_calls(calls)
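The ProxBinSearchProfile tests shape the traffic generator with configure_mock: dotted keys such as 'get.return_value' set the return value of a nested call, and dunder keys such as '__getitem__.return_value' control what subscripting the mock yields. A short stand-alone illustration:

    from unittest import mock

    runner = mock.MagicMock()
    # Dotted keys walk the attribute chain; dunder keys configure protocol methods.
    runner.configure_mock(**{'get.return_value': 10,
                             '__getitem__.return_value': 0})

    assert runner.get('duration') == 10
    assert runner['interval'] == 0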
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
index e466305ea..cf31cc27c 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
@@ -124,5 +124,5 @@ class TestProxProfile(unittest.TestCase):
for _ in profile.bounds_iterator(mock_logger):
pass
- self.assertEqual(mock_logger.debug.call_count, 1)
+ mock_logger.debug.assert_called_once()
self.assertEqual(mock_logger.info.call_count, 10)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index 2684e0ba1..0cf93f9ae 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -11,33 +11,26 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-import unittest
import mock
-from yardstick.tests import STL_MOCKS
-
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
+from yardstick.network_services.traffic_profile import rfc2544
+from yardstick.tests.unit import base
-if stl_patch:
- from yardstick.network_services.traffic_profile.trex_traffic_profile \
- import TrexProfile
- from yardstick.network_services.traffic_profile.rfc2544 import \
- RFC2544Profile
-
-class TestRFC2544Profile(unittest.TestCase):
+class TestRFC2544Profile(base.BaseUnitTestCase):
TRAFFIC_PROFILE = {
"schema": "isb:traffic_profile:0.1",
"name": "fixed",
"description": "Fixed traffic profile to run UDP traffic",
"traffic_profile": {
"traffic_type": "FixedTraffic",
- "frame_rate": 100, # pps
+ "frame_rate": 100,
"flow_number": 10,
"frame_size": 64}}
@@ -45,233 +38,251 @@ class TestRFC2544Profile(unittest.TestCase):
'name': 'rfc2544',
'traffic_profile': {'traffic_type': 'RFC2544Profile',
'frame_rate': 100},
- 'downlink_0': {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'}},
- 'outer_l3v4': {'dstip4': '1.1.1.1-1.15.255.255',
- 'proto': 'udp',
- 'srcip4': '90.90.1.1-90.105.255.255',
- 'dscp': 0, 'ttl': 32, 'count': 1},
- 'outer_l4': {'srcport': '2001',
- 'dsrport': '1234', 'count': 1}}},
- 'uplink_0': {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'}},
- 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
- 'proto': 'udp',
- 'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32, 'count': 1},
- 'outer_l4': {'dstport': '2001',
- 'srcport': '1234', 'count': 1}}},
+ 'downlink_0':
+ {'ipv4':
+ {'outer_l2':
+ {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4':
+ {'dstip4': '1.1.1.1-1.15.255.255',
+ 'proto': 'udp',
+ 'srcip4': '90.90.1.1-90.105.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4':
+ {'srcport': '2001',
+ 'dsrport': '1234', 'count': 1}}},
+ 'uplink_0':
+ {'ipv4':
+ {'outer_l2':
+ {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4':
+ {'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4':
+ {'dstport': '2001',
+ 'srcport': '1234', 'count': 1}}},
'schema': 'isb:traffic_profile:0.1'}
def test___init__(self):
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- self.assertIsNotNone(r_f_c2544_profile.rate)
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ self.assertEqual(rfc2544_profile.max_rate, rfc2544_profile.rate)
+ self.assertEqual(0, rfc2544_profile.min_rate)
- def test_execute(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.networks = {
- "uplink_0": ["xe0"],
- "downlink_0": ["xe1"],
- }
- traffic_generator.client.return_value = True
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.params = self.PROFILE
- r_f_c2544_profile.first_run = True
- self.assertIsNone(r_f_c2544_profile.execute_traffic(traffic_generator))
+ def test_stop_traffic(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ mock_generator = mock.Mock()
+ rfc2544_profile.stop_traffic(traffic_generator=mock_generator)
+ mock_generator.client.stop.assert_called_once()
+ mock_generator.client.reset.assert_called_once()
+ mock_generator.client.remove_all_streams.assert_called_once()
- def test_get_drop_percentage(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.networks = {
- "uplink_0": ["xe0"],
- "downlink_0": ["xe1"],
- }
- traffic_generator.client.return_value = True
+ def test_execute_traffic(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ mock_generator = mock.Mock()
+ mock_generator.networks = {
+ 'downlink_0': ['xe0', 'xe1'],
+ 'uplink_0': ['xe2', 'xe3'],
+ 'downlink_1': []}
+ mock_generator.port_num.side_effect = [10, 20, 30, 40]
+ mock_generator.rfc2544_helper.correlated_traffic = False
+ rfc2544_profile.params = {
+ 'downlink_0': 'profile1',
+ 'uplink_0': 'profile2'}
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.params = self.PROFILE
- r_f_c2544_profile.register_generator(traffic_generator)
- self.assertIsNone(r_f_c2544_profile.execute_traffic(traffic_generator))
+ with mock.patch.object(rfc2544_profile, '_create_profile') as \
+ mock_create_profile:
+ rfc2544_profile.execute_traffic(traffic_generator=mock_generator)
+ mock_create_profile.assert_has_calls([
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY),
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY)])
+ mock_generator.client.add_streams.assert_has_calls([
+ mock.call(mock.ANY, ports=[10]),
+ mock.call(mock.ANY, ports=[20]),
+ mock.call(mock.ANY, ports=[30]),
+ mock.call(mock.ANY, ports=[40])])
+ mock_generator.client.start(ports=[10, 20, 30, 40],
+ duration=rfc2544_profile.config.duration,
+ force=True)
- samples = {}
- for ifname in range(1):
- name = "xe{}".format(ifname)
- samples[name] = {
- "rx_throughput_fps": 20,
- "tx_throughput_fps": 20,
- "rx_throughput_mbps": 10,
- "tx_throughput_mbps": 10,
- "in_packets": 1000,
- "out_packets": 1000,
- }
+ @mock.patch.object(trex_stl_streams, 'STLProfile')
+ def test__create_profile(self, mock_stl_profile):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ port_pg_id = mock.ANY
+ profile_data = {'packetid_1': {'outer_l2': {'framesize': 'imix_info'}}}
+ rate = 100
+ with mock.patch.object(rfc2544_profile, '_create_imix_data') as \
+ mock_create_imix, \
+ mock.patch.object(rfc2544_profile, '_create_vm') as \
+ mock_create_vm, \
+ mock.patch.object(rfc2544_profile, '_create_streams') as \
+ mock_create_streams:
+ mock_create_imix.return_value = 'imix_data'
+ mock_create_streams.return_value = ['stream1']
+ rfc2544_profile._create_profile(profile_data, rate, port_pg_id)
- expected = {
- 'DropPercentage': 0.0,
- 'RxThroughput': 100 / 3.0,
- 'TxThroughput': 100 / 3.0,
- 'CurrentDropPercentage': 0.0,
- 'Throughput': 66.66666666666667,
- 'xe0': {
- 'tx_throughput_fps': 20,
- 'in_packets': 1000,
- 'out_packets': 1000,
- 'rx_throughput_mbps': 10,
- 'tx_throughput_mbps': 10,
- 'rx_throughput_fps': 20,
- },
- }
- traffic_generator.generate_samples.return_value = samples
- traffic_generator.RUN_DURATION = 30
- traffic_generator.rfc2544_helper.tolerance_low = 0.0001
- traffic_generator.rfc2544_helper.tolerance_high = 0.0001
- result = r_f_c2544_profile.get_drop_percentage(traffic_generator)
- self.assertDictEqual(result, expected)
+ mock_create_imix.assert_called_once_with('imix_info')
+ mock_create_vm.assert_called_once_with(
+ {'outer_l2': {'framesize': 'imix_info'}})
+ mock_create_streams.assert_called_once_with('imix_data', 100,
+ port_pg_id)
+ mock_stl_profile.assert_called_once_with(['stream1'])
- def test_get_drop_percentage_update(self):
- traffic_generator = mock.Mock(autospec=RFC2544Profile)
- traffic_generator.networks = {
- "uplink_0": ["xe0"],
- "downlink_0": ["xe1"],
- }
- traffic_generator.client = mock.Mock(return_value=True)
+ def test__create_imix_data(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ data = {'64B': 50, '128B': 50}
+ self.assertEqual({'64': 50.0, '128': 50.0},
+ rfc2544_profile._create_imix_data(data))
+ data = {'64B': 1, '128b': 3}
+ self.assertEqual({'64': 25.0, '128': 75.0},
+ rfc2544_profile._create_imix_data(data))
+ data = {}
+ self.assertEqual({}, rfc2544_profile._create_imix_data(data))
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.params = self.PROFILE
- r_f_c2544_profile.register_generator(traffic_generator)
- self.assertIsNone(r_f_c2544_profile.execute_traffic())
+ def test__create_vm(self):
+ packet = {'outer_l2': 'l2_definition'}
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ with mock.patch.object(rfc2544_profile, '_set_outer_l2_fields') as \
+ mock_l2_fields:
+ rfc2544_profile._create_vm(packet)
+ mock_l2_fields.assert_called_once_with('l2_definition')
- samples = {}
- for ifname in range(1):
- name = "xe{}".format(ifname)
- samples[name] = {
- "rx_throughput_fps": 20,
- "tx_throughput_fps": 20,
- "rx_throughput_mbps": 10,
- "tx_throughput_mbps": 10,
- "in_packets": 1000,
- "out_packets": 1002,
- }
- expected = {
- 'DropPercentage': 0.1996,
- 'RxThroughput': 33.333333333333336,
- 'TxThroughput': 33.4,
- 'CurrentDropPercentage': 0.1996,
- 'Throughput': 66.66666666666667,
- 'xe0': {
- 'tx_throughput_fps': 20,
- 'in_packets': 1000,
- 'out_packets': 1002,
- 'rx_throughput_mbps': 10,
- 'tx_throughput_mbps': 10,
- 'rx_throughput_fps': 20,
- },
- }
- traffic_generator.generate_samples = mock.MagicMock(
- return_value=samples)
- traffic_generator.RUN_DURATION = 30
- traffic_generator.rfc2544_helper.tolerance_low = 0.0001
- traffic_generator.rfc2544_helper.tolerance_high = 0.0001
- result = r_f_c2544_profile.get_drop_percentage(traffic_generator)
- self.assertDictEqual(expected, result)
+ @mock.patch.object(trex_stl_packet_builder_scapy, 'STLPktBuilder',
+ return_value='packet')
+ def test__create_single_packet(self, mock_pktbuilder):
+ size = 128
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.ether_packet = Pkt.Eth()
+ rfc2544_profile.ip_packet = Pkt.IP()
+ rfc2544_profile.udp_packet = Pkt.UDP()
+ rfc2544_profile.trex_vm = 'trex_vm'
+ base_pkt = (rfc2544_profile.ether_packet / rfc2544_profile.ip_packet /
+ rfc2544_profile.udp_packet)
+ pad = (size - len(base_pkt)) * 'x'
+ output = rfc2544_profile._create_single_packet(size=size)
+ mock_pktbuilder.assert_called_once_with(pkt=base_pkt / pad,
+ vm='trex_vm')
+ self.assertEqual(output, 'packet')
- def test_get_drop_percentage_div_zero(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.networks = {
- "uplink_0": ["xe0"],
- "downlink_0": ["xe1"],
- }
- traffic_generator.client = \
- mock.Mock(return_value=True)
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.params = self.PROFILE
- self.assertIsNone(
- r_f_c2544_profile.execute_traffic(traffic_generator))
- samples = {}
- for ifname in range(1):
- name = "xe{}".format(ifname)
- samples[name] = {"rx_throughput_fps": 20,
- "tx_throughput_fps": 20,
- "rx_throughput_mbps": 10,
- "tx_throughput_mbps": 10,
- "in_packets": 1000,
- "out_packets": 0}
- r_f_c2544_profile.throughput_max = 0
- expected = {
- 'DropPercentage': 100.0, 'RxThroughput': 100 / 3.0,
- 'TxThroughput': 0.0, 'CurrentDropPercentage': 100.0,
- 'Throughput': 66.66666666666667,
- 'xe0': {
- 'tx_throughput_fps': 20, 'in_packets': 1000,
- 'out_packets': 0, 'rx_throughput_mbps': 10,
- 'tx_throughput_mbps': 10, 'rx_throughput_fps': 20
- }
- }
- traffic_generator.generate_samples = mock.Mock(return_value=samples)
- traffic_generator.RUN_DURATION = 30
- traffic_generator.rfc2544_helper.tolerance_low = 0.0001
- traffic_generator.rfc2544_helper.tolerance_high = 0.0001
- self.assertDictEqual(expected,
- r_f_c2544_profile.get_drop_percentage(traffic_generator))
+ @mock.patch.object(trex_stl_packet_builder_scapy, 'STLPktBuilder',
+ return_value='packet')
+ def test__create_single_packet_qinq(self, mock_pktbuilder):
+ size = 128
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.ether_packet = Pkt.Eth()
+ rfc2544_profile.ip_packet = Pkt.IP()
+ rfc2544_profile.udp_packet = Pkt.UDP()
+ rfc2544_profile.trex_vm = 'trex_vm'
+ rfc2544_profile.qinq = True
+ rfc2544_profile.qinq_packet = Pkt.Dot1Q(vlan=1) / Pkt.Dot1Q(vlan=2)
+ base_pkt = (rfc2544_profile.ether_packet /
+ rfc2544_profile.qinq_packet / rfc2544_profile.ip_packet /
+ rfc2544_profile.udp_packet)
+ pad = (size - len(base_pkt)) * 'x'
+ output = rfc2544_profile._create_single_packet(size=size)
+ mock_pktbuilder.assert_called_once_with(pkt=base_pkt / pad,
+ vm='trex_vm')
+ self.assertEqual(output, 'packet')
+
+ @mock.patch.object(trex_stl_streams, 'STLFlowLatencyStats')
+ @mock.patch.object(trex_stl_streams, 'STLTXCont')
+ @mock.patch.object(trex_stl_client, 'STLStream')
+ def test__create_streams(self, mock_stream, mock_txcont, mock_latency):
+ imix_data = {'64': 25, '512': 75}
+ rate = 35
+ port_pg_id = rfc2544.PortPgIDMap()
+ port_pg_id.add_port(10)
+ mock_stream.side_effect = ['stream1', 'stream2']
+ mock_txcont.side_effect = ['txcont1', 'txcont2']
+ mock_latency.side_effect = ['latency1', 'latency2']
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ with mock.patch.object(rfc2544_profile, '_create_single_packet'):
+ output = rfc2544_profile._create_streams(imix_data, rate,
+ port_pg_id)
+ self.assertEqual(['stream1', 'stream2'], output)
+ mock_latency.assert_has_calls([
+ mock.call(pg_id=1), mock.call(pg_id=2)])
+ mock_txcont.assert_has_calls([
+ mock.call(percentage=float(25 * 35) / 100),
+ mock.call(percentage=float(75 * 35) / 100)], any_order=True)
+
+ def test_get_drop_percentage(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ samples = [
+ {'xe1': {'tx_throughput_fps': 100,
+ 'rx_throughput_fps': 101,
+ 'out_packets': 2000,
+ 'in_packets': 2010},
+ 'xe2': {'tx_throughput_fps': 200,
+ 'rx_throughput_fps': 201,
+ 'out_packets': 4000,
+ 'in_packets': 4010}},
+ {'xe1': {'tx_throughput_fps': 106,
+ 'rx_throughput_fps': 108,
+ 'out_packets': 2031,
+ 'in_packets': 2040,
+ 'latency': 'Latency1'},
+ 'xe2': {'tx_throughput_fps': 203,
+ 'rx_throughput_fps': 215,
+ 'out_packets': 4025,
+ 'in_packets': 4040,
+ 'latency': 'Latency2'}}
+ ]
+ output = rfc2544_profile.get_drop_percentage(samples, 0, 0, False)
+ expected = {'DropPercentage': 0.3963,
+ 'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
+ 'RxThroughput': 312.5,
+ 'TxThroughput': 304.5,
+ 'CurrentDropPercentage': 0.3963,
+ 'Rate': 100,
+ 'Throughput': 312.5}
+ self.assertEqual(expected, output)
- def test_get_multiplier(self):
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.max_rate = 100
- r_f_c2544_profile.min_rate = 100
- self.assertEqual("1.0", r_f_c2544_profile.get_multiplier())
- def test_calculate_pps(self):
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.rate = 100
- r_f_c2544_profile.pps = 100
- samples = {'Throughput': 4549093.33}
- self.assertEqual((2274546.67, 1.0),
- r_f_c2544_profile.calculate_pps(samples))
+class PortPgIDMapTestCase(base.BaseUnitTestCase):
- def test_create_single_stream(self):
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile._create_single_packet = mock.MagicMock()
- r_f_c2544_profile.pg_id = 1
- self.assertIsNotNone(
- r_f_c2544_profile.create_single_stream(64, 2274546.67))
+ def test_add_port(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ self.assertEqual(10, port_pg_id_map._last_port)
+ self.assertEqual([], port_pg_id_map._port_pg_id_map[10])
- def test_create_single_stream_no_pg_id(self):
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile._create_single_packet = mock.MagicMock()
- r_f_c2544_profile.pg_id = 0
- self.assertIsNotNone(
- r_f_c2544_profile.create_single_stream(64, 2274546.67))
+ def test_get_pg_ids(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ port_pg_id_map.increase_pg_id()
+ port_pg_id_map.increase_pg_id()
+ port_pg_id_map.add_port(20)
+ port_pg_id_map.increase_pg_id()
+ self.assertEqual([1, 2], port_pg_id_map.get_pg_ids(10))
+ self.assertEqual([3], port_pg_id_map.get_pg_ids(20))
- def test_execute_latency(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.networks = {
- "private_0": ["xe0"],
- "public_0": ["xe1"],
- }
- traffic_generator.client = \
- mock.Mock(return_value=True)
- r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
- r_f_c2544_profile.params = self.PROFILE
- r_f_c2544_profile.first_run = True
- samples = {}
- for ifname in range(1):
- name = "xe{}".format(ifname)
- samples[name] = {"rx_throughput_fps": 20,
- "tx_throughput_fps": 20,
- "rx_throughput_mbps": 10,
- "tx_throughput_mbps": 10,
- "in_packets": 1000,
- "out_packets": 0}
+ def test_increase_pg_id_no_port(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ self.assertIsNone(port_pg_id_map.increase_pg_id())
- samples['Throughput'] = 4549093.33
- r_f_c2544_profile.calculate_pps = mock.Mock(return_value=[2274546.67,
- 1.0])
+ def test_increase_pg_id_last_port(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ self.assertEqual(1, port_pg_id_map.increase_pg_id())
+ self.assertEqual([1], port_pg_id_map.get_pg_ids(10))
+ self.assertEqual(10, port_pg_id_map._last_port)
- self.assertIsNone(r_f_c2544_profile.execute_latency(traffic_generator,
- samples))
+ def test_increase_pg_id(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ port_pg_id_map.increase_pg_id()
+ self.assertEqual(2, port_pg_id_map.increase_pg_id(port=20))
+ self.assertEqual([1], port_pg_id_map.get_pg_ids(10))
+ self.assertEqual([2], port_pg_id_map.get_pg_ids(20))
+ self.assertEqual(20, port_pg_id_map._last_port)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
index 5fe1b7326..628e85459 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
@@ -14,29 +14,12 @@
import ipaddress
-import mock
import six
import unittest
-from yardstick.tests import STL_MOCKS
from yardstick.common import exceptions as y_exc
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.traffic_profile.base import TrafficProfile
- from yardstick.network_services.traffic_profile.trex_traffic_profile import TrexProfile
- from yardstick.network_services.traffic_profile.trex_traffic_profile import SRC
- from yardstick.network_services.traffic_profile.trex_traffic_profile import DST
- from yardstick.network_services.traffic_profile.trex_traffic_profile import ETHERNET
- from yardstick.network_services.traffic_profile.trex_traffic_profile import IP
- from yardstick.network_services.traffic_profile.trex_traffic_profile import IPv6
- from yardstick.network_services.traffic_profile.trex_traffic_profile import UDP
- from yardstick.network_services.traffic_profile.trex_traffic_profile import SRC_PORT
- from yardstick.network_services.traffic_profile.trex_traffic_profile import DST_PORT
- from yardstick.network_services.traffic_profile.trex_traffic_profile import TYPE_OF_SERVICE
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import trex_traffic_profile
class TestTrexProfile(unittest.TestCase):
@@ -59,7 +42,7 @@ class TestTrexProfile(unittest.TestCase):
'name': 'rfc2544',
'traffic_profile': {'traffic_type': 'RFC2544Profile',
'frame_rate': 100},
- TrafficProfile.DOWNLINK: {
+ tp_base.TrafficProfile.DOWNLINK: {
'ipv4': {'outer_l2': {'framesize': {'64B': '100',
'1518B': '0',
'128B': '0',
@@ -77,7 +60,7 @@ class TestTrexProfile(unittest.TestCase):
'outer_l4': {'srcport': '2001',
'dsrport': '1234',
'count': 1}}},
- TrafficProfile.UPLINK: {
+ tp_base.TrafficProfile.UPLINK: {
'ipv4':
{'outer_l2': {'framesize':
{'64B': '100', '1518B': '0',
@@ -99,7 +82,7 @@ class TestTrexProfile(unittest.TestCase):
'name': 'rfc2544',
'traffic_profile': {'traffic_type': 'RFC2544Profile',
'frame_rate': 100},
- TrafficProfile.DOWNLINK: {
+ tp_base.TrafficProfile.DOWNLINK: {
'ipv6': {'outer_l2': {'framesize':
{'64B': '100', '1518B': '0',
'128B': '0', '1400B': '0',
@@ -118,7 +101,7 @@ class TestTrexProfile(unittest.TestCase):
'outer_l4': {'srcport': '2001',
'dsrport': '1234',
'count': 1}}},
- TrafficProfile.UPLINK: {
+ tp_base.TrafficProfile.UPLINK: {
'ipv6': {'outer_l2': {'framesize':
{'64B': '100', '1518B': '0',
'128B': '0', '1400B': '0',
@@ -140,17 +123,15 @@ class TestTrexProfile(unittest.TestCase):
'schema': 'isb:traffic_profile:0.1'}
def test___init__(self):
- TrafficProfile.params = self.PROFILE
- trex_profile = \
- TrexProfile(TrafficProfile)
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
self.assertEqual(trex_profile.pps, 100)
def test_qinq(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
qinq = {"S-VLAN": {"id": 128, "priority": 0, "cfi": 0},
"C-VLAN": {"id": 512, "priority": 0, "cfi": 0}}
- trex_profile = \
- TrexProfile(TrafficProfile)
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
self.assertIsNone(trex_profile.set_qinq(qinq))
qinq = {"S-VLAN": {"id": "128-130", "priority": 0, "cfi": 0},
@@ -158,64 +139,39 @@ class TestTrexProfile(unittest.TestCase):
self.assertIsNone(trex_profile.set_qinq(qinq))
def test__set_outer_l2_fields(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
qinq = {"S-VLAN": {"id": 128, "priority": 0, "cfi": 0},
"C-VLAN": {"id": 512, "priority": 0, "cfi": 0}}
- outer_l2 = self.PROFILE[TrafficProfile.UPLINK]['ipv4']['outer_l2']
+ outer_l2 = self.PROFILE[
+ tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l2']
outer_l2['QinQ'] = qinq
self.assertIsNone(trex_profile._set_outer_l2_fields(outer_l2))
def test__set_outer_l3v4_fields(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- outer_l3v4 = self.PROFILE[TrafficProfile.UPLINK]['ipv4']['outer_l3v4']
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ outer_l3v4 = self.PROFILE[
+ tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l3v4']
outer_l3v4['proto'] = 'tcp'
self.assertIsNone(trex_profile._set_outer_l3v4_fields(outer_l3v4))
def test__set_outer_l3v6_fields(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- outer_l3v6 = self.PROFILE_v6[TrafficProfile.UPLINK]['ipv6']['outer_l3v4']
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ outer_l3v6 = self.PROFILE_v6[
+ tp_base.TrafficProfile.UPLINK]['ipv6']['outer_l3v4']
outer_l3v6['proto'] = 'tcp'
outer_l3v6['tc'] = 1
outer_l3v6['hlim'] = 10
self.assertIsNone(trex_profile._set_outer_l3v6_fields(outer_l3v6))
def test__set_outer_l4_fields(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- outer_l4 = self.PROFILE[TrafficProfile.UPLINK]['ipv4']['outer_l4']
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ outer_l4 = self.PROFILE[
+ tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l4']
self.assertIsNone(trex_profile._set_outer_l4_fields(outer_l4))
- def test_get_streams(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- trex_profile.params = self.PROFILE
- profile_data = self.PROFILE[TrafficProfile.UPLINK]
- self.assertIsNotNone(trex_profile.get_streams(profile_data))
- trex_profile.pg_id = 1
- self.assertIsNotNone(trex_profile.get_streams(profile_data))
- trex_profile.params = self.PROFILE_v6
- trex_profile.profile_data = self.PROFILE_v6[TrafficProfile.UPLINK]
- self.assertIsNotNone(trex_profile.get_streams(profile_data))
- trex_profile.pg_id = 1
- self.assertIsNotNone(trex_profile.get_streams(profile_data))
-
- def test_generate_packets(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- trex_profile.fsize = 10
- trex_profile.base_pkt = [10]
- self.assertIsNone(trex_profile.generate_packets())
-
- def test_generate_imix_data_error(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- self.assertEqual({}, trex_profile.generate_imix_data(False))
-
def test__count_ip_ipv4(self):
- start, end, count = TrexProfile._count_ip('1.1.1.1', '1.2.3.4')
+ start, end, count = trex_traffic_profile.TrexProfile._count_ip(
+ '1.1.1.1', '1.2.3.4')
self.assertEqual('1.1.1.1', str(start))
self.assertEqual('1.2.3.4', str(end))
diff = (int(ipaddress.IPv4Address(six.u('1.2.3.4'))) -
@@ -225,7 +181,8 @@ class TestTrexProfile(unittest.TestCase):
def test__count_ip_ipv6(self):
start_ip = '0064:ff9b:0:0:0:0:9810:6414'
end_ip = '0064:ff9b:0:0:0:0:9810:6420'
- start, end, count = TrexProfile._count_ip(start_ip, end_ip)
+ start, end, count = trex_traffic_profile.TrexProfile._count_ip(
+ start_ip, end_ip)
self.assertEqual(0x98106414, start)
self.assertEqual(0x98106420, end)
self.assertEqual(0x98106420 - 0x98106414, count)
@@ -234,10 +191,10 @@ class TestTrexProfile(unittest.TestCase):
start_ip = '0064:ff9b:0:0:0:0:9810:6420'
end_ip = '0064:ff9b:0:0:0:0:9810:6414'
with self.assertRaises(y_exc.IPv6RangeError):
- TrexProfile._count_ip(start_ip, end_ip)
+ trex_traffic_profile.TrexProfile._count_ip(start_ip, end_ip)
def test__dscp_range_action_partial_actual_count_zero(self):
- traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
dscp_partial = traffic_profile._dscp_range_action_partial()
flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
@@ -245,7 +202,7 @@ class TestTrexProfile(unittest.TestCase):
self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
def test__dscp_range_action_partial_count_greater_than_actual(self):
- traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
dscp_partial = traffic_profile._dscp_range_action_partial()
flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
@@ -253,7 +210,7 @@ class TestTrexProfile(unittest.TestCase):
self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
def test__udp_range_action_partial_actual_count_zero(self):
- traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
traffic_profile.udp['field1'] = 'value1'
udp_partial = traffic_profile._udp_range_action_partial('field1')
@@ -262,48 +219,59 @@ class TestTrexProfile(unittest.TestCase):
self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
def test__udp_range_action_partial_count_greater_than_actual(self):
- traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
traffic_profile.udp['field1'] = 'value1'
- udp_partial = traffic_profile._udp_range_action_partial('field1', 'not_used_count')
-
+ udp_partial = traffic_profile._udp_range_action_partial(
+ 'field1', 'not_used_count')
flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
udp_partial('1', '10', '100')
self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
def test__general_single_action_partial(self):
- trex_profile = TrexProfile(TrafficProfile)
-
- trex_profile._general_single_action_partial(ETHERNET)(SRC)(
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ trex_profile._general_single_action_partial(
+ trex_traffic_profile.ETHERNET)(trex_traffic_profile.SRC)(
self.EXAMPLE_ETHERNET_ADDR)
self.assertEqual(self.EXAMPLE_ETHERNET_ADDR,
trex_profile.ether_packet.src)
- trex_profile._general_single_action_partial(IP)(DST)(
- self.EXAMPLE_IP_ADDR)
+ trex_profile._general_single_action_partial(trex_traffic_profile.IP)(
+ trex_traffic_profile.DST)(self.EXAMPLE_IP_ADDR)
self.assertEqual(self.EXAMPLE_IP_ADDR, trex_profile.ip_packet.dst)
- trex_profile._general_single_action_partial(IPv6)(DST)(
- self.EXAMPLE_IPv6_ADDR)
+ trex_profile._general_single_action_partial(trex_traffic_profile.IPv6)(
+ trex_traffic_profile.DST)(self.EXAMPLE_IPv6_ADDR)
self.assertEqual(self.EXAMPLE_IPv6_ADDR, trex_profile.ip6_packet.dst)
- trex_profile._general_single_action_partial(UDP)(SRC_PORT)(5060)
+ trex_profile._general_single_action_partial(trex_traffic_profile.UDP)(
+ trex_traffic_profile.SRC_PORT)(5060)
self.assertEqual(5060, trex_profile.udp_packet.sport)
- trex_profile._general_single_action_partial(IP)(TYPE_OF_SERVICE)(0)
+ trex_profile._general_single_action_partial(trex_traffic_profile.IP)(
+ trex_traffic_profile.TYPE_OF_SERVICE)(0)
self.assertEqual(0, trex_profile.ip_packet.tos)
def test__set_proto_addr(self):
- trex_profile = TrexProfile(TrafficProfile)
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
ether_range = "00:00:00:00:00:01-00:00:00:00:00:02"
ip_range = "1.1.1.2-1.1.1.10"
ipv6_range = '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420'
- trex_profile._set_proto_addr(ETHERNET, SRC, ether_range)
- trex_profile._set_proto_addr(ETHERNET, DST, ether_range)
- trex_profile._set_proto_addr(IP, SRC, ip_range)
- trex_profile._set_proto_addr(IP, DST, ip_range)
- trex_profile._set_proto_addr(IPv6, SRC, ipv6_range)
- trex_profile._set_proto_addr(IPv6, DST, ipv6_range)
- trex_profile._set_proto_addr(UDP, SRC_PORT, "5060-5090")
- trex_profile._set_proto_addr(UDP, DST_PORT, "5060")
+ trex_profile._set_proto_addr(trex_traffic_profile.ETHERNET,
+ trex_traffic_profile.SRC, ether_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.ETHERNET,
+ trex_traffic_profile.DST, ether_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IP,
+ trex_traffic_profile.SRC, ip_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IP,
+ trex_traffic_profile.DST, ip_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IPv6,
+ trex_traffic_profile.SRC, ipv6_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IPv6,
+ trex_traffic_profile.DST, ipv6_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.UDP,
+ trex_traffic_profile.SRC_PORT,
+ '5060-5090')
+ trex_profile._set_proto_addr(trex_traffic_profile.UDP,
+ trex_traffic_profile.DST_PORT, '5060')
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index f75fa226a..ce32a31e2 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -16,10 +16,13 @@
import unittest
import mock
import os
+import re
+import copy
from yardstick.tests import STL_MOCKS
from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
from yardstick.common import utils
+from yardstick.common import exceptions
STLClient = mock.MagicMock()
@@ -28,6 +31,7 @@ stl_patch.start()
if stl_patch:
from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxVnf
+ from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper
@@ -311,7 +315,6 @@ class TestAclApproxVnf(unittest.TestCase):
acl_approx_vnf._run()
acl_approx_vnf.ssh_helper.run.assert_called_once()
- @mock.patch("yardstick.network_services.vnf_generic.vnf.acl_vnf.YangModel")
@mock.patch.object(utils, 'find_relative_file')
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
@mock.patch(SSH_HELPER)
@@ -350,6 +353,145 @@ class TestAclApproxVnf(unittest.TestCase):
class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase):
+ ACL_CONFIG = {"access-list-entries": [{
+ "actions": [
+ "count",
+ {"fwd": {
+ "port": 0
+ }
+ }
+ ],
+ "matches": {
+ "destination-ipv4-network": "152.16.0.0/24",
+ "destination-port-range": {
+ "lower-port": 0,
+ "upper-port": 65535
+ },
+ "source-ipv4-network": "0.0.0.0/0",
+ "source-port-range": {
+ "lower-port": 0,
+ "upper-port": 65535
+ },
+ "protocol-mask": 255,
+ "protocol": 127,
+ "priority": 1
+ },
+ "rule-name": "rule1588"
+ }
+ ]}
+
+ def test_get_default_flows(self):
+ """Check if default ACL SampleVNF CLI commands are
+ generated correctly"""
+ ssh_helper = mock.Mock()
+ vnfd_helper = VnfdHelper({'vdu': [
+ {'external-interface': [
+ {
+ 'virtual-interface': {
+ 'local_ip': '152.16.100.19',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'dst_ip': '152.16.100.20',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'local_ip': '152.16.40.19',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'dst_ip': '152.16.40.20',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ }
+ ]}
+ ]})
+ setup_helper = AclApproxSetupEnvSetupEnvHelper(vnfd_helper, ssh_helper, None)
+ self.check_acl_commands(setup_helper.get_flows_config(), [
+ # format: (<cli pattern>, <number of expected matches>)
+ ("^p action add [0-9]+ accept$", 2),
+ ("^p action add [0-9]+ count$", 2),
+ ("^p action add [0-9]+ fwd 1$", 1),
+ ("^p action add [0-9]+ fwd 0$", 1),
+ ("^p acl add 1 152.16.100.0 24 152.16.40.0 24 0 65535 0 65535 0 0 [0-9]+$", 1),
+ ("^p acl add 1 152.16.40.0 24 152.16.100.0 24 0 65535 0 65535 0 0 [0-9]+$", 1),
+ ("^p acl applyruleset$", 1)
+ ])
+
+ @mock.patch.object(AclApproxSetupEnvSetupEnvHelper, 'get_default_flows')
+ def test_get_flows_config(self, get_default_flows):
+        """Check that the provided ACL config is correctly converted
+        to ACL SampleVNF CLI commands"""
+ ssh_helper = mock.Mock()
+ setup_helper = AclApproxSetupEnvSetupEnvHelper(None, ssh_helper, None)
+ get_default_flows.return_value = ({}, [])
+ self.check_acl_commands(setup_helper.get_flows_config(self.ACL_CONFIG), [
+ # format: (<cli pattern>, <number of expected matches>)
+ ("^p action add [0-9]+ count$", 1),
+ ("^p action add [0-9]+ fwd 0$", 1),
+ ("^p acl add 1 0.0.0.0 0 152.16.0.0 24 0 65535 0 65535 127 0 [0-9]+$", 1),
+ ("^p acl applyruleset$", 1)
+ ])
+
+ @mock.patch.object(AclApproxSetupEnvSetupEnvHelper, 'get_default_flows')
+ def test_get_flows_config_invalid_action(self, get_default_flows):
+ """Check if incorrect ACL config fails to convert
+ to ACL SampleVNF CLI commands"""
+ ssh_helper = mock.Mock()
+ setup_helper = AclApproxSetupEnvSetupEnvHelper(None, ssh_helper, None)
+ get_default_flows.return_value = ({}, [])
+        # duplicate config and add invalid action
+ acl_config = copy.deepcopy(self.ACL_CONFIG)
+ acl_config['access-list-entries'][0]["actions"].append({"xnat": {}})
+ self.assertRaises(exceptions.AclUknownActionTemplate,
+ setup_helper.get_flows_config, acl_config)
+
+ @mock.patch.object(AclApproxSetupEnvSetupEnvHelper, 'get_default_flows')
+ def test_get_flows_config_invalid_action_param(self, get_default_flows):
+ """Check if ACL config with invalid action parameter fails to convert
+ to ACL SampleVNF CLI commands"""
+ ssh_helper = mock.Mock()
+ setup_helper = AclApproxSetupEnvSetupEnvHelper(None, ssh_helper, None)
+ get_default_flows.return_value = ({}, [])
+ # duplicate config and add action with invalid parameter
+ acl_config = copy.deepcopy(self.ACL_CONFIG)
+ acl_config['access-list-entries'][0]["actions"].append(
+ {"nat": {"xport": 0}})
+ self.assertRaises(exceptions.AclMissingActionArguments,
+ setup_helper.get_flows_config, acl_config)
+
+ def check_acl_commands(self, config, expected_cli_patterns):
+        """Check that the expected ACL CLI commands (given as a list of
+        patterns, `expected_cli_patterns` parameter) are present in the
+        SampleVNF ACL configuration (given as a multiline string, `config`
+        parameter)"""
+ # Example of expected config:
+ # ---------------------------
+ # p action add 1 accept
+ # p action add 1 fwd 1
+ # p action add 2 accept
+ # p action add 2 count
+ # p action add 2 fwd 0
+ # p acl add 1 152.16.100.0 24 152.16.40.0 24 0 65535 0 65535 0 0 1
+ # p acl add 1 152.16.40.0 24 152.16.100.0 24 0 65535 0 65535 0 0 2
+ # p acl applyruleset
+ # ---------------------------
+        # NOTE: The config above contains action ids that are generated at
+        # runtime and therefore not known in advance, so the example ACL
+        # config cannot simply be compared with the configuration returned
+        # by get_flows_config(). Instead, CLI patterns (regular expressions)
+        # are used to find the required SampleVNF CLI commands in the
+        # multiline string (the SampleVNF ACL configuration).
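+        # Illustrative example (for readability only, not an extra
+        # assertion): the pattern "^p action add [0-9]+ fwd 0$" is meant to
+        # match a line such as "p action add 2 fwd 0" from the sample config
+        # above, whatever action id is generated at runtime.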
+ for pattern, num_of_match in expected_cli_patterns:
+ # format: (<cli pattern>, <number of expected matches>)
+ result = re.findall(pattern, config, re.MULTILINE)
+ self.assertEqual(len(result), num_of_match)
+
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
@mock.patch.object(utils, 'find_relative_file')
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
@@ -359,14 +501,17 @@ class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase):
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+ scenario_helper.options = {}
scenario_helper.all_options = {}
acl_approx_setup_helper = AclApproxSetupEnvSetupEnvHelper(vnfd_helper,
ssh_helper,
scenario_helper)
+ acl_approx_setup_helper.get_flows_config = mock.Mock()
acl_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
acl_approx_setup_helper.ssh_helper.all_ports = mock.Mock()
acl_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1])
expected = 'sudo tool_path -p 0x3 -f /tmp/acl_config -s /tmp/acl_script --hwlb 3'
self.assertEqual(acl_approx_setup_helper.build_config(), expected)
+ acl_approx_setup_helper.get_flows_config.assert_called_once()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
index bd8f53e21..6b6f3ef34 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
@@ -11,30 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
from copy import deepcopy
-import os
+import time
import mock
import unittest
-
+from yardstick.benchmark.contexts import base as ctx_base
from yardstick.common import utils
-
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
-
-
-
-from yardstick.network_services.vnf_generic.vnf.cgnapt_vnf import CgnaptApproxVnf, \
- CgnaptApproxSetupEnvHelper
+from yardstick.common import process
from yardstick.network_services.vnf_generic.vnf import cgnapt_vnf
-from yardstick.network_services.nfvi.resource import ResourceProfile
-
-TEST_FILE_YAML = 'nsb_test_case.yaml'
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.nfvi import resource
+TEST_FILE_YAML = 'nsb_test_case.yaml'
name = 'vnf__0'
@@ -42,8 +34,9 @@ class TestCgnaptApproxSetupEnvHelper(unittest.TestCase):
def test__generate_ip_from_pool(self):
- ip = CgnaptApproxSetupEnvHelper._generate_ip_from_pool("1.2.3.4")
- self.assertEqual(next(ip), '1.2.3.4')
+ _ip = '1.2.3.4'
+ ip = cgnapt_vnf.CgnaptApproxSetupEnvHelper._generate_ip_from_pool(_ip)
+ self.assertEqual(next(ip), _ip)
self.assertEqual(next(ip), '1.2.4.4')
self.assertEqual(next(ip), '1.2.5.4')
@@ -62,19 +55,22 @@ link 1 up
"""
header = "This is a header"
- out = CgnaptApproxSetupEnvHelper._update_cgnat_script_file(header, sample.splitlines())
+ out = cgnapt_vnf.CgnaptApproxSetupEnvHelper._update_cgnat_script_file(
+ header, sample.splitlines())
self.assertNotIn("This is a header", out)
def test__get_cgnapt_config(self):
vnfd_helper = mock.MagicMock()
vnfd_helper.port_pairs.uplink_ports = [{"name": 'a'}, {"name": "b"}, {"name": "c"}]
- helper = CgnaptApproxSetupEnvHelper(vnfd_helper, mock.Mock(), mock.Mock())
+ helper = cgnapt_vnf.CgnaptApproxSetupEnvHelper(
+ vnfd_helper, mock.Mock(), mock.Mock())
result = helper._get_cgnapt_config()
self.assertIsNotNone(result)
def test_scale(self):
- helper = CgnaptApproxSetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock())
+ helper = cgnapt_vnf.CgnaptApproxSetupEnvHelper(
+ mock.Mock(), mock.Mock(), mock.Mock())
with self.assertRaises(NotImplementedError):
helper.scale()
@@ -87,11 +83,11 @@ link 1 up
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+ scenario_helper.options = {}
scenario_helper.all_options = {}
- cgnat_approx_setup_helper = CgnaptApproxSetupEnvHelper(vnfd_helper,
- ssh_helper,
- scenario_helper)
+ cgnat_approx_setup_helper = cgnapt_vnf.CgnaptApproxSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
cgnat_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
cgnat_approx_setup_helper.ssh_helper.all_ports = mock.Mock()
@@ -100,7 +96,7 @@ link 1 up
self.assertEqual(cgnat_approx_setup_helper.build_config(), expected)
-@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process")
+@mock.patch.object(sample_vnf, 'Process')
class TestCgnaptApproxVnf(unittest.TestCase):
VNFD = {'vnfd:vnfd-catalog':
{'vnfd':
@@ -220,7 +216,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
'ip': '1.2.1.1',
'interfaces':
{'xe0': {'local_iface_name': 'ens513f0',
- 'vld_id': CgnaptApproxVnf.DOWNLINK,
+ 'vld_id': cgnapt_vnf.CgnaptApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.20',
'dst_mac': '00:00:00:00:00:01',
@@ -248,7 +244,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
'ip': '1.2.1.1',
'interfaces':
{'xe0': {'local_iface_name': 'ens785f0',
- 'vld_id': CgnaptApproxVnf.UPLINK,
+ 'vld_id': cgnapt_vnf.CgnaptApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.20',
'dst_mac': '00:00:00:00:00:02',
@@ -273,7 +269,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
'ip': '1.2.1.1',
'interfaces':
{'xe0': {'local_iface_name': 'ens786f0',
- 'vld_id': CgnaptApproxVnf.UPLINK,
+ 'vld_id': cgnapt_vnf.CgnaptApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.19',
'dst_mac': '00:00:00:00:00:04',
@@ -283,7 +279,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
'vpci': '0000:05:00.0',
'dpdk_port_num': 0},
'xe1': {'local_iface_name': 'ens786f1',
- 'vld_id': CgnaptApproxVnf.DOWNLINK,
+ 'vld_id': cgnapt_vnf.CgnaptApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.19',
'dst_mac': '00:00:00:00:00:03',
@@ -322,81 +318,59 @@ class TestCgnaptApproxVnf(unittest.TestCase):
def test___init__(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
self.assertIsNone(cgnapt_approx_vnf._vnf_process)
- @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
- @mock.patch(SSH_HELPER)
- def test_collect_kpi(self, ssh, *args):
- mock_ssh(ssh)
-
+ @mock.patch.object(process, 'check_if_process_failed')
+ def test_collect_kpi(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
cgnapt_approx_vnf._vnf_process = mock.MagicMock(
**{"is_alive.return_value": True, "exitcode": None})
cgnapt_approx_vnf.q_in = mock.MagicMock()
cgnapt_approx_vnf.q_out = mock.MagicMock()
cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
- cgnapt_approx_vnf.resource = mock.Mock(autospec=ResourceProfile)
+ cgnapt_approx_vnf.resource = mock.Mock(
+ autospec=resource.ResourceProfile)
result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0}
- self.assertEqual(result, cgnapt_approx_vnf.collect_kpi())
-
- @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
- @mock.patch(SSH_HELPER)
- def test_vnf_execute_command(self, ssh, *args):
- mock_ssh(ssh)
+ with mock.patch.object(cgnapt_approx_vnf, 'get_stats',
+ return_value=''):
+ self.assertEqual(result, cgnapt_approx_vnf.collect_kpi())
+ @mock.patch.object(time, 'sleep')
+ def test_vnf_execute_command(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf.q_in = mock.MagicMock()
- cgnapt_approx_vnf.q_out = mock.MagicMock()
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf.q_in = mock.Mock()
+ cgnapt_approx_vnf.q_out = mock.Mock()
cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
- cmd = "quit"
- self.assertEqual("", cgnapt_approx_vnf.vnf_execute(cmd))
-
- @mock.patch(SSH_HELPER)
- def test_get_stats(self, ssh, *args):
- mock_ssh(ssh)
+ self.assertEqual("", cgnapt_approx_vnf.vnf_execute('quit'))
+ def test_get_stats(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf.q_in = mock.MagicMock()
- cgnapt_approx_vnf.q_out = mock.MagicMock()
- cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
- result = \
- "CG-NAPT(.*\n)*Received 100, Missed 0, Dropped 0,Translated 100,ingress"
- cgnapt_approx_vnf.vnf_execute = mock.Mock(return_value=result)
- self.assertListEqual(list(result), list(cgnapt_approx_vnf.get_stats()))
-
- def _get_file_abspath(self, filename):
- curr_path = os.path.dirname(os.path.abspath(__file__))
- file_path = os.path.join(curr_path, filename)
- return file_path
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.cgnapt_vnf.hex")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.cgnapt_vnf.eval")
- @mock.patch('yardstick.network_services.vnf_generic.vnf.cgnapt_vnf.open')
- @mock.patch(SSH_HELPER)
- def test_run_vcgnapt(self, ssh, *args):
- mock_ssh(ssh)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ with mock.patch.object(cgnapt_approx_vnf, 'vnf_execute') as mock_exec:
+ mock_exec.return_value = 'output'
+ self.assertEqual('output', cgnapt_approx_vnf.get_stats())
+
+ mock_exec.assert_called_once_with('p cgnapt stats')
+ def test_run_vcgnapt(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf._build_config = mock.MagicMock()
- cgnapt_approx_vnf.queue_wrapper = mock.MagicMock()
- cgnapt_approx_vnf.ssh_helper = mock.MagicMock()
- cgnapt_approx_vnf.ssh_helper.run = mock.MagicMock()
- cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
- cgnapt_approx_vnf._run()
- cgnapt_approx_vnf.ssh_helper.run.assert_called_once()
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf.ssh_helper = mock.Mock()
+ cgnapt_approx_vnf.setup_helper = mock.Mock()
+ with mock.patch.object(cgnapt_approx_vnf, '_build_config'), \
+ mock.patch.object(cgnapt_approx_vnf, '_build_run_kwargs'):
+ cgnapt_approx_vnf._run()
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
- @mock.patch(SSH_HELPER)
- def test_instantiate(self, ssh, *args):
- mock_ssh(ssh)
+ cgnapt_approx_vnf.ssh_helper.run.assert_called_once()
+ cgnapt_approx_vnf.setup_helper.kill_vnf.assert_called_once()
+ @mock.patch.object(ctx_base.Context, 'get_context_from_server')
+ def test_instantiate(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
cgnapt_approx_vnf.deploy_helper = mock.MagicMock()
cgnapt_approx_vnf.resource_helper = mock.MagicMock()
cgnapt_approx_vnf._build_config = mock.MagicMock()
@@ -405,51 +379,25 @@ class TestCgnaptApproxVnf(unittest.TestCase):
cgnapt_approx_vnf.q_out.put("pipeline>")
cgnapt_vnf.WAIT_TIME = 3
self.scenario_cfg.update({"nodes": {"vnf__0": ""}})
- self.assertIsNone(cgnapt_approx_vnf.instantiate(self.scenario_cfg,
- self.context_cfg))
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
- @mock.patch(SSH_HELPER)
- def test_terminate(self, ssh, *args):
- mock_ssh(ssh)
-
- vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf._vnf_process = mock.MagicMock()
- cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
- cgnapt_approx_vnf.used_drivers = {"01:01.0": "i40e",
- "01:01.1": "i40e"}
- cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
- cgnapt_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
- cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
- self.assertIsNone(cgnapt_approx_vnf.terminate())
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
- @mock.patch(SSH_HELPER)
- def test__vnf_up_post(self, ssh, *args):
- mock_ssh(ssh)
+ with mock.patch.object(cgnapt_approx_vnf, '_start_vnf'):
+ self.assertIsNone(cgnapt_approx_vnf.instantiate(
+ self.scenario_cfg, self.context_cfg))
+ @mock.patch.object(time, 'sleep')
+ def test__vnf_up_post(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
self.scenario_cfg['options'][name]['napt'] = 'static'
-
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf._vnf_process = mock.MagicMock()
- cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
- cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf.vnf_execute = mock.Mock()
cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
- cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
- cgnapt_approx_vnf._vnf_up_post()
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
- @mock.patch(SSH_HELPER)
- def test__vnf_up_post_short(self, ssh, *args):
- mock_ssh(ssh)
+ with mock.patch.object(cgnapt_approx_vnf, 'setup_helper') as \
+ mock_setup_helper:
+ mock_setup_helper._generate_ip_from_pool.return_value = ['ip1']
+ mock_setup_helper._get_cgnapt_config.return_value = ['gw_ip1']
+ cgnapt_approx_vnf._vnf_up_post()
+ def test__vnf_up_post_short(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf._vnf_process = mock.MagicMock()
- cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
- cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
- cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
cgnapt_approx_vnf._vnf_up_post()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
index c74a6cf75..1c3acb6e5 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
@@ -208,13 +208,14 @@ class TestProxTestDataTuple(unittest.TestCase):
my_mock_logger = mock.MagicMock()
prox_test_data = ProxTestDataTuple(1, 2, 3, 4, 5, [6.1, 6.9, 6.4], 7, 8, 9)
prox_test_data.log_data()
- self.assertEqual(my_mock_logger.debug.call_count, 0)
- self.assertEqual(mock_logger.debug.call_count, 0)
+
+ my_mock_logger.debug.assert_not_called()
+ mock_logger.debug.assert_not_called()
mock_logger.debug.reset_mock()
prox_test_data.log_data(my_mock_logger)
- self.assertEqual(my_mock_logger.debug.call_count, 0)
- self.assertEqual(mock_logger.debug.call_count, 0)
+ my_mock_logger.assert_not_called()
+ mock_logger.debug.assert_not_called()
class TestPacketDump(unittest.TestCase):
@@ -293,7 +294,12 @@ no data length value
class TestProxSocketHelper(unittest.TestCase):
def setUp(self):
- self.mock_time_sleep = mock.patch.object(time, 'sleep').start()
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_time_sleep.stop()
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
def test___init__(self, mock_socket):
@@ -306,7 +312,7 @@ class TestProxSocketHelper(unittest.TestCase):
mock_sock = mock.MagicMock()
prox = ProxSocketHelper(mock_sock)
prox.connect('10.20.30.40', 23456)
- self.assertEqual(mock_sock.connect.call_count, 1)
+ mock_sock.connect.assert_called_once()
def test_get_sock(self):
mock_sock = mock.MagicMock()
@@ -554,6 +560,31 @@ class TestProxSocketHelper(unittest.TestCase):
result = prox.core_stats([3, 4, 5], 16)
self.assertEqual(result, expected)
+ def test_multi_port_stats(self):
+
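+        # The mocked get_data() replies below follow the port statistics
+        # format assumed by this test: one semicolon-separated record per
+        # port, each record a comma-separated list of counters starting with
+        # the port id; records of unexpected length or with an unknown port
+        # id are expected to yield zeroed results.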
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value='0,1,2,3,4,5;1,1,2,3,4,5')
+ expected = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5]]
+ result = prox.multi_port_stats([0, 1])
+ self.assertEqual(result, expected)
+
+ prox.get_data = mock.MagicMock(return_value='0,1,2,3,4,5;1,1,2,3,4,5')
+ result = prox.multi_port_stats([0])
+ expected = [0]
+ self.assertEqual(result, expected)
+
+ prox.get_data = mock.MagicMock(return_value='0,1,2,3;1,1,2,3,4,5')
+ result = prox.multi_port_stats([0, 1])
+ expected = [0] * 2
+ self.assertEqual(result, expected)
+
+ prox.get_data = mock.MagicMock(return_value='99,1,2,3,4,5;1,1,2,3,4,5')
+ expected = [0] * 2
+ result = prox.multi_port_stats([0, 1])
+ self.assertEqual(result, expected)
+
+
def test_port_stats(self):
port_stats = [
','.join(str(n) for n in range(3, 15)),
@@ -616,7 +647,7 @@ class TestProxSocketHelper(unittest.TestCase):
mock_socket = mock.MagicMock()
prox = ProxSocketHelper(mock_socket)
prox.dump_rx(3, 5, 8)
- self.assertEqual(mock_socket.sendall.call_count, 1)
+ mock_socket.sendall.assert_called_once()
def test_quit(self):
mock_socket = mock.MagicMock()
@@ -1540,34 +1571,34 @@ class TestProxDataHelper(unittest.TestCase):
vnfd_helper.port_pairs.all_ports = list(range(4))
sut = mock.MagicMock()
- sut.port_stats.return_value = list(range(10))
+ sut.multi_port_stats.return_value = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5],
+ [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]]
data_helper = ProxDataHelper(
vnfd_helper, sut, pkt_size, 25, None,
constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
- self.assertEqual(data_helper.rx_total, 6)
- self.assertEqual(data_helper.tx_total, 7)
+ self.assertEqual(data_helper.rx_total, 4)
+ self.assertEqual(data_helper.tx_total, 8)
self.assertEqual(data_helper.requested_pps, 6.25e6)
def test_samples(self):
vnfd_helper = mock.MagicMock()
- vnfd_helper.port_pairs.all_ports = list(range(4))
- vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)]
+ vnfd_helper.ports_iter.return_value = [('xe0', 0), ('xe1', 1)]
sut = mock.MagicMock()
- sut.port_stats.return_value = list(range(10))
+ sut.multi_port_stats.return_value = [[0, 1, 2, 3, 4, 5], [1, 11, 12, 3, 4, 5]]
data_helper = ProxDataHelper(vnfd_helper, sut, None, None, None, None)
expected = {
- 'xe1': {
- 'in_packets': 6,
- 'out_packets': 7,
+ 'xe0': {
+ 'in_packets': 1,
+ 'out_packets': 2,
},
- 'xe2': {
- 'in_packets': 6,
- 'out_packets': 7,
+ 'xe1': {
+ 'in_packets': 11,
+ 'out_packets': 12,
},
}
result = data_helper.samples
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
index fa2d462ab..b23854e9f 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -339,16 +339,17 @@ class TestProxApproxVnf(unittest.TestCase):
mock_ssh(ssh)
resource_helper = mock.MagicMock()
- resource_helper.execute.return_value = list(range(12))
+ resource_helper.execute.return_value = [[0, 1, 2, 3, 4, 5], [1, 1, 2, 3, 4, 5],
+ [2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]]
resource_helper.collect_collectd_kpi.return_value = {'core': {'result': 234}}
prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
prox_approx_vnf.resource_helper = resource_helper
expected = {
- 'packets_in': 6,
- 'packets_dropped': 1,
- 'packets_fwd': 7,
+ 'packets_in': 4,
+ 'packets_dropped': 4,
+ 'packets_fwd': 8,
'collect_stats': {'core': {'result': 234}},
}
result = prox_approx_vnf.collect_kpi()
@@ -436,7 +437,7 @@ class TestProxApproxVnf(unittest.TestCase):
prox_approx_vnf.resource_helper = resource_helper = mock.Mock()
prox_approx_vnf._vnf_up_post()
- self.assertEqual(resource_helper.up_post.call_count, 1)
+ resource_helper.up_post.assert_called_once()
@mock.patch(SSH_HELPER)
def test_vnf_execute_oserror(self, ssh, *args):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index b46ba657c..ce849d174 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
from copy import deepcopy
@@ -19,37 +18,29 @@ import unittest
import mock
import six
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
-from yardstick.tests import STL_MOCKS
from yardstick.benchmark.contexts.base import Context
from yardstick.common import exceptions as y_exceptions
from yardstick.common import utils
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
-
-
-class MockError(BaseException):
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFDeployHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+from yardstick.tests.unit.network_services.vnf_generic.vnf import test_base
+
+
+class MockError(Exception):
pass
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf import sample_vnf
- from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFDeployHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import ResourceHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
-
-
class TestVnfSshHelper(unittest.TestCase):
VNFD_0 = {
@@ -192,12 +183,12 @@ class TestVnfSshHelper(unittest.TestCase):
self.assertFalse(ssh_helper.is_connected)
cfg_file = ssh_helper.upload_config_file('my/prefix', 'my content')
self.assertTrue(ssh_helper.is_connected)
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
self.assertTrue(cfg_file.startswith('/tmp'))
cfg_file = ssh_helper.upload_config_file('/my/prefix', 'my content')
self.assertTrue(ssh_helper.is_connected)
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
self.assertEqual(cfg_file, '/my/prefix')
def test_join_bin_path(self):
@@ -234,17 +225,17 @@ class TestVnfSshHelper(unittest.TestCase):
self.assertFalse(ssh_helper.is_connected)
ssh_helper.provision_tool()
self.assertTrue(ssh_helper.is_connected)
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
- self.assertEqual(mock_provision_tool.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
+ mock_provision_tool.assert_called_once()
ssh_helper.provision_tool(tool_file='my_tool.sh')
self.assertTrue(ssh_helper.is_connected)
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
self.assertEqual(mock_provision_tool.call_count, 2)
ssh_helper.provision_tool('tool_path', 'my_tool.sh')
self.assertTrue(ssh_helper.is_connected)
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
self.assertEqual(mock_provision_tool.call_count, 3)
@@ -572,6 +563,7 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
scenario_helper.vnf_cfg = {}
+ scenario_helper.options = {}
scenario_helper.all_options = {}
dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
@@ -579,10 +571,11 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
result = dpdk_setup_helper.build_config()
self.assertEqual(result, expected)
self.assertGreaterEqual(ssh_helper.upload_config_file.call_count, 2)
- self.assertGreaterEqual(mock_find.call_count, 1)
- self.assertGreaterEqual(mock_multi_port_config.generate_config.call_count, 1)
- self.assertGreaterEqual(mock_multi_port_config.generate_script.call_count, 1)
+ mock_find.assert_called()
+ mock_multi_port_config.generate_config.assert_called()
+ mock_multi_port_config.generate_script.assert_called()
+ scenario_helper.options = {'rules': 'fake_file'}
scenario_helper.vnf_cfg = {'file': 'fake_file'}
dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
mock_open_rf.side_effect = mock.mock_open(read_data='fake_data')
@@ -590,12 +583,12 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
result = dpdk_setup_helper.build_config()
- mock_open_rf.assert_called_once()
+ mock_open_rf.assert_called()
self.assertEqual(result, expected)
self.assertGreaterEqual(ssh_helper.upload_config_file.call_count, 2)
- self.assertGreaterEqual(mock_find.call_count, 1)
- self.assertGreaterEqual(mock_multi_port_config.generate_config.call_count, 1)
- self.assertGreaterEqual(mock_multi_port_config.generate_script.call_count, 1)
+ mock_find.assert_called()
+ mock_multi_port_config.generate_config.assert_called()
+ mock_multi_port_config.generate_script.assert_called()
def test__build_pipeline_kwargs(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
@@ -1001,203 +994,69 @@ class TestClientResourceHelper(unittest.TestCase):
}
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.LOG')
- @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.STLError',
- new_callable=lambda: MockError)
- def test_get_stats_not_connected(self, mock_state_error, *args):
+ @mock.patch.object(sample_vnf, 'STLError', new_callable=lambda: MockError)
+ def test_get_stats_not_connected(self, mock_stl_error, *args):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
- client_resource_helper.client = mock.MagicMock()
- client_resource_helper.client.get_stats.side_effect = mock_state_error
+ client_resource_helper.client = mock.Mock()
+ client_resource_helper.client.get_stats.side_effect = mock_stl_error
self.assertEqual(client_resource_helper.get_stats(), {})
- self.assertEqual(client_resource_helper.client.get_stats.call_count, 1)
-
- def test_generate_samples(self):
- vnfd_helper = VnfdHelper(self.VNFD_0)
- ssh_helper = mock.Mock()
- scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
- client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
- client_resource_helper.client = mock.MagicMock()
- client_resource_helper.client.get_stats.return_value = {
- 0: {
- 'rx_pps': 5.5,
- 'tx_pps': 4.9,
- 'rx_bps': 234.78,
- 'tx_bps': 243.11,
- 'ipackets': 34251,
- 'opackets': 52342,
- },
- 1: {
- 'tx_pps': 5.9,
- 'rx_bps': 434.78,
- 'opackets': 48791,
- },
- }
-
- expected = {
- 'xe0': {
- "rx_throughput_fps": 5.5,
- "tx_throughput_fps": 4.9,
- "rx_throughput_mbps": 234.78,
- "tx_throughput_mbps": 243.11,
- "in_packets": 34251,
- "out_packets": 52342,
- },
- 'xe1': {
- "rx_throughput_fps": 0.0,
- "tx_throughput_fps": 5.9,
- "rx_throughput_mbps": 434.78,
- "tx_throughput_mbps": 0.0,
- "in_packets": 0,
- "out_packets": 48791,
- },
- }
- ports = vnfd_helper.port_nums(vnfd_helper.port_pairs.all_ports)
- result = client_resource_helper.generate_samples(ports)
- self.assertDictEqual(result, expected)
-
- def test_generate_samples_with_key(self):
- vnfd_helper = VnfdHelper(self.VNFD_0)
- ssh_helper = mock.Mock()
- scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
- client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
- client_resource_helper.client = mock.MagicMock()
- client_resource_helper.client.get_stats.return_value = {
- 'key_name': 'key_value',
- 0: {
- 'rx_pps': 5.5,
- 'tx_pps': 4.9,
- 'rx_bps': 234.78,
- 'tx_bps': 243.11,
- 'ipackets': 34251,
- 'opackets': 52342,
- },
- 1: {
- 'tx_pps': 5.9,
- 'rx_bps': 434.78,
- 'opackets': 48791,
- },
- }
-
- expected = {
- 'xe0': {
- 'key_name': 'key_value',
- "rx_throughput_fps": 5.5,
- "tx_throughput_fps": 4.9,
- "rx_throughput_mbps": 234.78,
- "tx_throughput_mbps": 243.11,
- "in_packets": 34251,
- "out_packets": 52342,
- },
- 'xe1': {
- 'key_name': 'key_value',
- "rx_throughput_fps": 0.0,
- "tx_throughput_fps": 5.9,
- "rx_throughput_mbps": 434.78,
- "tx_throughput_mbps": 0.0,
- "in_packets": 0,
- "out_packets": 48791,
- },
- }
- ports = vnfd_helper.port_nums(vnfd_helper.port_pairs.all_ports)
- result = client_resource_helper.generate_samples(ports, 'key_name')
- self.assertDictEqual(result, expected)
-
- def test_generate_samples_with_key_and_default(self):
- vnfd_helper = VnfdHelper(self.VNFD_0)
- ssh_helper = mock.Mock()
- scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
- client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
- client_resource_helper.client = mock.MagicMock()
- client_resource_helper.client.get_stats.return_value = {
- 0: {
- 'rx_pps': 5.5,
- 'tx_pps': 4.9,
- 'rx_bps': 234.78,
- 'tx_bps': 243.11,
- 'ipackets': 34251,
- 'opackets': 52342,
- },
- 1: {
- 'tx_pps': 5.9,
- 'rx_bps': 434.78,
- 'opackets': 48791,
- },
- }
-
- expected = {
- 'xe0': {
- 'key_name': 'default',
- "rx_throughput_fps": 5.5,
- "tx_throughput_fps": 4.9,
- "rx_throughput_mbps": 234.78,
- "tx_throughput_mbps": 243.11,
- "in_packets": 34251,
- "out_packets": 52342,
- },
- 'xe1': {
- 'key_name': 'default',
- "rx_throughput_fps": 0.0,
- "tx_throughput_fps": 5.9,
- "rx_throughput_mbps": 434.78,
- "tx_throughput_mbps": 0.0,
- "in_packets": 0,
- "out_packets": 48791,
- },
- }
- ports = vnfd_helper.port_nums(vnfd_helper.port_pairs.all_ports)
- result = client_resource_helper.generate_samples(ports, 'key_name', 'default')
- self.assertDictEqual(result, expected)
+ client_resource_helper.client.get_stats.assert_called_once()
def test_clear_stats(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
client_resource_helper.client = mock.Mock()
self.assertIsNone(client_resource_helper.clear_stats())
- self.assertEqual(client_resource_helper.client.clear_stats.call_count, 1)
+ self.assertEqual(
+ client_resource_helper.client.clear_stats.call_count, 1)
def test_clear_stats_of_ports(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
client_resource_helper.client = mock.Mock()
self.assertIsNone(client_resource_helper.clear_stats([3, 4]))
- self.assertEqual(client_resource_helper.client.clear_stats.call_count, 1)
+ self.assertEqual(
+ client_resource_helper.client.clear_stats.call_count, 1)
def test_start(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
client_resource_helper.client = mock.Mock()
self.assertIsNone(client_resource_helper.start())
- self.assertEqual(client_resource_helper.client.start.call_count, 1)
+ client_resource_helper.client.start.assert_called_once()
def test_start_ports(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
client_resource_helper.client = mock.Mock()
self.assertIsNone(client_resource_helper.start([3, 4]))
- self.assertEqual(client_resource_helper.client.start.call_count, 1)
+ client_resource_helper.client.start.assert_called_once()
def test_collect_kpi_with_queue(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
@@ -1219,17 +1078,15 @@ class TestClientResourceHelper(unittest.TestCase):
self.assertDictEqual(result, expected)
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
- @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.LOG')
- @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.STLError',
- new_callable=lambda: MockError)
- def test__connect_with_failures(self, mock_error, *args):
+ @mock.patch.object(sample_vnf, 'STLError')
+ def test__connect_with_failures(self, mock_stl_error, *args):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
client = mock.MagicMock()
- client.connect.side_effect = mock_error
+ client.connect.side_effect = mock_stl_error(msg='msg')
self.assertIs(client_resource_helper._connect(client), client)
@@ -1405,7 +1262,7 @@ class TestSampleVNFDeployHelper(unittest.TestCase):
self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1'))
sample_vnf_deploy_helper.DISABLE_DEPLOY = True
self.assertEqual(ssh_helper.execute.call_count, 5)
- self.assertEqual(ssh_helper.put.call_count, 1)
+ ssh_helper.put.assert_called_once()
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
@mock.patch('subprocess.check_output')
@@ -1420,7 +1277,7 @@ class TestSampleVNFDeployHelper(unittest.TestCase):
self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1'))
self.assertEqual(ssh_helper.execute.call_count, 5)
- self.assertEqual(ssh_helper.put.call_count, 1)
+ ssh_helper.put.assert_called_once()
@mock.patch('subprocess.check_output')
def test_deploy_vnfs_early_success(self, *args):
@@ -1433,8 +1290,8 @@ class TestSampleVNFDeployHelper(unittest.TestCase):
sample_vnf_deploy_helper.DISABLE_DEPLOY = False
self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1'))
- self.assertEqual(ssh_helper.execute.call_count, 1)
- self.assertEqual(ssh_helper.put.call_count, 0)
+ ssh_helper.execute.assert_called_once()
+ ssh_helper.put.assert_not_called()
class TestScenarioHelper(unittest.TestCase):
@@ -1678,7 +1535,7 @@ class TestSampleVnf(unittest.TestCase):
@mock.patch("yardstick.ssh.SSH")
def test_instantiate(self, ssh):
- mock_ssh(ssh)
+ test_base.mock_ssh(ssh)
nodes = {
'vnf1': 'name1',
@@ -1778,7 +1635,7 @@ class TestSampleVnf(unittest.TestCase):
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
@mock.patch("yardstick.ssh.SSH")
def test_wait_for_instantiate_empty_queue(self, ssh, *args):
- mock_ssh(ssh, exec_result=(1, "", ""))
+ test_base.mock_ssh(ssh, exec_result=(1, "", ""))
queue_size_list = [
0,
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
index 5bebbbf94..59594a3c3 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
@@ -16,12 +16,12 @@
import subprocess
import mock
-import unittest
import six
+import unittest
-from yardstick.tests import STL_MOCKS
from yardstick import ssh
from yardstick.common import utils
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index 956c192aa..dca8098fa 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -18,8 +18,9 @@ import mock
import six
import unittest
-from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_ixia
+from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_ixia
TEST_FILE_YAML = 'nsb_test_case.yaml'
@@ -30,8 +31,7 @@ NAME = "tg__1"
class TestIxiaResourceHelper(unittest.TestCase):
def setUp(self):
- self._mock_IxNextgen = mock.patch.object(tg_rfc2544_ixia,
- 'IxNextgen')
+ self._mock_IxNextgen = mock.patch.object(ixnet_api, 'IxNextgen')
self.mock_IxNextgen = self._mock_IxNextgen.start()
self.addCleanup(self._stop_mocks)
@@ -48,12 +48,10 @@ class TestIxiaResourceHelper(unittest.TestCase):
def test_stop_collect_with_client(self):
mock_client = mock.Mock()
-
ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
-
ixia_resource_helper.client = mock_client
ixia_resource_helper.stop_collect()
- self.assertEqual(mock_client.ix_stop_traffic.call_count, 1)
+ self.assertEqual(1, ixia_resource_helper._terminated.value)
def test_run_traffic(self):
mock_tprofile = mock.Mock()
@@ -70,8 +68,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
self.assertEqual('fake_samples', ixia_rhelper._queue.get())
-@mock.patch(
- "yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.IxNextgen")
+@mock.patch.object(tg_rfc2544_ixia, 'ixnet_api')
class TestIXIATrafficGen(unittest.TestCase):
VNFD = {'vnfd:vnfd-catalog':
{'vnfd':
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 8b1b8a39c..9531b90c4 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -11,44 +11,37 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-import unittest
import mock
+import unittest
-from yardstick.tests import STL_MOCKS
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
-
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.tg_rfc2544_trex import TrexTrafficGenRFC, \
- TrexRfcResourceHelper
- from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_trex
- from yardstick.network_services.traffic_profile.base import TrafficProfile
- from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base \
- import FileAbsPath, mock_ssh
-
-MODULE_PATH = FileAbsPath(__file__)
-get_file_abspath = MODULE_PATH.get_path
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_trex
class TestTrexRfcResouceHelper(unittest.TestCase):
- @mock.patch('yardstick.network_services.helpers.samplevnf_helper.MultiPortConfig')
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_rfc2544_trex.time")
- @mock.patch(SSH_HELPER)
- def test__run_traffic_once(self, ssh, *_):
- mock_ssh(ssh)
+ def test__run_traffic_once(self):
+ mock_setup_helper = mock.Mock()
+ mock_traffic_profile = mock.Mock()
+ mock_traffic_profile.config.duration = 3
+ mock_traffic_profile.execute_traffic.return_value = ('fake_ports',
+ 'port_pg_id_map')
+ mock_traffic_profile.get_drop_percentage.return_value = 'percentage'
+ rfc_rh = tg_rfc2544_trex.TrexRfcResourceHelper(mock_setup_helper)
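+ # TRANSIENT_PERIOD is zeroed so the test does not wait on the real transient interval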
+ rfc_rh.TRANSIENT_PERIOD = 0
+ rfc_rh.rfc2544_helper = mock.Mock()
- mock_traffic_profile = mock.MagicMock(autospec=TrafficProfile,
- **{'get_drop_percentage.return_value': {}})
- sut = TrexRfcResourceHelper(mock.MagicMock(), mock.MagicMock())
- sut.client = mock.MagicMock()
- sut._run_traffic_once(mock_traffic_profile)
+ with mock.patch.object(rfc_rh, '_get_samples') as mock_get_samples:
+ rfc_rh._run_traffic_once(mock_traffic_profile)
+
+ mock_traffic_profile.execute_traffic.assert_called_once_with(rfc_rh)
+ mock_traffic_profile.stop_traffic.assert_called_once_with(rfc_rh)
+ mock_get_samples.assert_has_calls([
+ mock.call('fake_ports', port_pg_id='port_pg_id_map'),
+ mock.call('fake_ports', port_pg_id='port_pg_id_map')])
class TestTrexTrafficGenRFC(unittest.TestCase):
@@ -219,33 +212,24 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
'schema': 'yardstick:task:0.1',
}
- @mock.patch(SSH_HELPER)
- def test___init__(self, ssh):
- mock_ssh(ssh)
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- self.assertIsNotNone(trex_traffic_gen.resource_helper._terminated.value)
+ def setUp(self):
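+ # patch VnfSshHelper so no real SSH connection is attempted; stopped via addCleanup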
+ self._mock_ssh_helper = mock.patch.object(sample_vnf, 'VnfSshHelper')
+ self.mock_ssh_helper = self._mock_ssh_helper.start()
+ self.addCleanup(self._stop_mocks)
- @mock.patch(SSH_HELPER)
- def test_collect_kpi(self, ssh):
- mock_ssh(ssh)
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- self.assertEqual(trex_traffic_gen.collect_kpi(), {})
+ def _stop_mocks(self):
+ self._mock_ssh_helper.stop()
- @mock.patch(SSH_HELPER)
- def test_listen_traffic(self, ssh):
- mock_ssh(ssh)
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- self.assertIsNone(trex_traffic_gen.listen_traffic({}))
-
- @mock.patch(SSH_HELPER)
- def test_instantiate(self, ssh):
- mock_ssh(ssh)
+ def test___init__(self):
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ self.assertIsNotNone(trex_traffic_gen.resource_helper._terminated.value)
- mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ def test_instantiate(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
trex_traffic_gen._start_server = mock.Mock(return_value=0)
trex_traffic_gen.resource_helper = mock.MagicMock()
trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
@@ -274,15 +258,12 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
scenario_cfg.update({"nodes": ["tg_1", "vnf_1"]})
self.assertIsNone(trex_traffic_gen.instantiate(scenario_cfg, {}))
- @mock.patch(SSH_HELPER)
- def test_instantiate_error(self, ssh):
- mock_ssh(ssh, exec_result=(1, "", ""))
-
- mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ def test_instantiate_error(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
trex_traffic_gen.resource_helper = mock.MagicMock()
trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
scenario_cfg = {
@@ -310,29 +291,3 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
},
}
trex_traffic_gen.instantiate(scenario_cfg, {})
-
- @mock.patch(SSH_HELPER)
- def test__start_server(self, ssh):
- mock_ssh(ssh)
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- trex_traffic_gen.resource_helper = mock.MagicMock()
- self.assertIsNone(trex_traffic_gen._start_server())
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_rfc2544_trex.time")
- @mock.patch(SSH_HELPER)
- def test__generate_trex_cfg(self, ssh, _):
- mock_ssh(ssh)
-
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- trex_traffic_gen.ssh_helper = mock.MagicMock()
- trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
- self.assertIsNone(trex_traffic_gen.resource_helper.generate_cfg())
-
- def test_terminate(self):
- with mock.patch(SSH_HELPER) as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
- ssh.from_node.return_value = ssh_mock
- trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- trex_traffic_gen.resource_helper = mock.MagicMock()
- self.assertIsNone(trex_traffic_gen.terminate())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
index aae3d468f..4f8742477 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
@@ -11,31 +11,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import copy
import mock
import unittest
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
-from yardstick.tests import STL_MOCKS
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import rfc2544
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_trex
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
NAME = 'vnf_1'
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.tg_trex import \
- TrexTrafficGen, TrexResourceHelper
- from yardstick.network_services.traffic_profile.base import TrafficProfile
-
class TestTrexTrafficGen(unittest.TestCase):
+
VNFD = {'vnfd:vnfd-catalog':
{'vnfd':
[{'short-name': 'VpeVnf',
@@ -168,7 +160,7 @@ class TestTrexTrafficGen(unittest.TestCase):
"interfaces": {
"xe0": {
"local_iface_name": "ens786f0",
- "vld_id": TrafficProfile.UPLINK,
+ "vld_id": tp_base.TrafficProfile.UPLINK,
"netmask": "255.255.255.0",
"vpci": "0000:05:00.0",
"local_ip": "152.16.100.19",
@@ -180,7 +172,7 @@ class TestTrexTrafficGen(unittest.TestCase):
},
"xe1": {
"local_iface_name": "ens786f1",
- "vld_id": TrafficProfile.DOWNLINK,
+ "vld_id": tp_base.TrafficProfile.DOWNLINK,
"netmask": "255.255.255.0",
"vpci": "0000:05:00.1",
"local_ip": "152.16.40.19",
@@ -236,7 +228,7 @@ class TestTrexTrafficGen(unittest.TestCase):
"interfaces": {
"xe0": {
"local_iface_name": "ens513f0",
- "vld_id": TrafficProfile.DOWNLINK,
+ "vld_id": tp_base.TrafficProfile.DOWNLINK,
"netmask": "255.255.255.0",
"vpci": "0000:02:00.0",
"local_ip": "152.16.40.20",
@@ -270,7 +262,7 @@ class TestTrexTrafficGen(unittest.TestCase):
"interfaces": {
"xe0": {
"local_iface_name": "ens785f0",
- "vld_id": TrafficProfile.UPLINK,
+ "vld_id": tp_base.TrafficProfile.UPLINK,
"netmask": "255.255.255.0",
"vpci": "0000:05:00.0",
"local_ip": "152.16.100.20",
@@ -297,36 +289,35 @@ class TestTrexTrafficGen(unittest.TestCase):
}
}
- @mock.patch(SSH_HELPER)
- def test___init__(self, ssh):
- mock_ssh(ssh)
+ def setUp(self):
+ self._mock_ssh_helper = mock.patch.object(sample_vnf, 'VnfSshHelper')
+ self.mock_ssh_helper = self._mock_ssh_helper.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_ssh_helper.stop()
+
+ def test___init__(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
- self.assertIsInstance(
- trex_traffic_gen.resource_helper, TrexResourceHelper)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.assertIsInstance(trex_traffic_gen.resource_helper,
+ tg_trex.TrexResourceHelper)
- @mock.patch(SSH_HELPER)
- def test_collect_kpi(self, ssh):
- mock_ssh(ssh)
+ def test_collect_kpi(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen.resource_helper._queue.put({})
result = trex_traffic_gen.collect_kpi()
self.assertEqual({}, result)
- @mock.patch(SSH_HELPER)
- def test_listen_traffic(self, ssh):
- mock_ssh(ssh)
+ def test_listen_traffic(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
self.assertIsNone(trex_traffic_gen.listen_traffic({}))
- @mock.patch(SSH_HELPER)
- def test_instantiate(self, ssh):
- mock_ssh(ssh)
-
+ def test_instantiate(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen._start_server = mock.Mock(return_value=0)
trex_traffic_gen._tg_process = mock.MagicMock()
trex_traffic_gen._tg_process.start = mock.Mock()
@@ -335,16 +326,12 @@ class TestTrexTrafficGen(unittest.TestCase):
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
+ self.assertIsNone(trex_traffic_gen.instantiate(self.SCENARIO_CFG,
+ self.CONTEXT_CFG))
- self.assertIsNone(trex_traffic_gen.instantiate(
- self.SCENARIO_CFG, self.CONTEXT_CFG))
-
- @mock.patch(SSH_HELPER)
- def test_instantiate_error(self, ssh):
- mock_ssh(ssh, exec_result=(1, "", ""))
-
+ def test_instantiate_error(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen._start_server = mock.Mock(return_value=0)
trex_traffic_gen._tg_process = mock.MagicMock()
trex_traffic_gen._tg_process.start = mock.Mock()
@@ -352,62 +339,53 @@ class TestTrexTrafficGen(unittest.TestCase):
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
- self.assertIsNone(trex_traffic_gen.instantiate(
- self.SCENARIO_CFG, self.CONTEXT_CFG))
+ self.assertIsNone(trex_traffic_gen.instantiate(self.SCENARIO_CFG,
+ self.CONTEXT_CFG))
- @mock.patch(SSH_HELPER)
- def test__start_server(self, ssh):
- mock_ssh(ssh)
+ def test__start_server(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.scenario_helper.scenario_cfg = {}
self.assertIsNone(trex_traffic_gen._start_server())
- @mock.patch(SSH_HELPER)
- def test__start_server_multiple_queues(self, ssh):
- mock_ssh(ssh)
+ def test__start_server_multiple_queues(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.scenario_helper.scenario_cfg = {
"options": {NAME: {"queues_per_port": 2}}}
self.assertIsNone(trex_traffic_gen._start_server())
- @mock.patch(SSH_HELPER)
- def test__traffic_runner(self, ssh):
- mock_ssh(ssh)
-
- mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ def test__traffic_runner(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.execute_traffic.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- self.sut = TrexTrafficGen(NAME, vnfd)
+ self.sut = tg_trex.TrexTrafficGen(NAME, vnfd)
self.sut.ssh_helper = mock.Mock()
self.sut.ssh_helper.run = mock.Mock()
- self.sut._connect_client = mock.Mock(autospec=STLClient)
+ self.sut._connect_client = mock.Mock()
self.sut._connect_client.get_stats = mock.Mock(return_value="0")
self.sut.resource_helper.RUN_DURATION = 0
self.sut.resource_helper.QUEUE_WAIT_TIME = 0
- # must generate cfg before we can run traffic so Trex port mapping is created
+ # must generate cfg before we can run traffic so Trex port mapping is
+ # created
self.sut.resource_helper.generate_cfg()
- self.sut._traffic_runner(mock_traffic_profile)
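+ # stub run_traffic so _traffic_runner returns without generating real traffic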
+ with mock.patch.object(self.sut.resource_helper, 'run_traffic'):
+ self.sut._traffic_runner(mock_traffic_profile)
- @mock.patch(SSH_HELPER)
- def test__generate_trex_cfg(self, ssh):
- mock_ssh(ssh)
+ def test__generate_trex_cfg(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
self.assertIsNone(trex_traffic_gen.resource_helper.generate_cfg())
- @mock.patch(SSH_HELPER)
- def test_build_ports_reversed_pci_ordering(self, ssh):
- mock_ssh(ssh)
+ def test_build_ports_reversed_pci_ordering(self):
vnfd = copy.deepcopy(self.VNFD['vnfd:vnfd-catalog']['vnfd'][0])
vnfd['vdu'][0]['external-interface'] = [
{'virtual-interface':
@@ -442,26 +420,24 @@ class TestTrexTrafficGen(unittest.TestCase):
'local_mac': '00:00:00:00:00:01'},
'vnfd-connection-point-ref': 'xe1',
'name': 'xe1'}]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.generate_cfg()
trex_traffic_gen.resource_helper._build_ports()
- self.assertEqual(
- sorted(trex_traffic_gen.resource_helper.all_ports), [0, 1])
+ self.assertEqual(sorted(trex_traffic_gen.resource_helper.all_ports),
+ [0, 1])
# there is a gap in ordering
- self.assertEqual(dict(trex_traffic_gen.resource_helper.dpdk_to_trex_port_map),
- {0: 0, 2: 1})
-
- @mock.patch(SSH_HELPER)
- def test_run_traffic(self, ssh):
- mock_ssh(ssh)
+ self.assertEqual(
+ {0: 0, 2: 1},
+ dict(trex_traffic_gen.resource_helper.dpdk_to_trex_port_map))
- mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ def test_run_traffic(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- self.sut = TrexTrafficGen(NAME, vnfd)
+ self.sut = tg_trex.TrexTrafficGen(NAME, vnfd)
self.sut.ssh_helper = mock.Mock()
self.sut.ssh_helper.run = mock.Mock()
self.sut._traffic_runner = mock.Mock(return_value=0)
@@ -470,20 +446,60 @@ class TestTrexTrafficGen(unittest.TestCase):
self.sut._traffic_process.terminate()
self.assertIsNotNone(result)
- @mock.patch(SSH_HELPER)
- def test_terminate(self, ssh):
- mock_ssh(ssh)
+ def test_terminate(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
self.assertIsNone(trex_traffic_gen.terminate())
- @mock.patch(SSH_HELPER)
- def test__connect_client(self, ssh):
- mock_ssh(ssh)
+ def test__connect_client(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = TrexTrafficGen(NAME, vnfd)
- client = mock.Mock(autospec=STLClient)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ client = mock.Mock()
client.connect = mock.Mock(return_value=0)
self.assertIsNotNone(trex_traffic_gen.resource_helper._connect(client))
+
+
+class TrexResourceHelperTestCase(unittest.TestCase):
+
+ def test__get_samples(self):
+ mock_setup_helper = mock.Mock()
+ trex_rh = tg_trex.TrexResourceHelper(mock_setup_helper)
+ trex_rh.vnfd_helper.interfaces = [
+ {'name': 'interface1'},
+ {'name': 'interface2'}]
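+ # stats keyed by TRex port number, plus a 'latency' section keyed by pg_id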
+ stats = {
+ 10: {'rx_pps': 5, 'ipackets': 200},
+ 20: {'rx_pps': 10, 'ipackets': 300},
+ 'latency': {1: {'latency': 'latency_port_10_pg_id_1'},
+ 2: {'latency': 'latency_port_10_pg_id_2'},
+ 3: {'latency': 'latency_port_20_pg_id_3'},
+ 4: {'latency': 'latency_port_20_pg_id_4'}}
+ }
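+ # pg_ids 1 and 2 are registered to port 10, pg_ids 3 and 4 to port 20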
+ port_pg_id = rfc2544.PortPgIDMap()
+ port_pg_id.add_port(10)
+ port_pg_id.increase_pg_id()
+ port_pg_id.increase_pg_id()
+ port_pg_id.add_port(20)
+ port_pg_id.increase_pg_id()
+ port_pg_id.increase_pg_id()
+
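+ # stub get_stats and the interface-to-port-number lookup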
+ with mock.patch.object(trex_rh, 'get_stats') as mock_get_stats, \
+ mock.patch.object(trex_rh.vnfd_helper, 'port_num') as \
+ mock_port_num:
+ mock_get_stats.return_value = stats
+ mock_port_num.side_effect = [10, 20]
+ output = trex_rh._get_samples([10, 20], port_pg_id=port_pg_id)
+
+ interface = output['interface1']
+ self.assertEqual(5.0, interface['rx_throughput_fps'])
+ self.assertEqual(200, interface['in_packets'])
+ self.assertEqual('latency_port_10_pg_id_1', interface['latency'][1])
+ self.assertEqual('latency_port_10_pg_id_2', interface['latency'][2])
+
+ interface = output['interface2']
+ self.assertEqual(10.0, interface['rx_throughput_fps'])
+ self.assertEqual(300, interface['in_packets'])
+ self.assertEqual('latency_port_20_pg_id_3', interface['latency'][3])
+ self.assertEqual('latency_port_20_pg_id_4', interface['latency'][4])
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
index ffb5cd6f0..a0a0794ea 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
@@ -334,7 +334,6 @@ pipeline>
vfw_approx_vnf.ssh_helper.run.assert_called_once()
@mock.patch.object(utils, 'find_relative_file')
- @mock.patch("yardstick.network_services.vnf_generic.vnf.vfw_vnf.YangModel")
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
@mock.patch(SSH_HELPER)
def test_instantiate(self, ssh, *args):
@@ -363,12 +362,15 @@ class TestFWApproxSetupEnvHelper(unittest.TestCase):
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+ scenario_helper.options = {}
scenario_helper.all_options = {}
vfw_approx_setup_helper = FWApproxSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ vfw_approx_setup_helper.get_flows_config = mock.Mock()
vfw_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
vfw_approx_setup_helper.ssh_helper.all_ports = mock.Mock()
vfw_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1])
expected = 'sudo tool_path -p 0x3 -f /tmp/vfw_config -s /tmp/vfw_script --hwlb 3'
self.assertEqual(vfw_approx_setup_helper.build_config(), expected)
+ vfw_approx_setup_helper.get_flows_config.assert_called_once()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index 4fd51f0e3..73f91d1b1 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -536,7 +536,12 @@ class TestVpeApproxVnf(unittest.TestCase):
}
def setUp(self):
- self.mock_sleep = mock.patch.object(time, 'sleep').start()
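+ # patch time.sleep so the tests run without real delays; stopped via addCleanup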
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_time_sleep.stop()
def test___init__(self):
vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
diff --git a/yardstick/tests/unit/test_cmd/commands/test_env.py b/yardstick/tests/unit/test_cmd/commands/test_env.py
index 57dacbcd3..5d3520986 100644
--- a/yardstick/tests/unit/test_cmd/commands/test_env.py
+++ b/yardstick/tests/unit/test_cmd/commands/test_env.py
@@ -6,60 +6,64 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-import unittest
+
+import os
+import sys
+
import mock
import uuid
-from yardstick.cmd.commands.env import EnvCommand
+from yardstick.cmd.commands import env
+from yardstick.tests.unit import base
-class EnvCommandTestCase(unittest.TestCase):
+class EnvCommandTestCase(base.BaseUnitTestCase):
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._start_async_task')
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._check_status')
+ @mock.patch.object(env.EnvCommand, '_start_async_task')
+ @mock.patch.object(env.EnvCommand, '_check_status')
def test_do_influxdb(self, check_status_mock, start_async_task_mock):
- env = EnvCommand()
- env.do_influxdb({})
+ _env = env.EnvCommand()
+ _env.do_influxdb({})
start_async_task_mock.assert_called_once()
check_status_mock.assert_called_once()
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._start_async_task')
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._check_status')
+ @mock.patch.object(env.EnvCommand, '_start_async_task')
+ @mock.patch.object(env.EnvCommand, '_check_status')
def test_do_grafana(self, check_status_mock, start_async_task_mock):
- env = EnvCommand()
- env.do_grafana({})
+ _env = env.EnvCommand()
+ _env.do_grafana({})
start_async_task_mock.assert_called_once()
check_status_mock.assert_called_once()
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._start_async_task')
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._check_status')
+ @mock.patch.object(env.EnvCommand, '_start_async_task')
+ @mock.patch.object(env.EnvCommand, '_check_status')
def test_do_prepare(self, check_status_mock, start_async_task_mock):
- env = EnvCommand()
- env.do_prepare({})
+ _env = env.EnvCommand()
+ _env.do_prepare({})
start_async_task_mock.assert_called_once()
check_status_mock.assert_called_once()
- @mock.patch('yardstick.cmd.commands.env.HttpClient.post')
+ @mock.patch.object(env.HttpClient, 'post')
def test_start_async_task(self, post_mock):
data = {'action': 'create_grafana'}
- EnvCommand()._start_async_task(data)
+ env.EnvCommand()._start_async_task(data)
post_mock.assert_called_once()
- @mock.patch('yardstick.cmd.commands.env.HttpClient.get')
- @mock.patch('yardstick.cmd.commands.env.EnvCommand._print_status')
- def test_check_status(self, print_mock, get_mock):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception must be removed. The mocked
- # command call must be tested.
+ @mock.patch.object(env.HttpClient, 'get')
+ @mock.patch.object(env.EnvCommand, '_print_status')
+ def test_check_status(self, mock_print, mock_get):
task_id = str(uuid.uuid4())
- get_mock.return_value = {'status': 2, 'result': 'error'}
- status = EnvCommand()._check_status(task_id, 'hello world')
- self.assertEqual(status, 2)
+ mock_get.return_value = {'status': 2, 'result': 'error'}
+ self.assertEqual(
+ 2, env.EnvCommand()._check_status(task_id, 'hello world'))
+ self.assertEqual(2, mock_print.call_count)
- def test_print_status(self):
- try:
- EnvCommand()._print_status('hello', 'word')
- except Exception as e: # pylint: disable=broad-except
- # NOTE(ralonsoh): try to reduce the scope of this exception.
- self.assertIsInstance(e, IndexError)
+ @mock.patch.object(sys, 'stdout')
+ @mock.patch.object(os, 'popen')
+ def test_print_status(self, mock_popen, mock_stdout):
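+ # with empty command output nothing should be written or flushed to stdout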
+ mock_popen_obj = mock.Mock()
+ mock_popen_obj.read.return_value = ''
+ mock_popen.return_value = mock_popen_obj
+ env.EnvCommand()._print_status('hello', 'word')
+ mock_stdout.write.assert_not_called()
+ mock_stdout.flush.assert_not_called()
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index 080d27837..b727e821d 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -193,10 +193,10 @@ class SSHTestCase(unittest.TestCase):
with self.assertRaises(exceptions.SSHError) as raised:
test_ssh._get_client()
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
- self.assertEqual(mock_paramiko.AutoAddPolicy.call_count, 1)
- self.assertEqual(fake_client.set_missing_host_key_policy.call_count, 1)
- self.assertEqual(fake_client.connect.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
+ mock_paramiko.AutoAddPolicy.assert_called_once()
+ fake_client.set_missing_host_key_policy.assert_called_once()
+ fake_client.connect.assert_called_once()
exc_str = str(raised.exception)
self.assertIn('raised during connect', exc_str)
self.assertIn('MyError', exc_str)
@@ -238,6 +238,25 @@ class SSHTestCase(unittest.TestCase):
self.assertEqual("stdout fake data", stdout)
self.assertEqual("stderr fake data", stderr)
+ @mock.patch("yardstick.ssh.six.moves.StringIO")
+ def test_execute_raise_on_error_passed(self, mock_string_io):
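+ # execute() should pass raise_on_error through to run() along with the stdio buffers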
+ mock_string_io.side_effect = stdio = [mock.Mock(), mock.Mock()]
+ stdio[0].read.return_value = "stdout fake data"
+ stdio[1].read.return_value = "stderr fake data"
+ with mock.patch.object(self.test_client, "run", return_value=0) \
+ as mock_run:
+ status, stdout, stderr = self.test_client.execute(
+ "cmd",
+ stdin="fake_stdin",
+ timeout=43,
+ raise_on_error=True)
+ mock_run.assert_called_once_with(
+ "cmd", stdin="fake_stdin", stdout=stdio[0],
+ stderr=stdio[1], timeout=43, raise_on_error=True)
+ self.assertEqual(0, status)
+ self.assertEqual("stdout fake data", stdout)
+ self.assertEqual("stderr fake data", stderr)
+
@mock.patch("yardstick.ssh.time")
def test_wait_timeout(self, mock_time):
mock_time.time.side_effect = [1, 50, 150]
@@ -510,7 +529,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh._get_client = mock__get_client = mock.Mock()
auto_connect_ssh._connect()
- self.assertEqual(mock__get_client.call_count, 1)
+ mock__get_client.assert_called_once()
def test___init___negative(self):
with self.assertRaises(TypeError):
@@ -543,7 +562,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.get_file_obj('remote/path', mock.Mock())
- self.assertEqual(mock_sftp.getfo.call_count, 1)
+ mock_sftp.getfo.assert_called_once()
def test__make_dict(self):
auto_connect_ssh = AutoConnectSSH('user1', 'host1')
@@ -580,7 +599,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.put('a', 'z')
with mock_scp_client_type() as mock_scp_client:
- self.assertEqual(mock_scp_client.put.call_count, 1)
+ mock_scp_client.put.assert_called_once()
@mock.patch('yardstick.ssh.SCPClient')
def test_get(self, mock_scp_client_type):
@@ -589,7 +608,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.get('a', 'z')
with mock_scp_client_type() as mock_scp_client:
- self.assertEqual(mock_scp_client.get.call_count, 1)
+ mock_scp_client.get.assert_called_once()
def test_put_file(self):
auto_connect_ssh = AutoConnectSSH('user1', 'host1')
@@ -597,4 +616,4 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh._put_file_sftp = mock_put_sftp = mock.Mock()
auto_connect_ssh.put_file('a', 'b')
- self.assertEqual(mock_put_sftp.call_count, 1)
+ mock_put_sftp.assert_called_once()