Diffstat:
-rw-r--r--  INFO | 1
-rw-r--r--  INFO.yaml | 4
-rw-r--r--  ansible/install.yaml | 2
-rw-r--r--  ansible/install_dependencies.yml | 19
-rw-r--r--  ansible/multi_port_baremetal_ixia_correlated_test.yaml | 1
-rw-r--r--  ansible/multi_port_baremetal_ixia_test.yaml | 1
-rw-r--r--  ansible/nsb_setup.yml | 4
-rw-r--r--  ansible/roles/add_repos_jumphost/tasks/Debian.yml | 3
-rw-r--r--  ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml | 3
-rw-r--r--  ansible/standalone_ovs_scale_out_ixia_test.yaml | 1
-rw-r--r--  ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml | 3
-rw-r--r--  ansible/standalone_sriov_scale_out_ixia_test.yaml | 1
-rw-r--r--  docker/Dockerfile | 17
-rw-r--r--  docker/Dockerfile.aarch64.patch | 33
-rw-r--r--  docs/release/results/euphrates_fraser_comparsion.rst | 8
-rw-r--r--  docs/release/results/images/tc002_pod.png | bin 0 -> 39106 bytes
-rw-r--r--  docs/release/results/images/tc002_scenario.png | bin 0 -> 44920 bytes
-rw-r--r--  docs/release/results/images/tc010_pod.png | bin 0 -> 44349 bytes
-rw-r--r--  docs/release/results/images/tc010_scenario.png | bin 0 -> 51251 bytes
-rw-r--r--  docs/release/results/images/tc011_pod.png | bin 0 -> 43308 bytes
-rw-r--r--  docs/release/results/images/tc011_scenario.png | bin 0 -> 43647 bytes
-rw-r--r--  docs/release/results/images/tc012_pod.png | bin 0 -> 47996 bytes
-rw-r--r--  docs/release/results/images/tc012_scenario.png | bin 0 -> 51405 bytes
-rw-r--r--  docs/release/results/images/tc014_pod.png | bin 0 -> 36462 bytes
-rw-r--r--  docs/release/results/images/tc014_scenario.png | bin 0 -> 42056 bytes
-rw-r--r--  docs/release/results/images/tc069_pod.png | bin 0 -> 41823 bytes
-rw-r--r--  docs/release/results/images/tc069_scenario.png | bin 0 -> 46728 bytes
-rw-r--r--  docs/release/results/images/tc082_pod.png | bin 0 -> 28096 bytes
-rw-r--r--  docs/release/results/images/tc082_scenario.png | bin 0 -> 16082 bytes
-rw-r--r--  docs/release/results/images/tc083_pod.png | bin 0 -> 29533 bytes
-rw-r--r--  docs/release/results/images/tc083_scenario.png | bin 0 -> 16481 bytes
-rw-r--r--  docs/release/results/index.rst | 1
-rw-r--r--  docs/release/results/os-nosdn-kvm-ha.rst | 270
-rw-r--r--  docs/release/results/os-nosdn-nofeature-ha.rst | 492
-rw-r--r--  docs/release/results/os-nosdn-nofeature-noha.rst | 259
-rw-r--r--  docs/release/results/os-odl_l2-bgpvpn-ha.rst | 53
-rw-r--r--  docs/release/results/os-odl_l2-nofeature-ha.rst | 743
-rw-r--r--  docs/release/results/os-odl_l2-sfc-ha.rst | 231
-rw-r--r--  docs/release/results/os-onos-nofeature-ha.rst | 257
-rw-r--r--  docs/release/results/os-onos-sfc-ha.rst | 517
-rw-r--r--  docs/release/results/overview.rst | 76
-rw-r--r--  docs/release/results/results.rst | 32
-rw-r--r--  docs/release/results/tc002-network-latency.rst | 317
-rw-r--r--  docs/release/results/tc010-memory-read-latency.rst | 299
-rw-r--r--  docs/release/results/tc011-packet-delay-variation.rst | 262
-rw-r--r--  docs/release/results/tc012-memory-read-write-bandwidth.rst | 299
-rw-r--r--  docs/release/results/tc014-cpu-processing-speed.rst | 298
-rw-r--r--  docs/release/results/tc069-memory-write-bandwidth.rst | 300
-rw-r--r--  docs/release/results/tc082-context-switches-under-load.rst | 129
-rw-r--r--  docs/release/results/tc083-network-throughput-between-vm.rst | 129
-rwxr-xr-x  docs/testing/developer/devguide/devguide.rst | 3
-rw-r--r--  docs/testing/user/userguide/13-nsb-installation.rst | 85
-rw-r--r--  docs/testing/user/userguide/code/pod_ixia.yaml | 31
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc088.rst | 129
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc089.rst | 129
-rw-r--r--  etc/yardstick/nodes/apex_baremetal/pod.yaml | 46
-rw-r--r--  etc/yardstick/nodes/apex_virtual/pod.yaml | 40
-rw-r--r--  etc/yardstick/nodes/pod.yaml.nsb.sample.ixia | 1
-rw-r--r--  etc/yardstick/nodes/standalone/ixia_correlated_template.yaml | 3
-rw-r--r--  etc/yardstick/nodes/standalone/ixia_template.yaml | 1
-rwxr-xr-x  nsb_setup.sh | 8
-rw-r--r--  requirements.txt | 1
-rw-r--r--  samples/test_suite.yaml | 3
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg | 6
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg | 13
-rw-r--r--  samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml | 1
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_ixload.yaml | 1
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml | 1
-rwxr-xr-x  tests/ci/prepare_env.sh | 67
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc088.yaml | 80
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc089.yaml | 80
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml | 276
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml | 313
-rw-r--r--  tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml | 15
-rw-r--r--  tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml | 8
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_base.py | 8
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 104
-rw-r--r--  yardstick/benchmark/contexts/base.py | 7
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py | 15
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py | 22
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash | 13
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py | 12
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py | 5
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py | 10
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 8
-rw-r--r--  yardstick/benchmark/scenarios/lib/attach_volume.py | 35
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_volume.py | 49
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_volume.py | 30
-rw-r--r--  yardstick/benchmark/scenarios/lib/detach_volume.py | 33
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_flavor.py | 37
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_server.py | 84
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 2
-rw-r--r--  yardstick/common/constants.py | 3
-rw-r--r--  yardstick/common/exceptions.py | 28
-rw-r--r--  yardstick/common/openstack_utils.py | 214
-rw-r--r--  yardstick/network_services/collector/subscriber.py | 40
-rw-r--r--  yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py | 22
-rw-r--r--  yardstick/network_services/nfvi/resource.py | 61
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 1
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py | 32
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 113
-rw-r--r--  yardstick/orchestrator/heat.py | 17
-rw-r--r--  yardstick/tests/integration/dummy-scenario-heat-context.yaml | 7
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_base.py | 7
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_heat.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py | 52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py | 120
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py | 49
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py | 53
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py | 52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py | 69
-rw-r--r--  yardstick/tests/unit/common/test_openstack_utils.py | 213
-rw-r--r--  yardstick/tests/unit/common/test_packages.py | 88
-rw-r--r--  yardstick/tests/unit/network_services/__init__.py (renamed from tests/unit/network_services/collector/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/collector/__init__.py (renamed from tests/unit/network_services/helpers/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_publisher.py (renamed from tests/unit/network_services/collector/test_publisher.py) | 3
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_subscriber.py (renamed from tests/unit/network_services/collector/test_subscriber.py) | 44
-rw-r--r--  yardstick/tests/unit/network_services/helpers/__init__.py (renamed from tests/unit/network_services/libs/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml (renamed from tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml) | 0
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_cpu.py (renamed from tests/unit/network_services/helpers/test_cpu.py) | 36
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py (renamed from tests/unit/network_services/helpers/test_dpdkbindnic_helper.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_iniparser.py (renamed from tests/unit/network_services/helpers/test_iniparser.py) | 8
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py (renamed from tests/unit/network_services/helpers/test_samplevnf_helper.py) | 1
-rw-r--r--  yardstick/tests/unit/network_services/libs/__init__.py (renamed from tests/unit/network_services/libs/ixia_libs/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py (renamed from tests/unit/network_services/nfvi/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py (renamed from tests/unit/network_services/libs/ixia_libs/test_IxNet.py) | 34
-rw-r--r--  yardstick/tests/unit/network_services/nfvi/__init__.py (renamed from tests/unit/network_services/traffic_profile/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/nfvi/test_collectd.py (renamed from tests/unit/network_services/nfvi/test_collectd.py) | 1
-rw-r--r--  yardstick/tests/unit/network_services/nfvi/test_resource.py (renamed from tests/unit/network_services/nfvi/test_resource.py) | 33
-rw-r--r--  yardstick/tests/unit/network_services/test_utils.py (renamed from tests/unit/network_services/test_utils.py) | 2
-rw-r--r--  yardstick/tests/unit/network_services/test_yang_model.py (renamed from tests/unit/network_services/test_yang_model.py) | 12
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_base.py (renamed from tests/unit/network_services/traffic_profile/test_base.py) | 0
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_fixed.py (renamed from tests/unit/network_services/traffic_profile/test_fixed.py) | 6
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_http.py (renamed from tests/unit/network_services/traffic_profile/test_http.py) | 3
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py (renamed from tests/unit/network_services/traffic_profile/test_http_ixload.py) | 17
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py (renamed from tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py) | 122
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py (renamed from tests/unit/network_services/traffic_profile/test_prox_acl.py) | 22
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py (renamed from tests/unit/network_services/traffic_profile/test_prox_binsearch.py) | 6
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py (renamed from tests/unit/network_services/traffic_profile/test_prox_profile.py) | 4
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py (renamed from tests/unit/network_services/traffic_profile/test_prox_ramp.py) | 4
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py (renamed from tests/unit/network_services/traffic_profile/test_rfc2544.py) | 5
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py (renamed from tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py) | 2
-rw-r--r--  yardstick/tests/unit/orchestrator/test_heat.py | 19
147 files changed, 4695 insertions, 3829 deletions
diff --git a/INFO b/INFO
index 1a49af295..9bcc2922d 100644
--- a/INFO
+++ b/INFO
@@ -11,7 +11,6 @@ IRC: #opnfv-yardstick
Repository: yardstick
Committers:
-jorgen.w.karlsson@ericsson.com
jean.gaoliang@huawei.com
lvjing5@huawei.com
wu.zhihui1@zte.com.cn
diff --git a/INFO.yaml b/INFO.yaml
index 730cd4a6b..677c470cb 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -34,10 +34,6 @@ repositories:
- 'yardstick'
committers:
- <<: *opnfv_yardstick_ptl
- - name: 'Jörgen Karlsson'
- email: 'jorgen.w.karlsson@ericsson.com'
- company: 'ericsson.com'
- id: 'jnon'
- name: 'Kubi'
email: 'jean.gaoliang@huawei.com'
company: 'huawei.com'
diff --git a/ansible/install.yaml b/ansible/install.yaml
index afffbede2..c446b91f7 100644
--- a/ansible/install.yaml
+++ b/ansible/install.yaml
@@ -38,5 +38,7 @@
- service:
name: nginx
state: restarted
+ when: installation_mode != inst_mode_container
- shell: uwsgi -i /etc/yardstick/yardstick.ini
+ when: installation_mode != inst_mode_container
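The two "when:" guards added to install.yaml above skip the nginx restart and
the uwsgi start when the playbook runs during the container image build, where
supervisord manages both services instead. A minimal self-contained sketch of
how such a guard evaluates; the variable values here are assumptions for
illustration, the real definitions live in the playbook's vars:

- hosts: localhost
  gather_facts: false
  vars:
    inst_mode_container: "container"
    installation_mode: "container"  # e.g. supplied with -e at run time
  tasks:
    - name: Restart nginx (skipped when building the container image)
      service:
        name: nginx
        state: restarted
      when: installation_mode != inst_mode_container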
diff --git a/ansible/install_dependencies.yml b/ansible/install_dependencies.yml
deleted file mode 100644
index 1c7d20170..000000000
--- a/ansible/install_dependencies.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: install yardstick dependencies
- hosts: all
-
- roles:
- - install_dependencies
diff --git a/ansible/multi_port_baremetal_ixia_correlated_test.yaml b/ansible/multi_port_baremetal_ixia_correlated_test.yaml
index ba92b5cd3..0d223181d 100644
--- a/ansible/multi_port_baremetal_ixia_correlated_test.yaml
+++ b/ansible/multi_port_baremetal_ixia_correlated_test.yaml
@@ -42,7 +42,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/multi_port_baremetal_ixia_test.yaml b/ansible/multi_port_baremetal_ixia_test.yaml
index 52bc40b43..d2dfaa3c4 100644
--- a/ansible/multi_port_baremetal_ixia_test.yaml
+++ b/ansible/multi_port_baremetal_ixia_test.yaml
@@ -42,7 +42,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/nsb_setup.yml b/ansible/nsb_setup.yml
index 98a59f984..fcde1d1b2 100644
--- a/ansible/nsb_setup.yml
+++ b/ansible/nsb_setup.yml
@@ -22,7 +22,7 @@
environment:
"{{ proxy_env }}"
roles:
- - install_dependencies
+ - install_dependencies_jumphost
- docker
- name: "handle all openstack stuff when: openrc_file is defined"
@@ -37,7 +37,7 @@
name: yardstick
pull: yes
recreate: yes
- image: opnfv/yardstick:latest
+ image: "{{ yardstick_docker_image|default('opnfv/yardstick:latest') }}"
state: started
restart_policy: always
privileged: yes
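With the image name templated, nsb_setup.yml can be pointed at a different
Yardstick image while the Jinja2 default() filter keeps opnfv/yardstick:latest
as the fallback. A sketch of an extra-vars file that would override it (the
file name and tag are hypothetical), passed as
"ansible-playbook -e @image_override.yml nsb_setup.yml":

# image_override.yml (hypothetical) -- any image reachable by docker pull works
yardstick_docker_image: "opnfv/yardstick:stable"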
diff --git a/ansible/roles/add_repos_jumphost/tasks/Debian.yml b/ansible/roles/add_repos_jumphost/tasks/Debian.yml
index 0b67c6691..626f0b037 100644
--- a/ansible/roles/add_repos_jumphost/tasks/Debian.yml
+++ b/ansible/roles/add_repos_jumphost/tasks/Debian.yml
@@ -37,8 +37,7 @@
replace: "# deb-src "
- name: Add extra architecture
- apt:
- dpkg_options: "add-architecture {{ extra_arch }}"
+ command: "dpkg --add-architecture {{ extra_arch }}"
- name: Define the default release version
copy:
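The apt module's dpkg_options parameter only injects "-o"-style options into
apt's package transactions, so it cannot run "dpkg --add-architecture"; hence
the switch to an explicit command above. A sketch (not part of this patch) of
an idempotent variant that queries the registered foreign architectures first
and only runs the command when the architecture is missing:

- name: Check registered foreign architectures
  command: dpkg --print-foreign-architectures
  register: foreign_archs
  changed_when: false

- name: Add extra architecture
  command: "dpkg --add-architecture {{ extra_arch }}"
  when: extra_arch not in foreign_archs.stdout_lines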
diff --git a/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml b/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
index 516676576..b54ea9b57 100644
--- a/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
+++ b/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
@@ -51,13 +51,12 @@
user: ""
password: ""
key_filename: ~
- tg_config:
+ tg_config:
ixchassis: "1.1.1.127" #ixia chassis ip
tcl_port: "8009" # tcl server port
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/standalone_ovs_scale_out_ixia_test.yaml b/ansible/standalone_ovs_scale_out_ixia_test.yaml
index ff665377f..cae373432 100644
--- a/ansible/standalone_ovs_scale_out_ixia_test.yaml
+++ b/ansible/standalone_ovs_scale_out_ixia_test.yaml
@@ -60,7 +60,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml b/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
index 45a4a498b..0e3a0af55 100644
--- a/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
+++ b/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
@@ -43,13 +43,12 @@
user: ""
password: ""
key_filename: ~
- tg_config:
+ tg_config:
ixchassis: "1.1.1.127" #ixia chassis ip
tcl_port: "8009" # tcl server port
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/standalone_sriov_scale_out_ixia_test.yaml b/ansible/standalone_sriov_scale_out_ixia_test.yaml
index 659dbef07..8fb09d9b9 100644
--- a/ansible/standalone_sriov_scale_out_ixia_test.yaml
+++ b/ansible/standalone_sriov_scale_out_ixia_test.yaml
@@ -49,7 +49,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/docker/Dockerfile b/docker/Dockerfile
index fed9f9bd0..62ea0d037 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -31,13 +31,16 @@ RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0
RUN mkdir -p ${REPOS_DIR}
RUN git config --global http.sslVerify false
-##RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
-RUN mkdir ${YARDSTICK_REPO_DIR}
-COPY ./ ${YARDSTICK_REPO_DIR}
+#For developers: To test your changes you must comment out the git clone for ${YARDSTICK_REPO_DIR}.
+#You must also uncomment the RUN and COPY commands below.
+#You must run docker build from your yardstick directory on the host.
+RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
+#RUN mkdir ${YARDSTICK_REPO_DIR}
+#COPY ./ ${YARDSTICK_REPO_DIR}
RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
-RUN ansible-playbook -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+RUN ansible-playbook -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
@@ -48,10 +51,10 @@ EXPOSE 5000 5672
ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR}
ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR}
-COPY ./docker/exec_tests.sh /usr/local/bin/
+COPY ./exec_tests.sh /usr/local/bin/
-ENV NSB_DIR="/opt/nsb_bin" \
- PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
+ENV NSB_DIR="/opt/nsb_bin"
+ENV PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
WORKDIR ${REPOS_DIR}
CMD ["/usr/bin/supervisord"]
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index e8dbea288..6c7381284 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -1,24 +1,16 @@
-From: Cristina Pauna <cristina.pauna@enea.com>
-Date: Thu, 11 Jan 2018 19:06:26 +0200
-Subject: [PATCH] Patch for Yardstick AARCH64 Docker file
+From: ting wu <ting.wu@enea.com>
+Date: Tue, 8 May 2018 14:02:52 +0200
+Subject: [PATCH][PATCH] Patch for Yardstick AARCH64 Docker file
-Signed-off-by: Cristina Pauna <cristina.pauna@enea.com>
-Signed-off-by: Alexandru Nemes <alexandru.nemes@enea.com>
+Signed-off-by: ting wu <ting.wu@enea.com>
---
- docker/Dockerfile | 13 +++++++------
- 1 file changed, 7 insertions(+), 6 deletions(-)
+ docker/Dockerfile | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 2ee5b4c..23e5ea5 100644
+index 62ea0d0..f2f41771 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
-@@ -1,5 +1,5 @@
- ##############################################################################
--# Copyright (c) 2015 Ericsson AB and others.
-+# Copyright (c) 2017 Enea AB and others.
- #
- # All rights reserved. This program and the accompanying materials
- # are made available under the terms of the Apache License, Version 2.0
@@ -7,9 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
@@ -31,17 +23,17 @@ index 2ee5b4c..23e5ea5 100644
ARG BRANCH=master
-@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \
+@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
RELENG_REPO_DIR="${REPOS_DIR}/releng" \
STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
--RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
-+RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && \
+-RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
++RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && \
+ apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean
RUN easy_install -U setuptools==30.0.0
- RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0
+ RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
-@@ -43,8 +44,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+@@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
# nginx=5000, rabbitmq=5672
EXPOSE 5000 5672
@@ -52,3 +44,4 @@ index 2ee5b4c..23e5ea5 100644
COPY ./exec_tests.sh /usr/local/bin/
+
diff --git a/docs/release/results/euphrates_fraser_comparsion.rst b/docs/release/results/euphrates_fraser_comparsion.rst
new file mode 100644
index 000000000..222dc8bb0
--- /dev/null
+++ b/docs/release/results/euphrates_fraser_comparsion.rst
@@ -0,0 +1,8 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+=======================================================
+Test results analysis for Euphrates and Fraser releases
+=======================================================
+
diff --git a/docs/release/results/images/tc002_pod.png b/docs/release/results/images/tc002_pod.png
new file mode 100644
index 000000000..7f92c471d
--- /dev/null
+++ b/docs/release/results/images/tc002_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc002_scenario.png b/docs/release/results/images/tc002_scenario.png
new file mode 100644
index 000000000..0ea1ecec6
--- /dev/null
+++ b/docs/release/results/images/tc002_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc010_pod.png b/docs/release/results/images/tc010_pod.png
new file mode 100644
index 000000000..c7c623481
--- /dev/null
+++ b/docs/release/results/images/tc010_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc010_scenario.png b/docs/release/results/images/tc010_scenario.png
new file mode 100644
index 000000000..7c53a5fab
--- /dev/null
+++ b/docs/release/results/images/tc010_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc011_pod.png b/docs/release/results/images/tc011_pod.png
new file mode 100644
index 000000000..8fec72f5a
--- /dev/null
+++ b/docs/release/results/images/tc011_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc011_scenario.png b/docs/release/results/images/tc011_scenario.png
new file mode 100644
index 000000000..2d78ea372
--- /dev/null
+++ b/docs/release/results/images/tc011_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc012_pod.png b/docs/release/results/images/tc012_pod.png
new file mode 100644
index 000000000..0f2a00910
--- /dev/null
+++ b/docs/release/results/images/tc012_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc012_scenario.png b/docs/release/results/images/tc012_scenario.png
new file mode 100644
index 000000000..16257988d
--- /dev/null
+++ b/docs/release/results/images/tc012_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc014_pod.png b/docs/release/results/images/tc014_pod.png
new file mode 100644
index 000000000..63aead2e8
--- /dev/null
+++ b/docs/release/results/images/tc014_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc014_scenario.png b/docs/release/results/images/tc014_scenario.png
new file mode 100644
index 000000000..98f23ba1b
--- /dev/null
+++ b/docs/release/results/images/tc014_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc069_pod.png b/docs/release/results/images/tc069_pod.png
new file mode 100644
index 000000000..66b272cb4
--- /dev/null
+++ b/docs/release/results/images/tc069_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc069_scenario.png b/docs/release/results/images/tc069_scenario.png
new file mode 100644
index 000000000..caf12f8d5
--- /dev/null
+++ b/docs/release/results/images/tc069_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc082_pod.png b/docs/release/results/images/tc082_pod.png
new file mode 100644
index 000000000..89e01666b
--- /dev/null
+++ b/docs/release/results/images/tc082_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc082_scenario.png b/docs/release/results/images/tc082_scenario.png
new file mode 100644
index 000000000..637a739c3
--- /dev/null
+++ b/docs/release/results/images/tc082_scenario.png
Binary files differ
diff --git a/docs/release/results/images/tc083_pod.png b/docs/release/results/images/tc083_pod.png
new file mode 100644
index 000000000..f874191e4
--- /dev/null
+++ b/docs/release/results/images/tc083_pod.png
Binary files differ
diff --git a/docs/release/results/images/tc083_scenario.png b/docs/release/results/images/tc083_scenario.png
new file mode 100644
index 000000000..afd80aa02
--- /dev/null
+++ b/docs/release/results/images/tc083_scenario.png
Binary files differ
diff --git a/docs/release/results/index.rst b/docs/release/results/index.rst
index 0560152e0..3ec9e1cff 100644
--- a/docs/release/results/index.rst
+++ b/docs/release/results/index.rst
@@ -14,3 +14,4 @@ Yardstick test results
.. include:: ./overview.rst
.. include:: ./results.rst
+.. include:: ./euphrates_fraser_comparsion.rst
diff --git a/docs/release/results/os-nosdn-kvm-ha.rst b/docs/release/results/os-nosdn-kvm-ha.rst
deleted file mode 100644
index a8a56f80e..000000000
--- a/docs/release/results/os-nosdn-kvm-ha.rst
+++ /dev/null
@@ -1,270 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-================================
-Test Results for os-nosdn-kvm-ha
-================================
-
-.. toctree::
- :maxdepth: 2
-
-
-fuel
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD2: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to chose which specific scenarios to look at, and then to zoom in
-on the details of each run test scenario as well.
-
-All of the test case results below are based on 4 scenario test
-runs, each run on the Ericsson POD2_ or LF POD2_ between August 24 and 30 in
-2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 0.44 and 0.75 ms.
-A few runs start with a 0.65 - 0.68 ms RTT spike (This could be because of
-normal ARP handling). One test run has a greater RTT spike of 1.49 ms.
-To be able to draw conclusions more runs should be made. SLA set to 10 ms.
-The SLA value is used as a reference, it has not been defined by OPNFV.
-
-TC005
------
-The IO read bandwidth looks similar between different dates, with an
-average between approx. 92 and 204 MB/s. Within each test run the results
-vary, with a minimum 2 MB/s and maximum 819 MB/s on the totality. Most runs
-have a minimum BW of 3 MB/s (one run at 2 MB/s). The maximum BW varies more in
-absolute numbers between the dates, between 238 and 819 MB/s.
-SLA set to 400 MB/s. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-TC010
------
-The measurements for memory latency are similar between test dates and result
-in approx. 2.07 ns. The variations within each test run are similar, between
-1.41 and 3.53 ns.
-SLA set to 30 ns. The SLA value is used as a reference, it has not been defined
-by OPNFV.
-
-TC011
------
-Packet delay variation between 2 VMs on different blades is measured using
-Iperf3. The reported packet delay variation varies between 0.0051 and 0.0243 ms,
-with an average delay variation between 0.0081 ms and 0.0195 ms.
-
-TC012
------
-Between test dates, the average measurements for memory bandwidth result in
-approx. 13.6 GB/s. Within each test run the results vary more, with a minimal
-BW of 6.09 GB/s and maximum of 16.47 GB/s on the totality.
-SLA set to 15 GB/s. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-TC014
------
-The Unixbench processor test run results vary between scores 2316 and 3619,
-one result each date.
-No SLA set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-CPU utilization statistics are collected during UDP flows sent between the VMs
-using pktgen as packet generator tool. The average measurements for CPU
-utilization ratio vary between 1% to 2%. The peak of CPU utilization ratio
-appears around 7%.
-
-TC069
------
-Between test dates, the average measurements for memory bandwidth vary between
-22.6 and 29.1 GB/s. Within each test run the results vary more, with a minimal
-BW of 20.0 GB/s and maximum of 29.5 GB/s on the totality.
-SLA set to 6 GB/s. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-Memory utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as packet generator tool. The average measurements for memory
-utilization vary between 225MB to 246MB. The peak of memory utilization appears
-around 340MB.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-Cache utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as packet generator tool. The average measurements for cache
-utilization vary between 205MB to 212MB.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as packet generator tool. Total number of packets received per
-second was average on 200 kpps and total number of packets transmitted per
-second was average on 600 kpps.
-
-Detailed test results
----------------------
-The scenario was run on Ericsson POD2_ and LF POD2_ with:
-Fuel 9.0
-OpenStack Mitaka
-OpenVirtualSwitch 2.5.90
-OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
-Conclusions and recommendations
--------------------------------
-The pktgen test configuration has a relatively large base effect on RTT in
-TC037 compared to TC002, where there is no background load at all. Approx.
-15 ms compared to approx. 0.5 ms, which is more than a 3000 percentage
-difference in RTT results.
-Especially RTT and throughput come out with better results than for instance
-the *fuel-os-nosdn-nofeature-ha* scenario does. The reason for this should
-probably be further analyzed and understood. Also of interest could be
-to make further analyzes to find patterns and reasons for lost traffic.
-Also of interest could be to see if there are continuous variations where
-some test cases stand out with better or worse results than the general test
-case.
-
diff --git a/docs/release/results/os-nosdn-nofeature-ha.rst b/docs/release/results/os-nosdn-nofeature-ha.rst
deleted file mode 100644
index 9e52731d5..000000000
--- a/docs/release/results/os-nosdn-nofeature-ha.rst
+++ /dev/null
@@ -1,492 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-======================================
-Test Results for os-nosdn-nofeature-ha
-======================================
-
-.. toctree::
- :maxdepth: 2
-
-
-apex
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD1: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to chose which specific scenarios to look at, and then to zoom in
-on the details of each run test scenario as well.
-
-All of the test case results below are based on 4 scenario test
-runs, each run on the LF POD1_ between August 25 and 28 in
-2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 0.74 and 1.08 ms.
-A few runs start with a 0.99 - 1.07 ms RTT spike (This could be because of
-normal ARP handling). One test run has a greater RTT spike of 1.35 ms.
-To be able to draw conclusions more runs should be made. SLA set to 10 ms.
-The SLA value is used as a reference, it has not been defined by OPNFV.
-
-TC005
------
-The IO read bandwidth looks similar between different dates, with an
-average between approx. 128 and 136 MB/s. Within each test run the results
-vary, with a minimum 5 MB/s and maximum 446 MB/s on the totality. Most runs
-have a minimum BW of 5 MB/s (one run at 6 MB/s). The maximum BW varies more in
-absolute numbers between the dates, between 416 and 446 MB/s.
-SLA set to 400 MB/s. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-TC010
------
-The measurements for memory latency are similar between test dates and result
-in approx. 1.09 ns. The variations within each test run are similar, between
-1.0860 and 1.0880 ns.
-SLA set to 30 ns. The SLA value is used as a reference, it has not been defined
-by OPNFV.
-
-TC011
------
-Packet delay variation between 2 VMs on different blades is measured using
-Iperf3. The reported packet delay variation varies between 0.0025 and 0.0148 ms,
-with an average delay variation between 0.0056 ms and 0.0157 ms.
-
-TC012
------
-Between test dates, the average measurements for memory bandwidth result in
-approx. 19.70 GB/s. Within each test run the results vary more, with a minimal
-BW of 18.16 GB/s and maximum of 20.13 GB/s on the totality.
-SLA set to 15 GB/s. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-TC014
------
-The Unixbench processor test run results vary between scores 3224.4 and 3842.8,
-one result each date. The average score on the total is 3659.5.
-No SLA set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-CPU utilization statistics are collected during UDP flows sent between the VMs
-using pktgen as packet generator tool. The average measurements for CPU
-utilization ratio vary between 1% to 2%. The peak of CPU utilization ratio
-appears around 7%.
-
-TC069
------
-Between test dates, the average measurements for memory bandwidth vary between
-22.6 and 29.1 GB/s. Within each test run the results vary more, with a minimal
-BW of 20.0 GB/s and maximum of 29.5 GB/s on the totality.
-SLA set to 6 GB/s. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-Memory utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as packet generator tool. The average measurements for memory
-utilization vary between 225MB to 246MB. The peak of memory utilization appears
-around 340MB.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-Cache utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as packet generator tool. The average measurements for cache
-utilization vary between 205MB to 212MB.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. Around 20 percent decrease in the worst
-case. For the other test runs there is however no significant change to the PPS
-throughput when the number of flows are increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (same as the on mentioned earlier
-for RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally range between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in a rare cases.
-
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as packet generator tool. Total number of packets received per
-second was average on 200 kpps and total number of packets transmitted per
-second was average on 600 kpps.
-
-Detailed test results
----------------------
-The scenario was run on LF POD1_ with:
-Apex
-OpenStack Mitaka
-OpenVirtualSwitch 2.5.90
-OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
-
-Joid
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD5: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to chose which specific scenarios to look at, and then to zoom in
-on the details of each run test scenario as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Intel POD5_ between September 11 and 14 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 1.59 and 1.70 ms.
-Two test runs have reached the same greater RTT spike of 3.06 ms, which are
-1.66 and 1.70 ms average, but only one has the lower RTT of 1.35 ms. The other
-two runs have no similar spike at all. To be able to draw conclusions more runs
-should be made. SLA set to be 10 ms. The SLA value is used as a reference, it
-has not been defined by OPNFV.
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput and the
-greatest IO read bandwidth of the four runs is 173.3 MB/s. The IO read
-bandwidth of the four runs looks similar on different four days, with an
-average between 32.7 and 60.4 MB/s. One of the runs has a minimum BW of 429
-KM/s and other has a maximum BW of 173.3 MB/s. The SLA of read bandwidth sets
-to be 400 MB/s, which is used as a reference, and it has not been defined by
-OPNFV.
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, which is a series of
-micro benchmarks intended to measure basic operating system and hardware system
-metrics. The memory read latency of the four runs is 1.1 ns on average. The
-variations within each test run are different, some vary from a large range and
-others have a small change. For example, the largest change is on September 14,
-the memory read latency of which is ranging from 1.12 ns to 1.22 ns. However,
-the results on September 12 change very little, which range from 1.14 ns to
-1.17 ns. The SLA sets to be 30 ns. The SLA value is used as a reference, it has
-not been defined by OPNFV.
-
-TC011
------
-Iperf3 is a tool for evaluating the pocket delay variation between 2 VMs on
-different blades. The reported pocket delay variations of the four test runs
-differ from each other. The results on September 13 within the date look
-similar and the values are between 0.0087 and 0.0190 ms, which is 0.0126 ms on
-average. However, on the fourth day, the pocket delay variation has a large
-wide change within the date, which ranges from 0.0032 ms to 0.0121 ms and has
-the minimum average value. The pocket delay variations of other two test runs
-look relatively similar, which are 0.0076 ms and 0.0152 ms on average. The SLA
-value sets to be 10 ms. The SLA value is used as a reference, it has not been
-defined by OPNFV.
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. Among the four test runs, the memory
-bandwidth within the second day almost keep stable, which is 11.58 GB/s on
-average. And the memory bandwidth of the fourth day look similar as that of the
-second day, both of which remain stable. The other two test runs relatively
-change from a large wide range, in which the minimum memory bandwidth is 11.22
-GB/s and the maximum bandwidth is 16.65 GB/s with an average bandwidth of about
-12.20 GB/s. Here SLA set to be 15 GB/s. The SLA value is used as a reference,
-it has not been defined by OPNFV.
-
-TC014
------
-Unixbench is used to measure processing speed, that is, instructions per
-second. It can be seen from the dashboard that the processing test results
-vary from scores of 3272 to 3444, with only one result per date. The overall
-average score is 3371. No SLA is set.
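-
-A rough sketch of running UnixBench and extracting the final index score; the
-install path /opt/UnixBench is an assumption, and a full run takes a long
-time::
-
-    import re
-    import subprocess
-
-    def unixbench_index(unixbench_dir="/opt/UnixBench"):
-        """Run UnixBench and return its final benchmark index score."""
-        out = subprocess.run(["./Run"], cwd=unixbench_dir,
-                             capture_output=True, text=True).stdout
-        match = re.search(r"System Benchmarks Index Score\s+([\d.]+)", out)
-        return float(match.group(1)) if match else None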
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The mean packet throughput of the four test runs is 119.85, 128.02, 121.40 and
-126.08 kpps, of which the result of the second run is the highest. The RTT
-results of all the test runs stay flat at approx. 37 ms. It is obvious that
-the PPS results are not as consistent as the RTT results.
-
-The number of flows in the four test runs is 240 k on average and the PPS
-results look somewhat uneven, since the largest packet throughput is 184 kpps
-and the minimum throughput is 49 kpps.
-
-There are no errors of packets received in the four runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the four
-runs have a similar average value, 38 ms, of which the worst RTT is 93 ms on
-Sep. 14th.
-
-The CPU load of the four test runs varies widely, since the minimum and peak
-CPU load are 0 percent and 51 percent respectively, and the best result is
-obtained on Sep. 14th.
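-
-A rough sketch of how a UDP flow load of this kind can be shaped through the
-kernel pktgen /proc interface (requires root and the pktgen module; the device
-name eth0 and the destination address are illustrative)::
-
-    def pgset(path, cmd):
-        """Write one pktgen command to a /proc/net/pktgen control file."""
-        with open(path, "w") as f:
-            f.write(cmd + "\n")
-
-    pgset("/proc/net/pktgen/kpktgend_0", "add_device eth0")  # bind device
-    pgset("/proc/net/pktgen/eth0", "count 1000000")  # packets to send
-    pgset("/proc/net/pktgen/eth0", "flows 240000")   # concurrent UDP flows
-    pgset("/proc/net/pktgen/eth0", "dst 10.0.0.2")   # illustrative target
-    pgset("/proc/net/pktgen/pgctrl", "start")        # run the generator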
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the memory write bandwidth
-tends to become larger first and then smaller within every test run, ranging
-from 22.3 GB/s up to 26.8 GB/s and then down to 18.5 GB/s on average. Since
-the test id is one, only the INT memory write bandwidth is tested. On the
-whole, when the block size is 8 kb and 16 kb, the memory write bandwidth looks
-similar, with a minimum BW of 22.5 GB/s and a peak value of 28.7 GB/s. The SLA
-is set to 7 GB/s. The SLA value is used as a reference; it has not been
-defined by OPNFV.
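-
-A hedged sketch of sweeping the write block size with bw_mem, mirroring the
-1 kb to 512 kb progression described above::
-
-    import subprocess
-
-    def write_bw_sweep():
-        """Memory write bandwidth (MB/s) for block sizes 1 kb to 512 kb."""
-        results = {}
-        size_kb = 1
-        while size_kb <= 512:
-            out = subprocess.run(["bw_mem", f"{size_kb}k", "wr"],
-                                 capture_output=True, text=True).stderr
-            results[size_kb] = float(out.split()[1])  # "<MB moved> <MB/s>"
-            size_kb *= 2
-        return results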
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test
-runs look similar to each other. Within each test run, the maximum RTT can
-reach more than 80 ms and the average RTT is usually approx. 38 ms. On the
-whole, the average RTTs of the four runs stay flat.
-
-Memory utilization is measured by free, which can display the amount of free
-and used memory in the system. The largest amount of used memory is 268 MiB on
-Sep. 14, which also has the largest minimum memory. Besides, the other three
-test runs have similar used memory. On the other hand, the free memory of the
-four runs has the same smallest minimum value, about 223 MiB, and the maximum
-free memory of three runs has a similar result, 337 MiB, except on Sep. 14th,
-whose maximum free memory is 254 MiB. On the whole, all the test runs have
-similar average free memory.
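-
-A minimal sketch of sampling used and free memory with free, as done for this
-metric::
-
-    import subprocess
-
-    def memory_usage_mib():
-        """Return (used, free) memory in MiB from the "Mem:" row of free -m."""
-        out = subprocess.run(["free", "-m"],
-                             capture_output=True, text=True).stdout
-        fields = out.splitlines()[1].split()  # Mem: total used free ...
-        return int(fields[2]), int(fields[3])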
-
-Network throughput and packet loss can be measured by pktgen, which is a tool
-for generating traffic loads for network experiments. The mean network
-throughput of the four test runs seems quite different, ranging from 119.85
-kpps to 128.02 kpps. The average number of flows in these tests is 24000, and
-each run has a minimum number of flows of 2 and a maximum number of flows of
-1.001 million. At the same time, the corresponding packet throughput differs
-between 49.4 kpps and 193.3 kpps with an average packet throughput of approx.
-125 kpps. On the whole, the PPS results seem consistent. Within each of the
-four test runs, the packet throughput does not grow as the number of flows
-becomes larger.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test
-runs look similar to each other. Within each test run, the maximum RTT can
-reach more than 94 ms and the average RTT is usually approx. 35 ms. On the
-whole, the average RTTs of the four runs stay flat.
-
-Cache utilization is measured by cachestat, which can display the size of the
-cache and buffers in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as packet generator
-tool. The largest cache size is 212 MiB in the four runs, and the smallest
-cache size is 75 MiB. On the whole, the average cache size of the four runs is
-approx. 208 MiB. Meanwhile, the trend of the buffer size looks similar across
-the runs.
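-
-cachestat itself is a small perf-tools script; as a hedged stand-in, the same
-cache and buffer sizes can be read directly from /proc/meminfo::
-
-    def cache_and_buffers_mib():
-        """Cached and Buffers sizes in MiB from /proc/meminfo."""
-        sizes = {}
-        with open("/proc/meminfo") as f:
-            for line in f:
-                key, value = line.split(":", 1)
-                if key in ("Cached", "Buffers"):
-                    sizes[key] = int(value.split()[0]) // 1024  # kB -> MiB
-        return sizes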
-
-Packet throughput can be measured by pktgen, which is a tool for generating
-traffic loads for network experiments. The mean packet throughput of the four
-test runs seems quite different, ranging from 119.85 kpps to 128.02 kpps. The
-average number of flows in these tests is 239.7 k, and each run has a minimum
-number of flows of 2 and a maximum number of flows of 1.001 million. At the
-same time, the corresponding packet throughput differs between 49.4 kpps and
-193.3 kpps with an average packet throughput of approx. 125 kpps. On the
-whole, the PPS results seem consistent. Within each of the four test runs, the
-packet throughput does not grow as the number of flows becomes larger.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs, at
-approx. 32 ms. The PPS results are not as consistent as the RTT results.
-
-Network utilization is measured by sar, that is, the system activity reporter,
-which can display the average statistics for the time since the system was
-started. Network utilization statistics are collected during the UDP flows
-sent between the VMs using pktgen as packet generator tool. The largest total
-number of packets transmitted per second differs between runs: the smallest is
-6 pps on Sep. 12th and the largest is 210.8 kpps. Likewise, the largest total
-number of packets received per second differs between runs: the smallest is
-2 pps on Sep. 13th and the largest is 250.2 kpps.
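-
-A minimal sketch of collecting the same per-interface packet rates with sar
-(from the sysstat package); the interface name and the single 1-second sample
-are illustrative::
-
-    import subprocess
-
-    def network_rates(interface="eth0"):
-        """(rx packets/s, tx packets/s) from one 1-second sar sample."""
-        out = subprocess.run(["sar", "-n", "DEV", "1", "1"],
-                             capture_output=True, text=True).stdout
-        for line in out.splitlines():
-            fields = line.split()
-            if interface in fields:  # rxpck/s, txpck/s follow the IFACE column
-                i = fields.index(interface)
-                return float(fields[i + 1]), float(fields[i + 2])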
-
-In some test runs, when running with less than approx. 90000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is, however,
-no significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 1000000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on Intel POD5_ with:
-
-* Joid
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
-
diff --git a/docs/release/results/os-nosdn-nofeature-noha.rst b/docs/release/results/os-nosdn-nofeature-noha.rst
deleted file mode 100644
index 8b7c184bb..000000000
--- a/docs/release/results/os-nosdn-nofeature-noha.rst
+++ /dev/null
@@ -1,259 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-========================================
-Test Results for os-nosdn-nofeature-noha
-========================================
-
-.. toctree::
- :maxdepth: 2
-
-
-Joid
-=====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD5: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom
-in on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Intel POD5_ between September 12 and 15 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 1.50 and 1.68 ms.
-Only one test run has reached greatest RTT spike of 2.92 ms, which has
-the smallest RTT of 1.06 ms. The other three runs have no similar spike at all,
-the minimum and average RTTs of which are approx. 1.50 ms and 1.68 ms. SLA set to
-be 10 ms. The SLA value is used as a reference, it has not been defined by
-OPNFV.
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput, which is
-measured by fio, and the greatest IO read bandwidth of the four runs is 177.5
-MB/s. The IO read bandwidth of the four runs looks similar across the four
-days, with an average between 46.7 and 62.5 MB/s. One of the runs has a
-minimum BW of 680 KB/s and another has a maximum BW of 177.5 MB/s. The SLA of
-read bandwidth is set to 400 MB/s, which is used as a reference; it has not
-been defined by OPNFV.
-
-The results of storage IOPS for the four runs look similar to each other. The
-test runs all show approx. 1.55 K IO reads per second, with a minimum value of
-less than 60 reads per second.
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, which is a series
-of micro benchmarks intended to measure basic operating system and hardware
-system metrics. The memory read latency of the four runs is between 1.134 ns
-and 1.227 ns on average. The variation within each test run differs: some runs
-vary over a large range and others change only a little. For example, the
-largest change is on September 15, where the memory read latency ranges from
-1.116 ns to 1.393 ns. However, the results on September 12 change very little,
-mainly staying flat and ranging from 1.124 ns to 1.55 ns. The SLA is set to
-30 ns. The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC011
------
-Iperf3 is a tool for evaluating the packet delay variation between 2 VMs on
-different blades. The reported packet delay variations of the four test runs
-differ from each other. The results on September 13 look similar within the
-date, with values between 0.0213 and 0.0225 ms, 0.0217 ms on average. However,
-on the third day the packet delay variation changes over a wide range within
-the date, from 0.008 ms to 0.0225 ms, and has the minimum value. On Sep. 12
-the packet delay is quite long, with values between 0.0236 and 0.0287 ms,
-including the maximum packet delay of 0.0287 ms. The packet delay of the last
-test run is 0.0151 ms on average. The SLA value is set to 10 ms. The SLA value
-is used as a reference; it has not been defined by OPNFV.
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. Among the four test runs, the memory
-bandwidth of three test runs stays almost stable within each run, at 11.65,
-11.57 and 11.64 GB/s on average. However, the memory read and write bandwidth
-on Sep. 14 has a large range, from 11.36 GB/s to 16.68 GB/s. Here the SLA is
-set to 15 GB/s. The SLA value is used as a reference; it has not been defined
-by OPNFV.
-
-TC014
------
-Unixbench is used to evaluate the IaaS processing speed with regard to the
-scores of single CPU running and parallel running. It can be seen from the
-dashboard that the processing test results vary from scores of 3222 to 3585,
-with only one result per date. No SLA is set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The mean packet throughput of the four test runs is 124.8, 160.1, 113.8 and
-137.3 kpps, of which the result of the second run is the highest. The RTT
-results of all the test runs stay flat at approx. 37 ms. It is obvious that
-the PPS results are not as consistent as the RTT results.
-
-The number of flows in the four test runs is 240 k on average and the PPS
-results look somewhat uneven, since the largest packet throughput is 243.1
-kpps and the minimum throughput is 37.6 kpps.
-
-There are no errors of packets received in the four runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the four
-runs have a similar average value, between 32 ms and 41 ms, of which the worst
-RTT is 155 ms on Sep. 14th.
-
-CPU load is measured by mpstat, and the CPU load of the four test runs looks
-somewhat similar, since the minimum and peak CPU load are between 0 percent
-and 9 percent respectively. The best result is obtained on Sep. 15th, with a
-CPU load of nine percent.
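-
-A minimal sketch of sampling overall CPU load with mpstat (sysstat package),
-assuming a C locale for the output format; the single 1-second sample is
-illustrative::
-
-    import subprocess
-
-    def cpu_load_percent():
-        """Overall CPU utilization from one 1-second mpstat sample."""
-        out = subprocess.run(["mpstat", "1", "1"],
-                             capture_output=True, text=True).stdout
-        for line in out.splitlines():
-            if line.startswith("Average:"):
-                idle = float(line.split()[-1])  # %idle is the last column
-                return 100.0 - idle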
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the memory write bandwidth
-tends to become larger first and then smaller within every test run, ranging
-from 22.4 GB/s up to 26.5 GB/s and then down to 18.6 GB/s on average. Since
-the test id is one, only the INT memory write bandwidth is tested. On the
-whole, when the block size is 8 kb and 16 kb, the memory write bandwidth looks
-similar, with a minimum BW of 22.5 GB/s and a peak value of 28.7 GB/s. Then,
-as the block size becomes larger, the memory write bandwidth tends to
-decrease. The SLA is set to 7 GB/s. The SLA value is used as a reference; it
-has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of three test runs
-look similar to each other; within these test runs, the maximum RTT can reach
-95 ms and the average RTT is usually approx. 36 ms. The network latency tested
-on Sep. 14 shows a peak latency of 155 ms. But on the whole, the average RTTs
-of the four runs stay flat.
-
-Memory utilization is measured by free, which can display the amount of free
-and used memory in the system. The largest amount of used memory is 270 MiB on
-Sep. 13, which also has the smallest minimum memory utilization. Besides, the
-other three test runs have similar used memory, with an average memory usage
-of 264 MiB. On the other hand, the free memory of the four runs has the same
-smallest minimum value, about 223 MiB, and the maximum free memory of three
-runs has a similar result, 226 MiB, except on Sep. 13th, whose maximum free
-memory is 273 MiB. On the whole, all the test runs have similar average free
-memory.
-
-Network throughput and packet loss can be measured by pktgen, which is a tool
-for generating traffic loads for network experiments. The mean network
-throughput of the four test runs seems quite different, ranging from 119.85
-kpps to 128.02 kpps. The average number of flows in these tests is 240000, and
-each run has a minimum number of flows of 2 and a maximum number of flows of
-1.001 million. At the same time, the corresponding packet throughput differs
-between 38 kpps and 243 kpps with an average packet throughput of approx.
-134 kpps. On the whole, the PPS results seem consistent. Within each of the
-four test runs, the packet throughput does not grow as the number of flows
-becomes larger.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test
-runs look similar to each other. Within each test run, the maximum RTT can
-reach 79 ms and the average RTT is usually approx. 35 ms. On the whole, the
-average RTTs of the four runs stay flat.
-
-Cache utilization is measured by cachestat, which can display the size of the
-cache and buffers in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as packet generator
-tool. The largest cache size is 214 MiB in the four runs, and the smallest
-cache size is 100 MiB. On the whole, the average cache size of the four runs
-is approx. 210 MiB. Meanwhile, the trend of the buffer size looks similar
-across the runs. On the other hand, the mean buffer size of the four runs
-stays flat, with a minimum value of approx. 7 MiB, a maximum value of 8 MiB,
-and an average value of about 8 MiB.
-
-Packet throughput can be measured by pktgen, which is a tool for generating
-traffic loads for network experiments. The mean packet throughput of the four
-test runs seems quite different, ranging from 113.8 kpps to 124.8 kpps. The
-average number of flows in these tests is 240 k, and each run has a minimum
-number of flows of 2 and a maximum number of flows of 1.001 million. At the
-same time, the corresponding packet throughput differs between 47.6 kpps and
-243.1 kpps with an average packet throughput between 113.8 kpps and 160.1
-kpps. Within each of the four test runs, the packet throughput does not grow
-as the number of flows becomes larger.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs,
-between 0 ms and 79 ms with an average latency of approx. 35 ms. The PPS
-results are not as consistent as the RTT results, for the mean packet
-throughput of the four runs differs from 113.8 kpps to 124.8 kpps.
-
-Network utilization is measured by sar, that is, the system activity reporter,
-which can display the average statistics for the time since the system was
-started. Network utilization statistics are collected during the UDP flows
-sent between the VMs using pktgen as packet generator tool. The largest total
-number of packets transmitted per second looks similar for the first three
-runs, with a minimum number of 10 pps and a maximum number of 97 kpps, except
-the one on Sep. 15th, where the number of packets transmitted per second is
-10 pps. Meanwhile, the largest total number of packets received per second
-differs between runs: the smallest is 1 pps and the largest is 276 kpps.
-
-In some test runs, when running with less than approx. 90000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is, however,
-no significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 1000000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on Intel POD5_ with:
-
-* Joid
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
diff --git a/docs/release/results/os-odl_l2-bgpvpn-ha.rst b/docs/release/results/os-odl_l2-bgpvpn-ha.rst
deleted file mode 100644
index 2bd6dc35d..000000000
--- a/docs/release/results/os-odl_l2-bgpvpn-ha.rst
+++ /dev/null
@@ -1,53 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-====================================
-Test Results for os-odl_l2-bgpvpn-ha
-====================================
-
-.. toctree::
- :maxdepth: 2
-
-
-fuel
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD2: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom
-in on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Ericsson POD2_ between September 7 and 11 in 2016.
-
-TC043
------
-The round-trip-time (RTT) between 2 nodes is measured using ping. Most test
-run measurements average between 0.21 and 0.28 ms. A few runs start with a
-0.32 - 0.35 ms RTT spike (this could be because of normal ARP handling). To be
-able to draw conclusions, more runs should be made. The SLA is set to 10 ms.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-Detailed test results
----------------------
-The scenario was run on Ericsson POD2_ with:
-
-* Fuel 9.0
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
diff --git a/docs/release/results/os-odl_l2-nofeature-ha.rst b/docs/release/results/os-odl_l2-nofeature-ha.rst
deleted file mode 100644
index ac0c5bb59..000000000
--- a/docs/release/results/os-odl_l2-nofeature-ha.rst
+++ /dev/null
@@ -1,743 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-=======================================
-Test Results for os-odl_l2-nofeature-ha
-=======================================
-
-.. toctree::
- :maxdepth: 2
-
-
-apex
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD1: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom
-in on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the LF POD1_ between September 14 and 17 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements average between 0.49 ms and 0.60 ms. Only one
-test run reached the greatest RTT spike of 0.93 ms. Meanwhile, the smallest
-network latency is 0.33 ms, obtained on Sep. 14th. The SLA is set to 10 ms.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput, which is
-measured by fio, and the greatest IO read bandwidth of the four runs is 416
-MB/s. The IO read bandwidth of all four runs looks similar, with an average
-between 128 and 131 MB/s. One of the runs has a minimum BW of 497 KB/s. The
-SLA of read bandwidth is set to 400 MB/s, which is used as a reference; it has
-not been defined by OPNFV.
-
-The results of storage IOPS for the four runs look similar to each other. The
-IO read times per second of the four test runs average 1 k per second, while
-the minimum result is only 45 times per second.
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, which is a series
-of micro benchmarks intended to measure basic operating system and hardware
-system metrics. The memory read latency of the four runs is between 1.0859 ns
-and 1.0869 ns on average. The variation within each test run differs: some
-runs vary over a large range and others change only a little. For example, the
-largest change is on September 14th, where the memory read latency ranges
-from 1.086 ns to 1.091 ns. The SLA is set to 30 ns. The SLA value is used as a
-reference; it has not been defined by OPNFV.
-
-TC011
------
-Packet delay variation between 2 VMs on different blades is measured using
-Iperf3. On the first two test runs the reported packet delay variation varies
-between 0.0037 and 0.0740 ms, with an average delay variation between
-0.0096 ms and 0.0321 ms. On the second date the delay variation varies between
-0.0063 and 0.0096 ms, with an average delay variation of 0.0124 - 0.0141 ms.
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. Among the four test runs, the trends of
-three memory bandwidth results look similar, all within a narrow range, and
-the average result is 19.88 GB/s. Here the SLA is set to 15 GB/s. The SLA
-value is used as a reference; it has not been defined by OPNFV.
-
-TC014
------
-Unixbench is used to evaluate the IaaS processing speed with regard to the
-scores of single CPU running and parallel running. It can be seen from the
-dashboard that the processing test results vary from scores of 3754k to 3831k,
-with only one result per date. No SLA is set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The mean packet throughput of the four test runs is between 307.3 kpps and
-447.1 kpps, of which the result of the third run is the highest. The RTT
-results of all the test runs stay flat at approx. 15 ms. It is obvious that
-the PPS results are not as consistent as the RTT results.
-
-The number of flows in the four test runs is 240 k on average and the PPS
-results look somewhat uneven, since the largest packet throughput is 418.1
-kpps and the minimum throughput is 326.5 kpps.
-
-There are no errors of packets received in the four runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the four
-runs have a similar average value, approx. 15 ms.
-
-CPU load is measured by mpstat, and the CPU load of the four test runs looks
-somewhat similar, since the minimum and peak CPU load are between 0 percent
-and nine percent respectively. The best result is obtained on Sep. 1, with a
-CPU load of nine percent. But on the whole, the CPU load is very low, since
-the average value is quite small.
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the memory write bandwidth
-tends to become larger first and then smaller within every test run, ranging
-from 28.2 GB/s up to 29.5 GB/s and then down to 29.2 GB/s on average. Since
-the test id is one, only the INT memory write bandwidth is tested. On the
-whole, when the block size is 2 kb or 16 kb, the memory write bandwidth looks
-similar, with a minimum BW of 25.8 GB/s and a peak value of 28.3 GB/s. Then,
-as the block size becomes larger, the memory write bandwidth tends to
-decrease. The SLA is set to 7 GB/s. The SLA value is used as a reference; it
-has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test
-runs look similar to each other; within these test runs, the maximum RTT can
-reach 39 ms and the average RTT is usually approx. 15 ms. The network latency
-tested on Sep. 1 and Sep. 8 shows a peak latency of 39 ms. But on the whole,
-the average RTTs of the runs stay flat and the network latency is relatively
-short.
-
-Memory utilization is measured by free, which can display the amount of free
-and used memory in the system. The largest amount of used memory is 267 MiB
-for the four runs. In general, the four test runs have very large memory
-utilization, which can reach 257 MiB on average. On the other hand, the mean
-free memory of the four test runs follows a trend similar to that of the mean
-used memory. In general, the mean free memory changes from 233 MiB to 241 MiB.
-
-Packet throughput and packet loss can be measured by pktgen, which is a tool
-for generating traffic loads for network experiments. The mean packet
-throughput of the four test runs seems quite different, ranging from 305.3
-kpps to 447.1 kpps. The average number of flows in these tests is 240000, and
-each run has a minimum number of flows of 2 and a maximum number of flows of
-1.001 million. At the same time, the corresponding average packet throughput
-is between 354.4 kpps and 381.8 kpps. In summary, the PPS results seem
-consistent. Within each of the four test runs, the packet throughput does not
-grow as the number of flows becomes larger.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test
-runs look similar to each other. Within each test run, the maximum RTT is only
-42 ms and the average RTT is usually approx. 15 ms. On the whole, the average
-RTTs of the four runs stay stable and the network latency is relatively small.
-
-Cache utilization is measured by cachestat, which can display the size of the
-cache and buffers in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as packet generator
-tool. The largest cache size is 212 MiB, the same for all four runs, and the
-smallest cache size is 75 MiB. On the whole, the average cache size of the
-four runs looks the same and is between 197 MiB and 211 MiB. Meanwhile, the
-trend of the buffer size stays flat, with a minimum value of 7 MiB, a maximum
-value of 8 MiB, and an average value of about 7.9 MiB.
-
-Packet throughput can be measured by pktgen, which is a tool for generating
-traffic loads for network experiments. The mean packet throughput of the four
-test runs differs from 354.4 kpps to 381.8 kpps. The average number of flows
-in these tests is 240 k, and each run has a minimum number of flows of 2 and a
-maximum number of flows of 1.001 million. At the same time, the corresponding
-packet throughput differs between 305.3 kpps and 447.1 kpps. Within each of
-the four test runs, the packet throughput does not grow as the number of flows
-becomes larger.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs,
-between 0 ms and 42 ms with an average latency of less than 15 ms. The PPS
-results are not as consistent as the RTT results, for the mean packet
-throughput of the four runs differs from 354.4 kpps to 381.8 kpps.
-
-Network utilization is measured by sar, that is, the system activity reporter,
-which can display the average statistics for the time since the system was
-started. Network utilization statistics are collected during the UDP flows
-sent between the VMs using pktgen as packet generator tool. The largest total
-number of packets transmitted per second looks similar for three test runs,
-whose values change a lot, from 10 pps to 501 kpps, while the results of the
-remaining test run stay stable, with an average number of packets transmitted
-per second of 10 pps. However, the total number of packets received per second
-of the four runs looks similar, covering a wide range from 2 pps to 815 kpps.
-
-In some test runs, when running with less than approx. 251000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is, however,
-no significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 251000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The lost amount of packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on LF POD1_ with:
-
-* Apex
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
-
-
-fuel
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD2: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom
-in on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Ericsson POD2_ or LF POD2_ between August 25 and 29 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements average between 0.5 and 0.6 ms. A few runs
-start with a 1 - 1.5 ms RTT spike (this could be because of normal ARP
-handling). One test run has a greater RTT spike of 1.9 ms, the same one with
-the 0.7 ms average. The other runs show no similar spike at all. To be able to
-draw conclusions, more runs should be made. The SLA is set to 10 ms. The SLA
-value is used as a reference; it has not been defined by OPNFV.
-
-TC005
------
-The IO read bandwidth looks similar between different dates, with an average
-between approx. 170 and 200 MB/s. Within each test run the results vary, with
-a minimum of 2 MB/s and a maximum of 838 MB/s overall. Most runs have a
-minimum BW of 3 MB/s (two runs at 2 MB/s). The maximum BW varies more in
-absolute numbers between the dates, between 617 and 838 MB/s. The SLA is set
-to 400 MB/s. The SLA value is used as a reference; it has not been defined by
-OPNFV.
-
-TC010
------
-The measurements for memory latency are similar between test dates and result
-in approx. 1.2 ns. The variations within each test run are similar, between
-1.215 and 1.219 ns. One exception is February 16, where the average is
-1.222 ns and varies between 1.22 and 1.28 ns. The SLA is set to 30 ns. The SLA
-value is used as a reference; it has not been defined by OPNFV.
-
-TC011
------
-Packet delay variation between 2 VMs on different blades is measured using
-Iperf3. On the first date the reported packet delay variation varies between
-0.0025 and 0.011 ms, with an average delay variation of 0.0067 ms.
-On the second date the delay variation varies between 0.002 and 0.006 ms, with
-an average delay variation of 0.004 ms.
-
-TC012
------
-Between test dates, the average measurements for memory bandwidth vary between
-17.4 and 17.9 GB/s. Within each test run the results vary more, with a minimum
-BW of 16.4 GB/s and a maximum of 18.2 GB/s overall. The SLA is set to 15 GB/s.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC014
------
-The Unixbench processor test run results vary between scores 3080 and 3240,
-one result each date. The overall average score is 3150. No SLA is set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs, at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15, where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs, when running with less than approx. 10000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is, however, no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS is
-also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run, Feb. 15, sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same one mentioned earlier
-for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets. The
-lost amount of packets normally ranges between 100 and 1000 per test run, but
-there are spikes in the range of 10000 lost packets as well, and even more in
-rare cases.
-
-CPU utilization statistics are collected during the UDP flows sent between the
-VMs using pktgen as packet generator tool. The average measurements for CPU
-utilization ratio vary between 1% and 2%. The peak CPU utilization ratio is
-around 7%.
-
-TC069
------
-Between test dates, the average measurements for memory bandwidth vary between
-15.5 and 25.4 GB/s. Within each test run the results vary more, with a minimum
-BW of 9.7 GB/s and a maximum of 29.5 GB/s overall. The SLA is set to 6 GB/s.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs, at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15, where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs, when running with less than approx. 10000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is, however, no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS is
-also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run, Feb. 15, sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same one mentioned earlier
-for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets. The
-lost amount of packets normally ranges between 100 and 1000 per test run, but
-there are spikes in the range of 10000 lost packets as well, and even more in
-rare cases.
-
-Memory utilization statistics are collected during the UDP flows sent between
-the VMs using pktgen as packet generator tool. The average measurements for
-memory utilization vary between 225 MB and 246 MB. The peak of memory
-utilization is around 340 MB.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs, at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15, where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs, when running with less than approx. 10000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is, however, no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS is
-also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run, Feb. 15, sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same one mentioned earlier
-for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets. The
-lost amount of packets normally ranges between 100 and 1000 per test run, but
-there are spikes in the range of 10000 lost packets as well, and even more in
-rare cases.
-
-Cache utilization statistics are collected during the UDP flows sent between
-the VMs using pktgen as packet generator tool. The average measurements for
-cache utilization vary between 205 MB and 212 MB.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs, at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15, where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs, when running with less than approx. 10000 flows, the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is, however, no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS is
-also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run, Feb. 15, sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same one mentioned earlier
-for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets. The
-lost amount of packets normally ranges between 100 and 1000 per test run, but
-there are spikes in the range of 10000 lost packets as well, and even more in
-rare cases.
-
-Network utilization statistics are collected during the UDP flows sent between
-the VMs using pktgen as packet generator tool. The total number of packets
-received per second averaged 200 kpps and the total number of packets
-transmitted per second averaged 600 kpps.
-
-Detailed test results
----------------------
-The scenario was run on Ericsson POD2_ and LF POD2_ with:
-
-* Fuel 9.0
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
-Conclusions and recommendations
--------------------------------
-The pktgen test configuration has a relatively large base effect on RTT in
-TC037 compared to TC002, where there is no background load at all: approx.
-15 ms compared to approx. 0.5 ms, which is roughly a 3000 percent difference
-in the RTT results.
-Especially RTT and throughput come out with better results than, for instance,
-the *fuel-os-nosdn-nofeature-ha* scenario does. The reason for this should
-probably be further analyzed and understood. Also of interest could be further
-analysis to find patterns and reasons for lost traffic, and to see if there
-are continuous variations where some test cases stand out with better or
-worse results than the general test case.
-
-
-
-Joid
-=====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD6: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom
-in on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Intel POD6_ between September 1 and 8 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements average between 1.01 ms and 1.88 ms. Only one
-test run reached the greatest RTT spike of 1.88 ms. Meanwhile, the smallest
-network latency is 1.01 ms, obtained on Sep. 1st. In general, the averages of
-the network latency of the four test runs are between 1.29 ms and 1.34 ms. The
-SLA is set to 10 ms. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput, which is
-measured by fio, and the greatest IO read bandwidth of the four runs is 183.65
-MB/s. The IO read bandwidth of three runs looks similar, with an average
-between 62.9 and 64.3 MB/s, except the one on Sep. 1, whose maximum storage
-throughput is only 159.1 MB/s. One of the runs has a minimum BW of 685 KB/s
-and another has a maximum BW of 183.6 MB/s. The SLA of read bandwidth is set
-to 400 MB/s, which is used as a reference; it has not been defined by OPNFV.
-
-The results of storage IOPS for the four runs look similar to each other. The
-IO read times per second of the four test runs average between 1.41 k and
-1.64 k per second, while the minimum result is only 55 times per second.
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, which is a series
-of micro benchmarks intended to measure basic operating system and hardware
-system metrics. The memory read latency of the four runs is between 1.152 ns
-and 1.179 ns on average. The variation within each test run differs: some runs
-vary over a large range and others change only a little. For example, the
-largest change is on September 8, where the memory read latency ranges from
-1.120 ns to 1.221 ns. However, the results on September 7 change very little.
-The SLA is set to 30 ns. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC011
------
-Iperf3 is a tool for evaluating the packet delay variation between 2 VMs on
-different blades. The reported packet delay variations of the four test runs
-differ from each other. In general, the packet delay of the first two runs
-looks similar, as both stay stable within each run, and their mean packet
-delays are 0.0087 ms and 0.0127 ms respectively. Of the four runs, the fourth
-has the worst result, with the packet delay reaching 0.0187 ms. The SLA value
-is set to 10 ms. The SLA value is used as a reference; it has not been defined
-by OPNFV.
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. Among the four test runs, the trends of
-three memory bandwidth results look similar, all within a narrow range, and
-the average result is 11.78 GB/s. Here the SLA is set to 15 GB/s. The SLA
-value is used as a reference; it has not been defined by OPNFV.
-
-TC014
------
-Unixbench is used to evaluate the IaaS processing speed with regard to the
-scores of single CPU running and parallel running. It can be seen from the
-dashboard that the processing test results vary from scores of 3260k to 3328k,
-with only one result per date. No SLA is set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The mean packet throughput of the four test runs is between 307.3 kpps and
-447.1 kpps, of which the result of the third run is the highest. The RTT
-results of all the test runs stay flat at approx. 15 ms. It is obvious that
-the PPS results are not as consistent as the RTT results.
-
-The number of flows in the four test runs is 240 k on average and the PPS
-results look somewhat uneven, since the largest packet throughput is 418.1
-kpps and the minimum throughput is 326.5 kpps.
-
-There are no errors of packets received in the four runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the four
-runs have a similar average value, approx. 15 ms.
-
-CPU load is measured by mpstat, and the CPU load of the four test runs looks
-somewhat similar, since the minimum and peak CPU load are between 0 percent
-and nine percent respectively. The best result is obtained on Sep. 1, with a
-CPU load of nine percent. But on the whole, the CPU load is very low, since
-the average value is quite small.
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the memory write bandwidth
-tends to become larger first and then smaller within every test run, ranging
-from 21.9 GB/s up to 25.9 GB/s and then down to 17.8 GB/s on average. Since
-the test id is one, only the INT memory write bandwidth is tested. On the
-whole, when the block size is 2 kb or 16 kb, the memory write bandwidth looks
-similar, with a minimum BW of 24.8 GB/s and a peak value of 27.8 GB/s. Then,
-as the block size becomes larger, the memory write bandwidth tends to
-decrease. The SLA is set to 7 GB/s. The SLA value is used as a reference; it
-has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up and result in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test
-runs look similar to each other; within these test runs, the maximum RTT can
-reach 39 ms and the average RTT is usually approx. 15 ms. The network latency
-tested on Sep. 1 and Sep. 8 shows a peak latency of 39 ms. But on the whole,
-the average RTTs of the runs stay flat and the network latency is relatively
-short.
-
-Memory utilization is measured by free, which can display the amount of free
-and used memory in the system. The largest amount of used memory is 267 MiB
-for the four runs. In general, the four test runs have very large memory
-utilization, which can reach 257 MiB on average. On the other hand, the mean
-free memory of the four test runs follows a trend similar to that of the mean
-used memory. In general, the mean free memory changes from 233 MiB to 241 MiB.
-
-Packet throughput and packet loss can be measured by pktgen, which is a tool
-for generating traffic loads for network experiments. The mean packet
-throughput of the four test runs seems quite different, ranging from 305.3
-kpps to 447.1 kpps. The average number of flows in these tests is 240000, and
-each run has a minimum number of flows of 2 and a maximum number of flows of
-1.001 million. At the same time, the corresponding average packet throughput
-is between 354.4 kpps and 381.8 kpps. In summary, the PPS results seem
-consistent. Within each of the four test runs, the packet throughput does not
-grow as the number of flows becomes larger.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test runs
-look similar to each other. Within each test run, the maximum RTT is only 42
-ms and the average RTT is usually approx. 15 ms. On the whole, the average
-RTTs of the four runs keep stable and the network latency is relatively small.
-
-Cache utilization is measured by cachestat, which displays the size of the
-cache and buffer in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as the packet generator
-tool. The largest cache size is 212 MiB, which is the same for the four runs,
-and the smallest cache size is 75 MiB. On the whole, the average cache size of
-the four runs looks about the same and is between 197 MiB and 211 MiB.
-Meanwhile, the trend of the buffer size keeps flat, since it has a minimum
-value of 7 MiB and a maximum value of 8 MiB, with an average value of about
-7.9 MiB.
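-
-For reference, assuming the perf-tools cachestat script is available on the
-node, cache statistics of this kind could be sampled once per second with::
-
-    cachestat 1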
-
-Packet throughput can be measured by pktgen, a tool for generating traffic
-loads for network experiments. The mean packet throughput of the four test
-runs differs, ranging from 354.4 kpps to 381.8 kpps. The average number of
-flows in these tests is 240k, and each run has a minimum number of flows of 2
-and a maximum number of flows of 1.001 Mil. At the same time, the
-corresponding packet throughput differs between 305.3 kpps and 447.1 kpps.
-Within each of the four test runs, when the number of flows becomes larger,
-the packet throughput does not grow accordingly.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs,
-between 0 ms and 42 ms with an average latency of less than 15 ms. The PPS
-results are not as consistent as the RTT results, since the mean packet
-throughput of the four runs differs from 354.4 kpps to 381.8 kpps.
-
-Network utilization is measured by sar, the system activity reporter, which
-can display the average statistics for the time since the system was started.
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The total number of packets
-transmitted per second looks similar for three test runs, with values varying
-widely from 10 pps to 501 kpps, while the remaining test run stays stable with
-an average of 10 pps transmitted per second. However, the total number of
-packets received per second of the four runs looks similar, with a wide range
-of 2 pps to 815 kpps.
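-
-As an illustration, per-interface network utilization samples of this kind
-could be collected with sar (interval and count are illustrative)::
-
-    sar -n DEV 1 10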
-
-In some test runs when running with less than approx. 251000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is however no
-significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 251000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on Intel POD6_ with:
-
-* Joid
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
diff --git a/docs/release/results/os-odl_l2-sfc-ha.rst b/docs/release/results/os-odl_l2-sfc-ha.rst
deleted file mode 100644
index e27562cae..000000000
--- a/docs/release/results/os-odl_l2-sfc-ha.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-==================================
-Test Results for os-odl_l2-sfc-ha
-==================================
-
-.. toctree::
- :maxdepth: 2
-
-
-Fuel
-=====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD2: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom in
-on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the LF POD2_ or Ericsson POD2_ between September 16 and 20 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 0.32 ms and 1.42 ms.
-Only one test run, on Sep. 20, reaches the greatest RTT spike of 4.66 ms.
-Meanwhile, the smallest network latency is 0.16 ms, obtained on Sep. 17th. To
-sum up, the network latency curve varies very little, staying below 5 ms. The
-SLA is set to 10 ms. The SLA value is used as a reference; it has not been
-defined by OPNFV.
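-
-For reference, an RTT sample of this kind could be collected with a plain ping
-invocation along the following lines (packet count and target address are
-illustrative)::
-
-    ping -c 100 -i 0.2 <target VM IP>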
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput, which is
-measured by fio, and the greatest IO read bandwidth of the four runs is 734
-MB/s. The IO read bandwidth of the first three runs looks similar, with an
-average of less than 100 KB/s, except one on Sep. 20, whose maximum storage
-throughput reaches 734 MB/s. The read bandwidth SLA is set to 400 MB/s; it is
-used as a reference and has not been defined by OPNFV.
-
-The results of storage IOPS for the four runs look similar to each other. The
-IO read times per second of the four test runs have an average value between
-1.8k per second and 3.27k per second, while the minimum result is only 60
-times per second.
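-
-As an illustration, a sequential read throughput measurement of this kind
-could be run with fio (the test file, block size and runtime are
-illustrative)::
-
-    fio --name=readbw --rw=read --bs=4k --size=1G \
-        --filename=/tmp/fio.test --direct=1 --time_based --runtime=30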
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, a suite of micro
-benchmarks intended to measure basic operating system and hardware metrics.
-The memory read latency of the four runs is between 1.085 ns and 1.218 ns on
-average. The variations within each test run are quite small. For Ericsson
-pod2, the average memory latency is approx. 1.217 ns, while for LF pod2 the
-average value is about 1.085 ns. It can be seen that LF performs better than
-Ericsson here. The SLA is set to 30 ns. The SLA value is used as a reference;
-it has not been defined by OPNFV.
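-
-For reference, a memory read latency curve of this kind could be produced
-with lmbench's lat_mem_rd (the array size in MB and the stride in bytes are
-illustrative)::
-
-    lat_mem_rd 128 128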
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. The four test runs all have a narrow
-range of change, with an average memory read and write BW of 18.5 GB/s. The
-SLA is set to 15 GB/s. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC014
------
-Unixbench is used to evaluate the IaaS processing speed with regard to the
-scores of single CPU running and parallel running. It can be seen from the
-dashboard that the processing test results vary from scores 3209k to 3843k,
-with only one result per date. No SLA set.
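-
-As an illustration, assuming the byte-unixbench suite is checked out on the
-VM, the single-copy and parallel index scores could be produced with::
-
-    ./Run -c 1 -c $(nproc)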
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The mean packet throughput of the three test runs is between 439 kpps and
-582 kpps, and the test run on Sep. 17th has the lowest average value of 371
-kpps. The RTT results of all the test runs keep flat at approx. 10 ms. It is
-obvious that the PPS results are not as consistent as the RTT results.
-
-The number of flows in the four test runs is 240 k on average, and the PPS
-results look somewhat uneven, since the largest packet throughput is 680 kpps
-and the minimum throughput is 319 kpps.
-
-There are no packet receive errors in the four runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the four
-runs show a similar trend, with an average value of approx. 12 ms.
-
-CPU load is measured by mpstat, and the CPU load of the four test runs looks
-similar: the minimum and the peak of the CPU load are 0 percent and ten
-percent respectively. The highest value is obtained on Sep. 17th, with a CPU
-load of ten percent. On the whole, the CPU load is very low, since the average
-value is quite small.
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the average memory write
-bandwidth tends to become larger first and then smaller within every test run
-for the two pods, ranging from 25.1 GB/s to 29.4 GB/s and then down to 19.2
-GB/s on average. Since the test id is one, only the INT memory write bandwidth
-is tested. On the whole, as the block size becomes larger, the memory write
-bandwidth tends to decrease. The SLA is set to 7 GB/s. The SLA value is used
-as a reference; it has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test runs
-look similar to each other: within these runs, the maximum RTT reaches 27 ms
-and the average RTT is usually approx. 12 ms. The network latency tested on
-Sep. 27th has a peak latency of 27 ms. On the whole, the average RTTs of the
-four runs keep flat.
-
-Memory utilization is measured by free, which displays the amount of free and
-used memory in the system. The largest amount of used memory is 269 MiB for the
-four runs. In general, the four test runs have very large memory utilization,
-which can reach 251 MiB on average. On the other hand, the mean free memory of
-the four test runs follows a trend similar to that of the mean used memory.
-In general, the mean free memory changes from 231 MiB to 248 MiB.
-
-Packet throughput and packet loss can be measured by pktgen, a tool for
-generating traffic loads for network experiments. The mean packet throughput
-of the four test runs seems quite different, ranging from 371 kpps to 582
-kpps. The average number of flows in these tests is 240000, and each run has a
-minimum number of flows of 2 and a maximum number of flows of 1.001 Mil. At
-the same time, the corresponding average packet throughput is between 319 kpps
-and 680 kpps. In summary, the PPS results seem consistent. Within each of the
-four test runs, when the number of flows becomes larger, the packet throughput
-does not grow accordingly.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test runs
-look similar to each other. Within each test run, the maximum RTT is only 24
-ms and the average RTT is usually approx. 12 ms. On the whole, the average
-RTTs of the four runs keep stable and the network latency is relatively small.
-
-Cache utilization is measured by cachestat, which displays the size of the
-cache and buffer in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as the packet generator
-tool. The largest cache size is 213 MiB and the smallest cache size is 99 MiB,
-which is the same for the four runs. On the whole, the average cache size of
-the four runs looks about the same and is between 184 MiB and 205 MiB.
-Meanwhile, the trend of the buffer size keeps stable, since it has a minimum
-value of 7 MiB and a maximum value of 8 MiB.
-
-Packet throughput can be measured by pktgen, a tool for generating traffic
-loads for network experiments. The mean packet throughput of the four test
-runs differs, ranging from 371 kpps to 582 kpps. The average number of flows
-in these tests is 240k, and each run has a minimum number of flows of 2 and a
-maximum number of flows of 1.001 Mil. At the same time, the corresponding
-packet throughput differs between 319 kpps and 680 kpps. Within each of the
-four test runs, when the number of flows becomes larger, the packet throughput
-does not grow accordingly.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs,
-between 0 ms and 24 ms with an average latency of less than 13 ms. The PPS
-results are not as consistent as the RTT results, since the mean packet
-throughput of the four runs differs from 370 kpps to 582 kpps.
-
-Network utilization is measured by sar, the system activity reporter, which
-can display the average statistics for the time since the system was started.
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The total number of packets
-transmitted per second looks similar for the four test runs, with values
-varying widely from 10 pps to 697 kpps. However, the total number of packets
-received per second of three runs looks similar, with a wide range of 2 pps to
-1.497 Mpps, while the results on Sep. 18th and 20th have a much smaller
-maximum number of packets received per second, of 817 kpps.
-
-In some test runs when running with less than approx. 251000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is however no
-significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 251000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on Ericsson POD2_ and LF POD2_ with:
-
-* Fuel 9.0
-* OpenStack Mitaka
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
diff --git a/docs/release/results/os-onos-nofeature-ha.rst b/docs/release/results/os-onos-nofeature-ha.rst
deleted file mode 100644
index d8b3ace5f..000000000
--- a/docs/release/results/os-onos-nofeature-ha.rst
+++ /dev/null
@@ -1,257 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-======================================
-Test Results for os-onos-nofeature-ha
-======================================
-
-.. toctree::
- :maxdepth: 2
-
-
-Joid
-=====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD6: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom in
-on the details of each test scenario run as well.
-
-All of the test case results below are based on 5 scenario test runs, each run
-on the Intel POD6_ between September 13 and 16 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 1.50 and 1.68 ms.
-Only one test run reaches the greatest RTT spike of 2.62 ms, and the same run
-also has the smallest RTT of 1.00 ms. The other four runs have no similar
-spike at all; their minimum and average RTTs are approx. 1.06 ms and 1.32 ms.
-The SLA is set to 10 ms. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput, which is
-measured by fio, and the greatest IO read bandwidth of the four runs is 175.4
-MB/s. The IO read bandwidth of the four runs looks similar across four
-different days, with an average between 58.1 and 62.0 MB/s, except one on
-Sep. 14, whose maximum storage throughput is only 133.0 MB/s. One of the runs
-has a minimum BW of 497 KB/s and another has a maximum BW of 177.4 MB/s. The
-read bandwidth SLA is set to 400 MB/s; it is used as a reference and has not
-been defined by OPNFV.
-
-The results of storage IOPS for the five runs look similar to each other. The
-IO read times per second of the five test runs have an average value between
-1.20 K/s and 1.61 K/s, while the minimum result is only 41 times per second.
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, a suite of micro
-benchmarks intended to measure basic operating system and hardware metrics.
-The memory read latency of the five runs is between 1.146 ns and 1.172 ns on
-average. The variations within each test run are quite different: some vary
-over a large range and others change very little. For example, the largest
-change is on September 13, where the memory read latency ranges from 1.152 ns
-to 1.221 ns, whereas the results on September 14 change very little. The SLA
-is set to 30 ns. The SLA value is used as a reference; it has not been defined
-by OPNFV.
-
-TC011
------
-Iperf3 is a tool for evaluating the packet delay variation between 2 VMs on
-different blades. The reported packet delay variations of the five test runs
-differ from each other. In general, the packet delay of the first two runs
-looks similar, for they both stay stable within each run, and their mean
-packet delays are 0.07714 ms and 0.07982 ms respectively. Of the five runs,
-the third has the worst result, because the packet delay reaches 0.08384 ms.
-The trend of the remaining two runs looks the same, with average packet delays
-of 0.07808 ms and 0.07727 ms respectively. The SLA is set to 10 ms. The SLA
-value is used as a reference; it has not been defined by OPNFV.
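-
-For reference, a UDP jitter measurement of this kind could be run with iperf3
-along the following lines (addresses, bandwidth and duration are
-illustrative)::
-
-    iperf3 -s                                  # on the receiving VM
-    iperf3 -c <server VM IP> -u -b 20M -t 30   # on the sending VM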
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. Among the five test runs, the memory
-bandwidth of the last three test runs stays almost stable within each run, at
-11.64, 11.71 and 11.61 GB/s on average. However, the memory read and write
-bandwidth on Sep. 13 has a large range, from 6.68 GB/s to 11.73 GB/s. The SLA
-is set to 15 GB/s. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC014
------
-Unixbench is used to evaluate the IaaS processing speed with regard to the
-scores of single CPU running and parallel running. It can be seen from the
-dashboard that the processing test results vary from scores 3208 to 3314,
-with only one result per date. No SLA set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The mean packet throughput of the five test runs is between 259.6 kpps and
-318.4 kpps, of which the result of the second run is the highest. The RTT
-results of all the test runs keep flat at approx. 20 ms. It is obvious that the
-PPS results are not as consistent as the RTT results.
-
-The number of flows in the five test runs is 240 k on average, and the PPS
-results look somewhat uneven, since the largest packet throughput is 398.9
-kpps and the minimum throughput is 250.6 kpps.
-
-There are no packet receive errors in the five runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the five
-runs have a similar average value, between 17 ms and 22 ms, of which the worst
-RTT is 53 ms on Sep. 14th.
-
-CPU load is measured by mpstat, and the CPU load of the test runs looks
-similar: the minimum and the peak of the CPU load are 0 percent and 10
-percent respectively. The highest value is obtained on Sep. 13th, with a CPU
-load of 10 percent.
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the memory write bandwidth
-tends to become larger first and then smaller within every test run, ranging
-from 21.6 GB/s to 26.8 GB/s and then down to 18.4 GB/s on average. Since the
-test id is one, only the INT memory write bandwidth is tested. On the whole,
-when the block size is 8 kb or 16 kb, the memory write bandwidth looks
-similar, with a minimum BW of 23.0 GB/s and a peak value of 28.6 GB/s. Then,
-as the block size becomes larger, the memory write bandwidth tends to
-decrease. The SLA is set to 7 GB/s. The SLA value is used as a reference; it
-has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the five test runs
-look similar to each other: within these runs, the maximum RTT reaches 53 ms
-and the average RTT is usually approx. 18 ms. The network latency tested on
-Sep. 14 shows a peak latency of 53 ms. On the whole, the average RTTs of the
-five runs keep flat and the network latency is relatively short.
-
-Memory utilization is measured by free, which displays the amount of free and
-used memory in the system. The largest amount of used memory is 272 MiB, on
-Sep. 14. In general, the mean used memory of the five test runs follows a
-similar trend; the minimum used memory size is approx. 150 MiB and the average
-used memory size is about 250 MiB. On the other hand, the mean free memory of
-the five test runs follows a similar trend, changing from 218 MiB to 342 MiB.
-
-Packet throughput and packet loss can be measured by pktgen, a tool for
-generating traffic loads for network experiments. The mean packet throughput
-of the five test runs seems quite different, ranging from 285.29 kpps to
-297.76 kpps. The average number of flows in these tests is 240000, and each
-run has a minimum number of flows of 2 and a maximum number of flows of 1.001
-Mil. At the same time, the corresponding packet throughput differs between
-250.6 kpps and 398.9 kpps, with an average packet throughput between 277.2
-kpps and 318.4 kpps. In summary, the PPS results seem consistent. Within each
-of the five test runs, when the number of flows becomes larger, the packet
-throughput does not grow accordingly.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the five test runs
-look similar to each other. Within each test run, the maximum RTT is only 49
-ms and the average RTT is usually approx. 20 ms. On the whole, the average
-RTTs of the five runs keep stable and the network latency is relatively short.
-
-Cache utilization is measured by cachestat, which displays the size of the
-cache and buffer in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as the packet generator
-tool. The largest cache size is 215 MiB in the four runs, and the smallest
-cache size is 95 MiB. On the whole, the average cache size of the five runs
-changes little and is about 200 MiB, except the one on Sep. 14th, whose mean
-cache size is very small and stays at 102 MiB. Meanwhile, the trend of the
-buffer size keeps flat, since it has a minimum value of 7 MiB and a maximum
-value of 8 MiB, with an average value of about 7.8 MiB.
-
-Packet throughput can be measured by pktgen, a tool for generating traffic
-loads for network experiments. The mean packet throughput of the test runs
-seems quite different, ranging from 285.29 kpps to 297.76 kpps. The average
-number of flows in these tests is 239.7k, and each run has a minimum number of
-flows of 2 and a maximum number of flows of 1.001 Mil. At the same time, the
-corresponding packet throughput differs between 227.3 kpps and 398.9 kpps,
-with an average packet throughput between 277.2 kpps and 318.4 kpps. Within
-each of the five test runs, when the number of flows becomes larger, the
-packet throughput does not grow accordingly.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs,
-between 0 ms and 49 ms with an average latency of less than 22 ms. The PPS
-results are not as consistent as the RTT results, since the mean packet
-throughput of the five runs differs from 250.6 kpps to 398.9 kpps.
-
-Network utilization is measured by sar, the system activity reporter, which
-can display the average statistics for the time since the system was started.
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The total number of packets
-transmitted per second looks similar for four test runs, with values varying
-widely from 10 pps to 399 kpps, except the one on Sep. 14th, whose total
-number of packets transmitted per second keeps stable at 10 pps. Similarly,
-the total number of packets received per second looks the same for four runs,
-except the one on Sep. 14th, whose value is only 10 pps.
-
-In some test runs when running with less than approx. 90000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is however no
-significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 250000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on Intel POD6_ with:
-
-* Joid
-* OpenStack Mitaka
-* Onos Goldeneye
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
diff --git a/docs/release/results/os-onos-sfc-ha.rst b/docs/release/results/os-onos-sfc-ha.rst
deleted file mode 100644
index e52ae3d55..000000000
--- a/docs/release/results/os-onos-sfc-ha.rst
+++ /dev/null
@@ -1,517 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-
-
-===============================
-Test Results for os-onos-sfc-ha
-===============================
-
-.. toctree::
- :maxdepth: 2
-
-
-Fuel
-====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD2: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom in
-on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Ericsson POD2_ or LF POD2_ between September 5 and 10 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 0.5 and 0.6 ms.
-A few runs start with a 1 - 1.5 ms RTT spike (this could be caused by normal
-ARP handling). One test run has a greater RTT spike of 1.9 ms; it is the same
-run that has the 0.7 ms average. The other runs have no similar spike at all.
-To be able to draw conclusions, more runs should be made. The SLA is set to
-10 ms. The SLA value is used as a reference; it has not been defined by
-OPNFV.
-
-TC005
------
-The IO read bandwidth looks similar between different dates, with an
-average between approx. 170 and 200 MB/s. Within each test run the results
-vary, with a minimum of 2 MB/s and a maximum of 838 MB/s overall. Most runs
-have a minimum BW of 3 MB/s (two runs at 2 MB/s). The maximum BW varies more
-in absolute numbers between the dates, between 617 and 838 MB/s. The SLA is
-set to 400 MB/s. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC010
------
-The measurements for memory latency are similar between test dates and result
-in approx. 1.2 ns. The variations within each test run are similar, between
-1.215 and 1.219 ns. One exception is February 16, where the average is 1.222
-ns and varies between 1.22 and 1.28 ns. The SLA is set to 30 ns. The SLA value
-is used as a reference; it has not been defined by OPNFV.
-
-TC011
------
-Packet delay variation between 2 VMs on different blades is measured using
-Iperf3. On the first date the reported packet delay variation varies between
-0.0025 and 0.011 ms, with an average delay variation of 0.0067 ms.
-On the second date the delay variation varies between 0.002 and 0.006 ms, with
-an average delay variation of 0.004 ms.
-
-TC012
------
-Between test dates, the average measurements for memory bandwidth vary between
-17.4 and 17.9 GB/s. Within each test run the results vary more, with a minimum
-BW of 16.4 GB/s and a maximum of 18.2 GB/s overall. The SLA is set to 15 GB/s.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC014
------
-The Unixbench processor test run results vary between scores 3080 and 3240,
-with one result per date. The average score overall is 3150.
-No SLA set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is however no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run on Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same run as the one mentioned
-earlier for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally ranges between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in rare cases.
-
-CPU utilization statistics are collected during UDP flows sent between the VMs
-using pktgen as the packet generator tool. The average measurements for CPU
-utilization ratio vary between 1% and 2%. The peak of the CPU utilization
-ratio appears around 7%.
-
-TC069
------
-Between test dates, the average measurements for memory bandwidth vary between
-15.5 and 25.4 GB/s. Within each test run the results vary more, with a minimum
-BW of 9.7 GB/s and a maximum of 29.5 GB/s overall. The SLA is set to 6 GB/s.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is however no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run on Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same run as the one mentioned
-earlier for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally ranges between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in rare cases.
-
-Memory utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The average measurements for
-memory utilization vary between 225 MB and 246 MB. The peak of memory
-utilization appears around 340 MB.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is however no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run on Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same run as the one mentioned
-earlier for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally ranges between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in rare cases.
-
-Cache utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The average measurements for
-cache utilization vary between 205 MB and 212 MB.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs at
-approx. 15 ms. Some test runs show an increase with many flows, in the range
-towards 16 to 17 ms. One exception standing out is Feb. 15 where the average
-RTT is stable at approx. 13 ms. The PPS results are not as consistent as the
-RTT results.
-In some test runs when running with less than approx. 10000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases, by around 20 percent in the worst case.
-For the other test runs there is however no significant change to the PPS
-throughput when the number of flows is increased. In some test runs the PPS
-is also greater with 1000000 flows compared to other test runs where the PPS
-result is less with only 2 flows.
-
-The average PPS throughput in the different runs varies between 414000 and
-452000 PPS. The total amount of packets in each test run is approx. 7500000 to
-8200000 packets. One test run on Feb. 15 sticks out with a PPS average of
-558000 and approx. 1100000 packets in total (the same run as the one mentioned
-earlier for the RTT results).
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally ranges between 100 and 1000 per test run,
-but there are spikes in the range of 10000 lost packets as well, and even
-more in rare cases.
-
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The total number of packets
-received per second averaged 200 kpps, and the total number of packets
-transmitted per second averaged 600 kpps.
-
-Detailed test results
----------------------
-The scenario was run on Ericsson POD2_ and LF POD2_ with:
-
-* Fuel 9.0
-* OpenStack Mitaka
-* Onos Goldeneye
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
-Conclusions and recommendations
--------------------------------
-The pktgen test configuration has a relatively large base effect on RTT in
-TC037 compared to TC002, where there is no background load at all: approx.
-15 ms compared to approx. 0.5 ms, which is more than a 3000 percent
-difference in RTT results.
-Especially RTT and throughput come out with better results than for instance
-the *fuel-os-nosdn-nofeature-ha* scenario does. The reason for this should
-probably be further analyzed and understood. Also of interest could be
-further analysis to find patterns and reasons for lost traffic, and to see
-whether there are continuous variations where some test cases stand out with
-better or worse results than the general test case.
-
-
-Joid
-=====
-
-.. _Grafana: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
-.. _POD6: https://wiki.opnfv.org/pharos?&#community_test_labs
-
-Overview of test results
-------------------------
-
-See Grafana_ for viewing test result metrics for each respective test case. It
-is possible to choose which specific scenarios to look at, and then to zoom in
-on the details of each test scenario run as well.
-
-All of the test case results below are based on 4 scenario test runs, each run
-on the Intel POD6_ between September 8 and 11 in 2016.
-
-TC002
------
-The round-trip-time (RTT) between 2 VMs on different blades is measured using
-ping. Most test run measurements result on average between 1.35 ms and 1.57
-ms. Only one test run reaches the greatest RTT spike of 2.58 ms. Meanwhile,
-the smallest network latency is 1.11 ms, obtained on Sep. 11th. In general,
-the averages of the network latency of the four test runs are between 1.35 ms
-and 1.57 ms. The SLA is set to 10 ms. The SLA value is used as a reference;
-it has not been defined by OPNFV.
-
-TC005
------
-The IO read bandwidth actually refers to the storage throughput, which is
-measured by fio, and the greatest IO read bandwidth of the four runs is 175.4
-MB/s. The IO read bandwidth of three runs looks similar, with an average
-between 43.7 and 56.3 MB/s, except one on Sep. 8, whose maximum storage
-throughput is only 107.9 MB/s. One of the runs has a minimum BW of 478 KB/s
-and another has a maximum BW of 168.6 MB/s. The read bandwidth SLA is set to
-400 MB/s; it is used as a reference and has not been defined by OPNFV.
-
-The results of storage IOPS for the four runs look similar to each other. The
-IO read times per second of the four test runs have an average value between
-978 per second and 1.20 K/s, while the minimum result is only 36 times per
-second.
-
-TC010
------
-The tool we use to measure memory read latency is lmbench, a suite of micro
-benchmarks intended to measure basic operating system and hardware metrics.
-The memory read latency of the four runs is between 1.164 ns and 1.244 ns on
-average. The variations within each test run are quite different: some vary
-over a large range and others change very little. For example, the largest
-change is on September 10, where the memory read latency ranges from 1.128 ns
-to 1.381 ns, whereas the results on September 11 change very little. The SLA
-is set to 30 ns. The SLA value is used as a reference; it has not been
-defined by OPNFV.
-
-TC011
------
-Iperf3 is a tool for evaluating the packet delay variation between 2 VMs on
-different blades. The reported packet delay variations of the four test runs
-differ from each other. In general, the packet delay of two runs looks
-similar, for they both stay stable within each run, and their mean packet
-delays are 0.0772 ms and 0.0788 ms respectively. Of the four runs, the fourth
-has the worst result, because the packet delay reaches 0.0838 ms. The
-remaining one has a wide range, from 0.0666 ms to 0.0798 ms. The SLA is set to
-10 ms. The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC012
------
-Lmbench is also used to measure the memory read and write bandwidth, in which
-we use bw_mem to obtain the results. Among the four test runs, the trends of
-the memory bandwidth look similar: they all have a wide range, and the minimum
-and maximum results are 9.02 GB/s and 18.14 GB/s. The SLA is set to 15 GB/s.
-The SLA value is used as a reference; it has not been defined by OPNFV.
-
-TC014
------
-Unixbench is used to evaluate the IaaS processing speed with regard to the
-scores of single CPU running and parallel running. It can be seen from the
-dashboard that the processing test results vary from scores 3395 to 3475,
-with only one result per date. No SLA set.
-
-TC037
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The mean packet throughput of the four test runs is between 362.1 kpps and
-363.5 kpps, of which the result of the third run is the highest. The RTT
-results of all the test runs keep flat at approx. 17 ms. It is obvious that the
-PPS results are not as consistent as the RTT results.
-
-The number of flows in the four test runs is 240 k on average, and the PPS
-results look somewhat uneven, since the largest packet throughput is 418.1
-kpps and the minimum throughput is 326.5 kpps.
-
-There are no packet receive errors in the four runs, but there are still
-lost packets in all the test runs. The RTT values obtained by ping in the four
-runs have a similar average value of approx. 17 ms, of which the worst RTT is
-39 ms on Sep. 11th.
-
-CPU load is measured by mpstat, and the CPU load of the four test runs looks
-similar: the minimum and the peak of the CPU load are 0 percent and nine
-percent respectively. The highest value is obtained on Sep. 10, with a CPU
-load of nine percent.
-
-TC069
------
-With the block size changing from 1 kb to 512 kb, the memory write bandwidth
-tends to become larger first and then smaller within every test run, ranging
-from 25.9 GB/s to 26.6 GB/s and then down to 18.1 GB/s on average. Since the
-test id is one, only the INT memory write bandwidth is tested. On the whole,
-when the block size is from 2 kb to 16 kb, the memory write bandwidth looks
-similar, with a minimum BW of 22.1 GB/s and a peak value of 28.6 GB/s. Then,
-as the block size becomes larger, the memory write bandwidth tends to
-decrease. The SLA is set to 7 GB/s. The SLA value is used as a reference; it
-has not been defined by OPNFV.
-
-TC070
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test runs
-look similar to each other: within these runs, the maximum RTT reaches 39 ms
-and the average RTT is usually approx. 17 ms. The network latency tested on
-Sep. 11 shows a peak latency of 39 ms. On the whole, the average RTTs of the
-runs keep flat and the network latency is relatively short.
-
-Memory utilization is measured by free, which displays the amount of free and
-used memory in the system. The largest amount of used memory is 270 MiB in the
-first two runs. In general, the mean used memory of two test runs shows very
-large memory utilization, reaching 264 MiB on average, while the other two
-runs have a wide range of memory usage, with a minimum value of 150 MiB and a
-maximum value of 270 MiB. On the other hand, the mean free memory of the four
-test runs follows a trend similar to that of the mean used memory. In general,
-the mean free memory changes from 220 MiB to 342 MiB.
-
-Packet throughput and packet loss can be measured by pktgen, a tool for
-generating traffic loads for network experiments. The mean packet throughput
-of the four test runs seems quite different, ranging from 326.5 kpps to 418.1
-kpps. The average number of flows in these tests is 240000, and each run has a
-minimum number of flows of 2 and a maximum number of flows of 1.001 Mil. At
-the same time, the corresponding packet throughput differs between 326.5 kpps
-and 418.1 kpps, with an average packet throughput between 361.7 kpps and 363.5
-kpps. In summary, the PPS results seem consistent. Within each of the four
-test runs, when the number of flows becomes larger, the packet throughput does
-not grow accordingly.
-
-TC071
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The network latency is measured by ping, and the results of the four test runs
-look similar to each other. Within each test run, the maximum RTT is only 47
-ms and the average RTT is usually approx. 15 ms. On the whole, the average
-RTTs of the four runs keep stable and the network latency is relatively small.
-
-Cache utilization is measured by cachestat, which displays the size of the
-cache and buffer in the system. Cache utilization statistics are collected
-during the UDP flows sent between the VMs using pktgen as the packet generator
-tool. The largest cache size is 214 MiB, which is the same for the four runs,
-and the smallest cache size is 94 MiB. On the whole, the average cache size of
-the four runs looks about the same and is between 198 MiB and 207 MiB.
-Meanwhile, the trend of the buffer size keeps flat, since it has a minimum
-value of 7 MiB and a maximum value of 8 MiB, with an average value of about
-7.9 MiB.
-
-Packet throughput can be measured by pktgen, a tool for generating traffic
-loads for network experiments. The mean packet throughput of the four test
-runs seems about the same, approx. 363 kpps. The average number of flows in
-these tests is 240k, and each run has a minimum number of flows of 2 and a
-maximum number of flows of 1.001 Mil. At the same time, the corresponding
-packet throughput differs between 327 kpps and 418 kpps, with an average
-packet throughput of about 363 kpps. Within each of the four test runs, when
-the number of flows becomes larger, the packet throughput does not grow
-accordingly.
-
-TC072
------
-The amount of packets per second (PPS) and round trip times (RTT) between 2 VMs
-on different blades are measured when increasing the amount of UDP flows sent
-between the VMs using pktgen as the packet generator tool.
-
-Round trip times and packet throughput between VMs can typically be affected by
-the amount of flows set up, resulting in higher RTT and less PPS throughput.
-
-The RTT results are similar throughout the different test dates and runs,
-between 0 ms and 47 ms with an average latency of less than 16 ms. The PPS
-results are not as consistent as the RTT results, since the mean packet
-throughput of the four runs differs from 361.7 kpps to 365.0 kpps.
-
-Network utilization is measured by sar, the system activity reporter, which
-can display the average statistics for the time since the system was started.
-Network utilization statistics are collected during UDP flows sent between the
-VMs using pktgen as the packet generator tool. The total number of packets
-transmitted per second looks similar for two test runs, with values varying
-widely from 10 pps to 432 kpps, while the results of the other test runs seem
-the same and keep stable, with an average of 10 pps transmitted per second.
-However, the total number of packets received per second of the four runs
-looks similar, with a wide range of 2 pps to 657 kpps.
-
-In some test runs when running with less than approx. 250000 flows the PPS
-throughput is normally flatter compared to when running with more flows, after
-which the PPS throughput decreases. For the other test runs there is however no
-significant change to the PPS throughput when the number of flows is
-increased. In some test runs the PPS is also greater with 250000 flows
-compared to other test runs where the PPS result is less with only 2 flows.
-
-There are lost packets reported in most of the test runs. There is no observed
-correlation between the amount of flows and the amount of lost packets.
-The amount of lost packets normally differs a lot per test run.
-
-Detailed test results
----------------------
-The scenario was run on Intel POD6_ with:
-
-* Joid
-* OpenStack Mitaka
-* Onos Goldeneye
-* OpenVirtualSwitch 2.5.90
-* OpenDayLight Beryllium
-
-Rationale for decisions
------------------------
-Pass
-
-Conclusions and recommendations
--------------------------------
-Tests were successfully executed and metrics collected.
-No SLA was verified. To be decided on in next release of OPNFV.
-
diff --git a/docs/release/results/overview.rst b/docs/release/results/overview.rst
index b4a050545..9fd74797c 100644
--- a/docs/release/results/overview.rst
+++ b/docs/release/results/overview.rst
@@ -42,55 +42,31 @@ environment, features or test framework.
The list of scenarios supported by each installer can be described as follows:
-+-------------------------+---------+---------+---------+---------+
-| Scenario | Apex | Compass | Fuel | Joid |
-+=========================+=========+=========+=========+=========+
-| os-nosdn-nofeature-noha | | | X | X |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-nofeature-ha | X | X | X | X |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-nofeature-ha | X | X | X | X |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-nofeature-noha| | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l3-nofeature-ha | X | X | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l3-nofeature-noha| | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-onos-sfc-ha | X | X | X | X |
-+-------------------------+---------+---------+---------+---------+
-| os-onos-sfc-noha | | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-onos-nofeature-ha | X | X | X | X |
-+-------------------------+---------+---------+---------+---------+
-| os-onos-nofeature-noha | | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-sfc-ha | | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-sfc-noha | X | X | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-bgpvpn-ha | X | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-bgpvpn-noha | | X | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-kvm-ha | | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-kvm-noha | | X | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-ovs-ha | | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-ovs-noha | X | | X | |
-+-------------------------+---------+---------+---------+---------+
-| os-ocl-nofeature-ha | | | | |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-lxd-ha | | | | X |
-+-------------------------+---------+---------+---------+---------+
-| os-nosdn-lxd-noha | | | | X |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-fdio-noha | X | | | |
-+-------------------------+---------+---------+---------+---------+
-| os-odl_l2-moon-ha | | X | | |
-+-------------------------+---------+---------+---------+---------+
++-------------------------+------+---------+----------+------+------+-------+
+| Scenario | Apex | Compass | Fuel-arm | Fuel | Joid | Daisy |
++=========================+======+=========+==========+======+======+=======+
+| os-nosdn-nofeature-noha | X | | | | X | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-nofeature-ha | X | | X | X | X | X |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-bar-noha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-bar-ha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-bgpvpn-ha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-calipso-noha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-kvm-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl_l3-nofeature-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-sfc-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-nofeature-ha | | | | X | | X |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-ovs-ha | | | | X | | |
++-------------------------+------+---------+----------+------+------+-------+
To qualify for release, the scenarios must have deployed and been successfully
tested in four consecutive installations to establish stability of deployment
@@ -103,4 +79,4 @@ References
* IEEE Std 829-2008. "Standard for Software and System Test Documentation".
-* OPNFV Colorado release note for Yardstick.
+* OPNFV Fraser release note for Yardstick.
diff --git a/docs/release/results/results.rst b/docs/release/results/results.rst
index 04c6b9f87..c75f5ae94 100644
--- a/docs/release/results/results.rst
+++ b/docs/release/results/results.rst
@@ -2,13 +2,16 @@
.. License.
.. http://creativecommons.org/licenses/by/4.0
-Results listed by scenario
-==========================
+Results listed by test case
+===========================
-The following sections describe the yardstick results as evaluated for the
-Colorado release scenario validation runs. Each section describes the
-determined state of the specific scenario as deployed in the Colorado
-release process.
+.. _TOM: https://wiki.opnfv.org/display/testing/R+post-processing+of+the+Yardstick+results
+
+
+The following sections describe the yardstick test case results as evaluated
+for the OPNFV Fraser release scenario validation runs. Each section describes
+the determined state of the specific test case as executed in the Fraser release
+process. All test data are analyzed using the TOM_ tool.
Scenario Results
================
@@ -16,21 +19,22 @@ Scenario Results
.. _Dashboard: http://testresults.opnfv.org/grafana/dashboard/db/yardstick-main
.. _Jenkins: https://build.opnfv.org/ci/view/yardstick/
+
The following documents contain results of Yardstick test cases executed on
-OPNFV labs, triggered by OPNFV CI pipeline, documented per scenario.
+OPNFV labs, triggered by the OPNFV CI pipeline, documented per test case.
.. toctree::
:maxdepth: 1
- os-nosdn-nofeature-ha.rst
- os-nosdn-nofeature-noha.rst
- os-odl_l2-nofeature-ha.rst
- os-odl_l2-bgpvpn-ha.rst
- os-odl_l2-sfc-ha.rst
- os-nosdn-kvm-ha.rst
- os-onos-nofeature-ha.rst
- os-onos-sfc-ha.rst
+ tc002-network-latency.rst
+ tc010-memory-read-latency.rst
+ tc011-packet-delay-variation.rst
+ tc012-memory-read-write-bandwidth.rst
+ tc014-cpu-processing-speed.rst
+ tc069-memory-write-bandwidth.rst
+ tc082-context-switches-under-load.rst
+ tc083-network-throughput-between-vm.rst
Test results of executed tests are available in Dashboard_ and logs in Jenkins_.
diff --git a/docs/release/results/tc002-network-latency.rst b/docs/release/results/tc002-network-latency.rst
new file mode 100644
index 000000000..722423473
--- /dev/null
+++ b/docs/release/results/tc002-network-latency.rst
@@ -0,0 +1,317 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+======================================
+Test results for TC002 network latency
+======================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC002 verifies that network latency is within acceptable boundaries when packets travel between hosts located on the same or on different compute blades.
+Ping packets (the ICMP protocol's mandatory ECHO_REQUEST datagram) are sent from a host VM to the target VM(s) to elicit an ICMP ECHO_RESPONSE.
+
+Metric: RTT (Round Trip Time)
+Unit: ms
+
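+As a rough illustration of how such RTT numbers are obtained, latency of this
+kind can be sampled with a plain ping invocation (a sketch only; the target IP
+and packet count below are placeholders, not the values used by the CI runs):
+
+.. code-block:: console
+
+    # Send 10 ICMP ECHO_REQUESTs from the host VM to the target VM;
+    # ping reports min/avg/max RTT in ms on completion.
+    $ ping -c 10 10.0.0.5
+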
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-nosdn-ovs_dpdk-ha:huawei-pod2:compass": [0.214],
+
+ "os-odl_l2-moon-ha:huawei-pod2:compass": [0.309],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual3:compass": [0.3145],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [0.3585],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [0.3765],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual4:compass": [0.403],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [0.413],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [0.494],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [0.5715],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [0.5785],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [0.617],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [0.62],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [0.632],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [0.635],
+
+ "os-odl-bgpvpn-ha:lf-pod1:apex": [0.658],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [0.663],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [0.668],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [0.668],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [0.6815],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [0.7005],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [0.778],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [0.7825],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [0.7885],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [0.795],
+
+ "os-nosdn-ovs-noha:ericsson-virtual1:fuel": [0.8045],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [0.8335],
+
+ "os-nosdn-ovs-noha:ericsson-virtual3:fuel": [0.8755],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [0.8855],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual3:compass": [0.8895],
+
+ "os-nosdn-openbaton-ha:huawei-pod12:joid": [0.901],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual4:compass": [0.956],
+
+ "os-nosdn-lxd-noha:intel-pod5:joid": [1.131],
+
+ "os-odl_l2-moon-noha:huawei-virtual4:compass": [1.173],
+
+ "os-odl-sfc-ha:huawei-virtual8:compass": [1.2015],
+
+ "os-odl_l2-moon-noha:huawei-virtual3:compass": [1.204],
+
+ "os-nosdn-lxd-ha:intel-pod5:joid": [1.2245],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [1.2285],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [1.3055],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [1.309],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [1.313],
+
+ "os-nosdn-nofeature-noha:huawei-virtual8:compass": [1.319],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [1.3425],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [1.3475],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [1.348],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [1.432],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual9:compass": [1.442],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [1.4505],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [1.497],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [1.504],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [1.519],
+
+ "os-nosdn-nofeature-noha:intel-pod5:joid": [1.5415],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [1.5785],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [1.604],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [1.61],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [1.633],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [1.6485],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual2:compass": [1.7085],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [1.71],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [1.7955],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [1.838],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [1.88],
+
+ "os-odl_l2-moon-ha:huawei-virtual3:compass": [1.8975],
+
+ "os-nosdn-kvm-noha:huawei-virtual8:compass": [1.923],
+
+ "os-odl_l2-moon-ha:huawei-virtual4:compass": [1.944],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [1.968],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [1.986],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [2.0415],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [2.071],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [2.0855],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [2.1085],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [2.1135],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [2.234],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [2.294],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [2.304],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [2.378],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [2.397],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [2.472],
+
+ "os-nosdn-nofeature-noha:huawei-virtual1:compass": [2.603],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [2.635],
+
+ "os-odl-nofeature-noha:ericsson-virtual3:fuel": [2.9055],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [3.1295],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [3.337],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [3.634],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual1:fuel": [3.875],
+
+ "os-odl-nofeature-noha:ericsson-virtual1:fuel": [3.9655],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual4:fuel": [3.9795]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc002_scenario.png
+ :width: 800px
+ :alt: TC002 influence of scenario
+
+{
+
+ "os-odl_l2-moon-ha": [0.3415],
+
+ "os-nosdn-ovs-ha": [0.3625],
+
+ "os-nosdn-ovs_dpdk-noha": [0.378],
+
+ "os-nosdn-ovs_dpdk-ha": [0.5265],
+
+ "os-nosdn-bar-noha": [0.632],
+
+ "os-odl-bgpvpn-ha": [0.658],
+
+ "os-ovn-nofeature-noha": [0.668],
+
+ "os-odl_l3-nofeature-ha": [0.8545],
+
+ "os-nosdn-ovs-noha": [0.8575],
+
+ "os-nosdn-bar-ha": [0.903],
+
+ "os-odl-sfc-ha": [1.127],
+
+ "os-nosdn-lxd-noha": [1.131],
+
+ "os-nosdn-nofeature-ha": [1.152],
+
+ "os-odl_l2-moon-noha": [1.1825],
+
+ "os-nosdn-lxd-ha": [1.2245],
+
+ "os-odl_l3-nofeature-noha": [1.337],
+
+ "os-odl-nofeature-ha": [1.352],
+
+ "os-odl-sfc-noha": [1.4255],
+
+ "os-nosdn-kvm-noha": [1.5045],
+
+ "os-nosdn-openbaton-ha": [1.5665],
+
+ "os-nosdn-nofeature-noha": [1.729],
+
+ "os-nosdn-kvm-ha": [1.7745],
+
+ "os-odl-nofeature-noha": [3.106]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc002_pod.png
+ :width: 800px
+ :alt: TC002 influence of the POD
+
+{
+
+ "huawei-pod2": [0.3925],
+
+ "lf-pod2": [0.5315],
+
+ "lf-pod1": [0.62],
+
+ "flex-pod2": [0.795],
+
+ "huawei-pod12": [0.87],
+
+ "intel-pod5": [1.25],
+
+ "ericsson-virtual3": [1.2655],
+
+ "ericsson-pod1": [1.372],
+
+ "arm-pod5": [1.518],
+
+ "huawei-virtual4": [1.5355],
+
+ "huawei-virtual3": [1.606],
+
+ "intel-pod18": [1.6575],
+
+ "huawei-virtual8": [1.709],
+
+ "huawei-virtual2": [1.872],
+
+ "arm-pod6": [1.895],
+
+ "huawei-virtual9": [2.0745],
+
+ "huawei-virtual1": [2.495],
+
+ "ericsson-virtual2": [2.7895],
+
+ "ericsson-virtual4": [3.768],
+
+ "ericsson-virtual1": [3.8035]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc010-memory-read-latency.rst b/docs/release/results/tc010-memory-read-latency.rst
new file mode 100644
index 000000000..9a296b7a0
--- /dev/null
+++ b/docs/release/results/tc010-memory-read-latency.rst
@@ -0,0 +1,299 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+==========================================
+Test results for TC010 memory read latency
+==========================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC010 measures the memory read latency for varying memory sizes and strides.
+The test results shown below are for a memory size of 16 MB.
+
+Metric: Memory read latency
+Unit: ns
+
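+Latencies of this kind are typically collected with lmbench's ``lat_mem_rd``;
+a hedged sketch of a comparable manual run, assuming lmbench is the backing
+tool (the stride value is an illustrative choice):
+
+.. code-block:: console
+
+    # Measure read latency for arrays up to 16 MB with a 128-byte stride;
+    # output is "array_size_MB latency_ns" pairs.
+    $ lat_mem_rd 16 128
+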
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [5.3165],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [5.908],
+
+ "os-nosdn-ovs-noha:ericsson-virtual1:fuel": [6.412],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [6.545],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [6.592],
+
+ "os-nosdn-nofeature-noha:intel-pod5:joid": [6.5975],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [6.7675],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [6.7675],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [6.7945],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [6.839],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [6.9695],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual4:fuel": [7.123],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [7.289],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [7.4315],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [7.9],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-pod2:compass": [8.178],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual3:compass": [8.616],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual4:compass": [8.646],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [8.8615],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [8.87],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [8.877],
+
+ "os-odl_l2-moon-ha:huawei-pod2:compass": [8.892],
+
+ "os-nosdn-ovs-noha:ericsson-virtual3:fuel": [8.898],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [8.952],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [8.9745],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual3:compass": [9.0375],
+
+ "os-nosdn-openbaton-ha:huawei-pod12:joid": [9.083],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [9.09],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [9.094],
+
+ "os-odl_l2-moon-noha:huawei-virtual4:compass": [9.293],
+
+ "os-odl_l2-moon-noha:huawei-virtual3:compass": [9.3525],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [9.477],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [9.5445],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [9.5575],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [9.6435],
+
+ "os-nosdn-nofeature-noha:huawei-virtual1:compass": [9.68],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual4:compass": [9.728],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [9.751],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [9.8645],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [9.969],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [10.029],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [10.088],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [10.2985],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [10.318],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [10.3215],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [10.617],
+
+ "os-odl-nofeature-noha:ericsson-virtual3:fuel": [10.762],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [10.7715],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [10.866],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [10.871],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [11.1605],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [11.227],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [11.348],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [11.453],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual2:compass": [11.571],
+
+ "os-odl_l2-moon-ha:huawei-virtual3:compass": [11.5925],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [11.689],
+
+ "os-odl_l2-moon-ha:huawei-virtual4:compass": [11.8695],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [12.199],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [12.433],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [12.713],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [15.328],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [15.4265],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [15.428],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [15.545],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [15.55],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [15.6395],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [15.696],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [15.774],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [16.6455],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [16.861],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [18.071],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [18.116],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [18.8365],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [18.927],
+
+ "os-nosdn-nofeature-noha:huawei-virtual8:compass": [29.557],
+
+ "os-odl-sfc-ha:huawei-virtual8:compass": [32.492],
+
+ "os-nosdn-kvm-noha:huawei-virtual8:compass": [37.623],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [41.345],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [42.3795],
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc010_scenario.png
+ :width: 800px
+ :alt: TC010 influence of scenario
+
+{
+
+ "os-nosdn-ovs-noha": [7.9],
+
+ "os-nosdn-ovs_dpdk-noha": [8.641],
+
+ "os-nosdn-ovs_dpdk-ha": [8.6815],
+
+ "os-nosdn-openbaton-ha": [8.882],
+
+ "os-odl_l2-moon-ha": [8.948],
+
+ "os-odl_l3-nofeature-ha": [8.992],
+
+ "os-nosdn-nofeature-ha": [9.118],
+
+ "os-nosdn-nofeature-noha": [9.174],
+
+ "os-odl_l2-moon-noha": [9.312],
+
+ "os-odl_l3-nofeature-noha": [9.5535],
+
+ "os-odl-nofeature-noha": [9.673],
+
+ "os-odl-sfc-noha": [9.8385],
+
+ "os-odl-sfc-ha": [9.98],
+
+ "os-nosdn-kvm-noha": [10.088],
+
+ "os-nosdn-kvm-ha": [11.1705],
+
+ "os-nosdn-bar-ha": [12.1395],
+
+ "os-nosdn-ovs-ha": [15.3195],
+
+ "os-ovn-nofeature-noha": [15.545],
+
+ "os-odl-nofeature-ha": [16.301],
+
+ "os-nosdn-bar-noha": [16.861]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc010_pod.png
+ :width: 800px
+ :alt: TC010 influence of the POD
+
+{
+
+ "ericsson-pod1": [5.7785],
+
+ "flex-pod2": [5.908],
+
+ "ericsson-virtual1": [6.412],
+
+ "intel-pod18": [6.5905],
+
+ "intel-pod5": [6.6975],
+
+ "ericsson-virtual4": [7.183],
+
+ "ericsson-virtual2": [8.4985],
+
+ "huawei-pod2": [8.877],
+
+ "huawei-pod12": [9.091],
+
+ "ericsson-virtual3": [9.719],
+
+ "huawei-virtual4": [10.1195],
+
+ "huawei-virtual3": [10.19],
+
+ "huawei-virtual1": [10.3045],
+
+ "huawei-virtual9": [10.318],
+
+ "huawei-virtual2": [11.274],
+
+ "lf-pod1": [15.7025],
+
+ "lf-pod2": [15.8495],
+
+ "arm-pod5": [18.092],
+
+ "huawei-virtual8": [33.999],
+
+ "arm-pod6": [41.5605]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc011-packet-delay-variation.rst b/docs/release/results/tc011-packet-delay-variation.rst
new file mode 100644
index 000000000..b07ea8980
--- /dev/null
+++ b/docs/release/results/tc011-packet-delay-variation.rst
@@ -0,0 +1,262 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+=============================================
+Test results for TC011 packet delay variation
+=============================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC011 measures the packet delay variation when sending packets from one VM to another.
+
+Metric: packet delay variation (jitter)
+Unit: ms
+
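+Jitter figures like these can be reproduced with a UDP stream between the two
+VMs; a minimal sketch using iperf3 (the IP, bandwidth and duration are
+illustrative assumptions, not the CI settings):
+
+.. code-block:: console
+
+    # On the receiving VM: start the iperf3 server.
+    $ iperf3 -s
+    # On the sending VM: 20 s UDP stream at 20 Mbit/s; the final report
+    # includes the measured packet delay variation (jitter) in ms.
+    $ iperf3 -c 10.0.0.5 -u -b 20M -t 20
+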
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [2996],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [2996],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual4:compass": [2996],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [2996],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [2997],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [2997],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual3:compass": [2997],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual4:compass": [2997],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [2997],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [2997.5],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [2998],
+
+ "os-odl-sfc-ha:huawei-virtual8:compass": [2998],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [2999],
+
+ "os-odl_l2-moon-ha:huawei-virtual4:compass": [2999.5],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [3000],
+
+ "os-nosdn-nofeature-noha:huawei-virtual1:compass": [3001],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [3002],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [3002],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual3:compass": [3002],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [3002],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [3003],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [3003.5],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [3004],
+
+ "os-nosdn-kvm-noha:huawei-virtual8:compass": [3004],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [3004.5],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [3005],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [3006],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [3006.5],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [3009],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [3010],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual2:compass": [3010],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [3012],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [3017],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual4:fuel": [3017],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [3017],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [3018],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [3020],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [3021],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [3022],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [3022],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [3022],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [3022],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [3022],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [3022],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [3022],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [3022],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [3022],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [3022],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [3022],
+
+ "os-nosdn-nofeature-noha:intel-pod5:joid": [3022],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [3022],
+
+ "os-nosdn-openbaton-ha:huawei-pod12:joid": [3022],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-pod2:compass": [3022],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [3022],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [3022],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [3022],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [3022],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [3022],
+
+ "os-odl_l2-moon-ha:huawei-pod2:compass": [3022],
+
+ "os-odl_l2-moon-ha:huawei-virtual3:compass": [3022],
+
+ "os-odl_l2-moon-noha:huawei-virtual3:compass": [3022],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [3022],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [3022],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [3023],
+
+ "os-odl_l2-moon-noha:huawei-virtual4:compass": [3023],
+
+ "os-nosdn-nofeature-noha:huawei-virtual8:compass": [3024]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc011_scenario.png
+ :width: 800px
+ :alt: TC011 influence of scenario
+
+{
+
+ "os-nosdn-ovs_dpdk-noha": [2996],
+
+ "os-odl_l3-nofeature-noha": [2997],
+
+ "os-nosdn-kvm-noha": [2999],
+
+ "os-nosdn-ovs_dpdk-ha": [3002],
+
+ "os-nosdn-kvm-ha": [3014.5],
+
+ "os-odl-sfc-noha": [3018],
+
+ "os-nosdn-nofeature-noha": [3020],
+
+ "os-nosdn-openbaton-ha": [3020],
+
+ "os-nosdn-bar-ha": [3022],
+
+ "os-nosdn-bar-noha": [3022],
+
+ "os-nosdn-nofeature-ha": [3022],
+
+ "os-odl-nofeature-ha": [3022],
+
+ "os-odl-sfc-ha": [3022],
+
+ "os-odl_l2-moon-ha": [3022],
+
+ "os-odl_l2-moon-noha": [3022],
+
+ "os-odl_l3-nofeature-ha": [3022],
+
+ "os-ovn-nofeature-noha": [3022]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc011_pod.png
+ :width: 800px
+ :alt: TC011 influence of the POD
+
+{
+
+ "huawei-virtual2": [2997],
+
+ "flex-pod2": [2997.5],
+
+ "huawei-virtual3": [2998],
+
+ "huawei-virtual9": [3000],
+
+ "huawei-virtual8": [3001],
+
+ "huawei-virtual4": [3002],
+
+ "ericsson-virtual3": [3006],
+
+ "huawei-virtual1": [3007],
+
+ "ericsson-virtual2": [3009],
+
+ "intel-pod18": [3010],
+
+ "ericsson-virtual4": [3017],
+
+ "lf-pod2": [3021],
+
+ "arm-pod5": [3022],
+
+ "arm-pod6": [3022],
+
+ "ericsson-pod1": [3022],
+
+ "huawei-pod12": [3022],
+
+ "huawei-pod2": [3022],
+
+ "intel-pod5": [3022],
+
+ "lf-pod1": [3022]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc012-memory-read-write-bandwidth.rst b/docs/release/results/tc012-memory-read-write-bandwidth.rst
new file mode 100644
index 000000000..c28eb1f3c
--- /dev/null
+++ b/docs/release/results/tc012-memory-read-write-bandwidth.rst
@@ -0,0 +1,299 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+==================================================
+Test results for TC012 memory read/write bandwidth
+==================================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC012 measures the rate at which data can be read from and written to memory (this includes all levels of memory).
+In this test case, the bandwidth to read data from memory and then write data to the same memory location is measured.
+
+Metric: memory bandwidth
+Unit: MBps
+
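+Bandwidth of this kind corresponds to lmbench's ``bw_mem`` with its ``rdwr``
+operation; a hedged sketch of a comparable manual run, assuming lmbench is the
+backing tool (the 128 MB working set is an illustrative choice):
+
+.. code-block:: console
+
+    # Read each word of a 128 MB block and write it back to the same
+    # location; bw_mem prints "size_MB bandwidth_MBps".
+    $ bw_mem 128m rdwr
+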
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [23126.325],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [23123.975],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [23068.965],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [22972.46],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [22912.015],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [22911.35],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [22900.93],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [22767.56],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [22721.83],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [22511.565],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [22071.235],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [21646.415],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [20229.99],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [17491.18],
+
+ "os-nosdn-ovs-noha:ericsson-virtual1:fuel": [17474.965],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [17141.375],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [17134.99],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [17124.27],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [16599.325],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual4:fuel": [16309.13],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [16137.48],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [15960.76],
+
+ "os-nosdn-ovs-noha:ericsson-virtual3:fuel": [15685.505],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [15536.65],
+
+ "os-odl-nofeature-noha:ericsson-virtual3:fuel": [15431.795],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [15129.27],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-pod2:compass": [15125.51],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [15030.65],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [15019.89],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [15005.11],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [14975.645],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [14968.97],
+
+ "os-odl_l2-moon-ha:huawei-pod2:compass": [14968.97],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual4:compass": [14741.425],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual3:compass": [14714.28],
+
+ "os-odl_l2-moon-noha:huawei-virtual4:compass": [14674.38],
+
+ "os-odl_l2-moon-noha:huawei-virtual3:compass": [14664.12],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [14587.62],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [14539.94],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [14534.54],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [14511.925],
+
+ "os-nosdn-nofeature-noha:huawei-virtual1:compass": [14496.875],
+
+ "os-odl_l2-moon-ha:huawei-virtual3:compass": [14378.87],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [14366.69],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [14356.695],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [14341.605],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual3:compass": [14327.78],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual4:compass": [14313.81],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [14284.365],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [14157.99],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [14144.86],
+
+ "os-nosdn-openbaton-ha:huawei-pod12:joid": [14138.9],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [14117.7],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [14097.255],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [14085.675],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [14071.605],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [14059.51],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [14057.155],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [14051.945],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [14020.74],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [14017.915],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [13954.27],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [13915.87],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual2:compass": [13874.59],
+
+ "os-nosdn-nofeature-noha:intel-pod5:joid": [13812.215],
+
+ "os-odl_l2-moon-ha:huawei-virtual4:compass": [13777.59],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [13765.36],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [13559.905],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [13477.52],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [13255.17],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [13189.64],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [12718.545],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [12559.445],
+
+ "os-nosdn-nofeature-noha:huawei-virtual8:compass": [12409.66],
+
+ "os-nosdn-kvm-noha:huawei-virtual8:compass": [8832.515],
+
+ "os-odl-sfc-ha:huawei-virtual8:compass": [8823.955],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [4398.08],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [4375.75],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [4260.77],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [4259.62]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc012_scenario.png
+ :width: 800px
+ :alt: TC012 influence of scenario
+
+{
+
+ "os-ovn-nofeature-noha": [22900.93],
+
+ "os-nosdn-bar-noha": [22721.83],
+
+ "os-nosdn-ovs-ha": [22063.67],
+
+ "os-odl-nofeature-ha": [17146.05],
+
+ "os-odl-nofeature-noha": [16017.41],
+
+ "os-nosdn-ovs-noha": [16005.74],
+
+ "os-nosdn-nofeature-noha": [15290.94],
+
+ "os-nosdn-nofeature-ha": [15038.74],
+
+ "os-nosdn-bar-ha": [14972.975],
+
+ "os-odl_l2-moon-ha": [14956.955],
+
+ "os-odl_l3-nofeature-ha": [14839.21],
+
+ "os-odl-sfc-ha": [14823.48],
+
+ "os-nosdn-ovs_dpdk-ha": [14822.17],
+
+ "os-nosdn-ovs_dpdk-noha": [14725.9],
+
+ "os-odl_l2-moon-noha": [14665.4],
+
+ "os-odl_l3-nofeature-noha": [14483.09],
+
+ "os-odl-sfc-noha": [14373.21],
+
+ "os-nosdn-openbaton-ha": [14135.325],
+
+ "os-nosdn-kvm-noha": [14020.26],
+
+ "os-nosdn-kvm-ha": [13996.02]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc012_pod.png
+ :width: 800px
+ :alt: TC012 influence of the POD
+
+{
+
+ "lf-pod1": [22912.39],
+
+ "lf-pod2": [22637.67],
+
+ "flex-pod2": [20229.99],
+
+ "ericsson-virtual1": [17474.965],
+
+ "ericsson-pod1": [17127.38],
+
+ "ericsson-virtual4": [16219.97],
+
+ "ericsson-virtual2": [15652.28],
+
+ "ericsson-virtual3": [15551.26],
+
+ "huawei-pod2": [15017.2],
+
+ "huawei-virtual4": [14266.34],
+
+ "huawei-virtual1": [14233.035],
+
+ "huawei-virtual3": [14227.63],
+
+ "huawei-pod12": [14147.245],
+
+ "intel-pod18": [14058.33],
+
+ "huawei-virtual2": [13862.85],
+
+ "intel-pod5": [13280.32],
+
+ "huawei-virtual9": [12559.445],
+
+ "huawei-virtual8": [8998.02],
+
+ "arm-pod5": [4388.875],
+
+ "arm-pod6": [4260.2]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc014-cpu-processing-speed.rst b/docs/release/results/tc014-cpu-processing-speed.rst
new file mode 100644
index 000000000..34d4ad0f9
--- /dev/null
+++ b/docs/release/results/tc014-cpu-processing-speed.rst
@@ -0,0 +1,298 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+===========================================
+Test results for TC014 cpu processing speed
+===========================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC014 measures the processing speed of a single CPU using UnixBench.
+
+Metric: single CPU score
+Unit: N/A
+
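+The score is the index produced by a single-copy UnixBench run; a sketch of an
+equivalent invocation (run from the UnixBench directory; the CI run may use
+different options):
+
+.. code-block:: console
+
+    # Run UnixBench with one parallel copy to score a single CPU;
+    # the final "System Benchmarks Index Score" is the metric.
+    $ ./Run -c 1
+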
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-odl-sfc-noha:lf-pod1:apex": [3735.2],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [3725.5],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [3711],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [3708.4],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [3705.7],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [3704],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [3703.2],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [3702.8],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [3698.7],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [3654.8],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [3635.55],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [3633.2],
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [3450.3],
+
+ "os-nosdn-nofeature-noha:intel-pod5:joid": [3406.4],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [3360.4],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [3340.65],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [3208.6],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [3134.8],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [3056.2],
+
+ "os-nosdn-ovs-noha:ericsson-virtual1:fuel": [2988.9],
+
+ "os-nosdn-ovs-ha:ericsson-pod1:fuel": [2773.7],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [2645.85],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [2625.3],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual4:fuel": [2601.3],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [2590.4],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [2570.2],
+
+ "os-nosdn-ovs-noha:ericsson-virtual3:fuel": [2558.8],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [2556.5],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [2554.6],
+
+ "os-odl-nofeature-noha:ericsson-virtual3:fuel": [2536.75],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-pod2:compass": [2533.55],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [2531.85],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [2531.7],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [2531.2],
+
+ "os-odl_l2-moon-ha:huawei-pod2:compass": [2531],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [2529.6],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [2520.5],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [2481.15],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual4:compass": [2474],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual3:compass": [2472.6],
+
+ "os-odl_l2-moon-noha:huawei-virtual4:compass": [2471],
+
+ "os-odl_l2-moon-noha:huawei-virtual3:compass": [2470.6],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [2464.15],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [2455.9],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [2455.3],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [2446.85],
+
+ "os-odl_l2-moon-ha:huawei-virtual3:compass": [2444.75],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [2430.9],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [2421.3],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual4:compass": [2415.7],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [2399.4],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [2391.85],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [2391.45],
+
+ "os-nosdn-nofeature-noha:huawei-virtual1:compass": [2380.7],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [2379.6],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual3:compass": [2371.9],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [2364.6],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [2363.4],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [2362],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [2358.5],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [2358.45],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual2:compass": [2336],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [2326.6],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [2324.95],
+
+ "os-nosdn-nofeature-noha:huawei-virtual8:compass": [2320.2],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [2318.5],
+
+ "os-odl_l2-moon-ha:huawei-virtual4:compass": [2312.8],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [2311.7],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [2301.15],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [2297.7],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [2232.8],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [2232.1],
+
+ "os-nosdn-openbaton-ha:huawei-pod12:joid": [2230],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [2154],
+
+ "os-odl-sfc-ha:huawei-virtual8:compass": [2150.1],
+
+ "os-nosdn-kvm-noha:huawei-virtual8:compass": [2004.3],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [1754.5],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [1754.15],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [716.15],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [716.05]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc014_scenario.png
+ :width: 800px
+ :alt: TC014 influence of scenario
+
+{
+
+ "os-nosdn-ovs-ha": [3725.5],
+
+ "os-ovn-nofeature-noha": [3654.8],
+
+ "os-nosdn-bar-noha": [3633.2],
+
+ "os-odl-nofeature-ha": [3407.8],
+
+ "os-nosdn-ovs-noha": [2583.2],
+
+ "os-odl-nofeature-noha": [2578.9],
+
+ "os-nosdn-nofeature-noha": [2553.2],
+
+ "os-nosdn-nofeature-ha": [2532.8],
+
+ "os-odl_l2-moon-ha": [2530.5],
+
+ "os-nosdn-bar-ha": [2527],
+
+ "os-odl_l3-nofeature-ha": [2501.5],
+
+ "os-nosdn-ovs_dpdk-noha": [2473.65],
+
+ "os-odl-sfc-ha": [2472.9],
+
+ "os-odl_l2-moon-noha": [2470.8],
+
+ "os-nosdn-ovs_dpdk-ha": [2461.9],
+
+ "os-odl_l3-nofeature-noha": [2442.8],
+
+ "os-nosdn-kvm-noha": [2392.9],
+
+ "os-odl-sfc-noha": [2370.5],
+
+ "os-nosdn-kvm-ha": [2358.5],
+
+ "os-nosdn-openbaton-ha": [2231.8]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc014_pod.png
+ :width: 800px
+ :alt: TC014 influence of the POD
+
+{
+
+ "lf-pod2": [3723.95],
+
+ "lf-pod1": [3669],
+
+ "intel-pod5": [3388.6],
+
+ "intel-pod18": [3298.4],
+
+ "flex-pod2": [3208.6],
+
+ "ericsson-virtual1": [2988.9],
+
+ "ericsson-pod1": [2669.1],
+
+ "ericsson-virtual4": [2598.5],
+
+ "ericsson-virtual3": [2553.15],
+
+ "huawei-pod2": [2531.2],
+
+ "ericsson-virtual2": [2526.9],
+
+ "huawei-virtual4": [2407.4],
+
+ "huawei-virtual3": [2374.6],
+
+ "huawei-virtual2": [2326.4],
+
+ "huawei-virtual9": [2324.95],
+
+ "huawei-virtual1": [2302.6],
+
+ "huawei-pod12": [2232.2],
+
+ "huawei-virtual8": [2085.3],
+
+ "arm-pod5": [1754.4],
+
+ "arm-pod6": [716.15]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc069-memory-write-bandwidth.rst b/docs/release/results/tc069-memory-write-bandwidth.rst
new file mode 100644
index 000000000..06e2ec922
--- /dev/null
+++ b/docs/release/results/tc069-memory-write-bandwidth.rst
@@ -0,0 +1,300 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+=============================================
+Test results for TC069 memory write bandwidth
+=============================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC069 measures the maximum possible cache and memory performance while reading and writing
+blocks of data (starting from 1 KB and increasing in powers of 2) continuously through the ALU
+and FPU respectively. It measures different aspects of memory performance via synthetic
+simulations. Each simulation consists of four operations (Copy, Scale, Add, Triad).
+The test results shown below are for writing a 32 MB integer block.
+
+Metric: memory write bandwidth
+Unit: MBps
+
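+These figures correspond to a RAMspeed-style integer Copy/Scale/Add/Triad run;
+a hedged sketch using the SMP build (the benchmark ID and per-array size below
+are assumptions chosen to mirror the 32 MB integer case):
+
+.. code-block:: console
+
+    # Benchmark 3 (INTmem) runs the integer Copy, Scale, Add and Triad
+    # passes; -m sets the per-array size in MB.
+    $ ramsmp -b 3 -m 32
+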
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-noha:intel-pod18:joid": [20113.395],
+
+ "os-nosdn-openbaton-ha:intel-pod18:joid": [19183.58],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [17851.35],
+
+ "os-nosdn-nofeature-noha:intel-pod5:joid": [16312.37],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [15633.245],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [13332.065],
+
+ "os-odl-nofeature-ha:arm-pod6:fuel": [13327.02],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [9462.74],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [9384.585],
+
+ "os-odl-nofeature-ha:ericsson-pod1:fuel": [9235.98],
+
+ "os-nosdn-nofeature-noha:huawei-pod12:joid": [9213.6],
+
+ "os-nosdn-openbaton-ha:huawei-pod12:joid": [9152.18],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [9079.45],
+
+ "os-odl_l2-moon-ha:huawei-pod2:compass": [9071.13],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [9068.06],
+
+ "os-odl-sfc-ha:huawei-pod2:compass": [9031.24],
+
+ "os-odl_l3-nofeature-ha:huawei-pod2:compass": [9019.53],
+
+ "os-nosdn-bar-ha:huawei-pod2:compass": [8977.3],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-pod2:compass": [8960.635],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [8825.805],
+
+ "os-nosdn-kvm-ha:huawei-pod2:compass": [8282.75],
+
+ "os-odl_l2-moon-noha:huawei-virtual4:compass": [8116.33],
+
+ "os-nosdn-ovs-noha:ericsson-virtual4:fuel": [8083.97],
+
+ "os-odl_l2-moon-noha:huawei-virtual3:compass": [8083.52],
+
+ "os-nosdn-nofeature-noha:huawei-virtual3:compass": [7799.145],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual3:compass": [7776.12],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual3:compass": [7680.37],
+
+ "os-nosdn-ovs-noha:ericsson-virtual1:fuel": [7615.97],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual4:fuel": [7612.62],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual2:fuel": [7518.62],
+
+ "os-nosdn-nofeature-noha:huawei-virtual2:compass": [7489.67],
+
+ "os-nosdn-ovs-noha:ericsson-virtual2:fuel": [7478.57],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual4:compass": [7465.82],
+
+ "os-nosdn-kvm-noha:huawei-virtual3:compass": [7443.16],
+
+ "os-odl-nofeature-noha:ericsson-virtual4:fuel": [7442.855],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [7440.65],
+
+ "os-odl-sfc-noha:huawei-virtual4:compass": [7401.16],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [7389.505],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [7385.76],
+
+ "os-nosdn-nofeature-noha:huawei-virtual1:compass": [7382.345],
+
+ "os-odl_l2-moon-ha:huawei-virtual3:compass": [7286.385],
+
+ "os-odl_l3-nofeature-noha:huawei-virtual4:compass": [7272.06],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual4:compass": [7261.73],
+
+ "os-nosdn-nofeature-noha:ericsson-virtual3:fuel": [7253.64],
+
+ "os-odl-sfc-noha:huawei-virtual3:compass": [7247.89],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual2:compass": [7214.01],
+
+ "os-nosdn-ovs_dpdk-ha:huawei-virtual3:compass": [7207.39],
+
+ "os-nosdn-ovs_dpdk-noha:huawei-virtual4:compass": [7205.565],
+
+ "os-nosdn-ovs-noha:ericsson-virtual3:fuel": [7201.005],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [7132.835],
+
+ "os-odl-nofeature-noha:ericsson-virtual3:fuel": [7117.05],
+
+ "os-odl_l3-nofeature-ha:huawei-virtual3:compass": [7064.18],
+
+ "os-odl_l2-moon-ha:huawei-virtual4:compass": [6997.295],
+
+ "os-odl-nofeature-ha:lf-pod1:apex": [6992.21],
+
+ "os-odl-sfc-ha:huawei-virtual4:compass": [6975.63],
+
+ "os-odl-nofeature-noha:lf-pod1:apex": [6972.63],
+
+ "os-nosdn-nofeature-noha:lf-pod1:apex": [6955],
+
+ "os-ovn-nofeature-noha:lf-pod1:apex": [6954.5],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [6953.35],
+
+ "os-odl-sfc-noha:lf-pod1:apex": [6951.89],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [6932.29],
+
+ "os-nosdn-nofeature-noha:huawei-virtual4:compass": [6929.54],
+
+ "os-nosdn-kvm-noha:huawei-virtual4:compass": [6921.6],
+
+ "os-nosdn-ovs-ha:lf-pod2:fuel": [6913.355],
+
+ "os-odl-nofeature-ha:lf-pod2:fuel": [6848.58],
+
+ "os-odl-sfc-ha:lf-pod1:apex": [6818.74],
+
+ "os-nosdn-bar-noha:lf-pod1:apex": [6812.16],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [6808.18],
+
+ "os-odl-nofeature-noha:ericsson-virtual2:fuel": [6807.565],
+
+ "os-nosdn-bar-ha:lf-pod1:apex": [6774.76],
+
+ "os-nosdn-bar-ha:huawei-virtual4:compass": [6759.4],
+
+ "os-nosdn-nofeature-noha:huawei-virtual8:compass": [6756.9],
+
+ "os-nosdn-bar-ha:huawei-virtual3:compass": [6543.46],
+
+ "os-nosdn-kvm-ha:huawei-virtual3:compass": [6504.34],
+
+ "os-odl-sfc-ha:huawei-virtual3:compass": [6481.005],
+
+ "os-nosdn-kvm-ha:huawei-virtual4:compass": [6461.5],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [6152.375],
+
+ "os-odl-sfc-ha:huawei-virtual8:compass": [5941.7],
+
+ "os-nosdn-kvm-noha:huawei-virtual8:compass": [4564.515]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc069_scenario.png
+ :width: 800px
+ :alt: TC069 influence of scenario
+
+{
+
+ "os-nosdn-openbaton-ha": [9187.16],
+
+ "os-odl_l2-moon-ha": [9010.57],
+
+ "os-nosdn-nofeature-ha": [8886.75],
+
+ "os-odl_l3-nofeature-ha": [8779.67],
+
+ "os-odl_l2-moon-noha": [8114.995],
+
+ "os-nosdn-ovs_dpdk-ha": [7864.07],
+
+ "os-odl_l3-nofeature-noha": [7632.11],
+
+ "os-odl-sfc-ha": [7624.67],
+
+ "os-nosdn-nofeature-noha": [7470.66],
+
+ "os-odl-nofeature-ha": [7372.23],
+
+ "os-nosdn-ovs_dpdk-noha": [7311.54],
+
+ "os-odl-sfc-noha": [7300.56],
+
+ "os-nosdn-ovs-noha": [7280.005],
+
+ "os-odl-nofeature-noha": [7162.67],
+
+ "os-nosdn-kvm-ha": [7130.775],
+
+ "os-nosdn-kvm-noha": [7041.13],
+
+ "os-ovn-nofeature-noha": [6954.5],
+
+ "os-nosdn-ovs-ha": [6913.355],
+
+ "os-nosdn-bar-ha": [6829.17],
+
+ "os-nosdn-bar-noha": [6812.16]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc069_pod.png
+ :width: 800px
+ :alt: TC069 influence of the POD
+
+{
+
+ "intel-pod18": [18871.79],
+
+ "intel-pod5": [16055.79],
+
+ "arm-pod6": [13327.02],
+
+ "flex-pod2": [9384.585],
+
+ "ericsson-pod1": [9331.535],
+
+ "huawei-pod12": [9164.88],
+
+ "huawei-pod2": [9026.52],
+
+ "huawei-virtual9": [8825.805],
+
+ "ericsson-virtual1": [7615.97],
+
+ "ericsson-virtual4": [7539.23],
+
+ "arm-pod5": [7403.38],
+
+ "huawei-virtual3": [7247.89],
+
+ "huawei-virtual2": [7205.35],
+
+ "huawei-virtual1": [7196.405],
+
+ "ericsson-virtual3": [7173.72],
+
+ "huawei-virtual4": [7131.47],
+
+ "ericsson-virtual2": [7129.08],
+
+ "lf-pod1": [6928.18],
+
+ "lf-pod2": [6875.88],
+
+ "huawei-virtual8": [5729.705]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc082-context-switches-under-load.rst b/docs/release/results/tc082-context-switches-under-load.rst
new file mode 100644
index 000000000..d8a9f5493
--- /dev/null
+++ b/docs/release/results/tc082-context-switches-under-load.rst
@@ -0,0 +1,129 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+==================================================
+Test results for TC082 context switches under load
+==================================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC082 measures various software performance events using perf.
+The test results shown below are for context-switches.
+
+Metric: context switches
+Unit: N/A
+
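+Counts of this kind can be collected directly with perf; a minimal sketch
+(the 10 s system-wide sampling window is an illustrative assumption):
+
+.. code-block:: console
+
+    # Count context switches system-wide for 10 seconds.
+    $ perf stat -e context-switches -a sleep 10
+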
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (lower is better):
+
+{
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [316],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [340],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [357.5],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [384],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [394.5],
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [435],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [476],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [518],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [863],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [871],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [1002],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [1174],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [1239],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [1430],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [1489],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [1883.5]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc082_scenario.png
+ :width: 800px
+ :alt: TC082 influence of scenario
+
+{
+
+ "os-nosdn-nofeature-ha": [505],
+
+ "os-odl-nofeature-ha": [863]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc082_pod.png
+ :width: 800px
+ :alt: TC082 influence of the POD
+
+{
+
+ "huawei-pod12": [316],
+
+ "intel-pod18": [340],
+
+ "intel-pod5": [357.5],
+
+ "ericsson-pod1": [384],
+
+ "lf-pod2": [394.5],
+
+ "lf-pod1": [435],
+
+ "flex-pod2": [476],
+
+ "huawei-pod2": [518],
+
+ "arm-pod5": [869.5],
+
+ "huawei-virtual9": [1002],
+
+ "huawei-virtual4": [1174],
+
+ "huawei-virtual3": [1239],
+
+ "huawei-virtual2": [1430],
+
+ "huawei-virtual1": [1489],
+
+ "arm-pod6": [1883.5]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/release/results/tc083-network-throughput-between-vm.rst b/docs/release/results/tc083-network-throughput-between-vm.rst
new file mode 100644
index 000000000..f846571a5
--- /dev/null
+++ b/docs/release/results/tc083-network-throughput-between-vm.rst
@@ -0,0 +1,129 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+=====================================================
+Test results for TC083 network throughput between VMs
+=====================================================
+
+.. toctree::
+ :maxdepth: 2
+
+
+Overview of test case
+=====================
+
+TC083 measures network latency and throughput between VMs using netperf.
+The test results shown below are for UDP throughput.
+
+Metric: UDP stream throughput
+Unit: 10^6 bits/s
+
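+Throughput numbers like these come from netperf's UDP stream test; a hedged
+sketch of a comparable manual run (server IP, duration and message size are
+placeholders, not the CI settings):
+
+.. code-block:: console
+
+    # On the target VM: start the netperf server.
+    $ netserver
+    # On the source VM: 20 s UDP_STREAM test with 1024-byte messages;
+    # netperf reports throughput in 10^6 bits/s.
+    $ netperf -H 10.0.0.5 -t UDP_STREAM -l 20 -- -m 1024
+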
+
+Euphrates release
+-----------------
+
+Test results per scenario and pod (higher is better):
+
+{
+
+ "os-nosdn-nofeature-ha:lf-pod1:apex": [2204.42],
+
+ "os-nosdn-nofeature-ha:intel-pod18:joid": [1835.55],
+
+ "os-nosdn-nofeature-ha:lf-pod2:fuel": [1676.705],
+
+ "os-nosdn-nofeature-ha:intel-pod5:joid": [1612.555],
+
+ "os-nosdn-nofeature-ha:flex-pod2:apex": [1370.23],
+
+ "os-nosdn-nofeature-ha:huawei-pod12:joid": [1300.12],
+
+ "os-nosdn-nofeature-ha:huawei-pod2:compass": [1070.455],
+
+ "os-nosdn-nofeature-ha:ericsson-pod1:fuel": [1004.32],
+
+ "os-nosdn-nofeature-ha:huawei-virtual9:compass": [753.46],
+
+ "os-nosdn-nofeature-ha:huawei-virtual4:compass": [735.07],
+
+ "os-odl-nofeature-ha:arm-pod5:fuel": [531.63],
+
+ "os-nosdn-nofeature-ha:huawei-virtual3:compass": [493.985],
+
+ "os-nosdn-nofeature-ha:arm-pod5:fuel": [448.82],
+
+ "os-nosdn-nofeature-ha:arm-pod6:fuel": [193.43],
+
+ "os-nosdn-nofeature-ha:huawei-virtual1:compass": [189.99],
+
+ "os-nosdn-nofeature-ha:huawei-virtual2:compass": [80.15]
+
+}
+
+
+The influence of the scenario
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc083_scenario.png
+ :width: 800px
+ :alt: TC083 influence of scenario
+
+{
+
+ "os-nosdn-nofeature-ha": [1109.12],
+
+ "os-odl-nofeature-ha": [531.63]
+
+}
+
+
+The influence of the POD
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/tc083_pod.png
+ :width: 800px
+ :alt: TC083 influence of the POD
+
+{
+
+ "lf-pod1": [2204.42],
+
+ "intel-pod18": [1835.55],
+
+ "lf-pod2": [1676.705],
+
+ "intel-pod5": [1612.555],
+
+ "flex-pod2": [1370.23],
+
+ "huawei-pod12": [1300.12],
+
+ "huawei-pod2": [1070.455],
+
+ "ericsson-pod1": [1004.32],
+
+ "huawei-virtual9": [753.46],
+
+ "huawei-virtual4": [735.07],
+
+ "huawei-virtual3": [493.985],
+
+ "arm-pod5": [451.38],
+
+ "arm-pod6": [193.43],
+
+ "huawei-virtual1": [189.99],
+
+ "huawei-virtual2": [80.15]
+
+}
+
+
+Fraser release
+--------------
diff --git a/docs/testing/developer/devguide/devguide.rst b/docs/testing/developer/devguide/devguide.rst
index dade49b75..04d5350be 100755
--- a/docs/testing/developer/devguide/devguide.rst
+++ b/docs/testing/developer/devguide/devguide.rst
@@ -432,7 +432,8 @@ Yardstick committers and contributors to review your codes.
:width: 800px
:alt: Gerrit for code review
-You can find Yardstick people info `here <https://wiki.opnfv.org/display/yardstick/People>`_.
+You can find a list of Yardstick people `here <https://wiki.opnfv.org/display/yardstick/People>`_,
+or use the ``yardstick-reviewers`` and ``yardstick-committers`` groups in gerrit.
Modify the code under review in Gerrit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/testing/user/userguide/13-nsb-installation.rst b/docs/testing/user/userguide/13-nsb-installation.rst
index 1f6c79b0b..3e0ed0bfb 100644
--- a/docs/testing/user/userguide/13-nsb-installation.rst
+++ b/docs/testing/user/userguide/13-nsb-installation.rst
@@ -1058,40 +1058,8 @@ IxLoad
Config ``pod_ixia.yaml``
- .. code-block:: yaml
-
- nodes:
- -
- name: trafficgen_1
- role: IxNet
- ip: 1.2.1.1 #ixia machine ip
- user: user
- password: r00t
- key_filename: /root/.ssh/id_rsa
- tg_config:
- ixchassis: "1.2.1.7" #ixia chassis ip
- tcl_port: "8009" # tcl server port
- lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
- root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
- py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
- dut_result_dir: "/mnt/ixia"
- version: 8.1
- interfaces:
- xe0: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:5" # Card:port
- driver: "none"
- dpdk_port_num: 0
- local_ip: "152.16.100.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:10:64:14:00"
- xe1: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:6" # [(Card, port)]
- driver: "none"
- dpdk_port_num: 1
- local_ip: "152.40.40.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:28:28:14:00"
+ .. literalinclude:: code/pod_ixia.yaml
+ :language: yaml
for sriov/ovs_dpdk pod files, please refer to above Standalone Virtualization for ovs-dpdk/sriov configuration
@@ -1113,10 +1081,10 @@ IxLoad
IxNetwork
---------
-1. Software needed: ``IxNetworkAPI<ixnetwork verson>Linux64.bin.tgz``
- (Download from ixia support site)
- Install - ``IxNetworkAPI<ixnetwork verson>Linux64.bin.tgz``
-2. Update pod_ixia.yaml file with ixia details.
+IxNetwork test cases use the IxNetwork API Python Bindings module, which is
+installed as part of the project's requirements.
+
+1. Update ``pod_ixia.yaml`` file with ixia details.
.. code-block:: console
@@ -1124,44 +1092,12 @@ IxNetwork
Config pod_ixia.yaml
- .. code-block:: yaml
-
- nodes:
- -
- name: trafficgen_1
- role: IxNet
- ip: 1.2.1.1 #ixia machine ip
- user: user
- password: r00t
- key_filename: /root/.ssh/id_rsa
- tg_config:
- ixchassis: "1.2.1.7" #ixia chassis ip
- tcl_port: "8009" # tcl server port
- lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
- root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
- py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
- dut_result_dir: "/mnt/ixia"
- version: 8.1
- interfaces:
- xe0: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:5" # Card:port
- driver: "none"
- dpdk_port_num: 0
- local_ip: "152.16.100.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:10:64:14:00"
- xe1: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:6" # [(Card, port)]
- driver: "none"
- dpdk_port_num: 1
- local_ip: "152.40.40.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:28:28:14:00"
+ .. literalinclude:: code/pod_ixia.yaml
+ :language: console
for sriov/ovs_dpdk pod files, please refer to above Standalone Virtualization for ovs-dpdk/sriov configuration
-3. Start IxNetwork TCL Server
+2. Start IxNetwork TCL Server
You will also need to configure the IxNetwork machine to start the IXIA
IxNetworkTclServer. This can be started like so:
@@ -1170,6 +1106,5 @@ IxNetwork
``Start->Programs->Ixia->IxNetwork->IxNetwork 7.21.893.14 GA->IxNetworkTclServer``
(or ``IxNetworkApiServer``)
-4. Execute testcase in samplevnf folder e.g.
+3. Execute the test case in the samplevnf folder, e.g.
``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml``
-
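
For reference, once ``pod_ixia.yaml`` is in place the test case referenced in
step 3 can be launched through the yardstick CLI. A minimal sketch, assuming
the CLI is installed and the IXIA chassis is reachable:

    yardstick task start \
        samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml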
diff --git a/docs/testing/user/userguide/code/pod_ixia.yaml b/docs/testing/user/userguide/code/pod_ixia.yaml
new file mode 100644
index 000000000..4ab56fe4e
--- /dev/null
+++ b/docs/testing/user/userguide/code/pod_ixia.yaml
@@ -0,0 +1,31 @@
+nodes:
+-
+ name: trafficgen_1
+ role: IxNet
+ ip: 1.2.1.1 #ixia machine ip
+ user: user
+ password: r00t
+ key_filename: /root/.ssh/id_rsa
+ tg_config:
+ ixchassis: "1.2.1.7" #ixia chassis ip
+ tcl_port: "8009" # tcl server port
+ lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
+ root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
+ py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
+ dut_result_dir: "/mnt/ixia"
+ version: 8.1
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "2:5" # Card:port
+ driver: "none"
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.0.0"
+ local_mac: "00:98:10:64:14:00"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "2:6" # [(Card, port)]
+ driver: "none"
+ dpdk_port_num: 1
+ local_ip: "152.40.40.20"
+ netmask: "255.255.0.0"
+ local_mac: "00:98:28:28:14:00"
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc088.rst b/docs/testing/user/userguide/opnfv_yardstick_tc088.rst
new file mode 100644
index 000000000..2423a6b31
--- /dev/null
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc088.rst
@@ -0,0 +1,129 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC088
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node Openstack Service High Availability - Nova Scheduler |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC088: Control node Openstack service down - |
+| | nova scheduler |
++--------------+--------------------------------------------------------------+
+|test purpose | This test case will verify the high availability of the |
+| | compute scheduler service provided by OpenStack (nova- |
+| | scheduler) on control node. |
+| | |
++--------------+--------------------------------------------------------------+
+|test method | This test case kills the processes of nova-scheduler service |
+| | on a selected control node, then checks whether the request |
+| | of the related OpenStack command is OK and the killed |
+| | processes are recovered. |
+| | |
++--------------+--------------------------------------------------------------+
+|attackers | In this test case, an attacker called "kill-process" is |
+| | needed. This attacker includes three parameters: |
+| | 1) fault_type: which is used for finding the attacker's |
+|              | scripts. It should always be set to "kill-process" in this   |
+| | test case. |
+| | 2) process_name: which is the process name of the specified |
+|              | OpenStack service. If there are multiple processes using the |
+| | same name on the host, all of them are killed by this |
+| | attacker. |
+|              | In this case, this parameter should always be set to "nova-  |
+| | scheduler". |
+| | 3) host: which is the name of a control node being attacked. |
+| | |
+| | e.g. |
+| | -fault_type: "kill-process" |
+| | -process_name: "nova-scheduler" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|monitors | In this test case, one kind of monitor is needed: |
+|              | 1. the "process" monitor checks whether a process is running |
+| | on a specific node, which needs three parameters: |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should always be set to "process"    |
+| | for this monitor. |
+|              | 2) process_name: which is the process name to monitor        |
+| | 3) host: which is the name of the node running the process |
+| | |
+| | e.g. |
+| | monitor: |
+| | -monitor_type: "process" |
+| | -process_name: "nova-scheduler" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|operations | In this test case, the following operations are needed: |
+| | 1. "nova-create-instance": create a VM instance to check |
+| | whether the nova-scheduler works normally. |
+| | |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there is one metric:                      |
+|              | 1) process_recover_time: which indicates the maximum time    |
+|              | (seconds) from the process being killed to being recovered   |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Developed by the project. Please see folder: |
+| | "yardstick/benchmark/scenarios/availability/ha_tools" |
+| | |
++--------------+--------------------------------------------------------------+
+|references | ETSI NFV REL001 |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files: |
+| | 1) test case file: opnfv_yardstick_tc088.yaml |
+| | -Attackers: see above "attackers" description |
+| | -waiting_time: which is the time (seconds) from the process |
+|              | being killed to stopping the monitors                        |
+| | -Monitors: see above "monitors" description |
+| | -SLA: see above "metrics" description |
+| | |
+|              | 2) POD file: pod.yaml                                        |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+| | the pod.yaml. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | do attacker: connect the host through SSH, and then execute |
+| | the kill process script with param value specified by |
+| | "process_name" |
+| | |
+| | Result: Process will be killed. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | start monitors: |
+|              | each monitor will run in an independent process              |
+| | |
+| | Result: The monitor info will be collected. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | create a new instance to check whether the nova scheduler |
+| | works normally. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | stop the monitor after a period of time specified by |
+| | "waiting_time" |
+| | |
+| | Result: The monitor info will be aggregated. |
+| | |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test case exits. It will check the |
+|              | status of the specified process on the host, and restart the |
+|              | process if it is not running, for the next test cases        |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc089.rst b/docs/testing/user/userguide/opnfv_yardstick_tc089.rst
new file mode 100644
index 000000000..0a8b2570b
--- /dev/null
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc089.rst
@@ -0,0 +1,129 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC089
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node Openstack Service High Availability - Nova Conductor |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC089: Control node Openstack service down - |
+| | nova conductor |
++--------------+--------------------------------------------------------------+
+|test purpose | This test case will verify the high availability of the |
+| | compute database proxy service provided by OpenStack (nova- |
+| | conductor) on control node. |
+| | |
++--------------+--------------------------------------------------------------+
+|test method | This test case kills the processes of nova-conductor service |
+| | on a selected control node, then checks whether the request |
+| | of the related OpenStack command is OK and the killed |
+| | processes are recovered. |
+| | |
++--------------+--------------------------------------------------------------+
+|attackers | In this test case, an attacker called "kill-process" is |
+| | needed. This attacker includes three parameters: |
+| | 1) fault_type: which is used for finding the attacker's |
+|              | scripts. It should always be set to "kill-process" in this   |
+| | test case. |
+| | 2) process_name: which is the process name of the specified |
+|              | OpenStack service. If there are multiple processes using the |
+| | same name on the host, all of them are killed by this |
+| | attacker. |
+|              | In this case, this parameter should always be set to "nova-  |
+| | conductor". |
+| | 3) host: which is the name of a control node being attacked. |
+| | |
+| | e.g. |
+| | -fault_type: "kill-process" |
+| | -process_name: "nova-conductor" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|monitors | In this test case, one kind of monitor is needed: |
+|              | 1. the "process" monitor checks whether a process is running |
+| | on a specific node, which needs three parameters: |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should always be set to "process"    |
+| | for this monitor. |
+|              | 2) process_name: which is the process name to monitor        |
+| | 3) host: which is the name of the node running the process |
+| | |
+| | e.g. |
+| | monitor: |
+| | -monitor_type: "process" |
+| | -process_name: "nova-conductor" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|operations | In this test case, the following operations are needed: |
+| | 1. "nova-create-instance": create a VM instance to check |
+| | whether the nova-conductor works normally. |
+| | |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there is one metric:                      |
+|              | 1) process_recover_time: which indicates the maximum time    |
+|              | (seconds) from the process being killed to being recovered   |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Developed by the project. Please see folder: |
+| | "yardstick/benchmark/scenarios/availability/ha_tools" |
+| | |
++--------------+--------------------------------------------------------------+
+|references | ETSI NFV REL001 |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files: |
+| | 1) test case file: opnfv_yardstick_tc089.yaml |
+| | -Attackers: see above "attackers" description |
+| | -waiting_time: which is the time (seconds) from the process |
+|              | being killed to stopping the monitors                        |
+| | -Monitors: see above "monitors" description |
+| | -SLA: see above "metrics" description |
+| | |
+|              | 2) POD file: pod.yaml                                        |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+| | the pod.yaml. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | do attacker: connect the host through SSH, and then execute |
+| | the kill process script with param value specified by |
+| | "process_name" |
+| | |
+| | Result: Process will be killed. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | start monitors: |
+|              | each monitor will run in an independent process              |
+| | |
+| | Result: The monitor info will be collected. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | create a new instance to check whether the nova conductor |
+| | works normally. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | stop the monitor after a period of time specified by |
+| | "waiting_time" |
+| | |
+| | Result: The monitor info will be aggregated. |
+| | |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test case exits. It will check the |
+|              | status of the specified process on the host, and restart the |
+|              | process if it is not running, for the next test cases        |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/etc/yardstick/nodes/apex_baremetal/pod.yaml b/etc/yardstick/nodes/apex_baremetal/pod.yaml
new file mode 100644
index 000000000..4b058c499
--- /dev/null
+++ b/etc/yardstick/nodes/apex_baremetal/pod.yaml
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: ip1
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node2
+ role: Controller
+ ip: ip2
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node3
+ role: Controller
+ ip: ip3
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node4
+ role: Compute
+ ip: ip4
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node5
+ role: Compute
+ ip: ip5
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node6
+ role: Opendaylight-Cluster-Leader
+ ip: ip6
+ user: heat-admin
+ key_filename: node_keyfile
diff --git a/etc/yardstick/nodes/apex_virtual/pod.yaml b/etc/yardstick/nodes/apex_virtual/pod.yaml
new file mode 100644
index 000000000..59b51d224
--- /dev/null
+++ b/etc/yardstick/nodes/apex_virtual/pod.yaml
@@ -0,0 +1,40 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: 192.0.2.15
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node2
+ role: Controller
+ ip: 192.0.2.4
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node3
+ role: Controller
+ ip: 192.0.2.6
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node4
+ role: Compute
+ ip: 192.0.2.10
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node5
+ role: Compute
+ ip: 192.0.2.14
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
diff --git a/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia b/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
index 57a83058e..1f755dc4e 100644
--- a/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
+++ b/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
@@ -26,7 +26,6 @@ nodes:
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/ixia"
version: 8.1
interfaces:
diff --git a/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml b/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
index 7250c4ce3..ef63ea04c 100644
--- a/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
+++ b/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
@@ -26,13 +26,12 @@ nodes:
user: {{gen.user}}
password: {{gen.password}}
key_filename: {{gen.key_filename}}
- tg_config:
+ tg_config:
ixchassis: "{{gen.tg_config.ixchassis}}" #ixia chassis ip
tcl_port: "{{gen.tg_config.tcl_port}}" # tcl server port
lib_path: "{{gen.tg_config.lib_path}}"
root_dir: "{{gen.tg_config.root_dir}}"
py_bin_path: "{{gen.tg_config.py_bin_path}}"
- py_lib_path: "{{gen.tg_config.py_lib_path}}"
dut_result_dir: "{{gen.tg_config.dut_result_dir}}"
version: "{{gen.tg_config.version}}"
interfaces:
diff --git a/etc/yardstick/nodes/standalone/ixia_template.yaml b/etc/yardstick/nodes/standalone/ixia_template.yaml
index 617a65162..98ed8c5c2 100644
--- a/etc/yardstick/nodes/standalone/ixia_template.yaml
+++ b/etc/yardstick/nodes/standalone/ixia_template.yaml
@@ -32,7 +32,6 @@ nodes:
lib_path: "{{gen.tg_config.lib_path}}"
root_dir: "{{gen.tg_config.root_dir}}"
py_bin_path: "{{gen.tg_config.py_bin_path}}"
- py_lib_path: "{{gen.tg_config.py_lib_path}}"
dut_result_dir: "{{gen.tg_config.dut_result_dir}}"
version: "{{gen.tg_config.version}}"
interfaces:
diff --git a/nsb_setup.sh b/nsb_setup.sh
index 50fc017d1..86796c4d4 100755
--- a/nsb_setup.sh
+++ b/nsb_setup.sh
@@ -67,8 +67,16 @@ pip install ansible==2.4.2 shade==1.22.2 docker-py==1.10.6
ANSIBLE_SCRIPTS="ansible"
+if [[ -n ${1} ]]; then
+ yardstick_docker_image="-e yardstick_docker_image=${1}"
+else
+ yardstick_docker_image=""
+fi
+
+# yardstick_docker_image is deliberately unquoted so that, when empty, it collapses to whitespace and is dropped
cd ${ANSIBLE_SCRIPTS} &&\
ansible-playbook \
-e img_property="nsb" \
+ ${yardstick_docker_image} \
-e YARD_IMG_ARCH='amd64' ${extra_args}\
-i yardstick-install-inventory.ini nsb_setup.yml
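
The new optional first argument selects the yardstick docker image passed to
the playbook. A usage sketch (the image tag is illustrative, not prescribed by
this change):

    # use the default image baked into the playbook
    ./nsb_setup.sh

    # override the image, e.g. with a locally built tag
    ./nsb_setup.sh opnfv/yardstick:local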
diff --git a/requirements.txt b/requirements.txt
index 43f0e796c..4679bc8fb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -26,6 +26,7 @@ flask==0.11.1 # BSD; OSI Approved BSD License
functools32==3.2.3.post2; python_version <= "2.7" # PSF license
futures==3.1.1;python_version=='2.7' # BSD; OSI Approved BSD License
influxdb==4.1.1 # MIT License; OSI Approved MIT License
+IxNetwork==8.40.1124.9 # MIT License; OSI Approved MIT License
jinja2schema==0.1.4 # OSI Approved BSD License
keystoneauth1==3.1.0 # OSI Approved Apache Software License
kubernetes==3.0.0a1 # OSI Approved Apache Software License
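
The pinned ``IxNetwork`` package is the low-level IxNetwork Python API that
replaces the locally installed ``py_lib_path`` bindings removed elsewhere in
this change. A minimal connection sketch; the host, port and version values
are placeholders taken from the sample pod file, not fixed by this change:

    import IxNetwork

    ixnet = IxNetwork.IxNet()
    ixnet.connect('1.2.1.7', '-port', 8009, '-version', '8.40')
    print(ixnet.getRoot())
    ixnet.disconnect()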
diff --git a/samples/test_suite.yaml b/samples/test_suite.yaml
index 9a766b06a..6f5f53b46 100644
--- a/samples/test_suite.yaml
+++ b/samples/test_suite.yaml
@@ -20,7 +20,8 @@ test_cases:
file_name: ping.yaml
-
file_name: ping-template.yaml
- task_args: '{"packetsize": "200"}'
+ task_args:
+ default: '{"packetsize": "200"}'
-
file_name: ping-template.yaml
task_args_file: "/tmp/test-args-file.json"
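
With this change ``task_args`` becomes a mapping keyed by constraint name
rather than a bare string, with ``default`` applying when no key matches. A
sketch of a per-pod override (the ``huawei-pod1`` key is hypothetical):

    -
      file_name: ping-template.yaml
      task_args:
        default: '{"packetsize": "200"}'
        huawei-pod1: '{"packetsize": "1400"}'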
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
index 192f2f89a..ba0055321 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
@@ -49,7 +49,7 @@ mode=gen
tx port=p0
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 2]
@@ -59,7 +59,7 @@ mode=gen
tx port=p1
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 3]
@@ -67,12 +67,10 @@ name=rec 0
task=0
mode=lat
rx port=p0
-lat pos=38
[core 4]
name=rec 0
task=0
mode=lat
rx port=p1
-lat pos=38
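
The payload fix above removes a duplicated 0x45 byte: the first IPv4 octet
must encode version 4 and a 5-word (20-byte) header exactly once, and the
extra byte shifted every subsequent field. A quick self-check of the
corrected header bytes (Python, values copied from the fixed line):

    import struct

    # IPv4 header portion of the corrected "pkt inline" payload
    hdr = bytes.fromhex("4500001c000100004011f77dc0a80101c0a80101")

    assert hdr[0] >> 4 == 4            # version 4
    assert (hdr[0] & 0xF) * 4 == 20    # 20-byte header

    # recompute the header checksum with the checksum word excluded
    words = struct.unpack("!10H", hdr)
    s = sum(words) - words[5]
    s = (s & 0xFFFF) + (s >> 16)
    assert (~s & 0xFFFF) == words[5]   # matches the embedded 0xF77D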
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
index 0db21b681..41c31bf82 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
@@ -61,7 +61,7 @@ mode=gen
tx port=p0
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 2]
@@ -71,7 +71,7 @@ mode=gen
tx port=p1
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 3]
@@ -81,7 +81,7 @@ mode=gen
tx port=p2
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac2} 3c fd fe 9f a5 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac2} 3c fd fe 9f a5 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 4]
@@ -91,7 +91,7 @@ mode=gen
tx port=p3
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac3} 3c fd fe 9f a5 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac3} 3c fd fe 9f a5 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 5]
@@ -99,25 +99,22 @@ name=rec 0
task=0
mode=lat
rx port=p0
-lat pos=38
[core 6]
name=rec 1
task=0
mode=lat
rx port=p1
-lat pos=38
[core 7]
name=rec 2
task=0
mode=lat
rx port=p2
-lat pos=38
[core 8]
name=rec 3
task=0
mode=lat
rx port=p3
-lat pos=38 \ No newline at end of file
+
diff --git a/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml b/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
index 9b2a152f3..aca7c2102 100644
--- a/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
+++ b/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
@@ -29,7 +29,6 @@ vnfd:vnfd-catalog:
tcl_port: '{{tg_config.tcl_port}}' # tcl server port
lib_path: '{{tg_config.lib_path}}'
root_dir: '{{tg_config.root_dir}}'
- py_lib_path: '{{tg_config.py_lib_path}}'
py_bin_path: '{{tg_config.py_bin_path}}'
dut_result_dir: '{{tg_config.dut_result_dir}}'
version: '{{tg_config.version}}'
diff --git a/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml b/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
index ad4953fce..0324bb8fd 100644
--- a/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
+++ b/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
@@ -29,7 +29,6 @@ vnfd:vnfd-catalog:
tcl_port: '{{tg_config.tcl_port}}' # tcl server port
lib_path: '{{tg_config.lib_path}}'
root_dir: '{{tg_config.root_dir}}'
- py_lib_path: '{{tg_config.py_lib_path}}'
py_bin_path: '{{tg_config.py_bin_path}}'
dut_result_dir: '{{tg_config.dut_result_dir}}'
version: '{{tg_config.version}}'
diff --git a/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml b/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
index ffbfbdec0..def8cdcef 100644
--- a/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
+++ b/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
@@ -28,7 +28,6 @@ vnfd:vnfd-catalog:
tcl_port: '{{tg_config.tcl_port}}' # tcl server port
lib_path: '{{tg_config.lib_path}}'
root_dir: '{{tg_config.root_dir}}'
- py_lib_path: '{{tg_config.py_lib_path}}'
dut_result_dir: '{{tg_config.dut_result_dir}}'
version: '{{tg_config.version}}'
vdu:
diff --git a/tests/ci/prepare_env.sh b/tests/ci/prepare_env.sh
index d7c60d48f..8b9f887b2 100755
--- a/tests/ci/prepare_env.sh
+++ b/tests/ci/prepare_env.sh
@@ -16,6 +16,7 @@
: ${EXTERNAL_NETWORK:='admin_floating_net'}
: ${USER_NAME:='ubuntu'}
: ${SSH_KEY:='/root/.ssh/id_rsa'}
+: ${DEPLOY_SCENARIO:='unknown'}
# Extract network name from EXTERNAL_NETWORK
# e.g. EXTERNAL_NETWORK='ext-net;flat;192.168.0.2;192.168.0.253;192.168.0.1;192.168.0.0/24'
@@ -62,7 +63,73 @@ verify_connectivity() {
}
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+if [ "$INSTALLER_TYPE" == "apex" ]; then
+ # check the connection
+ verify_connectivity "${INSTALLER_IP}"
+
+ pod_yaml="$YARDSTICK_REPO_DIR/etc/yardstick/nodes/apex_baremetal/pod.yaml"
+
+ # update "ip" according to the CI env
+ ssh -l root "${INSTALLER_IP}" -i ${SSH_KEY} ${ssh_options} \
+ "source /home/stack/stackrc && openstack server list -f yaml" > node_info
+
+ controller_ips=($(awk '/control/{getline; {print $2}}' < node_info | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'))
+ compute_ips=($(awk '/compute/{getline; {print $2}}' < node_info | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'))
+ odl_ip=""
+ # Get ODL's cluster default module-shard Leader IP in HA scenario
+ if [[ ${DEPLOY_SCENARIO} == os-odl-*-ha ]]; then
+ for ip in "${controller_ips[@]}";
+ do
+ if [[ "$odl_ip" ]]; then
+ break
+ fi
+ for ((i=0; i<${#controller_ips[@]}; i++));
+ do
+ ODL_STATE=$(curl -s -u admin:admin -H "Accept: application/json" -H "Content-Type: application/json" \
+ "http://"${ip}":8081/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-"${i}"-shard-default-operational,type=DistributedOperationalDatastore" \
+ | grep -o \"RaftState\"\:\"Leader\" | tr ":" "\n" | sed -n '2p' | sed 's/\"//g');
+
+ if [[ ${ODL_STATE} == "Leader" ]]; then
+ odl_ip=${ip}
+ break
+ fi
+ done;
+ done
+
+ if [[ -z "$odl_ip" ]]; then
+        echo "ERROR: Opendaylight Leader IP is empty"
+ exit 1
+ fi
+
+ elif [[ ${DEPLOY_SCENARIO} == *"odl"* ]]; then
+ odl_ip=${controller_ips[0]}
+ fi
+
+ if [[ ${controller_ips[0]} ]]; then
+ sed -i "s|ip1|${controller_ips[0]}|" "${pod_yaml}"
+ fi
+ if [[ ${controller_ips[1]} ]]; then
+ sed -i "s|ip2|${controller_ips[1]}|" "${pod_yaml}"
+ fi
+ if [[ ${controller_ips[2]} ]]; then
+ sed -i "s|ip3|${controller_ips[2]}|" "${pod_yaml}"
+ fi
+ if [[ ${compute_ips[0]} ]]; then
+ sed -i "s|ip4|${compute_ips[0]}|" "${pod_yaml}"
+ fi
+ if [[ ${compute_ips[1]} ]]; then
+ sed -i "s|ip5|${compute_ips[1]}|" "${pod_yaml}"
+ fi
+ if [[ ${odl_ip} ]]; then
+ sed -i "s|ip6|${odl_ip}|" "${pod_yaml}"
+ fi
+
+
+ # update 'key_filename' according to the CI env
+ sed -i "s|node_keyfile|${SSH_KEY}|" "${pod_yaml}"
+
+fi
if [ "$INSTALLER_TYPE" == "fuel" ]; then
# check the connection
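
The awk/grep pipeline above relies on the two-line-per-server shape of
``openstack server list -f yaml``. A toy reproduction of the extraction, with
hypothetical node names and addresses (the exact output format varies by
OpenStack release):

    cat > node_info <<'EOF'
    - Name: overcloud-controller-0
      Networks: ctlplane=192.0.2.15
    - Name: overcloud-novacompute-0
      Networks: ctlplane=192.0.2.10
    EOF
    awk '/control/{getline; {print $2}}' < node_info \
        | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'
    # prints 192.0.2.15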
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc088.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc088.yaml
new file mode 100644
index 000000000..c2f1cbe33
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc088.yaml
@@ -0,0 +1,80 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC088 config file;
+ HA test case: Control node Openstack service down - nova scheduler
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or "node1" %}
+{% set attack_process = attack_process or "nova-scheduler" %}
+{% set inst_param = inst_param or "tc088 yardstick-image yardstick-flavor internal-network" %}
+{% set inst_name = inst_name or "tc088" %}
+
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ host: {{attack_host}}
+ key: "kill-process"
+ process_name: "{{ attack_process }}"
+
+ monitors:
+ -
+ monitor_type: "process"
+ key: "service-status"
+ process_name: "{{ attack_process }}"
+ host: {{attack_host}}
+ monitor_time: 30
+ monitor_number: 3
+ sla:
+ max_recover_time: 30
+
+ operations:
+ -
+ operation_type: "general-operation"
+ key: "nova-create-instance"
+ operation_key: "nova-create-instance"
+ action_parameter:
+ serverconfig: {{inst_param}}
+ rollback_parameter:
+ serverconfig: {{inst_name}}
+
+ steps:
+ -
+ actionKey: "kill-process"
+ actionType: "attacker"
+ index: 1
+ -
+ actionKey: "service-status"
+ actionType: "monitor"
+ index: 2
+ -
+ actionKey: "nova-create-instance"
+ actionType: "operation"
+ index: 3
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: {{file}}
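
All of the Jinja defaults in this task file can be overridden at run time. A
sketch, assuming the standard yardstick CLI (the node name is illustrative):

    yardstick task start tests/opnfv/test_cases/opnfv_yardstick_tc088.yaml \
        --task-args '{"attack_host": "node2"}'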
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc089.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc089.yaml
new file mode 100644
index 000000000..d10650e03
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc089.yaml
@@ -0,0 +1,80 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC089 config file;
+ HA test case: Control node Openstack service down - nova conductor
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or "node1" %}
+{% set attack_process = attack_process or "nova-conductor" %}
+{% set inst_param = inst_param or "tc089 yardstick-image yardstick-flavor internal-network" %}
+{% set inst_name = inst_name or "tc089" %}
+
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ host: {{attack_host}}
+ key: "kill-process"
+ process_name: "{{ attack_process }}"
+
+ monitors:
+ -
+ monitor_type: "process"
+ key: "service-status"
+ process_name: "{{ attack_process }}"
+ host: {{attack_host}}
+ monitor_time: 30
+ monitor_number: 3
+ sla:
+ max_recover_time: 30
+
+ operations:
+ -
+ operation_type: "general-operation"
+ key: "nova-create-instance"
+ operation_key: "nova-create-instance"
+ action_parameter:
+ serverconfig: {{inst_param}}
+ rollback_parameter:
+ serverconfig: {{inst_name}}
+
+ steps:
+ -
+ actionKey: "kill-process"
+ actionType: "attacker"
+ index: 1
+ -
+ actionKey: "service-status"
+ actionType: "monitor"
+ index: 2
+ -
+ actionKey: "nova-create-instance"
+ actionType: "operation"
+ index: 3
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: {{file}}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
new file mode 100644
index 000000000..85ec510df
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
@@ -0,0 +1,276 @@
+##############################################################################
+## Copyright (c) 2018 Intracom Telecom and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC092 config file;
+ SDN Controller resilience in HA configuration
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or 'node6' %}
+
+scenarios:
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+ steps:
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 2
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 3
+
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ athena: athena.ODLHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+-
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ process_name: "opendaylight"
+ key: "kill-process"
+ host: {{attack_host}}
+
+ monitors:
+ - monitor_type: "process"
+ process_name: "opendaylight"
+ host: {{attack_host}}
+ key: "monitor-recovery"
+ monitor_time: 50
+ sla:
+ max_recover_time: 30
+
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "start-service"
+ host: {{attack_host}}
+ operation_key: "start-service"
+ action_parameter:
+ service: "opendaylight"
+ rollback_parameter:
+ service: "opendaylight"
+
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+
+ steps:
+
+ - actionKey: "monitor-recovery"
+ actionType: "monitor"
+ index: 1
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 2
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 3
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 4
+
+ - actionKey: "kill-process"
+ actionType: "attacker"
+ index: 5
+
+ - actionKey: "start-service"
+ actionType: "operation"
+ index: 6
+
+
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ athena: athena.ODLHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 80
+ host: athena
+ sla:
+ max_outage_time: 40
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "hermes"
+ return_parameter:
+ all: "@private_ip"
+
+ - operation_type: "general-operation"
+ key: "nova-create-instance"
+ operation_key: "nova-create-instance"
+ action_parameter:
+ serverconfig: "hermes yardstick-image yardstick-flavor test_one"
+ rollback_parameter:
+ serverconfig: "hermes"
+
+ - operation_type: "general-operation"
+ key: "add-server-to-secgroup"
+ operation_key: "add-server-to-secgroup"
+ action_parameter:
+ serverconfig: "hermes ODLHA1"
+ rollback_parameter:
+ serverconfig: "hermes ODLHA1"
+
+
+ steps:
+ - actionKey: "nova-create-instance"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "add-server-to-secgroup"
+ actionType: "operation"
+ index: 2
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 3
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 4
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ athena: athena.ODLHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+
+contexts:
+ -
+ type: Node
+ name: LF
+ file: {{file}}
+ -
+ name: ODLHA1
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+ host: athena
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ ares:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ networks:
+ test_one:
+ cidr: '10.0.1.0/24'
+ router: 'test_router'
+
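
TC092 parameterizes the attacked ODL node via ``attack_host``; the suite entry
added below to ``opnfv_os-odl-nofeature-ha_daily.yaml`` drives it the same way
it could be driven by hand:

    yardstick task start tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml \
        --task-args '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
                      "attack_host": "node6"}'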
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
new file mode 100644
index 000000000..a034471aa
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
@@ -0,0 +1,313 @@
+##############################################################################
+## Copyright (c) 2018 Intracom Telecom and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC093 config file;
+ SDN Vswitch resilience in non-HA or HA configuration
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host_cmp_one = attack_host_cmp_one or 'node4' %}
+{% set attack_host_cmp_two = attack_host_cmp_two or 'node5' %}
+{% set systemd_service_name = systemd_service_name or 'openvswitch-switch'%}
+
+scenarios:
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+ steps:
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 2
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 3
+
+
+ nodes:
+ athena: athena.ODLnoHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+
+-
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ process_name: "openvswitch"
+ key: "kill-process-cmp-one"
+ host: {{attack_host_cmp_one}}
+
+ -
+ fault_type: "kill-process"
+ process_name: "openvswitch"
+ key: "kill-process-cmp-two"
+ host: {{attack_host_cmp_two}}
+
+ monitors:
+ - monitor_type: "process"
+ process_name: "openvswitch"
+ host: {{attack_host_cmp_one}}
+ key: "monitor-recovery-cmp-one"
+ monitor_time: 50
+ sla:
+ max_recover_time: 30
+
+ - monitor_type: "process"
+ process_name: "openvswitch"
+ host: {{attack_host_cmp_two}}
+ key: "monitor-recovery-cmp-two"
+ monitor_time: 50
+ sla:
+ max_recover_time: 30
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 20
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 20
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "restart-service-cmp-one"
+ host: {{attack_host_cmp_one}}
+ operation_key: "start-service"
+ action_parameter:
+ service: {{systemd_service_name ~ " restart"}}
+ rollback_parameter:
+ service: "openvswitch"
+
+ - operation_type: "general-operation"
+ key: "restart-service-cmp-two"
+ host: {{attack_host_cmp_two}}
+ operation_key: "start-service"
+ action_parameter:
+ service: {{systemd_service_name ~ " restart"}}
+ rollback_parameter:
+ service: "openvswitch"
+
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+
+ steps:
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 2
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 3
+
+ - actionKey: "kill-process-cmp-one"
+ actionType: "attacker"
+ index: 4
+
+ - actionKey: "kill-process-cmp-two"
+ actionType: "attacker"
+ index: 5
+
+ - actionKey: "monitor-recovery-cmp-one"
+ actionType: "monitor"
+ index: 6
+
+ - actionKey: "monitor-recovery-cmp-two"
+ actionType: "monitor"
+ index: 7
+
+
+ - actionKey: "restart-service-cmp-one"
+ actionType: "operation"
+ index: 8
+
+ - actionKey: "restart-service-cmp-two"
+ actionType: "operation"
+ index: 9
+
+
+ nodes:
+ {{attack_host_cmp_one}}: {{attack_host_cmp_one}}.LF
+ {{attack_host_cmp_two}}: {{attack_host_cmp_two}}.LF
+ athena: athena.ODLnoHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 80
+ host: athena
+ sla:
+ max_outage_time: 40
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "hermes"
+ return_parameter:
+ all: "@private_ip"
+
+ - operation_type: "general-operation"
+ key: "nova-create-instance"
+ operation_key: "nova-create-instance"
+ action_parameter:
+ serverconfig: "hermes yardstick-image yardstick-flavor test_one"
+ rollback_parameter:
+ serverconfig: "hermes"
+
+ - operation_type: "general-operation"
+ key: "add-server-to-secgroup"
+ operation_key: "add-server-to-secgroup"
+ action_parameter:
+ serverconfig: "hermes ODLnoHA1"
+ rollback_parameter:
+ serverconfig: "hermes ODLnoHA1"
+
+
+ steps:
+ - actionKey: "nova-create-instance"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "add-server-to-secgroup"
+ actionType: "operation"
+ index: 2
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 3
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 4
+
+ nodes:
+ athena: athena.ODLnoHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+
+contexts:
+ -
+ type: Node
+ name: LF
+ file: {{file}}
+ -
+ name: ODLnoHA1
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+ host: athena
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ ares:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ networks:
+ test_one:
+ cidr: '10.0.1.0/24'
+ router: 'test_router'
+
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml
index 13cc710f3..f174a90e4 100644
--- a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml
@@ -62,3 +62,18 @@ test_cases:
task_args:
huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
"host": "node1"}'
+-
+ file_name: opnfv_yardstick_tc092.yaml
+ constraint:
+ installer: apex
+ task_args:
+ default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
+ "attack_host": "node6"}'
+-
+ file_name: opnfv_yardstick_tc093.yaml
+ constraint:
+ installer: apex
+ task_args:
+ default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
+ "attack_host_cmp_one": "node4","attack_host_cmp_two": "node5",
+ "systemd_service_name": "openvswitch"}'
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml
index 42a170a89..feb8a6631 100644
--- a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml
@@ -61,3 +61,11 @@ test_cases:
task_args:
default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml",
"attack_host": "node6"}'
+-
+ file_name: opnfv_yardstick_tc093.yaml
+ constraint:
+ installer: apex
+ task_args:
+ default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
+ "attack_host_cmp_one": "node4","attack_host_cmp_two": "node5",
+ "systemd_service_name": "openvswitch"}'
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_base.py b/tests/unit/network_services/vnf_generic/vnf/test_base.py
index 664373f8f..9ef6473f0 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_base.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_base.py
@@ -215,9 +215,11 @@ class TestGenericVNF(unittest.TestCase):
with self.assertRaises(TypeError) as exc:
# pylint: disable=abstract-class-instantiated
base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0])
- msg = ("Can't instantiate abstract class GenericVNF with abstract "
- "methods collect_kpi, instantiate, scale, terminate, "
- "wait_for_instantiate")
+
+ msg = ("Can't instantiate abstract class GenericVNF with abstract methods "
+ "collect_kpi, instantiate, scale, start_collect, "
+ "stop_collect, terminate, wait_for_instantiate")
+
self.assertEqual(msg, str(exc.exception))
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index efb79bf2c..ff71bed9d 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1664,42 +1664,6 @@ class TestSampleVnf(unittest.TestCase):
# test the default resource helper is MyResourceHelper, not subclass
self.assertEqual(type(sample_vnf.resource_helper), MyResourceHelper)
- def test__get_port0localip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:6414'
- result = sample_vnf._get_port0localip6()
- self.assertEqual(result, expected)
-
- def test__get_port1localip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:2814'
- result = sample_vnf._get_port1localip6()
- self.assertEqual(result, expected)
-
- def test__get_port0prefixip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '112'
- result = sample_vnf._get_port0prefixlen6()
- self.assertEqual(result, expected)
-
- def test__get_port1prefixip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '112'
- result = sample_vnf._get_port1prefixlen6()
- self.assertEqual(result, expected)
-
- def test__get_port0gateway6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:6414'
- result = sample_vnf._get_port0gateway6()
- self.assertEqual(result, expected)
-
- def test__get_port1gateway6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:2814'
- result = sample_vnf._get_port1gateway6()
- self.assertEqual(result, expected)
-
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process')
def test__start_vnf(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
@@ -1753,6 +1717,64 @@ class TestSampleVnf(unittest.TestCase):
self.assertIsNone(sample_vnf.instantiate(scenario_cfg, {}))
self.assertEqual(sample_vnf.nfvi_context, context2)
+ def test__update_collectd_options(self):
+ scenario_cfg = {'options':
+ {'collectd':
+ {'interval': 3,
+ 'plugins':
+ {'plugin3': {'param': 3}}},
+ 'vnf__0':
+ {'collectd':
+ {'interval': 2,
+ 'plugins':
+ {'plugin3': {'param': 2},
+ 'plugin2': {'param': 2}}}}}}
+ context_cfg = {'nodes':
+ {'vnf__0':
+ {'collectd':
+ {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}}}}
+ expected = {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf__0', vnfd)
+ sample_vnf._update_collectd_options(scenario_cfg, context_cfg)
+ self.assertEqual(sample_vnf.setup_helper.collectd_options, expected)
+
+ def test__update_options(self):
+ options1 = {'interval': 1,
+ 'param1': 'value1',
+ 'plugins':
+ {'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+ options2 = {'interval': 2,
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin2': {'param': 2},
+ 'plugin1': {'param': 2}}}
+ expected = {'interval': 1,
+ 'param1': 'value1',
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf._update_options(options2, options1)
+ self.assertEqual(options2, expected)
+
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
@mock.patch("yardstick.ssh.SSH")
def test_wait_for_instantiate_empty_queue(self, ssh, *args):
@@ -1788,16 +1810,6 @@ class TestSampleVnf(unittest.TestCase):
self.assertEqual(sample_vnf.wait_for_instantiate(), 0)
- def test__build_ports(self):
- vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
-
- self.assertIsNone(sample_vnf._build_ports())
- self.assertIsNotNone(sample_vnf.networks)
- self.assertIsNotNone(sample_vnf.uplink_ports)
- self.assertIsNotNone(sample_vnf.downlink_ports)
- self.assertIsNotNone(sample_vnf.my_ports)
-
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
def test_vnf_execute_with_queue_data(self, *args):
queue_size_list = [
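
The expectations in ``test__update_options`` pin down the merge semantics:
values from the second argument win, keys unique to either side survive, and
nested ``plugins`` dicts merge recursively. A sketch of a merge with that
behavior (not the actual implementation):

    def update_options(options, overrides):
        """Recursively merge overrides into options; overrides win."""
        for key, value in overrides.items():
            if isinstance(value, dict) and isinstance(options.get(key), dict):
                update_options(options[key], value)
            else:
                options[key] = value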
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index ae8319e37..692c16892 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -6,17 +6,20 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
import abc
import six
-import yardstick.common.utils as utils
+from yardstick.common import constants
+from yardstick.common import utils
class Flags(object):
"""Class to represent the status of the flags in a context"""
_FLAGS = {'no_setup': False,
- 'no_teardown': False}
+ 'no_teardown': False,
+ 'os_cloud_config': constants.OS_CLOUD_DEFAULT_CONFIG}
def __init__(self, **kwargs):
for name, value in self._FLAGS.items():
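
``os_cloud_config`` now travels with the context flags, defaulting to
``constants.OS_CLOUD_DEFAULT_CONFIG``. Assuming flags remain overridable from
the context section of a task file, as the existing ``no_setup``/``no_teardown``
flags are, a hypothetical snippet:

    context:
      name: demo
      flags:
        no_teardown: true
        os_cloud_config:
          verify: false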
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 0964b7baf..82861829e 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -325,8 +325,10 @@ class HeatContext(Context):
if not os.path.exists(self.key_filename):
SSH.gen_keys(self.key_filename)
- heat_template = HeatTemplate(self.name, self.template_file,
- self.heat_parameters)
+ heat_template = HeatTemplate(
+ self.name, template_file=self.template_file,
+ heat_parameters=self.heat_parameters,
+ os_cloud_config=self._flags.os_cloud_config)
if self.template_file is None:
self._add_resources_to_template(heat_template)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index cb171eafa..7f1136c08 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -42,29 +42,28 @@ class ProcessAttacker(BaseAttacker):
def check(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, stderr = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
if stdout:
- LOG.info("check the environment success!")
+ LOG.info("Check the environment success!")
return int(stdout.strip('\n'))
else:
- LOG.error(
- "the host environment is error, stdout:%s, stderr:%s",
- stdout, stderr)
+ LOG.error("Error checking the host environment, "
+ "stdout:%s, stderr:%s", stdout, stderr)
return False
def inject_fault(self):
with open(self.inject_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
def recover(self):
with open(self.recovery_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ exit_status, _, _ = self.connection.execute(
"sudo /bin/bash -s {0} ".format(self.service_name),
stdin=stdin_file)
if exit_status:
- LOG.info("Fail to restart service!")
+ LOG.info("Failed to restart service: %s", self.recovery_script)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d03d04420..d67a16b98 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -71,7 +71,7 @@ class BaseAttacker(object):
for attacker_cls in utils.itersubclasses(BaseAttacker):
if attacker_type == attacker_cls.__attacker_type__:
return attacker_cls
- raise RuntimeError("No such runner_type %s" % attacker_type)
+ raise RuntimeError("No such runner_type: %s" % attacker_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(attacker_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 71690c135..6cc0cb286 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -40,7 +40,7 @@ class Director(object):
nodes = self.context_cfg.get("nodes", None)
# setup attackers
if "attackers" in self.scenario_cfg["options"]:
- LOG.debug("start init attackers...")
+ LOG.debug("Start init attackers...")
attacker_cfgs = self.scenario_cfg["options"]["attackers"]
self.attackerMgr = baseattacker.AttackerMgr()
self.data = self.attackerMgr.init_attackers(attacker_cfgs,
@@ -48,19 +48,19 @@ class Director(object):
# setup monitors
if "monitors" in self.scenario_cfg["options"]:
- LOG.debug("start init monitors...")
+ LOG.debug("Start init monitors...")
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
self.monitorMgr = basemonitor.MonitorMgr(self.data)
self.monitorMgr.init_monitors(monitor_cfgs, nodes)
# setup operations
if "operations" in self.scenario_cfg["options"]:
- LOG.debug("start init operations...")
+ LOG.debug("Start init operations...")
operation_cfgs = self.scenario_cfg["options"]["operations"]
self.operationMgr = baseoperation.OperationMgr()
self.operationMgr.init_operations(operation_cfgs, nodes)
# setup result checker
if "resultCheckers" in self.scenario_cfg["options"]:
- LOG.debug("start init resultCheckers...")
+ LOG.debug("Start init resultCheckers...")
result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"]
self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
@@ -69,7 +69,7 @@ class Director(object):
if intermediate_variables is None:
intermediate_variables = {}
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionplayers.AttackerPlayer(self.attackerMgr[key], intermediate_variables)
if type == ActionType.MONITOR:
@@ -80,17 +80,17 @@ class Director(object):
if type == ActionType.OPERATION:
return actionplayers.OperationPlayer(self.operationMgr[key],
intermediate_variables)
- LOG.debug("something run when creatactionplayer")
+ LOG.debug("The type is not recognized by createActionPlayer")
def createActionRollbacker(self, type, key):
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
if type == ActionType.OPERATION:
return actionrollbackers.OperationRollbacker(
self.operationMgr[key])
- LOG.debug("no rollbacker created for %s", key)
+ LOG.debug("No rollbacker created for key: %s", key)
def verify(self):
result = True
@@ -99,7 +99,7 @@ class Director(object):
if hasattr(self, 'resultCheckerMgr'):
result &= self.resultCheckerMgr.verify()
if result:
- LOG.debug("monitors are passed")
+ LOG.debug("Monitor results are passed")
return result
def stopMonitors(self):
@@ -107,12 +107,12 @@ class Director(object):
self.monitorMgr.wait_monitors()
def knockoff(self):
- LOG.debug("knock off ....")
+ LOG.debug("Knock off ....")
while self.executionSteps:
singleStep = self.executionSteps.pop()
singleStep.rollback()
def store_result(self, result):
- LOG.debug("store result ....")
+ LOG.debug("Store result ....")
if hasattr(self, 'monitorMgr'):
self.monitorMgr.store_result(result)
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
index 858d86ca0..2388507d7 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
@@ -9,24 +9,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Start a service and check the service is started
+# Start or restart a service and check the service is started
set -e
service_name=$1
+operation=${2-start} # values are "start" or "restart"
-Distributor=$(lsb_release -a | grep "Distributor ID" | awk '{print $3}')
-
-if [ "$Distributor" != "Ubuntu" -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" ]; then
+if [ -f /usr/bin/yum -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" -a "$service_name" != "openvswitch" ]; then
service_name="openstack-"${service_name}
-elif [ "$Distributor" = "Ubuntu" -a "$service_name" = "keystone" ]; then
+elif [ -f /usr/bin/apt -a "$service_name" = "keystone" ]; then
service_name="apache2"
elif [ "$service_name" = "keystone" ]; then
service_name="httpd"
fi
if which systemctl 2>/dev/null; then
- systemctl start $service_name
+ systemctl $operation $service_name
else
- service $service_name start
+ service $service_name $operation
fi
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index 50a63f53d..f6004c774 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -103,7 +103,7 @@ class BaseMonitor(multiprocessing.Process):
for monitor in utils.itersubclasses(BaseMonitor):
if monitor_type == monitor.__monitor_type__:
return monitor
- raise RuntimeError("No such monitor_type %s" % monitor_type)
+ raise RuntimeError("No such monitor_type: %s" % monitor_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(monitor_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index d0551bf03..3b36c762d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
output = []
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -45,7 +45,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host,
defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_openstack_cmd.bash")
@@ -61,22 +61,20 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.cmd = self.cmd + " --insecure"
def monitor_func(self):
- exit_status = 0
exit_status, stdout = _execute_shell_command(self.cmd)
- LOG.debug("Execute command '%s' and the stdout is:\n%s", self.cmd, stdout)
+ LOG.debug("Executed command '%s'. "
+ "The stdout is:\n%s", self.cmd, stdout)
if exit_status:
return False
return True
def verify_SLA(self):
outage_time = self._result.get('outage_time', None)
- LOG.debug("the _result:%s", self._result)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time > max_outage_time:
LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -97,7 +95,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index dce69f45f..971bae1e9 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -29,7 +29,7 @@ class MultiMonitor(basemonitor.BaseMonitor):
monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
monitor_number = self._config.get("monitor_number", 1)
- for i in range(monitor_number):
+ for _ in range(monitor_number):
monitor_ins = monitor_cls(self._config, self._context,
self.monitor_data)
self.monitors.append(monitor_ins)
@@ -70,7 +70,8 @@ class MultiMonitor(basemonitor.BaseMonitor):
elif "max_recover_time" in self._config["sla"]:
max_outage_time = self._config["sla"]["max_recover_time"]
else:
- raise RuntimeError("monitor max_outage_time config is not found")
+ raise RuntimeError("'max_outage_time' or 'max_recover_time' "
+ "config is not found")
self._result = {"outage_time": outage_time}
if outage_time > max_outage_time:
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index b0f6f8e9d..8d2f2633c 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -25,14 +25,14 @@ class MonitorProcess(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host, defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_process_python.bash")
self.process_name = self._config["process_name"]
def monitor_func(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, _ = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.process_name),
stdin=stdin_file)
@@ -45,14 +45,12 @@ class MonitorProcess(basemonitor.BaseMonitor):
return True
def verify_SLA(self):
- LOG.debug("the _result:%s", self._result)
outage_time = self._result.get('outage_time', None)
max_outage_time = self._config["sla"]["max_recover_time"]
if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+ LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -73,7 +71,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index dcd0fe598..42941c6e7 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -34,7 +34,7 @@ class ServiceHA(base.Scenario):
"""scenario setup"""
nodes = self.context_cfg.get("nodes", None)
if nodes is None:
- LOG.error("the nodes info is none")
+ LOG.error("The nodes info is none")
return
self.attackers = []
@@ -57,17 +57,17 @@ class ServiceHA(base.Scenario):
def run(self, result):
"""execute the benchmark"""
if not self.setup_done:
- LOG.error("The setup not finished!")
+ LOG.error("The setup is not finished!")
return
self.monitorMgr.start_monitors()
- LOG.info("HA monitor start!")
+ LOG.info("Monitor '%s' start!", self.__scenario_type__)
for attacker in self.attackers:
attacker.inject_fault()
self.monitorMgr.wait_monitors()
- LOG.info("HA monitor stop!")
+ LOG.info("Monitor '%s' stop!", self.__scenario_type__)
sla_pass = self.monitorMgr.verify_SLA()
for k, v in self.data.items():
diff --git a/yardstick/benchmark/scenarios/lib/attach_volume.py b/yardstick/benchmark/scenarios/lib/attach_volume.py
index 88124964b..96dd130b1 100644
--- a/yardstick/benchmark/scenarios/lib/attach_volume.py
+++ b/yardstick/benchmark/scenarios/lib/attach_volume.py
@@ -6,30 +6,31 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
class AttachVolume(base.Scenario):
- """Attach a volmeu to an instance"""
+ """Attach a volume to an instance"""
__scenario_type__ = "AttachVolume"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.server_id = self.options.get("server_id", "TestServer")
- self.volume_id = self.options.get("volume_id", None)
+ self.server_name_or_id = self.options["server_name_or_id"]
+ self.volume_name_or_id = self.options["volume_name_or_id"]
+ self.device = self.options.get("device")
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,10 +45,14 @@ class AttachVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.attach_server_volume(self.server_id,
- self.volume_id)
+ status = openstack_utils.attach_volume_to_server(
+ self.shade_client, self.server_name_or_id, self.volume_name_or_id,
+ device=self.device, wait=self.wait, timeout=self.timeout)
+
+ if not status:
+ result.update({"attach_volume": 0})
+ LOG.error("Attach volume to server failed!")
+ raise exceptions.ScenarioAttachVolumeError
- if status:
- LOG.info("Attach volume to server successful!")
- else:
- LOG.info("Attach volume to server failed!")
+ result.update({"attach_volume": 1})
+ LOG.info("Attach volume to server successful!")
diff --git a/yardstick/benchmark/scenarios/lib/create_volume.py b/yardstick/benchmark/scenarios/lib/create_volume.py
index df523a5ec..b66749026 100644
--- a/yardstick/benchmark/scenarios/lib/create_volume.py
+++ b/yardstick/benchmark/scenarios/lib/create_volume.py
@@ -7,14 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import time
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -27,15 +25,16 @@ class CreateVolume(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.volume_name = self.options.get("volume_name", "TestVolume")
- self.volume_size = self.options.get("size", 100)
- self.image_name = self.options.get("image", None)
- self.image_id = None
+ self.size = self.options["size_gb"]
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+ self.image = self.options.get("image")
+ self.name = self.options.get("name")
+ self.description = self.options.get("description")
- self.glance_client = op_utils.get_glance_client()
- self.cinder_client = op_utils.get_cinder_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,27 +43,29 @@ class CreateVolume(base.Scenario):
self.setup_done = True
- def run(self):
+ def run(self, result):
"""execute the test"""
if not self.setup_done:
self.setup()
- self.image_id = op_utils.get_image_id(self.glance_client,
- self.image_name)
+ volume = openstack_utils.create_volume(
+ self.shade_client, self.size, wait=self.wait, timeout=self.timeout,
+ image=self.image, name=self.name, description=self.description)
- volume = op_utils.create_volume(self.cinder_client, self.volume_name,
- self.volume_size, self.image_id)
+ if not volume:
+ result.update({"volume_create": 0})
+ LOG.error("Create volume failed!")
+ raise exceptions.ScenarioCreateVolumeError
- status = volume.status
- while(status == 'creating' or status == 'downloading'):
+ status = volume["status"]
+ while status == "creating" or status == "downloading":
LOG.info("Volume status is: %s", status)
time.sleep(5)
- volume = op_utils.get_volume_by_name(self.volume_name)
- status = volume.status
-
+ volume = openstack_utils.get_volume(self.shade_client, self.name)
+ status = volume["status"]
+ result.update({"volume_create": 1})
LOG.info("Create volume successful!")
-
- values = [volume.id]
- keys = self.scenario_cfg.get('output', '').split()
+ values = [volume["id"]]
+ keys = self.scenario_cfg.get("output", '').split()
return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/delete_volume.py b/yardstick/benchmark/scenarios/lib/delete_volume.py
index ea2b85812..59e19dfdf 100644
--- a/yardstick/benchmark/scenarios/lib/delete_volume.py
+++ b/yardstick/benchmark/scenarios/lib/delete_volume.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
LOG = logging.getLogger(__name__)
@@ -26,11 +23,13 @@ class DeleteVolume(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.volume_id = self.options.get("volume_id", None)
+ self.volume_name_or_id = self.options.get("name_or_id")
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
- self.cinder_client = op_utils.get_cinder_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,11 +44,14 @@ class DeleteVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_volume(self.cinder_client, self.volume_id)
+ status = openstack_utils.delete_volume(
+ self.shade_client, name_or_id=self.volume_name_or_id,
+ wait=self.wait, timeout=self.timeout)
- if status:
- result.update({"delete_volume": 1})
- LOG.info("Delete volume successful!")
- else:
+ if not status:
result.update({"delete_volume": 0})
- LOG.info("Delete volume failed!")
+ LOG.error("Delete volume failed!")
+ raise exceptions.ScenarioDeleteVolumeError
+
+ result.update({"delete_volume": 1})
+ LOG.info("Delete volume successful!")
diff --git a/yardstick/benchmark/scenarios/lib/detach_volume.py b/yardstick/benchmark/scenarios/lib/detach_volume.py
index 0b02a3a81..76c0167bd 100644
--- a/yardstick/benchmark/scenarios/lib/detach_volume.py
+++ b/yardstick/benchmark/scenarios/lib/detach_volume.py
@@ -6,14 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
@@ -26,10 +24,14 @@ class DetachVolume(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.server_id = self.options.get("server_id", "TestServer")
- self.volume_id = self.options.get("volume_id", None)
+ self.server = self.options["server_name_or_id"]
+ self.volume = self.options["volume_name_or_id"]
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,11 +46,14 @@ class DetachVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.detach_volume(self.server_id, self.volume_id)
+ status = openstack_utils.detach_volume(
+ self.shade_client, self.server, self.volume,
+ wait=self.wait, timeout=self.timeout)
- if status:
- result.update({"detach_volume": 1})
- LOG.info("Detach volume from server successful!")
- else:
+ if not status:
result.update({"detach_volume": 0})
- LOG.info("Detach volume from server failed!")
+ LOG.error("Detach volume from server failed!")
+ raise exceptions.ScenarioDetachVolumeError
+
+ result.update({"detach_volume": 1})
+ LOG.info("Detach volume from server successful!")
diff --git a/yardstick/benchmark/scenarios/lib/get_flavor.py b/yardstick/benchmark/scenarios/lib/get_flavor.py
index d5e33947e..6727a7343 100644
--- a/yardstick/benchmark/scenarios/lib/get_flavor.py
+++ b/yardstick/benchmark/scenarios/lib/get_flavor.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,8 +23,12 @@ class GetFlavor(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
- self.flavor_name = self.options.get("flavor_name", "TestFlavor")
+ self.options = self.scenario_cfg["options"]
+ self.name_or_id = self.options["name_or_id"]
+ self.filters = self.options.get("filters")
+ self.get_extra = self.options.get("get_extra", True)
+ self.shade_client = openstack_utils.get_shade_client()
+
self.setup_done = False
def setup(self):
@@ -41,14 +42,18 @@ class GetFlavor(base.Scenario):
if not self.setup_done:
self.setup()
- LOG.info("Querying flavor: %s", self.flavor_name)
- flavor = op_utils.get_flavor_by_name(self.flavor_name)
- if flavor:
- LOG.info("Get flavor successful!")
- values = [self._change_obj_to_dict(flavor)]
- else:
- LOG.info("Get flavor: no flavor matched!")
- values = []
+ LOG.info("Querying flavor: %s", self.name_or_id)
+ flavor = openstack_utils.get_flavor(
+ self.shade_client, self.name_or_id, filters=self.filters,
+ get_extra=self.get_extra)
+
+ if not flavor:
+ result.update({"get_flavor": 0})
+ LOG.error("Get flavor failed!")
+ raise exceptions.ScenarioGetFlavorError
- keys = self.scenario_cfg.get('output', '').split()
+ result.update({"get_flavor": 1})
+ LOG.info("Get flavor successful!")
+ values = [flavor]
+ keys = self.scenario_cfg.get("output", '').split()
return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/get_server.py b/yardstick/benchmark/scenarios/lib/get_server.py
index fcf47c80d..f65fa9ebf 100644
--- a/yardstick/benchmark/scenarios/lib/get_server.py
+++ b/yardstick/benchmark/scenarios/lib/get_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -21,63 +18,58 @@ LOG = logging.getLogger(__name__)
class GetServer(base.Scenario):
"""Get a server instance
- Parameters
- server_id - ID of the server
- type: string
- unit: N/A
- default: null
- server_name - name of the server
- type: string
- unit: N/A
- default: null
-
- Either server_id or server_name is required.
-
- Outputs
+ Parameters:
+ name_or_id - Name or ID of the server
+ type: string
+ filters - meta data to use for further filtering
+ type: dict
+ detailed - whether or not to add detailed additional information
+ type: bool
+ bare - whether to skip adding any additional information to the server
+ record
+ type: bool
+ all_projects - whether to get the server from all projects or just the
+ current auth-scoped project
+ type: bool
+
+ Outputs:
rc - response code of getting server instance
- 0 for success
- 1 for failure
+ 1 for success
+ 0 for failure
type: int
- unit: N/A
server - instance of the server
type: dict
- unit: N/A
+
"""
- __scenario_type__ = "GetServer"
+ __scenario_type__ = 'GetServer'
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg.get('options', {})
+ self.options = self.scenario_cfg['options']
- self.server_id = self.options.get("server_id")
- if self.server_id:
- LOG.debug('Server id is %s', self.server_id)
+ self.server_name_or_id = self.options.get('name_or_id')
+ self.filters = self.options.get('filters')
+ self.detailed = self.options.get('detailed', False)
+ self.bare = self.options.get('bare', False)
- default_name = self.scenario_cfg.get('host',
- self.scenario_cfg.get('target'))
- self.server_name = self.options.get('server_name', default_name)
- if self.server_name:
- LOG.debug('Server name is %s', self.server_name)
-
- self.nova_client = op_utils.get_nova_client()
+ self.shade_client = openstack_utils.get_shade_client()
def run(self, result):
"""execute the test"""
- if self.server_id:
- server = self.nova_client.servers.get(self.server_id)
- else:
- server = op_utils.get_server_by_name(self.server_name)
-
- keys = self.scenario_cfg.get('output', '').split()
+ server = openstack_utils.get_server(
+ self.shade_client, name_or_id=self.server_name_or_id,
+ filters=self.filters, detailed=self.detailed, bare=self.bare)
- if server:
- LOG.info("Get server successful!")
- values = [0, self._change_obj_to_dict(server)]
- else:
- LOG.info("Get server failed!")
- values = [1]
+ if not server:
+ result.update({'get_server': 0})
+ LOG.error('Get Server failed!')
+ raise exceptions.ScenarioGetServerError
+ result.update({'get_server': 1})
+ LOG.info('Get Server successful!')
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [server]
return self._push_to_outputs(keys, values)
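
After the refactor, GetServer is driven entirely by the options mapping and pushes the server record to the configured outputs. A minimal sketch, assuming a reachable cloud and a server named 'athena' (the name and output key are illustrative):

    from yardstick.benchmark.scenarios.lib.get_server import GetServer

    scenario_cfg = {
        'options': {'name_or_id': 'athena'},  # illustrative server name
        'output': 'server',
    }
    result = {}
    outputs = GetServer(scenario_cfg, context_cfg={}).run(result)
    # result == {'get_server': 1}; outputs == {'server': <munch.Munch record>}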
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index be2fa3f3b..78f866e25 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -441,7 +441,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
traffic_gen.listen_traffic(self.traffic_profile)
# register collector with yardstick for KPI collection.
- self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
+ self.collector = Collector(self.vnfs)
self.collector.start()
# Start the actual traffic
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 153bd4bf4..8640afbae 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -152,3 +152,6 @@ IS_PUBLIC = 'is_public'
# general
TESTCASE_PRE = 'opnfv_yardstick_'
TESTSUITE_PRE = 'opnfv_'
+
+# OpenStack cloud default config parameters
+OS_CLOUD_DEFAULT_CONFIG = {'verify': False}
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 330eb5e0a..36c7eef05 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -54,6 +54,10 @@ class YardstickException(Exception):
return False
+class ResourceCommandError(YardstickException):
+ message = 'Command "%(command)s" failed, stderr: "%(stderr)s"'
+
+
class FunctionNotImplemented(YardstickException):
message = ('The function "%(function_name)s" is not implemented in '
'"%(class_name)" class.')
@@ -214,3 +218,27 @@ class ScenarioCreateKeypairError(YardstickException):
class ScenarioDeleteKeypairError(YardstickException):
message = 'Nova Delete Keypair Scenario failed'
+
+
+class ScenarioAttachVolumeError(YardstickException):
+ message = 'Nova Attach Volume Scenario failed'
+
+
+class ScenarioGetServerError(YardstickException):
+ message = 'Nova Get Server Scenario failed'
+
+
+class ScenarioGetFlavorError(YardstickException):
+ message = 'Nova Get Flavor Scenario failed'
+
+
+class ScenarioCreateVolumeError(YardstickException):
+ message = 'Cinder Create Volume Scenario failed'
+
+
+class ScenarioDeleteVolumeError(YardstickException):
+ message = 'Cinder Delete Volume Scenario failed'
+
+
+class ScenarioDetachVolumeError(YardstickException):
+ message = 'Cinder Detach Volume Scenario failed'
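
The new exception classes follow the existing YardstickException convention: the class-level message template is interpolated with the keyword arguments passed at raise time. A minimal sketch of that usage, assuming the base class performs the message % kwargs formatting:

    from yardstick.common import exceptions

    cmd = "sudo rabbitmqctl start_app"
    try:
        # ResourceCommandError carries the failing command and its stderr
        raise exceptions.ResourceCommandError(command=cmd, stderr="node down")
    except exceptions.ResourceCommandError as err:
        print(err)  # Command "sudo rabbitmqctl start_app" failed, stderr: "node down"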
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 0902d29e1..e3e08feb6 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -7,19 +7,20 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import os
-import sys
+import copy
import logging
+import os
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
from keystoneauth1 import loading
from keystoneauth1 import session
+from neutronclient.neutron import client as neutronclient
import shade
from shade import exc
-from cinderclient import client as cinderclient
-from novaclient import client as novaclient
-from glanceclient import client as glanceclient
-from neutronclient.neutron import client as neutronclient
+from yardstick.common import constants
log = logging.getLogger(__name__)
@@ -155,8 +156,21 @@ def get_glance_client(): # pragma: no cover
return glanceclient.Client(get_glance_client_version(), session=sess)
-def get_shade_client():
- return shade.openstack_cloud()
+def get_shade_client(**os_cloud_config):
+ """Get Shade OpenStack cloud client
+
+ By default, the input parameters given to "shade.openstack_cloud" method
+ are stored in "constants.OS_CLOUD_DEFAULT_CONFIG". The input parameters
+ passed in this function, "os_cloud_config", will overwrite the default
+ ones.
+
+ :param os_cloud_config: (kwargs) input arguments for
+ "shade.openstack_cloud" method.
+ :return: ``shade.OpenStackCloud`` object.
+ """
+ params = copy.deepcopy(constants.OS_CLOUD_DEFAULT_CONFIG)
+ params.update(os_cloud_config)
+ return shade.openstack_cloud(**params)
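
Because the caller's kwargs are merged on top of constants.OS_CLOUD_DEFAULT_CONFIG, a test can selectively re-enable TLS verification or target a named cloud. A small usage sketch (the cloud name 'mycloud' is illustrative):

    from yardstick.common import openstack_utils

    # Default client: inherits {'verify': False} from OS_CLOUD_DEFAULT_CONFIG
    cloud = openstack_utils.get_shade_client()

    # Override the default and select a named cloud from clouds.yaml
    secure_cloud = openstack_utils.get_shade_client(verify=True, cloud='mycloud')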
# *********************************************
@@ -264,17 +278,36 @@ def create_instance_and_wait_for_active(shade_client, name, image,
"Exception message, '%s'", o_exc.orig_message)
-def attach_server_volume(server_id, volume_id,
- device=None): # pragma: no cover
+def attach_volume_to_server(shade_client, server_name_or_id, volume_name_or_id,
+ device=None, wait=True, timeout=None):
+ """Attach a volume to a server.
+
+ This will attach a volume, described by the passed in volume
+ dict, to the server described by the passed in server dict on the named
+ device on the server.
+
+ If the volume is already attached to the server, or generally not
+ available, then an exception is raised. To re-attach to a server,
+ but under a different device, the user must detach it first.
+
+ :param server_name_or_id:(string) The server name or id to attach to.
+ :param volume_name_or_id:(string) The volume name or id to attach.
+ :param device:(string) The device name where the volume will attach.
+ :param wait:(bool) If true, waits for volume to be attached.
+ :param timeout: Seconds to wait for volume attachment. None is forever.
+
+ :returns: True if attached successful, False otherwise.
+ """
try:
- get_nova_client().volumes.create_server_volume(server_id,
- volume_id, device)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
- server_id, volume_id)
- return False
- else:
+ server = shade_client.get_server(name_or_id=server_name_or_id)
+ volume = shade_client.get_volume(volume_name_or_id)
+ shade_client.attach_volume(
+ server, volume, device=device, wait=wait, timeout=timeout)
return True
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [attach_volume_to_server(shade_client)]. "
+ "Exception message: %s", o_exc.orig_message)
+ return False
def delete_instance(shade_client, name_or_id, wait=False, timeout=180,
@@ -302,12 +335,26 @@ def delete_instance(shade_client, name_or_id, wait=False, timeout=180,
return False
-def get_server_by_name(name): # pragma: no cover
+def get_server(shade_client, name_or_id=None, filters=None, detailed=False,
+ bare=False):
+ """Get a server by name or ID.
+
+ :param name_or_id: Name or ID of the server.
+ :param filters:(dict) A dictionary of meta data to use for further
+ filtering.
+ :param detailed:(bool) Whether or not to add detailed additional
+ information.
+ :param bare:(bool) Whether to skip adding any additional information to the
+ server record.
+
+ :returns: A server ``munch.Munch`` or None if no matching server is found.
+ """
try:
- return get_nova_client().servers.list(search_opts={'name': name})[0]
- except IndexError:
- log.exception('Failed to get nova client')
- raise
+ return shade_client.get_server(name_or_id=name_or_id, filters=filters,
+ detailed=detailed, bare=bare)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [get_server(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
@@ -330,12 +377,22 @@ def get_flavor_id(nova_client, flavor_name): # pragma: no cover
return flavor_id
-def get_flavor_by_name(name): # pragma: no cover
- flavors = get_nova_client().flavors.list()
+def get_flavor(shade_client, name_or_id, filters=None, get_extra=True):
+ """Get a flavor by name or ID.
+
+ :param name_or_id: Name or ID of the flavor.
+ :param filters: A dictionary of meta data to use for further filtering.
+ :param get_extra: Whether or not the list_flavors call should get the extra
+ flavor specs.
+
+ :returns: A flavor ``munch.Munch`` or None if no matching flavor is found.
+ """
try:
- return next((a for a in flavors if a.name == name))
- except StopIteration:
- log.exception('No flavor matched')
+ return shade_client.get_flavor(name_or_id, filters=filters,
+ get_extra=get_extra)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [get_flavor(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
def delete_flavor(flavor_id): # pragma: no cover
@@ -726,54 +783,79 @@ def list_images(shade_client=None):
# *********************************************
# CINDER
# *********************************************
-def get_volume_id(volume_name): # pragma: no cover
- volumes = get_cinder_client().volumes.list()
- return next((v.id for v in volumes if v.name == volume_name), None)
+def get_volume_id(shade_client, volume_name):
+ return shade_client.get_volume_id(volume_name)
+
+
+def get_volume(shade_client, name_or_id, filters=None):
+ """Get a volume by name or ID.
+
+ :param name_or_id: Name or ID of the volume.
+ :param filters: A dictionary of meta data to use for further filtering.
+ :returns: A volume ``munch.Munch`` or None if no matching volume is found.
+ """
+ return shade_client.get_volume(name_or_id, filters=filters)
+
+
+def create_volume(shade_client, size, wait=True, timeout=None,
+ image=None, **kwargs):
+ """Create a volume.
+
+ :param size: Size, in GB of the volume to create.
+ :param name: (optional) Name for the volume.
+ :param description: (optional) Description of the volume.
+ :param wait: If true, waits for volume to be created.
+ :param timeout: Seconds to wait for volume creation. None is forever.
+ :param image: (optional) Image name, ID or object from which to create
+ the volume.
-def create_volume(cinder_client, volume_name, volume_size,
- volume_image=False): # pragma: no cover
+ :returns: The created volume object.
+
+ """
try:
- if volume_image:
- volume = cinder_client.volumes.create(name=volume_name,
- size=volume_size,
- imageRef=volume_image)
- else:
- volume = cinder_client.volumes.create(name=volume_name,
- size=volume_size)
- return volume
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_volume(cinder_client, %s)]",
- (volume_name, volume_size))
- return None
+ return shade_client.create_volume(size, wait=wait, timeout=timeout,
+ image=image, **kwargs)
+ except (exc.OpenStackCloudException, exc.OpenStackCloudTimeout) as op_exc:
+ log.error("Failed to create_volume(shade_client). "
+ "Exception message: %s", op_exc.orig_message)
+
+def delete_volume(shade_client, name_or_id=None, wait=True, timeout=None):
+ """Delete a volume.
-def delete_volume(cinder_client, volume_id,
- forced=False): # pragma: no cover
+ :param name_or_id:(string) Name or unique ID of the volume.
+ :param wait:(bool) If true, waits for volume to be deleted.
+ :param timeout:(int) Seconds to wait for volume deletion. None is forever.
+
+ :return: True on success, False otherwise.
+ """
try:
- if forced:
- try:
- cinder_client.volumes.detach(volume_id)
- except Exception: # pylint: disable=broad-except
- log.error(sys.exc_info()[0])
- cinder_client.volumes.force_delete(volume_id)
- else:
- while True:
- volume = get_cinder_client().volumes.get(volume_id)
- if volume.status.lower() == 'available':
- break
- cinder_client.volumes.delete(volume_id)
- return True
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_volume(cinder_client, '%s')]", volume_id)
+ return shade_client.delete_volume(name_or_id=name_or_id,
+ wait=wait, timeout=timeout)
+ except (exc.OpenStackCloudException, exc.OpenStackCloudTimeout) as o_exc:
+ log.error("Error [delete_volume(shade_client,'%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
return False
-def detach_volume(server_id, volume_id): # pragma: no cover
+def detach_volume(shade_client, server_name_or_id, volume_name_or_id,
+ wait=True, timeout=None):
+ """Detach a volume from a server.
+
+ :param server_name_or_id: The server name or id to detach from.
+ :param volume_name_or_id: The volume name or id to detach.
+ :param wait: If true, waits for volume to be detached.
+ :param timeout: Seconds to wait for volume detachment. None is forever.
+
+ :return: True on success, False otherwise.
+ """
try:
- get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+ volume = shade_client.get_volume(volume_name_or_id)
+ server = get_server(shade_client, name_or_id=server_name_or_id)
+ shade_client.detach_volume(server, volume, wait=wait, timeout=timeout)
return True
- except Exception: # pylint: disable=broad-except
- log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
- server_id, volume_id)
+ except (exc.OpenStackCloudException, exc.OpenStackCloudTimeout) as o_exc:
+ log.error("Error [detach_volume(shade_client)]. "
+ "Exception message: %s", o_exc.orig_message)
return False
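
Taken together, the shade-based helpers support a full volume lifecycle through a single client. A sketch under the assumption of a reachable cloud and an existing server; every name is illustrative:

    from yardstick.common import openstack_utils

    shade_client = openstack_utils.get_shade_client()

    # Create a 2 GB volume, attach it, then detach and delete it
    volume = openstack_utils.create_volume(shade_client, 2, name='demo-vol')
    if volume:
        openstack_utils.attach_volume_to_server(
            shade_client, 'test-server', 'demo-vol', wait=True)
        openstack_utils.detach_volume(shade_client, 'test-server', 'demo-vol')
        openstack_utils.delete_volume(shade_client, name_or_id='demo-vol')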
diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py
index 7e18302eb..322b3f5a2 100644
--- a/yardstick/network_services/collector/subscriber.py
+++ b/yardstick/network_services/collector/subscriber.py
@@ -14,42 +14,29 @@
"""This module implements stub for publishing results in yardstick format."""
import logging
-from yardstick.network_services.nfvi.resource import ResourceProfile
-from yardstick.network_services.utils import get_nsb_option
-
LOG = logging.getLogger(__name__)
class Collector(object):
"""Class that handles dictionary of results in yardstick-plot format."""
- def __init__(self, vnfs, nodes, traffic_profile, timeout=3600):
+ def __init__(self, vnfs):
super(Collector, self).__init__()
- self.traffic_profile = traffic_profile
self.vnfs = vnfs
- self.nodes = nodes
- self.timeout = timeout
- self.bin_path = get_nsb_option('bin_path', '')
- self.resource_profiles = {node_name: ResourceProfile.make_from_node(node, self.timeout)
- for node_name, node in self.nodes.items()
- if node.get("collectd")}
def start(self):
- """Nothing to do, yet"""
- for resource in self.resource_profiles.values():
- resource.initiate_systemagent(self.bin_path)
- resource.start()
- resource.amqp_process_for_nfvi_kpi()
+ for vnf in self.vnfs:
+ vnf.start_collect()
def stop(self):
- """Nothing to do, yet"""
- for resource in self.resource_profiles.values():
- resource.stop()
+ for vnf in self.vnfs:
+ vnf.stop_collect()
def get_kpi(self):
"""Returns dictionary of results in yardstick-plot format
- :return:
+ :return: (dict) dictionary of kpis collected from the VNFs;
+ the keys are the names of the VNFs.
"""
results = {}
for vnf in self.vnfs:
@@ -58,17 +45,4 @@ class Collector(object):
LOG.debug("collect KPI for %s", vnf.name)
results[vnf.name] = vnf.collect_kpi()
- for node_name, resource in self.resource_profiles.items():
- # Result example:
- # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("collect KPI for %s", node_name)
- if resource.check_if_system_agent_running("collectd")[0] != 0:
- continue
-
- try:
- results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()}
- LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core'])
- # NOTE(elfoley): catch a more specific error
- except Exception as exc: # pylint: disable=broad-except
- LOG.exception(exc)
return results
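
With nodes and collectd handling removed, Collector only fans start/stop/collect out to the VNF objects it is given. A minimal sketch of the reduced contract (DummyVNF is a stand-in, not a real Yardstick class):

    from yardstick.network_services.collector.subscriber import Collector

    class DummyVNF(object):
        """Stand-in exposing the interface Collector now relies on."""
        name = 'vnf__0'

        def start_collect(self):
            pass

        def stop_collect(self):
            pass

        def collect_kpi(self):
            return {'tput': [1000, 999]}

    collector = Collector([DummyVNF()])
    collector.start()
    print(collector.get_kpi())  # {'vnf__0': {'tput': [1000, 999]}}
    collector.stop()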
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
index 70ce4ff03..c538ceeba 100644
--- a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
+++ b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
@@ -12,13 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from __future__ import print_function
-import sys
import logging
import re
from itertools import product
+import IxNetwork
+
log = logging.getLogger(__name__)
@@ -135,7 +134,6 @@ class IxNextgen(object):
port.append(port0)
cfg = {
- 'py_lib_path': tg_cfg["mgmt-interface"]["tg-config"]["py_lib_path"],
'machine': tg_cfg["mgmt-interface"]["ip"],
'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
@@ -186,7 +184,7 @@ class IxNextgen(object):
self.set_random_ip_multi_attribute(ip, seeds[1], fixed_bits, random_mask, l3_count)
def add_ip_header(self, params, version):
- for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
+ for _, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
iter1 = (v['outer_l3'] for v in params.values() if str(v['id']) == str(i))
try:
l3 = next(iter1, {})
@@ -194,21 +192,13 @@ class IxNextgen(object):
except (KeyError, IndexError):
continue
- for ip, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
+ for _, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
self.set_random_ip_multi_attributes(ip_bits, version, seeds, l3)
self.ixnet.commit()
def _connect(self, tg_cfg):
self._cfg = self.get_config(tg_cfg)
-
- sys.path.append(self._cfg["py_lib_path"])
- # Import IxNetwork after getting ixia lib path
- try:
- import IxNetwork
- except ImportError:
- raise
-
self.ixnet = IxNetwork.IxNet()
machine = self._cfg['machine']
@@ -292,7 +282,7 @@ class IxNextgen(object):
self.update_ether_multi_attribute(ether, str(l2.get('srcmac', "00:00:00:00:00:01")))
def ix_update_ether(self, params):
- for ti, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
+ for _, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
"configElement", 1):
iter1 = (v['outer_l2'] for v in params.values() if str(v['id']) == str(index))
try:
@@ -300,7 +290,7 @@ class IxNextgen(object):
except KeyError:
continue
- for ip, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
+ for _, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
self.update_ether_multi_attributes(ether, l2)
self.ixnet.commit()
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index dc5c46a86..0c0bf223a 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -27,6 +27,7 @@ from oslo_config import cfg
from oslo_utils.encodeutils import safe_decode
from yardstick import ssh
+from yardstick.common.exceptions import ResourceCommandError
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
@@ -249,45 +250,46 @@ class ResourceProfile(object):
if status != 0:
LOG.error("cannot find OVS socket %s", socket_path)
+ def _start_rabbitmq(self, connection):
+ # Reset amqp queue
+ LOG.debug("reset and setup amqp to collect data from collectd")
+ # ensure collectd.conf.d exists to avoid error/warning
+ cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d",
+ "sudo service rabbitmq-server restart",
+ "sudo rabbitmqctl stop_app",
+ "sudo rabbitmqctl reset",
+ "sudo rabbitmqctl start_app",
+ "sudo rabbitmqctl add_user admin admin",
+ "sudo rabbitmqctl authenticate_user admin admin",
+ "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+ ]
+ for cmd in cmd_list:
+ exit_status, stdout, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
+ # check stdout for "sudo rabbitmqctl status" command
+ cmd = "sudo rabbitmqctl status"
+ _, stdout, stderr = connection.execute(cmd)
+ if not re.search("RabbitMQ", stdout):
+ LOG.error("rabbitmqctl status don't have RabbitMQ in running apps")
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
def _start_collectd(self, connection, bin_path):
LOG.debug("Starting collectd to collect NFVi stats")
- connection.execute('sudo pkill -x -9 collectd')
collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
config_file_path = os.path.join(bin_path, "collectd", "etc")
+ self._prepare_collectd_conf(config_file_path)
+
+ connection.execute('sudo pkill -x -9 collectd')
exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
if exit_status != 0:
LOG.warning("%s is not present disabling", collectd_path)
- # disable auto-provisioning because it requires Internet access
- # collectd_installer = os.path.join(bin_path, "collectd.sh")
- # provision_tool(connection, collectd)
- # http_proxy = os.environ.get('http_proxy', '')
- # https_proxy = os.environ.get('https_proxy', '')
- # connection.execute("sudo %s '%s' '%s'" % (
- # collectd_installer, http_proxy, https_proxy))
return
if "ovs_stats" in self.plugins:
self._setup_ovs_stats(connection)
LOG.debug("Starting collectd to collect NFVi stats")
- # ensure collectd.conf.d exists to avoid error/warning
- connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
- self._prepare_collectd_conf(config_file_path)
-
- # Reset amqp queue
- LOG.debug("reset and setup amqp to collect data from collectd")
- connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
- connection.execute("sudo service rabbitmq-server start")
- connection.execute("sudo rabbitmqctl stop_app")
- connection.execute("sudo rabbitmqctl reset")
- connection.execute("sudo rabbitmqctl start_app")
- connection.execute("sudo service rabbitmq-server restart")
-
- LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
- connection.execute("sudo rabbitmqctl delete_user guest")
- connection.execute("sudo rabbitmqctl add_user admin admin")
- connection.execute("sudo rabbitmqctl authenticate_user admin admin")
- connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
-
LOG.debug("Start collectd service..... %s second timeout", self.timeout)
# intel_pmu plug requires large numbers of files open, so try to set
# ulimit -n to a large value
@@ -299,9 +301,10 @@ class ResourceProfile(object):
""" Start system agent for NFVi collection on host """
if self.enable:
try:
+ self._start_rabbitmq(self.connection)
self._start_collectd(self.connection, bin_path)
- except Exception:
- LOG.exception("Exception during collectd start")
+ except ResourceCommandError as e:
+ LOG.exception("Exception during collectd and rabbitmq start: %s", str(e))
raise
def start(self):
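
The RabbitMQ setup is now fail-fast: every command's exit status is checked and the first failure aborts the start sequence with a ResourceCommandError, instead of silently continuing as before. The same pattern in isolation, assuming a connection object whose execute() returns (status, stdout, stderr):

    from yardstick.common.exceptions import ResourceCommandError

    def run_all_or_raise(connection, commands):
        """Run commands in order; abort on the first non-zero exit status."""
        for cmd in commands:
            exit_status, _, stderr = connection.execute(cmd)
            if exit_status != 0:
                raise ResourceCommandError(command=cmd, stderr=stderr)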
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index c3277fb12..7a84f1365 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -116,7 +116,6 @@ class ProxBinSearchProfile(ProxProfile):
self.current_lower = test_value
successful_pkt_loss = result.pkt_loss
samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- samples["TxThroughput"] = samples["TxThroughput"] * 1000 * 1000
# store results with success tag in influxdb
success_samples = {'Success_' + key: value for key, value in samples.items()}
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index a776b0989..9ceac3167 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -195,6 +195,18 @@ class GenericVNF(object):
:return: {"kpi": value, "kpi2": value}
"""
+ @abc.abstractmethod
+ def start_collect(self):
+ """Start KPI collection
+ :return: None
+ """
+
+ @abc.abstractmethod
+ def stop_collect(self):
+ """Stop KPI collection
+ :return: None
+ """
+
@six.add_metaclass(abc.ABCMeta)
class GenericTrafficGen(GenericVNF):
@@ -254,3 +266,23 @@ class GenericTrafficGen(GenericVNF):
:return: True/False
"""
pass
+
+ def start_collect(self):
+ """Start KPI collection.
+
+ Traffic measurements are always collected during injection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
+
+ def stop_collect(self):
+ """Stop KPI collection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
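
With start_collect()/stop_collect() added to the abstract interface, every concrete VNF must provide them, while traffic generators inherit the no-op defaults above. A minimal sketch of a conforming subclass (MyVNF and its resource helper are illustrative, not part of the tree):

    from yardstick.network_services.vnf_generic.vnf import base

    class MyVNF(base.GenericVNF):
        """Illustrative VNF wiring the new KPI-collection hooks."""

        def instantiate(self, scenario_cfg, context_cfg):
            pass

        def wait_for_instantiate(self):
            pass

        def terminate(self):
            pass

        def scale(self, flavor=""):
            pass

        def collect_kpi(self):
            return {'kpi': 0}

        def start_collect(self):
            self.resource_helper.start_collect()  # assumes a helper set up elsewhere

        def stop_collect(self):
            self.resource_helper.stop_collect()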
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index d8918b2c3..16873611e 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -35,7 +35,6 @@ from yardstick.common import utils
from yardstick.network_services import constants
from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
-from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
@@ -60,6 +59,7 @@ class SetupEnvHelper(object):
self.vnfd_helper = vnfd_helper
self.ssh_helper = ssh_helper
self.scenario_helper = scenario_helper
+ self.collectd_options = {}
def build_config(self):
raise NotImplementedError
@@ -234,12 +234,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
if exit_status == 0:
return
- def get_collectd_options(self):
- options = self.scenario_helper.all_options.get("collectd", {})
- # override with specific node settings
- options.update(self.scenario_helper.options.get("collectd", {}))
- return options
-
def _setup_resources(self):
# what is this magic? how do we know which socket is for which port?
# what about quad-socket?
@@ -252,11 +246,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
# this won't work because we don't have DPDK port numbers yet
ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num)
port_names = (intf["name"] for intf in ports)
- collectd_options = self.get_collectd_options()
- plugins = collectd_options.get("plugins", {})
+ plugins = self.collectd_options.get("plugins", {})
+ interval = self.collectd_options.get("interval")
# we must set timeout to be the same as the VNF otherwise KPIs will die before VNF
return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names,
- plugins=plugins, interval=collectd_options.get("interval"),
+ plugins=plugins, interval=interval,
timeout=self.scenario_helper.timeout)
def _check_interface_fields(self):
@@ -666,49 +660,6 @@ class SampleVNF(GenericVNF):
self.vnf_port_pairs = None
self._vnf_process = None
- def _build_ports(self):
- self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
- self.networks = self._port_pairs.networks
- self.uplink_ports = self.vnfd_helper.port_nums(self._port_pairs.uplink_ports)
- self.downlink_ports = self.vnfd_helper.port_nums(self._port_pairs.downlink_ports)
- self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports)
-
- def _get_route_data(self, route_index, route_type):
- route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', []))
- for _ in range(route_index):
- next(route_iter, '')
- return next(route_iter, {}).get(route_type, '')
-
- def _get_port0localip6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0localip6 : %s", return_value)
- return return_value
-
- def _get_port1localip6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1localip6 : %s", return_value)
- return return_value
-
- def _get_port0prefixlen6(self):
- return_value = self._get_route_data(0, 'netmask')
- LOG.info("_get_port0prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port1prefixlen6(self):
- return_value = self._get_route_data(1, 'netmask')
- LOG.info("_get_port1prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port0gateway6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0gateway6 : %s", return_value)
- return return_value
-
- def _get_port1gateway6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1gateway6 : %s", return_value)
- return return_value
-
def _start_vnf(self):
self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT)
name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid())
@@ -719,6 +670,7 @@ class SampleVNF(GenericVNF):
pass
def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
@@ -730,6 +682,54 @@ class SampleVNF(GenericVNF):
self.resource_helper.setup()
self._start_vnf()
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ """Update collectd configuration options
+ This function retrieves all collectd options contained in the test case
+
+ definition builds a single dictionary combining them. The following fragment
+ represents a test case with the collectd options and priorities (1 highest, 3 lowest):
+ ---
+ schema: yardstick:task:0.1
+ scenarios:
+ - type: NSPerf
+ nodes:
+ tg__0: trafficgen_1.yardstick
+ vnf__0: vnf.yardstick
+ options:
+ collectd:
+ <options> # COLLECTD priority 3
+ vnf__0:
+ collectd:
+ plugins:
+ load
+ <options> # COLLECTD priority 2
+ context:
+ type: Node
+ name: yardstick
+ nfvi_type: baremetal
+ file: /etc/yardstick/nodes/pod_ixia.yaml # COLLECTD priority 1
+ """
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ """Update collectd options and plugins dictionary"""
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
def wait_for_instantiate(self):
buf = []
time.sleep(self.WAIT_TIME) # Give some time for config to load
@@ -745,7 +745,6 @@ class SampleVNF(GenericVNF):
LOG.info("%s VNF is up and running.", self.APP_NAME)
self._vnf_up_post()
self.queue_wrapper.clear()
- self.resource_helper.start_collect()
return self._vnf_process.exitcode
if "PANIC" in message:
@@ -758,6 +757,12 @@ class SampleVNF(GenericVNF):
# by other VNF output
self.q_in.put('\r\n')
+ def start_collect(self):
+ self.resource_helper.start_collect()
+
+ def stop_collect(self):
+ self.resource_helper.stop_collect()
+
def _build_run_kwargs(self):
self.run_kwargs = {
'stdin': self.queue_wrapper,
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 5afa4151e..e0c0db262 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -22,13 +22,13 @@ import time
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
-import shade
from shade._heat import event_utils
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import constants as consts
from yardstick.common import exceptions
from yardstick.common import template_format
-from yardstick.common import constants as consts
+from yardstick.common import openstack_utils as op_utils
+
log = logging.getLogger(__name__)
@@ -41,10 +41,11 @@ _DEPLOYED_STACKS = {}
class HeatStack(object):
"""Represents a Heat stack (deployed template) """
- def __init__(self, name):
+ def __init__(self, name, os_cloud_config=None):
self.name = name
self.outputs = {}
- self._cloud = shade.openstack_cloud()
+ os_cloud_config = {} if not os_cloud_config else os_cloud_config
+ self._cloud = op_utils.get_shade_client(**os_cloud_config)
self._stack = None
def _update_stack_tracking(self):
@@ -152,10 +153,12 @@ name (i.e. %s).
# short hand for resources part of template
self.resources = self._template['resources']
- def __init__(self, name, template_file=None, heat_parameters=None):
+ def __init__(self, name, template_file=None, heat_parameters=None,
+ os_cloud_config=None):
self.name = name
self.keystone_client = None
self.heat_parameters = {}
+ self._os_cloud_config = {} if not os_cloud_config else os_cloud_config
# heat_parameters is passed to heat in stack create, empty dict when
# yardstick creates the template (no get_param in resources part)
@@ -622,7 +625,7 @@ name (i.e. %s).
log.info("Creating stack '%s' START", self.name)
start_time = time.time()
- stack = HeatStack(self.name)
+ stack = HeatStack(self.name, os_cloud_config=self._os_cloud_config)
stack.create(self._template, self.heat_parameters, block, timeout)
if not block:
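Note: the new os_cloud_config parameter rides from HeatTemplate into HeatStack and ends at get_shade_client. A hedged usage sketch (the stack name and config values are made up; template_file is left unset so the snippet needs no template on disk):

    from yardstick.orchestrator import heat

    # Illustrative values; {'verify': False} mirrors the default asserted in
    # the FlagsTestCase changes below.
    template = heat.HeatTemplate('demo-stack',
                                 os_cloud_config={'verify': False})
    # create() builds HeatStack('demo-stack', os_cloud_config=...), and
    # HeatStack forwards the dict to op_utils.get_shade_client(**cfg).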
diff --git a/yardstick/tests/integration/dummy-scenario-heat-context.yaml b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
index 7c980b412..45a39951a 100644
--- a/yardstick/tests/integration/dummy-scenario-heat-context.yaml
+++ b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
@@ -6,6 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{% set context_name = context_name or "demo" %}
---
# Sample Heat context config with Dummy context
@@ -22,9 +23,9 @@ scenarios:
context:
name: {{ context_name }}
- image: cirros-0.3.5
- flavor: cirros256
- user: cirros
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
servers:
athena:
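Note: the added Jinja2 line defaults context_name to "demo" when the template is rendered without one. A self-contained illustration of that behaviour (the render calls are examples, not Yardstick code):

    from jinja2 import Template

    t = Template('{% set context_name = context_name or "demo" %}'
                 'name: {{ context_name }}')
    print(t.render())                       # name: demo
    print(t.render(context_name='ci-run'))  # name: ci-run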
diff --git a/yardstick/tests/unit/benchmark/contexts/test_base.py b/yardstick/tests/unit/benchmark/contexts/test_base.py
index 153c6a527..b19883479 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_base.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_base.py
@@ -25,6 +25,7 @@ class FlagsTestCase(unittest.TestCase):
def test___init__(self):
self.assertFalse(self.flags.no_setup)
self.assertFalse(self.flags.no_teardown)
+ self.assertEqual({'verify': False}, self.flags.os_cloud_config)
def test___init__with_flags(self):
flags = base.Flags(no_setup=True)
@@ -32,10 +33,12 @@ class FlagsTestCase(unittest.TestCase):
self.assertFalse(flags.no_teardown)
def test_parse(self):
- self.flags.parse(no_setup=True, no_teardown="False")
+ self.flags.parse(no_setup=True, no_teardown='False',
+ os_cloud_config={'verify': True})
self.assertTrue(self.flags.no_setup)
- self.assertEqual(self.flags.no_teardown, "False")
+ self.assertEqual('False', self.flags.no_teardown)
+ self.assertEqual({'verify': True}, self.flags.os_cloud_config)
def test_parse_forbidden_flags(self):
self.flags.parse(foo=42)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index 1d491fe60..ebb1d69ba 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -234,7 +234,7 @@ class HeatContextTestCase(unittest.TestCase):
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(ssh.SSH, 'gen_keys')
- @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
+ @mock.patch.object(heat, 'HeatTemplate')
def test_deploy(self, mock_template, mock_genkeys, mock_path_exists):
self.test_context._name = 'foo'
self.test_context._task_id = '1234567890'
@@ -245,9 +245,10 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.get_neutron_info = mock.MagicMock()
self.test_context.deploy()
- mock_template.assert_called_with('foo-12345678',
- '/bar/baz/some-heat-file',
- {'image': 'cirros'})
+ mock_template.assert_called_with(
+ 'foo-12345678', template_file='/bar/baz/some-heat-file',
+ heat_parameters={'image': 'cirros'},
+ os_cloud_config=self.test_context._flags.os_cloud_config)
self.assertIsNotNone(self.test_context.stack)
key_filename = ''.join(
[consts.YARDSTICK_ROOT_PATH,
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
index 2964ecc14..bb7fa4536 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
@@ -6,21 +6,51 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.attach_volume import AttachVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import attach_volume
class AttachVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.attach_server_volume')
- def test_attach_volume(self, mock_attach_server_volume):
- options = {
- 'volume_id': '123-456-000',
- 'server_id': '000-123-456'
- }
- args = {"options": options}
- obj = AttachVolume(args, {})
- obj.run({})
- mock_attach_server_volume.assert_called_once()
+ def setUp(self):
+ self._mock_attach_volume_to_server = mock.patch.object(
+ openstack_utils, 'attach_volume_to_server')
+ self.mock_attach_volume_to_server = (
+ self._mock_attach_volume_to_server.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(attach_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ _uuid = uuidutils.generate_uuid()
+ self.args = {'options': {'server_name_or_id': _uuid,
+ 'volume_name_or_id': _uuid}}
+ self.result = {}
+ self.addCleanup(self._stop_mock)
+ self.attachvol_obj = attach_volume.AttachVolume(self.args, mock.ANY)
+
+ def _stop_mock(self):
+ self._mock_attach_volume_to_server.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_attach_volume_to_server.return_value = True
+ self.assertIsNone(self.attachvol_obj.run(self.result))
+ self.assertEqual({'attach_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Attach volume to server successful!')
+
+ def test_run_fail(self):
+ self.mock_attach_volume_to_server.return_value = False
+ with self.assertRaises(exceptions.ScenarioAttachVolumeError):
+ self.attachvol_obj.run(self.result)
+ self.assertEqual({'attach_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with(
+ 'Attach volume to server failed!')
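Note: the rewritten scenario tests in this series all share one fixture shape: patch by object reference with mock.patch.object, start the patchers in setUp(), and undo them via a registered cleanup. A condensed sketch of the pattern (assumes the yardstick package is importable):

    import mock
    import unittest

    from yardstick.common import openstack_utils


    class ScenarioTestCase(unittest.TestCase):

        def setUp(self):
            # Patch by object reference instead of a dotted string path.
            self._mock_client = mock.patch.object(openstack_utils,
                                                  'get_shade_client')
            self.mock_client = self._mock_client.start()
            # addCleanup runs even if setUp fails part-way through.
            self.addCleanup(self._mock_client.stop)

        def test_client_requested(self):
            openstack_utils.get_shade_client()
            self.mock_client.assert_called_once_with()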
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
index 30333dda8..f91d2c3f4 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
@@ -6,95 +6,53 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import mock
+from oslo_utils import uuidutils
import unittest
+import mock
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios.lib import create_volume
class CreateVolumeTestCase(unittest.TestCase):
def setUp(self):
- self._mock_cinder_client = mock.patch(
- 'yardstick.common.openstack_utils.get_cinder_client')
- self.mock_cinder_client = self._mock_cinder_client.start()
- self._mock_glance_client = mock.patch(
- 'yardstick.common.openstack_utils.get_glance_client')
- self.mock_glance_client = self._mock_glance_client.start()
- self.addCleanup(self._stop_mock)
-
- self.scenario_cfg = {
- "options" :
- {
- 'volume_name': 'yardstick_test_volume_01',
- 'size': '256',
- 'image': 'cirros-0.3.5'
- }
- }
- self.scenario = create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={})
+ self._mock_create_volume = mock.patch.object(
+ openstack_utils, 'create_volume')
+ self.mock_create_volume = (
+ self._mock_create_volume.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'size_gb': 1}}
+ self.result = {}
+
+ self.cvolume_obj = create_volume.CreateVolume(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
def _stop_mock(self):
- self._mock_cinder_client.stop()
- self._mock_glance_client.stop()
-
- def test_init(self):
- self.mock_cinder_client.return_value = "All volumes are equal"
- self.mock_glance_client.return_value = "Images are more equal"
-
- expected_vol_name = self.scenario_cfg["options"]["volume_name"]
- expected_vol_size = self.scenario_cfg["options"]["size"]
- expected_im_name = self.scenario_cfg["options"]["image"]
- expected_im_id = None
-
- scenario = create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={})
-
- self.assertEqual(expected_vol_name, scenario.volume_name)
- self.assertEqual(expected_vol_size, scenario.volume_size)
- self.assertEqual(expected_im_name, scenario.image_name)
- self.assertEqual(expected_im_id, scenario.image_id)
- self.assertEqual("All volumes are equal", scenario.cinder_client)
- self.assertEqual("Images are more equal", scenario.glance_client)
-
- def test_setup(self):
- self.assertFalse(self.scenario.setup_done)
- self.scenario.setup()
- self.assertTrue(self.scenario.setup_done)
-
- @mock.patch('yardstick.common.openstack_utils.create_volume')
- @mock.patch('yardstick.common.openstack_utils.get_image_id')
- def test_run(self, mock_image_id, mock_create_volume):
- self.scenario.run()
-
- mock_image_id.assert_called_once()
- mock_create_volume.assert_called_once()
-
- @mock.patch.object(create_volume.CreateVolume, 'setup')
- def test_run_no_setup(self, scenario_setup):
- self.scenario.setup_done = False
- self.scenario.run()
- scenario_setup.assert_called_once()
-
- @mock.patch('yardstick.common.openstack_utils.create_volume')
- @mock.patch('yardstick.common.openstack_utils.get_image_id')
- @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
- def test_create_volume(self, mock_get_glance_client,
- mock_get_cinder_client, mock_image_id,
- mock_create_volume):
- options = {
- 'volume_name': 'yardstick_test_volume_01',
- 'size': '256',
- 'image': 'cirros-0.3.5'
- }
- args = {"options": options}
- scenario = create_volume.CreateVolume(args, {})
- scenario.run()
- mock_create_volume.assert_called_once()
- mock_image_id.assert_called_once()
- mock_get_glance_client.assert_called_once()
- mock_get_cinder_client.assert_called_once()
+ self._mock_create_volume.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.cvolume_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_volume.return_value = {'name': 'yardstick_volume',
+ 'id': _uuid,
+ 'status': 'available'}
+ output = self.cvolume_obj.run(self.result)
+ self.assertDictEqual({'volume_create': 1}, self.result)
+ self.assertDictEqual({'id': _uuid}, output)
+ self.mock_log.info.assert_called_once_with('Create volume successful!')
+
+ def test_run_fail(self):
+ self.mock_create_volume.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateVolumeError):
+ self.cvolume_obj.run(self.result)
+ self.assertDictEqual({'volume_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create volume failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
index 93f76e819..0db16f396 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
@@ -9,19 +9,44 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_volume import DeleteVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_volume
class DeleteVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
- @mock.patch('yardstick.common.openstack_utils.delete_volume')
- def test_delete_volume(self, mock_get_cinder_client, mock_delete_volume):
- options = {
- 'volume_id': '123-123-123'
- }
- args = {"options": options}
- obj = DeleteVolume(args, {})
- obj.run({})
- mock_get_cinder_client.assert_called_once()
- mock_delete_volume.assert_called_once()
+ def setUp(self):
+ self._mock_delete_volume = mock.patch.object(
+ openstack_utils, 'delete_volume')
+ self.mock_delete_volume = (
+ self._mock_delete_volume.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_volume'}}
+ self.result = {}
+
+ self.delvol_obj = delete_volume.DeleteVolume(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_volume.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_volume.return_value = True
+ self.assertIsNone(self.delvol_obj.run(self.result))
+ self.assertEqual({'delete_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with('Delete volume successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_volume.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteVolumeError):
+ self.delvol_obj.run(self.result)
+ self.assertEqual({'delete_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Delete volume failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
index 9794d2129..2bc57f495 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
@@ -6,21 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.detach_volume import DetachVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import detach_volume
class DetachVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.detach_volume')
- def test_detach_volume(self, mock_detach_volume):
- options = {
- 'server_id': '321-321-321',
- 'volume_id': '123-123-123'
- }
- args = {"options": options}
- obj = DetachVolume(args, {})
- obj.run({})
- mock_detach_volume.assert_called_once()
+ def setUp(self):
+ self._mock_detach_volume = mock.patch.object(
+ openstack_utils, 'detach_volume')
+ self.mock_detach_volume = (
+ self._mock_detach_volume.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(detach_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ _uuid = uuidutils.generate_uuid()
+ self.args = {'options': {'server_name_or_id': _uuid,
+ 'volume_name_or_id': _uuid}}
+ self.result = {}
+
+ self.detachvol_obj = detach_volume.DetachVolume(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_detach_volume.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_detach_volume.return_value = True
+ self.assertIsNone(self.detachvol_obj.run(self.result))
+ self.assertEqual({'detach_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Detach volume from server successful!')
+
+ def test_run_fail(self):
+ self.mock_detach_volume.return_value = False
+ with self.assertRaises(exceptions.ScenarioDetachVolumeError):
+ self.detachvol_obj.run(self.result)
+ self.assertEqual({'detach_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with(
+ 'Detach volume from server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
index 15a6f7c8f..1c1364348 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
@@ -6,20 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.get_flavor import GetFlavor
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_flavor
class GetFlavorTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_flavor_by_name')
- def test_get_flavor(self, mock_get_flavor_by_name):
- options = {
- 'flavor_name': 'yardstick_test_flavor'
- }
- args = {"options": options}
- obj = GetFlavor(args, {})
- obj.run({})
- mock_get_flavor_by_name.assert_called_once()
+ def setUp(self):
+ self._mock_get_flavor = mock.patch.object(
+ openstack_utils, 'get_flavor')
+ self.mock_get_flavor = self._mock_get_flavor.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(get_flavor, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_flavor'}}
+ self.result = {}
+
+ self.getflavor_obj = get_flavor.GetFlavor(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_flavor.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.getflavor_obj.scenario_cfg = {'output': 'flavor'}
+ self.mock_get_flavor.return_value = (
+ {'name': 'flavor-name', 'id': _uuid})
+ output = self.getflavor_obj.run(self.result)
+ self.assertDictEqual({'get_flavor': 1}, self.result)
+ self.assertDictEqual({'flavor': {'name': 'flavor-name', 'id': _uuid}},
+ output)
+ self.mock_log.info.assert_called_once_with('Get flavor successful!')
+
+ def test_run_fail(self):
+ self.mock_get_flavor.return_value = None
+ with self.assertRaises(exceptions.ScenarioGetFlavorError):
+ self.getflavor_obj.run(self.result)
+ self.assertDictEqual({'get_flavor': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Get flavor failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
index 83ec903bc..5b5329cb0 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
@@ -6,37 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.get_server import GetServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_server
class GetServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_server_by_name')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_get_server_with_name(self, mock_get_nova_client, mock_get_server_by_name):
- scenario_cfg = {
- 'options': {
- 'server_name': 'yardstick_server'
- },
- 'output': 'status server'
- }
- obj = GetServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_get_server_by_name.assert_called_once()
-
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_get_server_with_id(self, mock_get_nova_client):
- scenario_cfg = {
- 'options': {
- 'server_id': '1'
- },
- 'output': 'status server'
- }
- mock_get_nova_client().servers.get.return_value = None
- obj = GetServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called()
+ def setUp(self):
+ self._mock_get_server = mock.patch.object(
+ openstack_utils, 'get_server')
+ self.mock_get_server = self._mock_get_server.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(get_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_key'}}
+ self.result = {}
+
+ self.getserver_obj = get_server.GetServer(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_server.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.getserver_obj.scenario_cfg = {'output': 'server'}
+ self.mock_get_server.return_value = (
+ {'name': 'server-name', 'id': _uuid})
+ output = self.getserver_obj.run(self.result)
+ self.assertDictEqual({'get_server': 1}, self.result)
+ self.assertDictEqual({'server': {'name': 'server-name', 'id': _uuid}},
+ output)
+ self.mock_log.info.assert_called_once_with('Get Server successful!')
+
+ def test_run_fail(self):
+ self.mock_get_server.return_value = None
+ with self.assertRaises(exceptions.ScenarioGetServerError):
+ self.getserver_obj.run(self.result)
+ self.assertDictEqual({'get_server': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Get Server failed!')
diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py
index 4dc4a7082..a1a4af2e9 100644
--- a/yardstick/tests/unit/common/test_openstack_utils.py
+++ b/yardstick/tests/unit/common/test_openstack_utils.py
@@ -10,8 +10,10 @@
from oslo_utils import uuidutils
import unittest
import mock
-
+import shade
from shade import exc
+
+from yardstick.common import constants
from yardstick.common import openstack_utils
@@ -35,6 +37,29 @@ class GetHeatApiVersionTestCase(unittest.TestCase):
self.assertEqual(api_version, expected_result)
+class GetShadeClientTestCase(unittest.TestCase):
+
+ @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+ def test_get_shade_client(self, mock_openstack_cloud):
+ os_cloud_config = {'param1': True, 'param2': 'value2'}
+ self.assertEqual('os_client',
+ openstack_utils.get_shade_client(**os_cloud_config))
+ os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
+ mock_openstack_cloud.reset_mock()
+ os_cloud_config = {'verify': True, 'param2': 'value2'}
+ self.assertEqual('os_client',
+ openstack_utils.get_shade_client(**os_cloud_config))
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
+ @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+ def test_get_shade_client_no_parameters(self, mock_openstack_cloud):
+ self.assertEqual('os_client', openstack_utils.get_shade_client())
+ mock_openstack_cloud.assert_called_once_with(
+ **constants.OS_CLOUD_DEFAULT_CONFIG)
+
+
class DeleteNeutronNetTestCase(unittest.TestCase):
def setUp(self):
@@ -445,3 +470,189 @@ class DeleteKeypairTestCase(unittest.TestCase):
'key_name')
mock_logger.error.assert_called_once()
self.assertFalse(output)
+
+
+class AttachVolumeToServerTestCase(unittest.TestCase):
+
+ def test_attach_volume_to_server(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ self.mock_shade_client.attach_volume.return_value = True
+ output = openstack_utils.attach_volume_to_server(
+ self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+ self.assertTrue(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_attach_volume_to_server_fail(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.attach_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.attach_volume_to_server(
+ self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class GetServerTestCase(unittest.TestCase):
+
+ def test_get_server(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_server.return_value = {
+ 'name': 'server_name', 'id': _uuid}
+ output = openstack_utils.get_server(self.mock_shade_client,
+ 'server_name_or_id')
+ self.assertEqual({'name': 'server_name', 'id': _uuid}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_get_server_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.get_server(self.mock_shade_client,
+ 'server_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class GetFlavorTestCase(unittest.TestCase):
+
+ def test_get_flavor(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_flavor.return_value = {
+ 'name': 'flavor_name', 'id': _uuid}
+ output = openstack_utils.get_flavor(self.mock_shade_client,
+ 'flavor_name_or_id')
+ self.assertEqual({'name': 'flavor_name', 'id': _uuid}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_get_flavor_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_flavor.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.get_flavor(self.mock_shade_client,
+ 'flavor_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+# *********************************************
+# CINDER
+# *********************************************
+
+
+class GetVolumeIDTestCase(unittest.TestCase):
+
+ def test_get_volume_id(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_volume_id.return_value = _uuid
+ output = openstack_utils.get_volume_id(self.mock_shade_client,
+ 'volume_name')
+ self.assertEqual(_uuid, output)
+
+ def test_get_volume_id_None(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_volume_id.return_value = None
+ output = openstack_utils.get_volume_id(self.mock_shade_client,
+ 'volume_name')
+ self.assertIsNone(output)
+
+
+class GetVolumeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_volume = mock.Mock()
+
+ def test_get_volume(self):
+ self.mock_shade_client.get_volume.return_value = {'volume'}
+ output = openstack_utils.get_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertEqual({'volume'}, output)
+
+ def test_get_volume_None(self):
+ self.mock_shade_client.get_volume.return_value = None
+ output = openstack_utils.get_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertIsNone(output)
+
+
+class CreateVolumeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.size = 1
+
+ def test_create_volume(self):
+ self.mock_shade_client.create_volume.return_value = (
+ {'name': 'volume-name', 'size': self.size})
+ output = openstack_utils.create_volume(
+ self.mock_shade_client, self.size)
+ self.assertEqual(
+ {'name': 'volume-name', 'size': self.size},
+ output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_volume_fail(self, mock_logger):
+ self.mock_shade_client.create_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_volume(self.mock_shade_client,
+ self.size)
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteVolumeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+
+ def test_delete_volume(self):
+ self.mock_shade_client.delete_volume.return_value = True
+ output = openstack_utils.delete_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertTrue(output)
+
+ def test_delete_volume_fail(self):
+ self.mock_shade_client.delete_volume.return_value = False
+ output = openstack_utils.delete_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_volume_exception(self, mock_logger):
+ self.mock_shade_client.delete_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class DetachVolumeTestCase(unittest.TestCase):
+
+ @mock.patch.object(openstack_utils, 'get_server')
+ def test_detach_volume(self, mock_get_server):
+ self.mock_shade_client = mock.Mock()
+ mock_get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ output = openstack_utils.detach_volume(self.mock_shade_client,
+ 'server_name_or_id',
+ 'volume_name_or_id')
+ self.assertTrue(output)
+
+ @mock.patch.object(openstack_utils, 'get_server')
+ @mock.patch.object(openstack_utils, 'log')
+ def test_detach_volume_exception(self, mock_logger, mock_get_server):
+ self.mock_shade_client = mock.Mock()
+ mock_get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ self.mock_shade_client.detach_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.detach_volume(self.mock_shade_client,
+ 'server_name_or_id',
+ 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
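Note: GetShadeClientTestCase pins the expected contract of openstack_utils.get_shade_client: caller parameters are layered over constants.OS_CLOUD_DEFAULT_CONFIG, with explicit caller keys winning. A sketch consistent with those assertions (not necessarily the production body):

    import shade

    from yardstick.common import constants

    def get_shade_client(**os_cloud_config):
        """Return a shade client seeded with Yardstick's default config."""
        params = dict(constants.OS_CLOUD_DEFAULT_CONFIG)
        params.update(os_cloud_config)  # caller values override defaults
        return shade.openstack_cloud(**params)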
diff --git a/yardstick/tests/unit/common/test_packages.py b/yardstick/tests/unit/common/test_packages.py
new file mode 100644
index 000000000..ba59a3015
--- /dev/null
+++ b/yardstick/tests/unit/common/test_packages.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from pip import exceptions as pip_exceptions
+from pip.operations import freeze
+import unittest
+
+from yardstick.common import packages
+
+
+class PipExecuteActionTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_pip_main = mock.patch.object(packages, '_pip_main')
+ self.mock_pip_main = self._mock_pip_main.start()
+ self.mock_pip_main.return_value = 0
+ self._mock_freeze = mock.patch.object(freeze, 'freeze')
+ self.mock_freeze = self._mock_freeze.start()
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_pip_main.stop()
+ self._mock_freeze.stop()
+
+ def test_pip_execute_action(self):
+ self.assertEqual(0, packages._pip_execute_action('test_package'))
+
+ def test_remove(self):
+ self.assertEqual(0, packages._pip_execute_action('test_package',
+ action='uninstall'))
+
+ def test_install(self):
+ self.assertEqual(0, packages._pip_execute_action(
+ 'test_package', action='install', target='temp_dir'))
+
+ def test_pip_execute_action_error(self):
+ self.mock_pip_main.return_value = 1
+ self.assertEqual(1, packages._pip_execute_action('test_package'))
+
+ def test_pip_execute_action_exception(self):
+ self.mock_pip_main.side_effect = pip_exceptions.PipError
+ self.assertEqual(1, packages._pip_execute_action('test_package'))
+
+ def test_pip_list(self):
+ pkg_input = [
+ 'XStatic-Rickshaw==1.5.0.0',
+ 'xvfbwrapper==0.2.9',
+ '-e git+https://git.opnfv.org/yardstick@50773a24afc02c9652b662ecca'
+ '2fc5621ea6097a#egg=yardstick',
+ 'zope.interface==4.4.3'
+ ]
+ pkg_dict = {
+ 'XStatic-Rickshaw': '1.5.0.0',
+ 'xvfbwrapper': '0.2.9',
+ 'yardstick': '50773a24afc02c9652b662ecca2fc5621ea6097a',
+ 'zope.interface': '4.4.3'
+ }
+ self.mock_freeze.return_value = pkg_input
+
+ pkg_output = packages.pip_list()
+ for pkg_name, pkg_version in pkg_output.items():
+ self.assertEqual(pkg_dict.get(pkg_name), pkg_version)
+
+ def test_pip_list_single_package(self):
+ pkg_input = [
+ 'XStatic-Rickshaw==1.5.0.0',
+ 'xvfbwrapper==0.2.9',
+ '-e git+https://git.opnfv.org/yardstick@50773a24afc02c9652b662ecca'
+ '2fc5621ea6097a#egg=yardstick',
+ 'zope.interface==4.4.3'
+ ]
+ self.mock_freeze.return_value = pkg_input
+
+ pkg_output = packages.pip_list(pkg_name='xvfbwrapper')
+ self.assertEqual(1, len(pkg_output))
+ self.assertEqual(pkg_output.get('xvfbwrapper'), '0.2.9')
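Note: the pip_list fixtures encode two freeze formats: plain 'name==version' entries, and editable '-e git+<url>@<sha>#egg=<name>' entries whose version is the git SHA. A parsing sketch consistent with those fixtures (the regex is illustrative, not the production code):

    import re

    # Matches '-e git+<url>@<sha>#egg=<name>' editable installs.
    _EDITABLE = re.compile(r'-e .+@(?P<version>[0-9a-f]+)#egg=(?P<name>\S+)')

    def parse_freeze_line(line):
        """Return (name, version) for one line of `pip freeze` output."""
        match = _EDITABLE.match(line)
        if match:
            return match.group('name'), match.group('version')
        name, _, version = line.partition('==')
        return name, version

    print(parse_freeze_line('zope.interface==4.4.3'))
    # ('zope.interface', '4.4.3')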
diff --git a/tests/unit/network_services/collector/__init__.py b/yardstick/tests/unit/network_services/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/collector/__init__.py
+++ b/yardstick/tests/unit/network_services/__init__.py
diff --git a/tests/unit/network_services/helpers/__init__.py b/yardstick/tests/unit/network_services/collector/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/helpers/__init__.py
+++ b/yardstick/tests/unit/network_services/collector/__init__.py
diff --git a/tests/unit/network_services/collector/test_publisher.py b/yardstick/tests/unit/network_services/collector/test_publisher.py
index 4a175841d..145441ddd 100644
--- a/tests/unit/network_services/collector/test_publisher.py
+++ b/yardstick/tests/unit/network_services/collector/test_publisher.py
@@ -13,9 +13,6 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.collector.publisher
-
-from __future__ import absolute_import
import unittest
from yardstick.network_services.collector import publisher
diff --git a/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py
index d4b4ecf7a..14e26f7fe 100644
--- a/tests/unit/network_services/collector/test_subscriber.py
+++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py
@@ -13,13 +13,11 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.collector.subscriber
-
-from __future__ import absolute_import
import unittest
import mock
from yardstick.network_services.collector import subscriber
+from yardstick import ssh
class MockVnfAprrox(object):
@@ -40,57 +38,41 @@ class MockVnfAprrox(object):
class CollectorTestCase(unittest.TestCase):
- NODES = {
- 'node1': {},
- 'node2': {
- 'ip': '1.2.3.4',
- 'collectd': {
- 'plugins': {'abc': 12, 'def': 34},
- 'interval': 987,
- },
- },
- }
- TRAFFIC_PROFILE = {
- 'key1': 'value1',
- }
-
def setUp(self):
vnf = MockVnfAprrox()
- self.ssh_patch = mock.patch('yardstick.network_services.nfvi.resource.ssh', autospec=True)
+ vnf.start_collect = mock.Mock()
+ vnf.stop_collect = mock.Mock()
+ self.ssh_patch = mock.patch.object(ssh, 'AutoConnectSSH')
mock_ssh = self.ssh_patch.start()
mock_instance = mock.Mock()
mock_instance.execute.return_value = 0, '', ''
- mock_ssh.AutoConnectSSH.from_node.return_value = mock_instance
- self.collector = subscriber.Collector([vnf], self.NODES, self.TRAFFIC_PROFILE, 1800)
+ mock_ssh.from_node.return_value = mock_instance
+ self.collector = subscriber.Collector([vnf])
def tearDown(self):
self.ssh_patch.stop()
def test___init__(self, *_):
vnf = MockVnfAprrox()
- collector = subscriber.Collector([vnf], {}, {})
+ collector = subscriber.Collector([vnf])
self.assertEqual(len(collector.vnfs), 1)
- self.assertEqual(collector.traffic_profile, {})
-
- def test___init___with_data(self, *_):
- self.assertEqual(len(self.collector.vnfs), 1)
- self.assertDictEqual(self.collector.traffic_profile, self.TRAFFIC_PROFILE)
- self.assertEqual(len(self.collector.resource_profiles), 1)
-
- def test___init___negative(self, *_):
- pass
def test_start(self, *_):
self.assertIsNone(self.collector.start())
+ for vnf in self.collector.vnfs:
+ vnf.start_collect.assert_called_once()
def test_stop(self, *_):
self.assertIsNone(self.collector.stop())
+ for vnf in self.collector.vnfs:
+ vnf.stop_collect.assert_called_once()
def test_get_kpi(self, *_):
result = self.collector.get_kpi()
+ self.assertEqual(1, len(result))
+ self.assertEqual(4, len(result["vnf__1"]))
self.assertEqual(result["vnf__1"]["pkt_in_up_stream"], 100)
self.assertEqual(result["vnf__1"]["pkt_drop_up_stream"], 5)
self.assertEqual(result["vnf__1"]["pkt_in_down_stream"], 50)
self.assertEqual(result["vnf__1"]["pkt_drop_down_stream"], 40)
- self.assertIn('node2', result)
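Note: the slimmed-down CollectorTestCase reflects the new split of responsibilities: Collector no longer builds resource profiles from node data, it only fans start/stop out to the VNFs, which own their resource helpers (see the start_collect/stop_collect methods added to SampleVNF above). A sketch of the shape the tests assert, assuming this simplified Collector:

    class Collector(object):
        """Fan collection control out to the VNFs (simplified sketch)."""

        def __init__(self, vnfs):
            self.vnfs = vnfs

        def start(self):
            for vnf in self.vnfs:
                vnf.start_collect()   # SampleVNF forwards to resource_helper

        def stop(self):
            for vnf in self.vnfs:
                vnf.stop_collect()

        def get_kpi(self):
            return {vnf.name: vnf.collect_kpi() for vnf in self.vnfs}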
diff --git a/tests/unit/network_services/libs/__init__.py b/yardstick/tests/unit/network_services/helpers/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/libs/__init__.py
+++ b/yardstick/tests/unit/network_services/helpers/__init__.py
diff --git a/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml b/yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml
index f60834fbd..f60834fbd 100644
--- a/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml
+++ b/yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml
diff --git a/tests/unit/network_services/helpers/test_cpu.py b/yardstick/tests/unit/network_services/helpers/test_cpu.py
index 1f9d3f219..871fbf8c9 100644
--- a/tests/unit/network_services/helpers/test_cpu.py
+++ b/yardstick/tests/unit/network_services/helpers/test_cpu.py
@@ -13,7 +13,6 @@
# limitations under the License.
#
-from __future__ import absolute_import
from __future__ import division
import unittest
import mock
@@ -25,15 +24,20 @@ from yardstick.network_services.helpers.cpu import \
class TestCpuSysCores(unittest.TestCase):
+ def setUp(self):
+ self._mock_ssh = mock.patch("yardstick.ssh.SSH")
+ self.mock_ssh = self._mock_ssh.start()
+
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_ssh.stop()
+
def test___init__(self):
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(1, "", ""))
- ssh_mock.put = \
- mock.Mock(return_value=(1, "", ""))
- cpu_topo = CpuSysCores(ssh_mock)
- self.assertIsNotNone(cpu_topo.connection)
+ self.mock_ssh.execute.return_value = 1, "", ""
+ self.mock_ssh.put.return_value = 1, "", ""
+ cpu_topo = CpuSysCores(self.mock_ssh)
+ self.assertIsNotNone(cpu_topo.connection)
def test__get_core_details(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -52,7 +56,7 @@ class TestCpuSysCores(unittest.TestCase):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
ssh_mock.put = \
mock.Mock(return_value=(1, "", ""))
cpu_topo = CpuSysCores(ssh_mock)
@@ -68,7 +72,7 @@ class TestCpuSysCores(unittest.TestCase):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
ssh_mock.put = \
mock.Mock(return_value=(1, "", ""))
cpu_topo = CpuSysCores(ssh_mock)
@@ -77,14 +81,14 @@ class TestCpuSysCores(unittest.TestCase):
mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
[{'physical id': '2', 'processor': '1'}]])
cpu_topo.core_map = \
- {'thread_per_core': '1', '2':['1'], 'cores_per_socket': '2'}
+ {'thread_per_core': '1', '2': ['1'], 'cores_per_socket': '2'}
self.assertEqual(-1, cpu_topo.validate_cpu_cfg())
def test_validate_cpu_cfg_2t(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
ssh_mock.put = \
mock.Mock(return_value=(1, "", ""))
cpu_topo = CpuSysCores(ssh_mock)
@@ -93,7 +97,7 @@ class TestCpuSysCores(unittest.TestCase):
mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
[{'physical id': '2', 'processor': '1'}]])
cpu_topo.core_map = \
- {'thread_per_core': 1, '2':['1'], 'cores_per_socket': '2'}
+ {'thread_per_core': 1, '2': ['1'], 'cores_per_socket': '2'}
vnf_cfg = {'lb_config': 'SW', 'lb_count': 1, 'worker_config':
'1C/2T', 'worker_threads': 1}
self.assertEqual(-1, cpu_topo.validate_cpu_cfg(vnf_cfg))
@@ -102,7 +106,7 @@ class TestCpuSysCores(unittest.TestCase):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
ssh_mock.put = \
mock.Mock(return_value=(1, "", ""))
cpu_topo = CpuSysCores(ssh_mock)
@@ -111,7 +115,7 @@ class TestCpuSysCores(unittest.TestCase):
mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
[{'physical id': '2', 'processor': '1'}]])
cpu_topo.core_map = \
- {'thread_per_core': 1, '2':[1], 'cores_per_socket': 2}
+ {'thread_per_core': 1, '2': [1], 'cores_per_socket': 2}
vnf_cfg = {'lb_config': 'SW', 'lb_count': 1, 'worker_config':
'1C/1T', 'worker_threads': 1}
self.assertEqual(-1, cpu_topo.validate_cpu_cfg(vnf_cfg))
diff --git a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py b/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
index 367072e84..367072e84 100644
--- a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
+++ b/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
diff --git a/tests/unit/network_services/helpers/test_iniparser.py b/yardstick/tests/unit/network_services/helpers/test_iniparser.py
index bd27b497e..1a09f0761 100644
--- a/tests/unit/network_services/helpers/test_iniparser.py
+++ b/yardstick/tests/unit/network_services/helpers/test_iniparser.py
@@ -13,13 +13,11 @@
# limitations under the License.
#
-from __future__ import absolute_import
-
import unittest
from contextlib import contextmanager
import mock
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
@@ -105,7 +103,7 @@ class TestBaseParser(unittest.TestCase):
@staticmethod
def make_open(text_blob):
@contextmanager
- def internal_open(*args, **kwargs):
+ def internal_open(*args):
yield text_blob.split('\n')
return internal_open
@@ -136,7 +134,7 @@ class TestConfigParser(unittest.TestCase):
@staticmethod
def make_open(text_blob):
@contextmanager
- def internal_open(*args, **kwargs):
+ def internal_open(*args):
yield text_blob.split('\n')
return internal_open
diff --git a/tests/unit/network_services/helpers/test_samplevnf_helper.py b/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
index dc74b1859..6d5e1da60 100644
--- a/tests/unit/network_services/helpers/test_samplevnf_helper.py
+++ b/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
@@ -756,7 +756,6 @@ class TestMultiPortConfig(unittest.TestCase):
self.assertIsNone(result)
def test_generate_arp_route_tbl(self):
- # ELF: could n=do this in setup
topology_file = mock.Mock()
config_tpl = mock.Mock()
tmp_file = ""
diff --git a/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/libs/ixia_libs/__init__.py
+++ b/yardstick/tests/unit/network_services/libs/__init__.py
diff --git a/tests/unit/network_services/nfvi/__init__.py b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/nfvi/__init__.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
diff --git a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
index 2a97048aa..7ca2f0f19 100644
--- a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
@@ -13,34 +13,38 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.libs.ixia_libs.IxNet
-
-from __future__ import absolute_import
-import unittest
import mock
+import IxNetwork
+import unittest
from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IxNextgen
from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_4
from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_6
-
UPLINK = "uplink"
DOWNLINK = "downlink"
+
class TestIxNextgen(unittest.TestCase):
def test___init__(self):
ixnet_gen = IxNextgen()
self.assertIsNone(ixnet_gen._bidir)
- @mock.patch("yardstick.network_services.libs.ixia_libs.IxNet.IxNet.sys")
- def test_connect(self, *args):
-
+ @mock.patch.object(IxNetwork, 'IxNet')
+ def test_connect(self, mock_ixnet):
+ ixnet_instance = mock.Mock()
+ mock_ixnet.return_value = ixnet_instance
ixnet_gen = IxNextgen()
- ixnet_gen.get_config = mock.MagicMock()
- ixnet_gen.get_ixnet = mock.MagicMock()
+ with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+ mock_config.return_value = {'machine': 'machine_fake',
+ 'port': 'port_fake',
+ 'version': 12345}
+ ixnet_gen._connect(mock.ANY)
- self.assertRaises(ImportError, ixnet_gen._connect, {"py_lib_path": "/tmp"})
+ ixnet_instance.connect.assert_called_once_with(
+ 'machine_fake', '-port', 'port_fake', '-version', '12345')
+ mock_config.assert_called_once()
def test_clear_ixia_config(self):
ixnet = mock.MagicMock()
@@ -628,11 +632,9 @@ class TestIxNextgen(unittest.TestCase):
def test_set_random_ip_multi_attributes_bad_ip_version(self):
bad_ip_version = object()
ixnet_gen = IxNextgen(mock.Mock())
- mock1 = mock.Mock()
- mock2 = mock.Mock()
- mock3 = mock.Mock()
with self.assertRaises(ValueError):
- ixnet_gen.set_random_ip_multi_attributes(mock1, bad_ip_version, mock2, mock3)
+ ixnet_gen.set_random_ip_multi_attributes(
+ mock.Mock(), bad_ip_version, mock.Mock(), mock.Mock())
def test_get_config(self):
tg_cfg = {
@@ -659,13 +661,11 @@ class TestIxNextgen(unittest.TestCase):
"version": "test3",
"ixchassis": "test4",
"tcl_port": "test5",
- "py_lib_path": "test6",
},
}
}
expected = {
- 'py_lib_path': 'test6',
'machine': 'test1',
'port': 'test5',
'chassis': 'test4',
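Note: test_connect now drives a real IxNetwork.IxNet handle instead of asserting an ImportError from the removed py_lib_path mechanism. A sketch of the _connect behaviour the test pins down (class abbreviated; the config keys come from get_config):

    import IxNetwork  # Ixia's IxNetwork client library

    class IxNextgen(object):  # abbreviated sketch

        def _connect(self, tg_cfg):
            self._cfg = self.get_config(tg_cfg)
            self.ixnet = IxNetwork.IxNet()
            # The version is stringified, matching the test's assertion that
            # connect() receives '12345', not 12345.
            return self.ixnet.connect(self._cfg['machine'], '-port',
                                      self._cfg['port'], '-version',
                                      str(self._cfg['version']))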
diff --git a/tests/unit/network_services/traffic_profile/__init__.py b/yardstick/tests/unit/network_services/nfvi/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/traffic_profile/__init__.py
+++ b/yardstick/tests/unit/network_services/nfvi/__init__.py
diff --git a/tests/unit/network_services/nfvi/test_collectd.py b/yardstick/tests/unit/network_services/nfvi/test_collectd.py
index 0ae175624..fe59aecfb 100644
--- a/tests/unit/network_services/nfvi/test_collectd.py
+++ b/yardstick/tests/unit/network_services/nfvi/test_collectd.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import unittest
import multiprocessing
import mock
diff --git a/tests/unit/network_services/nfvi/test_resource.py b/yardstick/tests/unit/network_services/nfvi/test_resource.py
index f5f7f0fe7..9f337c673 100644
--- a/tests/unit/network_services/nfvi/test_resource.py
+++ b/yardstick/tests/unit/network_services/nfvi/test_resource.py
@@ -19,7 +19,8 @@ import unittest
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.nfvi import resource, collectd
-
+from yardstick.common.exceptions import ResourceCommandError
+from yardstick import ssh
class TestResourceProfile(unittest.TestCase):
VNFD = {'vnfd:vnfd-catalog':
@@ -128,8 +129,31 @@ class TestResourceProfile(unittest.TestCase):
self.assertEqual(val, ('error', 'Invalid', '', ''))
def test__start_collectd(self):
- self.assertIsNone(
- self.resource_profile._start_collectd(self.ssh_mock, "/opt/nsb_bin"))
+ ssh_mock = mock.Mock()
+ ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+ self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+ "/opt/nsb_bin"))
+
+ ssh_mock.execute = mock.Mock(side_effect=ssh.SSHError)
+ with self.assertRaises(ssh.SSHError):
+ self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin")
+
+ ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+ self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+ "/opt/nsb_bin"))
+
+ def test__start_rabbitmq(self):
+ ssh_mock = mock.Mock()
+ ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", ""))
+ self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock))
+
+ ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+ with self.assertRaises(ResourceCommandError):
+ self.resource_profile._start_rabbitmq(ssh_mock)
+
+ ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+ with self.assertRaises(ResourceCommandError):
+ self.resource_profile._start_rabbitmq(ssh_mock)
def test__prepare_collectd_conf(self):
self.assertIsNone(
@@ -154,11 +178,12 @@ class TestResourceProfile(unittest.TestCase):
def test_initiate_systemagent(self):
self.resource_profile._start_collectd = mock.Mock()
+ self.resource_profile._start_rabbitmq = mock.Mock()
self.assertIsNone(
self.resource_profile.initiate_systemagent("/opt/nsb_bin"))
def test_initiate_systemagent_raise(self):
- self.resource_profile._start_collectd = mock.Mock(side_effect=RuntimeError)
+ self.resource_profile._start_rabbitmq = mock.Mock(side_effect=RuntimeError)
with self.assertRaises(RuntimeError):
self.resource_profile.initiate_systemagent("/opt/nsb_bin")
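Note: test__start_rabbitmq fixes the failure contract: a non-zero exit status, or status output without the string "RabbitMQ", must raise ResourceCommandError. A behavioural sketch consistent with the test (the command string and constructor arguments are illustrative):

    from yardstick.common.exceptions import ResourceCommandError

    def _start_rabbitmq(self, connection):  # method of ResourceProfile
        exit_status, stdout, _ = connection.execute("sudo rabbitmqctl status")
        if exit_status != 0 or "RabbitMQ" not in stdout:
            # Constructor kwargs elided; the test asserts only the type.
            raise ResourceCommandError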
diff --git a/tests/unit/network_services/test_utils.py b/yardstick/tests/unit/network_services/test_utils.py
index bf98a4474..2b2eb7109 100644
--- a/tests/unit/network_services/test_utils.py
+++ b/yardstick/tests/unit/network_services/test_utils.py
@@ -13,8 +13,6 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.utils
-
import os
import unittest
import mock
diff --git a/tests/unit/network_services/test_yang_model.py b/yardstick/tests/unit/network_services/test_yang_model.py
index 0b29da701..a7eb36b8a 100644
--- a/tests/unit/network_services/test_yang_model.py
+++ b/yardstick/tests/unit/network_services/test_yang_model.py
@@ -13,14 +13,8 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.utils
-
-from __future__ import absolute_import
-
-import unittest
import mock
-
-import yaml
+import unittest
from yardstick.network_services.yang_model import YangModel
@@ -95,9 +89,9 @@ class YangModelTestCase(unittest.TestCase):
y._get_entries()
self.assertEqual(y._rules, '')
- @mock.patch('yardstick.network_services.yang_model.yaml_load')
@mock.patch('yardstick.network_services.yang_model.open')
- def test__read_config(self, mock_open, mock_safe_load):
+ @mock.patch('yardstick.network_services.yang_model.yaml_load')
+ def test__read_config(self, mock_safe_load, *args):
cfg = "yang.yaml"
y = YangModel(cfg)
mock_safe_load.return_value = expected = {'key1': 'value1', 'key2': 'value2'}
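Note: the decorator reshuffles here and in test_http_ixload.py follow the same rule: stacked mock.patch decorators apply bottom-up, so the decorator nearest the function supplies the first mock argument. A standalone illustration:

    import mock

    @mock.patch('os.remove')   # outermost: applied last, second argument
    @mock.patch('os.rename')   # nearest the function: first argument
    def demo(mock_rename, mock_remove):
        print(mock_rename is not mock_remove)  # True

    demo()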
diff --git a/yardstick/tests/unit/network_services/traffic_profile/__init__.py b/yardstick/tests/unit/network_services/traffic_profile/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/__init__.py
diff --git a/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
index 3b8804976..3b8804976 100644
--- a/tests/unit/network_services/traffic_profile/test_base.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
diff --git a/tests/unit/network_services/traffic_profile/test_fixed.py b/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
index dec94964b..39905e6b1 100644
--- a/tests/unit/network_services/traffic_profile/test_fixed.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
@@ -13,12 +13,10 @@
# limitations under the License.
#
-from __future__ import absolute_import
-
-import unittest
import mock
+import unittest
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_http.py b/yardstick/tests/unit/network_services/traffic_profile/test_http.py
index 5d8029ea0..0d1b916a7 100644
--- a/tests/unit/network_services/traffic_profile/test_http.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_http.py
@@ -13,7 +13,6 @@
# limitations under the License.
#
-from __future__ import absolute_import
import unittest
from yardstick.network_services.traffic_profile.base import TrafficProfile
@@ -29,7 +28,7 @@ class TestTrafficProfileGenericHTTP(unittest.TestCase):
def test_execute(self):
traffic_profile_generic_htt_p = \
- TrafficProfileGenericHTTP(TrafficProfile)
+ TrafficProfileGenericHTTP(TrafficProfile)
traffic_generator = {}
self.assertIsNone(
traffic_profile_generic_htt_p.execute(traffic_generator))
diff --git a/tests/unit/network_services/traffic_profile/test_http_ixload.py b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
index 5110439fd..57de6602d 100644
--- a/tests/unit/network_services/traffic_profile/test_http_ixload.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from __future__ import absolute_import
import unittest
import mock
@@ -187,9 +185,9 @@ class TestIxLoadTrafficGen(unittest.TestCase):
with self.assertRaises(Exception):
ixload.load_config_file("ixload.cfg")
- @mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
- def test_start_http_test_connect_error(self, mock_collector_type, mock_ixload_type):
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
+ def test_start_http_test_connect_error(self, mock_ixload_type, *args):
ports = [1, 2, 3]
test_input = {
"remote_server": "REMOTE_SERVER",
@@ -204,8 +202,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
j = jsonutils.dump_as_bytes(test_input)
- mock_ixload = mock_ixload_type()
- mock_ixload.connect.side_effect = RuntimeError
+ mock_ixload_type.return_value.connect.side_effect = RuntimeError
ixload = http_ixload.IXLOADHttpTest(j)
ixload.results_on_windows = 'windows_result_dir'
@@ -216,7 +213,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
- def test_start_http_test(self, mock_collector_type, mock_ixload_type):
+ def test_start_http_test(self, *args):
ports = [1, 2, 3]
test_input = {
"remote_server": "REMOTE_SERVER",
@@ -240,7 +237,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
@mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
- def test_start_http_test_reassign_error(self, mock_collector_type, mock_ixload_type):
+ def test_start_http_test_reassign_error(self, *args):
ports = [1, 2, 3]
test_input = {
"remote_server": "REMOTE_SERVER",
@@ -264,9 +261,9 @@ class TestIxLoadTrafficGen(unittest.TestCase):
ixload.result_dir = 'my_result_dir'
ixload.start_http_test()
- self.assertEqual(reassign_ports.call_count, 1)
+ reassign_ports.assert_called_once()
@mock.patch("yardstick.network_services.traffic_profile.http_ixload.IXLOADHttpTest")
- def test_main(self, IXLOADHttpTest):
+ def test_main(self, *args):
args = ["1", "2", "3"]
http_ixload.main(args)
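Two patterns recur in the hunks above. First, stacked mock.patch decorators apply bottom-up, so the patch written closest to the def supplies the first injected mock; swapping the IxLoad and StatCollectorUtils decorators keeps the named parameter pointing at the mock the test actually configures. Second, mock_ixload_type.return_value.connect.side_effect configures the instance the patched class will return, so the test no longer needs to pre-instantiate the mock, and mocks a test never touches are absorbed by *args (in test_main the local args list immediately rebinds that name, which is harmless). A self-contained sketch of both rules, with Engine and Collector as invented stand-ins for the patched classes:

    import unittest

    import mock


    class Engine(object):
        def connect(self):
            return True


    class Collector(object):
        pass


    class DecoratorOrderTestCase(unittest.TestCase):

        @mock.patch('%s.Collector' % __name__)
        @mock.patch('%s.Engine' % __name__)
        def test_connect_error(self, mock_engine, *args):
            # Bottom decorator first: mock_engine is the Engine patch,
            # and *args absorbs the Collector mock this test ignores.
            mock_engine.return_value.connect.side_effect = RuntimeError
            with self.assertRaises(RuntimeError):
                Engine().connect()


    if __name__ == '__main__':
        unittest.main()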
diff --git a/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index e8910d62b..a0abe2bbd 100644
--- a/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -13,14 +13,12 @@
# limitations under the License.
#
-from __future__ import absolute_import
-from __future__ import division
import unittest
import mock
from copy import deepcopy
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -48,35 +46,55 @@ class TestIXIARFC2544Profile(unittest.TestCase):
},
}
- PROFILE = {'description': 'Traffic profile to run RFC2544 latency',
- 'name': 'rfc2544',
- 'traffic_profile': {'traffic_type': 'IXIARFC2544Profile',
- 'frame_rate': 100},
- IXIARFC2544Profile.DOWNLINK: {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'}},
- 'outer_l3v4': {'dstip4': '1.1.1.1-1.15.255.255',
- 'proto': 'udp', 'count': '1',
- 'srcip4': '90.90.1.1-90.105.255.255',
- 'dscp': 0, 'ttl': 32},
- 'outer_l4': {'srcport': '2001',
- 'dsrport': '1234'}}},
- IXIARFC2544Profile.UPLINK: {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'}},
- 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
- 'proto': 'udp', 'count': '1',
- 'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32},
- 'outer_l4': {'dstport': '2001',
- 'srcport': '1234'}}},
- 'schema': 'isb:traffic_profile:0.1'}
+ PROFILE = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {
+ 'traffic_type': 'IXIARFC2544Profile',
+ 'frame_rate': 100},
+ IXIARFC2544Profile.DOWNLINK: {
+ 'ipv4': {
+ 'outer_l2': {
+ 'framesize': {
+ '64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4': {
+ 'dstip4': '1.1.1.1-1.15.255.255',
+ 'proto': 'udp',
+ 'count': '1',
+ 'srcip4': '90.90.1.1-90.105.255.255',
+ 'dscp': 0,
+ 'ttl': 32},
+ 'outer_l4': {
+ 'srcport': '2001',
+ 'dsrport': '1234'}}},
+ IXIARFC2544Profile.UPLINK: {
+ 'ipv4': {
+ 'outer_l2': {
+ 'framesize': {
+ '64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4': {
+ 'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'count': '1',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0,
+ 'ttl': 32},
+ 'outer_l4': {
+ 'dstport': '2001',
+ 'srcport': '1234'}}},
+ 'schema': 'isb:traffic_profile:0.1'}
def test_get_ixia_traffic_profile_error(self):
traffic_generator = mock.Mock(autospec=TrexProfile)
@@ -215,7 +233,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"count": "1"
},
"outer_l3v6": {
- "count": 1024,
"dscp": 0,
"dstip4": "152.16.100.20",
"proto": "udp",
@@ -249,7 +266,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"ttl": 32
},
"outer_l3v4": {
- "count": 1024,
"dscp": 0,
"dstip4": "152.16.100.20",
"proto": "udp",
@@ -257,7 +273,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"ttl": 32,
},
"outer_l3v6": {
- "count": 1024,
"dscp": 0,
"dstip4": "152.16.100.20",
"proto": "udp",
@@ -408,24 +423,24 @@ class TestIXIARFC2544Profile(unittest.TestCase):
'outer_l4': {'srcport': '2001',
'dsrport': '1234'}}},
IXIARFC2544Profile.UPLINK: {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'}},
- 'outer_l3v4':
- {'dstip4': '9.9.1.1-90.105.255.255',
- 'proto': 'udp', 'count': '1',
- 'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32},
- 'outer_l3v6':
- {'dstip6': '9.9.1.1-90.105.255.255',
- 'proto': 'udp', 'count': '1',
- 'srcip6': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32},
-                                    'outer_l4': {'dstport': '2001',
-                                                 'srcport': '1234'}}},
+                                    {'outer_l2': {'framesize':
+                                                  {'64B': '100', '1518B': '0',
+                                                   '128B': '0', '1400B': '0',
+                                                   '256B': '0', '373b': '0',
+                                                   '570B': '0'}},
+                                     'outer_l3v4':
+                                     {'dstip4': '9.9.1.1-90.105.255.255',
+                                      'proto': 'udp', 'count': '1',
+                                      'srcip4': '1.1.1.1-1.15.255.255',
+                                      'dscp': 0, 'ttl': 32},
+                                     'outer_l3v6':
+                                     {'dstip6': '9.9.1.1-90.105.255.255',
+                                      'proto': 'udp', 'count': '1',
+                                      'srcip6': '1.1.1.1-1.15.255.255',
+                                      'dscp': 0, 'ttl': 32},
+                                     'outer_l4': {'dstport': '2001',
+                                                  'srcport': '1234'}}},
'schema': 'isb:traffic_profile:0.1'}
result = r_f_c2544_profile._get_ixia_traffic_profile(profile_data, mac)
self.assertIsNotNone(result)
@@ -497,7 +512,8 @@ class TestIXIARFC2544Profile(unittest.TestCase):
r_f_c2544_profile.full_profile = {}
r_f_c2544_profile.get_streams = mock.Mock()
- self.assertIsNone(r_f_c2544_profile.update_traffic_profile(traffic_generator))
+ self.assertIsNone(
+ r_f_c2544_profile.update_traffic_profile(traffic_generator))
self.assertEqual(r_f_c2544_profile.ports, ports_expected)
def test_get_drop_percentage(self):
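The PROFILE rewrite above is behaviour-neutral reformatting; the fixture's quirks, including the lowercase '373b' key and 'dsrport' (which reads like a typo for 'dstport'), are carried over verbatim so the production code is still exercised against exactly the same data. One property the reflow makes easy to eyeball is that each framesize map allocates 100% of traffic to a single size; a quick sanity check one could run against it (framesize_total is an illustrative helper, not part of the patch):

    def framesize_total(framesize):
        """Sum a {'64B': '100', '1518B': '0', ...} percentage map."""
        return sum(float(pct) for pct in framesize.values())


    assert framesize_total({'64B': '100', '1518B': '0', '128B': '0',
                            '1400B': '0', '256B': '0', '373b': '0',
                            '570B': '0'}) == 100.0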
diff --git a/tests/unit/network_services/traffic_profile/test_prox_acl.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py
index ef5bac0d5..48c449b20 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_acl.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py
@@ -13,12 +13,10 @@
# limitations under the License.
#
-from __future__ import absolute_import
-
import unittest
import mock
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -32,7 +30,7 @@ if stl_patch:
class TestProxACLProfile(unittest.TestCase):
def test_run_test_with_pkt_size(self):
- def target(*args, **kwargs):
+ def target(*args):
runs.append(args[2])
if args[2] < 0 or args[2] > 100:
raise RuntimeError(' '.join([str(args), str(runs)]))
@@ -40,13 +38,8 @@ class TestProxACLProfile(unittest.TestCase):
return fail_tuple, {}
return success_tuple, {}
- def get_mock_samples(*args, **kwargs):
- if args[2] < 0:
- raise RuntimeError(' '.join([str(args), str(runs)]))
- return success_tuple
-
tp_config = {
- 'traffic_profile': {
+ 'traffic_profile': {
'upper_bound': 100.0,
'lower_bound': 0.0,
'tolerated_loss': 50.0,
@@ -55,8 +48,10 @@ class TestProxACLProfile(unittest.TestCase):
}
runs = []
- success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
- fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+ success_tuple = ProxTestDataTuple(
+ 10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(
+ 10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
traffic_gen = mock.MagicMock()
@@ -75,4 +70,5 @@ class TestProxACLProfile(unittest.TestCase):
profile.tolerated_loss = 100.0
profile._profile_helper = profile_helper
- profile.run_test_with_pkt_size(traffic_gen, profile.pkt_size, profile.duration)
+ profile.run_test_with_pkt_size(
+ traffic_gen, profile.pkt_size, profile.duration)
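The stubbed target() above records each rate it is probed with (args[2]) and hands back success_tuple or fail_tuple depending on a threshold, which is exactly the shape a rate binary search between lower_bound and upper_bound needs. An illustrative driver for that idea; binary_search_rate and measure are invented names, not the ProxACLProfile API:

    def binary_search_rate(measure, lower=0.0, upper=100.0,
                           tolerated_loss=50.0, attempts=10):
        """Narrow [lower, upper] onto the highest rate whose packet
        loss stays within tolerated_loss percent."""
        best = lower
        for _ in range(attempts):
            rate = (lower + upper) / 2.0
            tx, rx = measure(rate)  # packets sent / packets received
            loss_pct = 100.0 * (tx - rx) / tx if tx else 100.0
            if loss_pct <= tolerated_loss:
                best, lower = rate, rate  # passed: search higher
            else:
                upper = rate              # failed: search lower
        return best


    # Example: loss jumps beyond rate 75.0, mirroring the threshold the
    # stubbed target() uses above; converges on roughly 75.
    print(binary_search_rate(lambda r: (1000, 995 if r <= 75.0 else 400)))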
diff --git a/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index 1b4189b48..58343ffb2 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -13,12 +13,10 @@
# limitations under the License.
#
-from __future__ import absolute_import
-
import unittest
import mock
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -68,7 +66,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
# Result Samples inc theor_max
result_tuple = {"Result_Actual_throughput": 7.5e-07,
- "Result_theor_max_throughput": 0.00012340000000000002,
+ "Result_theor_max_throughput": 1.234e-10,
"Result_pktSize": 200}
profile.queue.put.assert_called_with(result_tuple)
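The expected theor_max value changes by six orders of magnitude, which points at a units fix in the production computation rather than cosmetics; the old literal's trailing ...0000000002 also shows binary floating-point noise from an expectation computed by hand. Where an expected float is derived rather than pinned exactly, a tolerance-based assertion avoids this kind of churn; a generic sketch, not the assert_called_with comparison used above:

    import unittest


    class FloatExpectationTestCase(unittest.TestCase):
        def test_theor_max_close(self):
            computed = 0.1234e-9  # stand-in for the profile's output
            self.assertAlmostEqual(computed, 1.234e-10, delta=1e-15)


    if __name__ == '__main__':
        unittest.main()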
diff --git a/tests/unit/network_services/traffic_profile/test_prox_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
index e5b36096f..e466305ea 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_profile.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
@@ -13,12 +13,10 @@
# limitations under the License.
#
-from __future__ import absolute_import
-
import unittest
import mock
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_prox_ramp.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py
index 1acec2f68..7a77e3295 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_ramp.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py
@@ -13,12 +13,10 @@
# limitations under the License.
#
-from __future__ import absolute_import
-
import unittest
import mock
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index cb3a547ee..2684e0ba1 100644
--- a/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -16,7 +16,7 @@
import unittest
import mock
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
STLClient = mock.MagicMock()
@@ -191,7 +191,8 @@ class TestRFC2544Profile(unittest.TestCase):
mock.Mock(return_value=True)
r_f_c2544_profile = RFC2544Profile(self.TRAFFIC_PROFILE)
r_f_c2544_profile.params = self.PROFILE
- self.assertIsNone(r_f_c2544_profile.execute_traffic(traffic_generator))
+ self.assertIsNone(
+ r_f_c2544_profile.execute_traffic(traffic_generator))
samples = {}
for ifname in range(1):
name = "xe{}".format(ifname)
diff --git a/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
index d1009a5e8..5fe1b7326 100644
--- a/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
@@ -18,7 +18,7 @@ import mock
import six
import unittest
-from tests.unit import STL_MOCKS
+from yardstick.tests import STL_MOCKS
from yardstick.common import exceptions as y_exc
STLClient = mock.MagicMock()
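Every traffic-profile test module in this patch shares the same preamble: build the sys.modules patch from STL_MOCKS, start it at import time, and only then import the production modules behind the if stl_patch: guard visible in the hunks above. A condensed sketch of that lifecycle; fake_stl is an invented module name:

    import mock

    STL_STUBS = {'fake_stl': mock.MagicMock()}

    stl_patch = mock.patch.dict('sys.modules', STL_STUBS)
    stl_patch.start()

    if stl_patch:
        # Imports placed here succeed even without the real library,
        # mirroring the `if stl_patch:` guard in the test modules.
        import fake_stl  # noqa: F401

    stl_patch.stop()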
diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py
index 9598eeb04..3ec59a3c2 100644
--- a/yardstick/tests/unit/orchestrator/test_heat.py
+++ b/yardstick/tests/unit/orchestrator/test_heat.py
@@ -17,6 +17,7 @@ import shade
import unittest
from yardstick.benchmark.contexts import node
+from yardstick.common import constants
from yardstick.common import exceptions
from yardstick.orchestrator import heat
@@ -53,6 +54,14 @@ class HeatStackTestCase(unittest.TestCase):
self._mock_stack_get.stop()
heat._DEPLOYED_STACKS = {}
+ @mock.patch.object(shade, 'openstack_cloud')
+ def test__init(self, mock_openstack_cloud):
+ os_cloud_config = {'key': 'value'}
+ heatstack = heat.HeatStack('name', os_cloud_config=os_cloud_config)
+ self.assertEqual('name', heatstack.name)
+ os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
def test_create(self):
template = {'tkey': 'tval'}
heat_parameters = {'pkey': 'pval'}
@@ -192,7 +201,9 @@ class HeatStackTestCase(unittest.TestCase):
class HeatTemplateTestCase(unittest.TestCase):
def setUp(self):
- self.template = heat.HeatTemplate('test')
+ self._os_cloud_config = {'key1': 'value1'}
+ self.template = heat.HeatTemplate(
+ 'test', os_cloud_config=self._os_cloud_config)
def test_add_tenant_network(self):
self.template.add_network('some-network')
@@ -337,8 +348,12 @@ class HeatTemplateTestCase(unittest.TestCase):
def test_create_not_block(self):
heat_stack = mock.Mock()
- with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+ with mock.patch.object(heat, 'HeatStack', return_value=heat_stack) \
+                as mock_heatstack:
ret = self.template.create(block=False)
+
+ mock_heatstack.assert_called_once_with(
+ self.template.name, os_cloud_config=self.template._os_cloud_config)
heat_stack.create.assert_called_once_with(
self.template._template, self.template.heat_parameters, False,
3600)
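The new test__init pins down how HeatStack builds its shade client: the user-supplied os_cloud_config is merged with fixed defaults and the result is splatted into shade.openstack_cloud(). A minimal sketch of the merge that assertion encodes; DEFAULTS is an assumed stand-in for constants.OS_CLOUD_DEFAULT_CONFIG, whose actual contents are not shown in this hunk:

    # Assumed stand-in for constants.OS_CLOUD_DEFAULT_CONFIG.
    DEFAULTS = {'verify': False}


    def merged_cloud_config(user_config):
        """Overlay the fixed defaults, matching the update() direction
        test__init uses (defaults win on key collisions)."""
        merged = dict(user_config)
        merged.update(DEFAULTS)
        return merged


    assert merged_cloud_config({'key': 'value'}) == {'key': 'value',
                                                     'verify': False}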