-rw-r--r--  ansible/group_vars/all.yml | 8
-rw-r--r--  ansible/install-inventory.ini | 15
-rw-r--r--  ansible/install.yaml | 50
-rw-r--r--  ansible/roles/add_custom_repos/tasks/main.yml | 2
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml | 56
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml | 104
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/main.yml | 14
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/post_build.yml | 46
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/pre_build.yml | 202
-rw-r--r--  ansible/roles/build_yardstick_image/vars/main.yml | 31
-rw-r--r--  ansible/roles/download_dpdk/defaults/main.yml | 2
-rw-r--r--  ansible/roles/install_civetweb/defaults/main.yml | 7
-rw-r--r--  ansible/roles/install_image_dependencies/tasks/main.yml | 2
-rw-r--r--  dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json | 207
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc074.rst | 72
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua | 99
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml | 2
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml | 1
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml | 1
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml | 18
-rw-r--r--  yardstick/benchmark/contexts/standalone/model.py | 102
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py | 25
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py | 24
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py | 1
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 92
-rw-r--r--  yardstick/common/exceptions.py | 4
-rw-r--r--  yardstick/common/utils.py | 22
-rw-r--r--  yardstick/network_services/traffic_profile/base.py | 1
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 29
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 105
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py | 36
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_prox.py | 7
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 46
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_trex.py | 4
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_model.py | 102
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py | 32
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py | 28
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py | 36
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py | 17
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py | 340
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 16
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 83
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py | 131
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 58
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py | 1
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py | 5
61 files changed, 1831 insertions, 487 deletions
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 359968277..9f5293272 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -1,9 +1,15 @@
---
target_os: "Ubuntu"
YARD_IMG_ARCH: "amd64"
+IMG_PROPERTY: "normal"
clone_dest: /opt/tempT
release: xenial
normal_image_file: "{{ workspace }}/yardstick-image.img"
nsb_image_file: "{{ workspace }}/yardstick-nsb-image.img"
ubuntu_image_file: /tmp/workspace/yardstick/yardstick-trusty-server.raw
-proxy_env: {}
+proxy_env:
+ PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/bin
+ http_proxy: "{{ lookup('env', 'http_proxy') }}"
+ https_proxy: "{{ lookup('env', 'https_proxy') }}"
+ ftp_proxy: "{{ lookup('env', 'ftp_proxy') }}"
+ no_proxy: "{{ lookup('env', 'no_proxy') }}"
diff --git a/ansible/install-inventory.ini b/ansible/install-inventory.ini
index 6aa9905bd..e15a2e96a 100644
--- a/ansible/install-inventory.ini
+++ b/ansible/install-inventory.ini
@@ -1,7 +1,6 @@
# the group of systems on which to install yardstick
# by default just localhost
[jumphost]
-#yardstickvm1 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=local
localhost ansible_connection=local
# section below is only due backward compatibility.
@@ -10,14 +9,10 @@ localhost ansible_connection=local
jumphost
[yardstick-standalone]
-#yardstickvm2 ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
-# uncomment hosts below if you would to test yardstick-standalone/sriov scenarios
-#yardstick-standalone-node ansible_host=192.168.1.2
-#yardstick-standalone-node-2 ansible_host=192.168.1.3
+# standalone-node ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
[yardstick-baremetal]
-#yardstickvm3 ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
-# hostname ansible_host=192.168.1.2
+# baremetal-node ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
[all:vars]
arch_amd64=amd64
@@ -25,6 +20,6 @@ arch_arm64=arm64
inst_mode_container=container
inst_mode_baremetal=baremetal
ubuntu_archive={"amd64": "http://archive.ubuntu.com/ubuntu/", "arm64": "http://ports.ubuntu.com/ubuntu-ports/"}
-# uncomment credentials below for yardstick-standalone
-#ansible_user=root
-#ansible_pass=root
+# Uncomment credentials below if needed
+# ansible_user=root
+# ansible_pass=root
diff --git a/ansible/install.yaml b/ansible/install.yaml
index ae9f8587f..fa8419b53 100644
--- a/ansible/install.yaml
+++ b/ansible/install.yaml
@@ -50,23 +50,18 @@
- shell: uwsgi -i /etc/yardstick/yardstick.ini
when: installation_mode != inst_mode_container
-- name: Prepare baremetal and standalone server(s)
+
+- name: Prepare baremetal and standalone servers
hosts: yardstick-baremetal,yardstick-standalone
become: yes
- vars:
- YARD_IMG_ARCH: "{{ arch_amd64 }}"
- environment:
- proxy_env:
- http_proxy: "{{ lookup('env', 'http_proxy') }}"
- https_proxy: "{{ lookup('env', 'https_proxy') }}"
- ftp_proxy: "{{ lookup('env', 'ftp_proxy') }}"
- no_proxy: "{{ lookup('env', 'no_proxy') }}"
+ environment: "{{ proxy_env }}"
roles:
- add_custom_repos
- role: set_package_installer_proxy
when: proxy_env is defined and proxy_env
# can't update grub in chroot/docker
+ # ?? - enable_iommu_on_boot
- enable_hugepages_on_boot
# needed for collectd plugins
- increase_open_file_limits
@@ -98,3 +93,40 @@
- install_pmu_tools
- download_collectd
- install_collectd
+
+
+- hosts: jumphost
+ become: yes
+ vars:
+ img_prop_item: "{{ IMG_PROPERTY }}"
+ img_arch: "{{ YARD_IMG_ARCH }}"
+
+ tasks:
+ - name: Include pre-build
+ include_role:
+ name: build_yardstick_image
+ tasks_from: pre_build.yml
+
+
+- hosts: chroot_image
+ connection: chroot
+ become: yes
+ vars:
+ img_property: "{{ IMG_PROPERTY }}"
+ environment: "{{ proxy_env }}"
+
+ tasks:
+ - name: Include image build
+ include_role:
+ name: build_yardstick_image
+ tasks_from: "cloudimg_modify_{{ img_property }}.yml"
+
+
+- hosts: jumphost
+ become: yes
+
+ tasks:
+ - name: Include post-build
+ include_role:
+ name: build_yardstick_image
+ tasks_from: post_build.yml
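
For reference, the image flavour built by the new plays above is driven by IMG_PROPERTY (defined in ansible/group_vars/all.yml): pre_build.yml maps "normal" to yardstick-image.img and "nsb" to yardstick-nsb-image.img, and the chroot play includes cloudimg_modify_{{ img_property }}.yml accordingly. A minimal override could look like this (illustrative values only, not part of the patch):

    # e.g. supplied as extra-vars when running install.yaml (assumed usage)
    IMG_PROPERTY: "nsb"      # selects cloudimg_modify_nsb.yml in the chroot play
    YARD_IMG_ARCH: "amd64"   # consumed by pre_build.yml via img_arch
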
diff --git a/ansible/roles/add_custom_repos/tasks/main.yml b/ansible/roles/add_custom_repos/tasks/main.yml
index 7341ad07d..b1dfd542d 100644
--- a/ansible/roles/add_custom_repos/tasks/main.yml
+++ b/ansible/roles/add_custom_repos/tasks/main.yml
@@ -12,5 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-- include: "{{ target_os|lower }}.yml"
+- include_tasks: "{{ target_os|lower }}.yml"
diff --git a/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml b/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml
new file mode 100644
index 000000000..435b43856
--- /dev/null
+++ b/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml
@@ -0,0 +1,56 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- debug:
+ msg: "chrooted in {{ inventory_hostname }}"
+
+- debug:
+ var: proxy_env
+ verbosity: 2
+
+- include_role:
+ name: "{{ role_item }}"
+ with_items:
+ - reset_resolv_conf
+ - add_custom_repos
+ - modify_cloud_config
+ loop_control:
+ loop_var: role_item
+
+- include_role:
+ name: set_package_installer_proxy
+ when: proxy_env is defined and proxy_env
+
+- include_role:
+ name: install_image_dependencies
+
+- include_vars: roles/download_unixbench/defaults/main.yml
+ when: unixbench_dest is undefined
+
+- include_vars: roles/download_ramspeed/defaults/main.yml
+ when: ramspeed_dest is undefined
+
+- include_role:
+ name: "{{ role_item }}"
+ with_items:
+ - download_l2fwd
+ - download_unixbench
+ - install_unixbench
+ - download_ramspeed
+ - install_ramspeed
+ - download_cachestat
+ loop_control:
+ loop_var: role_item
+
+ environment: "{{ proxy_env }}"
diff --git a/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml b/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml
new file mode 100644
index 000000000..9a70ff39a
--- /dev/null
+++ b/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml
@@ -0,0 +1,104 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- debug:
+ msg: "chrooted in {{ inventory_hostname }}"
+
+- debug:
+ var: proxy_env
+ verbosity: 2
+
+- debug: msg="play_hosts={{play_hosts}}"
+
+- include_role:
+ name: "{{ role_item }}"
+ with_items:
+ - reset_resolv_conf
+ - add_custom_repos
+ - modify_cloud_config
+ loop_control:
+ loop_var: role_item
+
+- include_role:
+ name: set_package_installer_proxy
+ when: proxy_env is defined and proxy_env
+
+- include_vars: roles/install_dpdk/vars/main.yml
+ when: dpdk_make_arch is undefined
+
+- include_vars: roles/download_dpdk/defaults/main.yml
+ when: dpdk_version is undefined
+
+- include_vars: roles/download_trex/defaults/main.yml
+ when: trex_unarchive is undefined
+
+- include_vars: roles/download_civetweb/defaults/main.yml
+ when: civetweb_dest is undefined
+
+- include_role:
+ name: "{{ role_item }}"
+ with_items:
+ - install_image_dependencies
+ - enable_hugepages_on_boot # can't update grub in chroot/docker
+ - increase_open_file_limits # needed for collectd plugins
+ - download_dpdk
+ - install_dpdk
+ - download_trex
+ - install_trex
+ - download_pktgen
+ - install_pktgen
+ - download_civetweb
+ - install_civetweb
+ - download_samplevnfs
+ loop_control:
+ loop_var: role_item
+ environment: "{{ proxy_env }}"
+
+- include_vars: roles/install_dpdk/defaults/main.yml
+ when: INSTALL_BIN_PATH is undefined
+
+- include_vars: roles/download_samplevnfs/defaults/main.yml
+ when: samplevnf_dest is undefined
+- set_fact:
+ samplevnf_path: "{{ samplevnf_dest }}"
+- include_role:
+ name: install_samplevnf
+ with_items:
+ - PROX
+ - UDP_Replay
+ - ACL
+ - FW
+ - CGNAPT
+ loop_control:
+ loop_var: vnf_name
+
+- include_vars: roles/download_drivers/defaults/main.yml
+ when: i40evf_path is undefined
+
+- include_role:
+ name: "{{ role_item }}"
+ with_items:
+ - install_dpdk_shared # build shared DPDK for collectd only, required DPDK downloaded already
+ - install_rabbitmq
+ - download_intel_cmt_cat
+ - install_intel_cmt_cat
+ - download_pmu_tools
+ - install_pmu_tools
+ - download_collectd
+ - install_collectd
+ - download_drivers
+ - install_drivers
+ loop_control:
+ loop_var: role_item
+ environment: "{{ proxy_env }}"
diff --git a/ansible/roles/build_yardstick_image/tasks/main.yml b/ansible/roles/build_yardstick_image/tasks/main.yml
new file mode 100644
index 000000000..e21cbb766
--- /dev/null
+++ b/ansible/roles/build_yardstick_image/tasks/main.yml
@@ -0,0 +1,14 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
diff --git a/ansible/roles/build_yardstick_image/tasks/post_build.yml b/ansible/roles/build_yardstick_image/tasks/post_build.yml
new file mode 100644
index 000000000..c6888f8df
--- /dev/null
+++ b/ansible/roles/build_yardstick_image/tasks/post_build.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: convert image to image file
+ command: qemu-img convert -c -o compat=0.10 -O qcow2 {{ raw_imgfile }} {{ imgfile }}
+
+# Post build yardstick image
+- group_by:
+ key: image_builder
+
+- name: remove ubuntu policy-rc.d workaround
+ file:
+ path: "{{ mountdir }}/usr/sbin/policy-rc.d"
+ state: absent
+ when: "target_os == 'Ubuntu'"
+
+- name: cleanup fake tmp fstab
+ file:
+ path: "{{ fake_fstab }}"
+ state: absent
+
+- mount:
+ name: "{{ mountdir }}/proc"
+ state: unmounted
+
+- mount:
+ name: "{{ mountdir }}"
+ state: unmounted
+
+- name: kpartx -dv to delete all image partition device nodes
+ command: kpartx -dv "{{ raw_imgfile }}"
+ ignore_errors: true
+
+- debug:
+ msg: "yardstick image = {{ imgfile }}"
diff --git a/ansible/roles/build_yardstick_image/tasks/pre_build.yml b/ansible/roles/build_yardstick_image/tasks/pre_build.yml
new file mode 100644
index 000000000..2dae38060
--- /dev/null
+++ b/ansible/roles/build_yardstick_image/tasks/pre_build.yml
@@ -0,0 +1,202 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Group
+ group_by:
+ key: image_builder
+
+- package: name=parted state=present
+ environment: "{{ proxy_env }}"
+
+- package: name=kpartx state=present
+ environment: "{{ proxy_env }}"
+
+- package: name="{{ growpart_package[ansible_os_family] }}" state=present
+ environment: "{{ proxy_env }}"
+
+- set_fact:
+ imgfile: "{{ normal_image_file }}"
+ when: img_prop_item == "normal"
+
+- set_fact:
+ imgfile: "{{ nsb_image_file }}"
+ when: img_prop_item == "nsb"
+
+- set_fact:
+ mountdir: "{{ lookup('env', 'mountdir')|default('/mnt/yardstick', true) }}"
+ raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
+
+# cleanup non-lxd
+- name: unmount all old mount points
+ mount:
+ name: "{{ item }}"
+ state: unmounted
+ with_items:
+ # order matters
+ - "{{ mountdir }}/proc"
+ - "{{ mountdir }}"
+ - "/mnt/{{ release }}"
+
+- name: kpartx -dv to delete all image partition device nodes
+ command: kpartx -dv "{{ raw_imgfile }}"
+ ignore_errors: true
+
+- name: Debug dump loop devices
+ command: losetup -a
+ ignore_errors: true
+
+- name: delete loop devices for image file
+ # use this because kpartx -dv will fail if raw_imgfile was deleted
+ # but in theory we could still have a deleted file attached to a loopback device
+ # use grep because of // and awk
+ shell: losetup -O NAME,BACK-FILE | grep "{{ raw_imgfile_basename }}" | awk '{ print $1 }' | xargs -l1 losetup -v -d
+ ignore_errors: true
+
+- name: Debug dump loop devices again
+ command: losetup -a
+ ignore_errors: true
+
+- name: delete {{ raw_imgfile }}
+ file:
+ path: "{{ raw_imgfile }}"
+ state: absent
+
+# common
+- name: remove {{ mountdir }}
+ file:
+ path: "{{ mountdir }}"
+ state: absent
+
+# download-common
+- name: ensure {{ workspace }} directory exists
+ file:
+ path: "{{ workspace }}"
+ state: directory
+
+- name: "fetch {{ image_url }} and verify "
+ fetch_url_and_verify:
+ url: "{{ image_url }}"
+ sha256url: "{{ sha256sums_url }}"
+ dest: "{{ image_dest }}"
+
+- name: convert image to raw
+ command: "qemu-img convert {{ image_dest }} {{ raw_imgfile }}"
+
+- name: resize image to allow for more VNFs
+ command: "qemu-img resize -f raw {{ raw_imgfile }} +2G"
+
+- name: resize partition to allow for more VNFs
+ # use growpart because maybe it handles GPT better than parted
+ command: growpart {{ raw_imgfile }} 1
+
+- name: create mknod devices in chroot
+ command: "mknod -m 0660 /dev/loop{{ item }} b 7 {{ item }}"
+ args:
+ creates: "/dev/loop{{ item }}"
+ with_sequence: start=0 end=9
+ tags: mknod_devices
+
+- name: find first partition device
+ command: kpartx -l "{{ raw_imgfile }}"
+ register: kpartx_res
+
+- set_fact:
+ image_first_partition: "{{ kpartx_res.stdout_lines[0].split()[0] }}"
+
+- set_fact:
+ # assume / is the first partition
+ image_first_partition_device: "/dev/mapper/{{ image_first_partition }}"
+
+- name: use kpartx to create device nodes for the raw image loop device
+ # operate on the loop device to avoid /dev namespace missing devices
+ command: kpartx -avs "{{ raw_imgfile }}"
+
+- name: parted dump raw image
+ command: parted "{{ raw_imgfile }}" print
+ register: parted_res
+
+- debug:
+ var: parted_res
+ verbosity: 2
+
+- name: use blkid to find filesystem type of first partition device
+ command: blkid -o value -s TYPE {{ image_first_partition_device }}
+ register: blkid_res
+
+- set_fact:
+ image_fs_type: "{{ blkid_res.stdout.strip() }}"
+
+- fail:
+ msg: "We only support ext4 image filesystems because we have to resize"
+ when: image_fs_type != "ext4"
+
+- name: fsck the image filesystem
+ command: "e2fsck -y -f {{ image_first_partition_device }}"
+
+- name: resize filesystem to full partition size
+ command: resize2fs {{ image_first_partition_device }}
+
+- name: fsck the image filesystem
+ command: "e2fsck -y -f {{ image_first_partition_device }}"
+
+- name: make tmp disposable fstab
+ command: mktemp --tmpdir fake_fstab.XXXXXXXXXX
+ register: mktemp_res
+
+- set_fact:
+ fake_fstab: "{{ mktemp_res.stdout.strip() }}"
+
+- name: mount first partition on image device
+ mount:
+ src: "{{ image_first_partition_device }}"
+ name: "{{ mountdir }}"
+ # fstype is required
+ fstype: "{{ image_fs_type }}"
+ # !!!!!!! this is required otherwise we add entries to /etc/fstab
+ # and prevent the system from booting
+ fstab: "{{ fake_fstab }}"
+ state: mounted
+
+- name: mount chroot /proc
+ mount:
+ src: none
+ name: "{{ mountdir }}/proc"
+ fstype: proc
+ # !!!!!!! this is required otherwise we add entries to /etc/fstab
+ # and prevent the system from booting
+ fstab: "{{ fake_fstab }}"
+ state: mounted
+
+- name: if arm copy qemu-aarch64-static into chroot
+ copy:
+ src: /usr/bin/qemu-aarch64-static
+ dest: "{{ mountdir }}/usr/bin"
+ when: img_arch == arch_arm64
+
+- name: create ubuntu policy-rc.d workaround
+ copy:
+ content: "{{ '#!/bin/sh\nexit 101\n' }}"
+ dest: "{{ mountdir }}/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: "target_os == 'Ubuntu'"
+
+- name: add chroot as host
+ add_host:
+ name: "{{ mountdir }}"
+ groups: chroot_image,image_builder
+ connection: chroot
+ ansible_python_interpreter: /usr/bin/python3
+ # set this host variable here
+ nameserver_ip: "{{ ansible_dns.nameservers[0] }}"
+ image_type: vm
diff --git a/ansible/roles/build_yardstick_image/vars/main.yml b/ansible/roles/build_yardstick_image/vars/main.yml
new file mode 100644
index 000000000..6728e5afb
--- /dev/null
+++ b/ansible/roles/build_yardstick_image/vars/main.yml
@@ -0,0 +1,31 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+boot_modes:
+ 'amd64': disk1
+ 'arm64': uefi1
+boot_mode: "{{ boot_modes[YARD_IMG_ARCH] }}"
+image_filename: "{{ release }}-server-cloudimg-{{ YARD_IMG_ARCH }}-{{ boot_mode }}.img"
+image_path: "{{ release }}/current/{{ image_filename }}"
+host: "{{ lookup('env', 'HOST')|default('cloud-images.ubuntu.com', true)}}"
+image_url: "{{ lookup('env', 'IMAGE_URL')|default('https://' ~ host ~ '/' ~ image_path, true) }}"
+image_dest: "{{ workspace }}/{{ image_filename }}"
+sha256sums_path: "{{ release }}/current/SHA256SUMS"
+sha256sums_filename: "{{ sha256sums_path|basename }}"
+sha256sums_url: "{{ lookup('env', 'SHA256SUMS_URL')|default('https://' ~ host ~ '/' ~ sha256sums_path, true) }}"
+workspace: "{{ lookup('env', 'workspace')|default('/tmp/workspace/yardstick', true) }}"
+raw_imgfile_basename: "yardstick-{{ release }}-server.raw"
+growpart_package:
+ RedHat: cloud-utils-growpart
+ Debian: cloud-guest-utils
diff --git a/ansible/roles/download_dpdk/defaults/main.yml b/ansible/roles/download_dpdk/defaults/main.yml
index 885eebf03..83711881b 100644
--- a/ansible/roles/download_dpdk/defaults/main.yml
+++ b/ansible/roles/download_dpdk/defaults/main.yml
@@ -1,5 +1,5 @@
---
-dpdk_version: "17.02.1"
+dpdk_version: "17.05"
dpdk_url: "http://fast.dpdk.org/rel/dpdk-{{ dpdk_version }}.tar.xz"
dpdk_file: "{{ dpdk_url|basename }}"
dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]xz$', '') }}"
diff --git a/ansible/roles/install_civetweb/defaults/main.yml b/ansible/roles/install_civetweb/defaults/main.yml
index ed5ab27f2..c97403688 100644
--- a/ansible/roles/install_civetweb/defaults/main.yml
+++ b/ansible/roles/install_civetweb/defaults/main.yml
@@ -15,8 +15,9 @@
civetweb_dest: "{{ clone_dest }}/civetweb"
civetweb_build_dependencies:
Debian:
- - libjson-c-dev=0.11-4ubuntu2
- - libjson0
- - libjson0-dev
+# - libjson-c-dev=0.11-4ubuntu2
+# - libjson0
+# - libjson0-dev
- libssl-dev
+ - libjson-c-dev
RedHat:
diff --git a/ansible/roles/install_image_dependencies/tasks/main.yml b/ansible/roles/install_image_dependencies/tasks/main.yml
index ffd30f33e..4e55339c2 100644
--- a/ansible/roles/install_image_dependencies/tasks/main.yml
+++ b/ansible/roles/install_image_dependencies/tasks/main.yml
@@ -19,5 +19,5 @@
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
register: pkg_mgr_results
retries: "{{ pkg_mgr_retries }}"
- until: pkg_mgr_results|success
+ until: pkg_mgr_results is success
with_items: "{{ install_dependencies[ansible_os_family] }}"
diff --git a/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json b/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json
index 3c78ab18d..659522510 100644
--- a/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json
+++ b/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json
@@ -44,6 +44,7 @@
"annotations": {
"list": []
},
+ "editMode": false,
"editable": true,
"gnetId": null,
"graphTooltip": 0,
@@ -149,7 +150,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.xe0.out_packets\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE $timeFilter GROUP BY time($interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.xe0.out_packets\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE $timeFilter GROUP BY time($interval) fill(null)",
"rawQuery": false,
"refId": "A",
"resultFormat": "time_series",
@@ -283,7 +284,7 @@
[
{
"params": [
- "tg__0.xe0.in_packets"
+ "tg__0.collect_stats.xe0.in_packets"
],
"type": "field"
},
@@ -321,7 +322,7 @@
[
{
"params": [
- "tg__0.xe1.in_packets"
+ "tg__0.collect_stats.xe1.in_packets"
],
"type": "field"
},
@@ -359,7 +360,7 @@
[
{
"params": [
- "tg__0.xe2.in_packets"
+ "tg__0.collect_stats.xe2.in_packets"
],
"type": "field"
},
@@ -397,7 +398,7 @@
[
{
"params": [
- "tg__0.xe3.in_packets"
+ "tg__0.collect_stats.xe3.in_packets"
],
"type": "field"
},
@@ -525,7 +526,7 @@
[
{
"params": [
- "tg__0.xe0.out_packets"
+ "tg__0.collect_stats.xe0.out_packets"
],
"type": "field"
},
@@ -563,7 +564,7 @@
[
{
"params": [
- "tg__0.xe1.out_packets"
+ "tg__0.collect_stats.xe1.out_packets"
],
"type": "field"
},
@@ -601,7 +602,7 @@
[
{
"params": [
- "tg__0.xe2.out_packets"
+ "tg__0.collect_stats.xe2.out_packets"
],
"type": "field"
},
@@ -639,7 +640,7 @@
[
{
"params": [
- "tg__0.xe3.out_packets"
+ "tg__0.collect_stats.xe3.out_packets"
],
"type": "field"
},
@@ -1060,7 +1061,7 @@
[
{
"params": [
- "tg__0.TxThroughput"
+ "tg__0.collect_stats.TxThroughput"
],
"type": "field"
},
@@ -1099,7 +1100,7 @@
[
{
"params": [
- "tg__0.RxThroughput"
+ "tg__0.collect_stats.RxThroughput"
],
"type": "field"
},
@@ -1225,7 +1226,7 @@
[
{
"params": [
- "tg__0.PktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -1355,13 +1356,15 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
+ "query": "SELECT mean(\"tg__0.collect_stats.tx_total\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.Status\" = 'Success' AND $timeFilter GROUP BY time($__interval) fill(none)",
+ "rawQuery": false,
"refId": "B",
"resultFormat": "time_series",
"select": [
[
{
"params": [
- "tg__0.Success_tx_total"
+ "tg__0.collect_stats.tx_total"
],
"type": "field"
},
@@ -1371,7 +1374,13 @@
}
]
],
- "tags": []
+ "tags": [
+ {
+ "key": "tg__0.collect_stats.Status",
+ "operator": "=",
+ "value": "Success"
+ }
+ ]
},
{
"alias": "SUCCESS Rx Total",
@@ -1393,13 +1402,15 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
+ "query": "SELECT mean(\"tg__0.collect_stats.rx_total\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.Status\" = 'Success' AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": false,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
- "tg__0.Success_rx_total"
+ "tg__0.collect_stats.rx_total"
],
"type": "field"
},
@@ -1409,7 +1420,13 @@
}
]
],
- "tags": []
+ "tags": [
+ {
+ "key": "tg__0.collect_stats.Status",
+ "operator": "=",
+ "value": "Success"
+ }
+ ]
},
{
"alias": "SUCCESS ALLOWABLE LOST PACKETS",
@@ -1431,13 +1448,15 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
+ "query": "SELECT mean(\"tg__0.collect_stats.can_be_lost\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.Status\" = 'Success' AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": false,
"refId": "C",
"resultFormat": "time_series",
"select": [
[
{
"params": [
- "tg__0.Success_can_be_lost"
+ "tg__0.collect_stats.can_be_lost"
],
"type": "field"
},
@@ -1447,7 +1466,13 @@
}
]
],
- "tags": []
+ "tags": [
+ {
+ "key": "tg__0.collect_stats.Status",
+ "operator": "=",
+ "value": "Success"
+ }
+ ]
}
],
"thresholds": [],
@@ -1567,7 +1592,7 @@
[
{
"params": [
- "tg__0.duration"
+ "tg__0.collect_stats.duration"
],
"type": "field"
},
@@ -1675,7 +1700,7 @@
[
{
"params": [
- "tg__0.test_duration"
+ "tg__0.collect_stats.test_duration"
],
"type": "field"
},
@@ -1783,7 +1808,7 @@
[
{
"params": [
- "tg__0.test_precision"
+ "tg__0.collect_stats.test_precision"
],
"type": "field"
},
@@ -1891,7 +1916,7 @@
[
{
"params": [
- "tg__0.tolerated_loss"
+ "tg__0.collect_stats.tolerated_loss"
],
"type": "field"
},
@@ -2048,7 +2073,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 64 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2056,7 +2081,7 @@
[
{
"params": [
- "tg__0.Result_pktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -2068,7 +2093,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "64"
}
@@ -2165,7 +2190,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 64 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2173,7 +2198,7 @@
[
{
"params": [
- "tg__0.Result_theor_max_throughput"
+ "tg__0.collect_stats.theor_max_throughput"
],
"type": "field"
},
@@ -2185,7 +2210,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "64"
}
@@ -2281,7 +2306,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 64 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2289,7 +2314,7 @@
[
{
"params": [
- "tg__0.Result_Actual_throughput"
+ "tg__0.collect_stats.Actual_throughput"
],
"type": "field"
},
@@ -2301,7 +2326,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "64"
}
@@ -2409,7 +2434,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 128 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2417,7 +2442,7 @@
[
{
"params": [
- "tg__0.Result_pktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -2429,7 +2454,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "128"
}
@@ -2525,7 +2550,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 128 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2533,7 +2558,7 @@
[
{
"params": [
- "tg__0.Result_theor_max_throughput"
+ "tg__0.collect_stats.theor_max_throughput"
],
"type": "field"
},
@@ -2545,7 +2570,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "128"
}
@@ -2641,7 +2666,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 128 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2649,7 +2674,7 @@
[
{
"params": [
- "tg__0.Result_Actual_throughput"
+ "tg__0.collect_stats.Actual_throughput"
],
"type": "field"
},
@@ -2661,7 +2686,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "128"
}
@@ -2768,7 +2793,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 256 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2776,7 +2801,7 @@
[
{
"params": [
- "tg__0.Result_pktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -2788,7 +2813,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSiuze",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "256"
}
@@ -2883,7 +2908,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 256 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -2891,7 +2916,7 @@
[
{
"params": [
- "tg__0.Result_theor_max_throughput"
+ "tg__0.collect_stats.theor_max_throughput"
],
"type": "field"
},
@@ -2903,7 +2928,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "256"
}
@@ -2998,7 +3023,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 256 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3006,7 +3031,7 @@
[
{
"params": [
- "tg__0.Result_Actual_throughput"
+ "tg__0.collect_stats.Actual_throughput"
],
"type": "field"
},
@@ -3018,7 +3043,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "256"
}
@@ -3125,7 +3150,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 512 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3133,7 +3158,7 @@
[
{
"params": [
- "tg__0.Result_pktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -3145,7 +3170,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "512"
}
@@ -3241,7 +3266,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 512 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3249,7 +3274,7 @@
[
{
"params": [
- "tg__0.Result_theor_max_throughput"
+ "tg__0.collect_stats.theor_max_throughput"
],
"type": "field"
},
@@ -3261,7 +3286,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "512"
}
@@ -3357,7 +3382,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 512 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3365,7 +3390,7 @@
[
{
"params": [
- "tg__0.Result_Actual_throughput"
+ "tg__0.collect_stats.Actual_throughput"
],
"type": "field"
},
@@ -3377,7 +3402,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "512"
}
@@ -3483,7 +3508,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1024 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3491,7 +3516,7 @@
[
{
"params": [
- "tg__0.Result_pktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -3503,7 +3528,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "1024"
}
@@ -3598,7 +3623,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1024 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3606,7 +3631,7 @@
[
{
"params": [
- "tg__0.Result_theor_max_throughput"
+ "tg__0.collect_stats.theor_max_throughput"
],
"type": "field"
},
@@ -3618,7 +3643,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "1024"
}
@@ -3713,7 +3738,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1024 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3721,7 +3746,7 @@
[
{
"params": [
- "tg__0.Result_Actual_throughput"
+ "tg__0.collect_stats.Actual_throughput"
],
"type": "field"
},
@@ -3733,7 +3758,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "1024"
}
@@ -3840,7 +3865,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1280 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3848,7 +3873,7 @@
[
{
"params": [
- "tg__0.Result_pktSize"
+ "tg__0.collect_stats.PktSize"
],
"type": "field"
},
@@ -3860,7 +3885,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "1280"
}
@@ -3956,7 +3981,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1280 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -3964,7 +3989,7 @@
[
{
"params": [
- "tg__0.Result_theor_max_throughput"
+ "tg__0.collect_stats.theor_max_throughput"
],
"type": "field"
},
@@ -3976,7 +4001,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "1280"
}
@@ -4072,7 +4097,7 @@
"measurement": "tc_prox_baremetal_l2fwd-4",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1280 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -4080,7 +4105,7 @@
[
{
"params": [
- "tg__0.Result_Actual_throughput"
+ "tg__0.collect_stats.Actual_throughput"
],
"type": "field"
},
@@ -4092,7 +4117,7 @@
],
"tags": [
{
- "key": "tg__0.Result_pktSize",
+ "key": "tg__0.collect_stats.PktSize",
"operator": "=",
"value": "1280"
}
@@ -4197,7 +4222,7 @@
],
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1518 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -4305,7 +4330,7 @@
],
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1518 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -4413,7 +4438,7 @@
],
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1518 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -4541,7 +4566,7 @@
[
{
"params": [
- "tg__0.LatencyAvg.5"
+ "tg__0.collect_stats.LatencyAvg.5"
],
"type": "field"
},
@@ -4579,7 +4604,7 @@
[
{
"params": [
- "tg__0.LatencyMax.5"
+ "tg__0.collect_stats.LatencyMax.5"
],
"type": "field"
},
@@ -4687,7 +4712,7 @@
[
{
"params": [
- "tg__0.LatencyAvg.6"
+ "tg__0.collect_stats.LatencyAvg.6"
],
"type": "field"
},
@@ -4725,7 +4750,7 @@
[
{
"params": [
- "tg__0.LatencyMax.6"
+ "tg__0.collect_stats.LatencyMax.6"
],
"type": "field"
},
@@ -4833,7 +4858,7 @@
[
{
"params": [
- "tg__0.LatencyAvg.7"
+ "tg__0.collect_stats.LatencyAvg.7"
],
"type": "field"
},
@@ -4871,7 +4896,7 @@
[
{
"params": [
- "tg__0.LatencyMax.7"
+ "tg__0.collect_stats.LatencyMax.7"
],
"type": "field"
},
@@ -4979,7 +5004,7 @@
[
{
"params": [
- "tg__0.LatencyAvg.8"
+ "tg__0.collect_stats.LatencyAvg.8"
],
"type": "field"
},
@@ -5017,7 +5042,7 @@
[
{
"params": [
- "tg__0.LatencyMax.8"
+ "tg__0.collect_stats.LatencyMax.8"
],
"type": "field"
},
@@ -5783,8 +5808,8 @@
"list": []
},
"time": {
- "from": "2018-02-12T15:17:27.733Z",
- "to": "2018-02-12T16:44:28.270Z"
+ "from": "now-2d",
+ "to": "now"
},
"timepicker": {
"refresh_intervals": [
@@ -5813,5 +5838,5 @@
},
"timezone": "browser",
"title": "Prox_BM_L2FWD-4Port_MultiSize",
- "version": 29
-}
\ No newline at end of file
+ "version": 12
+}
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
index 92cd51439..261a8bd95 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc074.rst
@@ -19,16 +19,27 @@ Yardstick Test Case Description TC074
|metric | Storage performance |
| | |
+--------------+--------------------------------------------------------------+
-|test purpose | Storperf integration with yardstick. The purpose of StorPerf |
-| | is to provide a tool to measure block and object storage |
-| | performance in an NFVI. When complemented with a |
-| | characterization of typical VF storage performance |
-| | requirements, it can provide pass/fail thresholds for test, |
-| | staging, and production NFVI environments. |
-| | |
-| | The benchmarks developed for block and object storage will |
-| | be sufficiently varied to provide a good preview of expected |
-| | storage performance behavior for any type of VNF workload. |
+|test purpose | To evaluate and report on the Cinder volume performance. |
+| | |
+| | This testcase integrates with OPNFV StorPerf to measure |
+| | block performance of the underlying Cinder drivers. Many |
+| | options are supported, and even the root disk (Glance |
+| | ephemeral storage) can be profiled. |
+| | |
+| | The fundamental concept of the test case is to first fill |
+| | the volumes with random data to ensure reported metrics |
+| | are indicative of continued usage and not skewed by |
+| | transitional performance while the underlying storage |
+| | driver allocates blocks. |
+| | The metrics for filling the volumes with random data |
+| | are not reported in the final results. The test also |
+| | ensures the volumes are performing at a consistent level |
+| | of performance by measuring metrics every minute, and |
+| | comparing the trend of the metrics over the run. By |
+| | evaluating the min and max values, as well as the slope of |
+| | the trend, it can make the determination that the metrics |
+| | are stable, and not fluctuating beyond industry standard |
+| | norms. |
| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc074.yaml |
@@ -38,7 +49,8 @@ Yardstick Test Case Description TC074
| | * public_network: "ext-net" - name of public network |
| | * volume_size: 2 - cinder volume size |
| | * block_sizes: "4096" - data block size |
-| | * queue_depths: "4" |
+| | * queue_depths: "4" - the number of simultaneous I/Os |
+| | to perform at all times |
| | * StorPerf_ip: "192.168.200.2" |
| | * query_interval: 10 - state query interval |
| | * timeout: 600 - maximum allowed job time |
@@ -50,7 +62,11 @@ Yardstick Test Case Description TC074
| | performance in an NFVI. |
| | |
| | StorPerf is delivered as a Docker container from |
-| | https://hub.docker.com/r/opnfv/storperf/tags/. |
+| | https://hub.docker.com/r/opnfv/storperf-master/tags/. |
+| | |
+| | The underlying tool used is FIO, and StorPerf supports |
+| | any FIO option in order to tailor the test to the exact |
+| | workload needed. |
| | |
+--------------+--------------------------------------------------------------+
|references | Storperf_ |
@@ -80,9 +96,17 @@ Yardstick Test Case Description TC074
| | - rr: 100% Read, random access |
| | - wr: 100% Write, random access |
| | - rw: 70% Read / 30% write, random access |
-| | * nossd: Do not perform SSD style preconditioning. |
-| | * nowarm: Do not perform a warmup prior to |
| | measurements. |
+| | * workloads={json maps} |
+| | This parameter supersedes the workload and calls the V2.0 |
+| | API in StorPerf. It allows for greater control of the |
+| | parameters to be passed to FIO. For example, running a |
+| | random read/write with a mix of 90% read and 10% write |
+| | would be expressed as follows: |
+| | {"9010randrw": {"rw":"randrw","rwmixread": "90"}} |
+| | Note: This must be passed in as a string, so don't forget |
+| | to escape or otherwise properly deal with the quotes. |
+| | |
| | * report= [job_id] |
| | Query the status of the supplied job_id and report on |
| | metrics. If a workload is supplied, will report on only |
@@ -92,8 +116,7 @@ Yardstick Test Case Description TC074
| | |
+--------------+--------------------------------------------------------------+
|pre-test | If you do not have an Ubuntu 14.04 image in Glance, you will |
-|conditions | need to add one. A key pair for launching agents is also |
-| | required. |
+|conditions | need to add one. |
| | |
| | Storperf is required to be installed in the environment. |
| | There are two possible methods for Storperf installation: |
@@ -126,10 +149,21 @@ Yardstick Test Case Description TC074
|test sequence | description and expected result |
| | |
+--------------+--------------------------------------------------------------+
-|step 1 | The Storperf is installed and Ubuntu 14.04 image is stored |
-| | in glance. TC is invoked and logs are produced and stored. |
+|step 1 | Yardstick calls StorPerf to create the heat stack with the |
+| | number of VMs and size of Cinder volumes specified. The |
+| | VMs will be on their own private subnet, and take floating |
+| | IP addresses from the specified public network. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | Yardstick calls StorPerf to fill all the volumes with |
+| | random data. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Yardstick calls StorPerf to perform the series of tests |
+| | specified by the workload, queue depths and block sizes. |
| | |
-| | Result: Logs are stored. |
++--------------+--------------------------------------------------------------+
+|step 4 | Yardstick calls StorPerf to delete the stack it created. |
| | |
+--------------+--------------------------------------------------------------+
|test verdict | None. Storage performance results are fetched and stored. |
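
To illustrate the workloads={json maps} option documented above, the v2.0 map is passed as a single escaped string alongside the existing options; a minimal sketch assembled from the values documented in this file (the exact layout of opnfv_yardstick_tc074.yaml is not shown here and is an assumption):

    options:
      public_network: "ext-net"
      volume_size: 2
      block_sizes: "4096"
      queue_depths: "4"
      StorPerf_ip: "192.168.200.2"
      query_interval: 10
      timeout: 600
      workloads: '{"9010randrw": {"rw": "randrw", "rwmixread": "90"}}'
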
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
index a70ea658b..60f21bd70 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
@@ -94,7 +94,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
@@ -113,7 +113,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
index a70ea658b..60f21bd70 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
@@ -94,7 +94,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
@@ -113,7 +113,7 @@ rand_offset=14
random=0000XXXX00XX00XX
rand_offset=18
; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
rand_offset=38
lat pos=42
diff --git a/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg b/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
index 7d350bd91..c191d29d5 100644
--- a/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
@@ -14,7 +14,7 @@
#
[lua]
-lpm4 = dofile("ipv4.lua")
+lpm4 = dofile("ipv4_bng.lua")
user_table = dofile("gre_table.lua")
[eal options]
diff --git a/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg b/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
index f65b7cbf9..b873fb9af 100644
--- a/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
@@ -14,7 +14,7 @@
#
[lua]
-lpm4 = dofile("ipv4.lua")
+lpm4 = dofile("ipv4_bng.lua")
user_table = dofile("gre_table.lua")
dscp_table = dofile("dscp.lua")
diff --git a/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua b/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua
new file mode 100644
index 000000000..22697b06a
--- /dev/null
+++ b/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua
@@ -0,0 +1,99 @@
+-- Copyright (c) 2016-2017 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+require("parameters")
+
+local lpm4 = {}
+lpm4.next_hops = {
+ {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac(tester_mac1), mpls = 0x112},
+ {id = 1, port_id = 1, ip = ip("2.1.1.1"), mac = mac(tester_mac1), mpls = 0x212},
+ {id = 2, port_id = 0, ip = ip("3.1.1.1"), mac = mac(tester_mac3), mpls = 0x312},
+ {id = 3, port_id = 1, ip = ip("4.1.1.1"), mac = mac(tester_mac3), mpls = 0x412},
+ {id = 4, port_id = 0, ip = ip("5.1.1.1"), mac = mac(tester_mac1), mpls = 0x512},
+ {id = 5, port_id = 1, ip = ip("6.1.1.1"), mac = mac(tester_mac1), mpls = 0x612},
+ {id = 6, port_id = 0, ip = ip("7.1.1.1"), mac = mac(tester_mac3), mpls = 0x712},
+ {id = 7, port_id = 1, ip = ip("8.1.1.1"), mac = mac(tester_mac3), mpls = 0x812},
+ {id = 8, port_id = 0, ip = ip("9.1.1.1"), mac = mac(tester_mac1), mpls = 0x912},
+ {id = 9, port_id = 1, ip = ip("10.1.1.1"), mac = mac(tester_mac1), mpls = 0x1012},
+ {id = 10, port_id = 0, ip = ip("11.1.1.1"), mac = mac(tester_mac3), mpls = 0x1112},
+ {id = 11, port_id = 1, ip = ip("12.1.1.1"), mac = mac(tester_mac3), mpls = 0x1212},
+ {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac(tester_mac1), mpls = 0x1312},
+ {id = 13, port_id = 1, ip = ip("14.1.1.1"), mac = mac(tester_mac1), mpls = 0x1412},
+ {id = 14, port_id = 0, ip = ip("15.1.1.1"), mac = mac(tester_mac3), mpls = 0x1512},
+ {id = 15, port_id = 1, ip = ip("16.1.1.1"), mac = mac(tester_mac3), mpls = 0x1612},
+ {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac(tester_mac1), mpls = 0x1712},
+ {id = 17, port_id = 1, ip = ip("18.1.1.1"), mac = mac(tester_mac1), mpls = 0x1812},
+ {id = 18, port_id = 0, ip = ip("19.1.1.1"), mac = mac(tester_mac3), mpls = 0x1912},
+ {id = 19, port_id = 1, ip = ip("20.1.1.1"), mac = mac(tester_mac3), mpls = 0x2012},
+ {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac(tester_mac1), mpls = 0x2112},
+ {id = 21, port_id = 1, ip = ip("22.1.1.1"), mac = mac(tester_mac1), mpls = 0x2212},
+ {id = 22, port_id = 0, ip = ip("23.1.1.1"), mac = mac(tester_mac3), mpls = 0x2312},
+ {id = 23, port_id = 1, ip = ip("24.1.1.1"), mac = mac(tester_mac3), mpls = 0x2412},
+ {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac(tester_mac1), mpls = 0x2512},
+ {id = 25, port_id = 1, ip = ip("26.1.1.1"), mac = mac(tester_mac1), mpls = 0x2612},
+ {id = 26, port_id = 0, ip = ip("27.1.1.1"), mac = mac(tester_mac3), mpls = 0x2712},
+ {id = 27, port_id = 1, ip = ip("28.1.1.1"), mac = mac(tester_mac3), mpls = 0x2812},
+ {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac(tester_mac1), mpls = 0x2912},
+ {id = 29, port_id = 1, ip = ip("30.1.1.1"), mac = mac(tester_mac1), mpls = 0x3012},
+ {id = 30, port_id = 0, ip = ip("31.1.1.1"), mac = mac(tester_mac3), mpls = 0x3112},
+ {id = 31, port_id = 1, ip = ip("32.1.1.1"), mac = mac(tester_mac3), mpls = 0x3212},
+ {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac(tester_mac1), mpls = 0x3312},
+ {id = 33, port_id = 1, ip = ip("34.1.1.1"), mac = mac(tester_mac1), mpls = 0x3412},
+ {id = 34, port_id = 0, ip = ip("35.1.1.1"), mac = mac(tester_mac3), mpls = 0x3512},
+ {id = 35, port_id = 1, ip = ip("36.1.1.1"), mac = mac(tester_mac3), mpls = 0x3612},
+ {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac(tester_mac1), mpls = 0x3712},
+ {id = 37, port_id = 1, ip = ip("38.1.1.1"), mac = mac(tester_mac1), mpls = 0x3812},
+ {id = 38, port_id = 0, ip = ip("39.1.1.1"), mac = mac(tester_mac3), mpls = 0x3912},
+ {id = 39, port_id = 1, ip = ip("40.1.1.1"), mac = mac(tester_mac3), mpls = 0x4012},
+ {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac(tester_mac1), mpls = 0x4112},
+ {id = 41, port_id = 1, ip = ip("42.1.1.1"), mac = mac(tester_mac1), mpls = 0x4212},
+ {id = 42, port_id = 0, ip = ip("43.1.1.1"), mac = mac(tester_mac3), mpls = 0x4312},
+ {id = 43, port_id = 1, ip = ip("44.1.1.1"), mac = mac(tester_mac3), mpls = 0x4412},
+ {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac(tester_mac1), mpls = 0x4512},
+ {id = 45, port_id = 1, ip = ip("46.1.1.1"), mac = mac(tester_mac1), mpls = 0x4612},
+ {id = 46, port_id = 0, ip = ip("47.1.1.1"), mac = mac(tester_mac3), mpls = 0x4712},
+ {id = 47, port_id = 1, ip = ip("48.1.1.1"), mac = mac(tester_mac3), mpls = 0x4812},
+ {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac(tester_mac1), mpls = 0x4912},
+ {id = 49, port_id = 1, ip = ip("50.1.1.1"), mac = mac(tester_mac1), mpls = 0x5012},
+ {id = 50, port_id = 0, ip = ip("51.1.1.1"), mac = mac(tester_mac3), mpls = 0x5112},
+ {id = 51, port_id = 1, ip = ip("52.1.1.1"), mac = mac(tester_mac3), mpls = 0x5212},
+ {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac(tester_mac1), mpls = 0x5312},
+ {id = 53, port_id = 1, ip = ip("54.1.1.1"), mac = mac(tester_mac1), mpls = 0x5412},
+ {id = 54, port_id = 0, ip = ip("55.1.1.1"), mac = mac(tester_mac3), mpls = 0x5512},
+ {id = 55, port_id = 1, ip = ip("56.1.1.1"), mac = mac(tester_mac3), mpls = 0x5612},
+ {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac(tester_mac1), mpls = 0x5712},
+ {id = 57, port_id = 1, ip = ip("58.1.1.1"), mac = mac(tester_mac1), mpls = 0x5812},
+ {id = 58, port_id = 0, ip = ip("59.1.1.1"), mac = mac(tester_mac3), mpls = 0x5912},
+ {id = 59, port_id = 1, ip = ip("60.1.1.1"), mac = mac(tester_mac3), mpls = 0x6012},
+ {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac(tester_mac1), mpls = 0x6112},
+ {id = 61, port_id = 1, ip = ip("62.1.1.1"), mac = mac(tester_mac1), mpls = 0x6212},
+ {id = 62, port_id = 0, ip = ip("63.1.1.1"), mac = mac(tester_mac3), mpls = 0x6312},
+ {id = 63, port_id = 1, ip = ip("64.1.1.1"), mac = mac(tester_mac3), mpls = 0x6412},
+}
+
+lpm4.routes = {};
+
+base_ip = 10 * 2^24;
+
+for i = 1,2^13 do
+ res = ip(base_ip + (1 * 2^12) * (i - 1));
+
+ lpm4.routes[i] = {
+ cidr = {ip = res, depth = 24},
+ next_hop_id = (i - 1) % 64,
+ }
+end
+
+return lpm4
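The route loop above is terse; an equivalent sketch in Python (treating ip() as returning a plain 32-bit integer, which is an assumption about the PROX Lua helper) makes the coverage easier to see:

base = 10 << 24                                    # 10.0.0.0
routes = [(base + 4096 * i, 24, i % 64) for i in range(2 ** 13)]

dotted = lambda a: ".".join(str((a >> s) & 0xff) for s in (24, 16, 8, 0))
print(dotted(routes[0][0]), dotted(routes[1][0]), dotted(routes[-1][0]))
# 10.0.0.0 10.0.16.0 11.255.240.0: one /24 prefix every 4096 addresses
# across 10.0.0.0/7, cycling through the 64 MPLS next hops defined above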
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
index 1711c561a..f86913941 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
prox_generate_parameter: True
tg__0:
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
index a7d2d3846..707fc1d30 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
"configs/dscp.lua" : ""
prox_generate_parameter: True
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
index e4cd546bc..d580bd8cc 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
prox_generate_parameter: True
tg__0:
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
index 60002f0b1..7f447b164 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
@@ -36,7 +36,7 @@ scenarios:
"-t": ""
prox_files:
"configs/gre_table.lua" : ""
- "configs/ipv4.lua" : ""
+ "configs/ipv4_bng.lua" : ""
"configs/dscp.lua" : ""
prox_generate_parameter: True
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
index 98b1bf96d..c1acb69a4 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
index ee0415371..54f42b2bc 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
index 19f083646..06fb220da 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
index 95fa0b6d8..f6a12eb31 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
@@ -44,6 +44,7 @@ traffic_profile:
traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate: 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
index c267e7677..194bcd978 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
@@ -43,6 +43,7 @@ traffic_profile:
traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: False
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index 507491446..906793740 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -29,6 +29,7 @@ traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: True
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 3cbd7cd62..6e2f8ec7c 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -29,6 +29,7 @@ traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
duration: {{ duration }}
+ enable_latency: True
uplink_0:
ipv4:
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index edff3612e..cfc5f1ea3 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -43,6 +43,7 @@ traffic_profile:
traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
frame_rate : 100 # pc of linerate
injection_time: {{ injection_time }}
+ enable_latency: True
uplink_0:
ipv4:
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
index fe8423d25..d08dbaa6e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -15,20 +15,30 @@ description: >
{% set public_network = public_network or "ext-net" %}
{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
+{% set workload = workload or "" %}
+{% set workloads = workloads or "" %}
+{% set agent_count = agent_count or 1 %}
+{% set block_sizes = block_sizes or "4096" %}
+{% set queue_depths = queue_depths or "4" %}
+{% set steady_state_samples = steady_state_samples or 10 %}
+{% set volume_size = volume_size or 4 %}
scenarios:
-
type: StorPerf
options:
- agent_count: 1
+ agent_count: {{agent_count}}
agent_image: "Ubuntu-16.04"
agent_flavor: "storperf"
public_network: {{public_network}}
- volume_size: 4
- block_sizes: "4096"
- queue_depths: "4"
+ volume_size: {{volume_size}}
+ block_sizes: {{block_sizes}}
+ queue_depths: {{queue_depths}}
StorPerf_ip: {{StorPerf_ip}}
query_interval: 10
timeout: 300
+ workload: {{workload}}
+ workloads: {{workloads}}
+ steady_state_samples: {{steady_state_samples}}
runner:
type: Iteration
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index ecddcbbe0..962cb48e2 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -89,6 +89,17 @@ VM_TEMPLATE = """
</devices>
</domain>
"""
+
+USER_DATA_TEMPLATE = """
+cat > {user_file} <<EOF
+#cloud-config
+preserve_hostname: false
+hostname: {host}
+users:
+{user_config}
+EOF
+"""
+
WAIT_FOR_BOOT = 30
@@ -268,7 +279,7 @@ class Libvirt(object):
return vm_image
@classmethod
- def build_vm_xml(cls, connection, flavor, vm_name, index):
+ def build_vm_xml(cls, connection, flavor, vm_name, index, cdrom_img):
"""Build the XML from the configuration parameters"""
memory = flavor.get('ram', '4096')
extra_spec = flavor.get('extra_specs', {})
@@ -293,6 +304,9 @@ class Libvirt(object):
socket=socket, threads=threads,
vm_image=image, cpuset=cpuset, cputune=cputune)
+ # Add CD-ROM device
+ vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml)
+
return vm_xml, mac
@staticmethod
@@ -320,6 +334,71 @@ class Libvirt(object):
et = ET.ElementTree(element=root)
et.write(file_name, encoding='utf-8', method='xml')
+ @classmethod
+ def add_cdrom(cls, file_path, xml_str):
+ """Add a CD-ROM disk XML node in 'devices' node
+
+ <devices>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='/var/lib/libvirt/images/data.img'/>
+ <target dev='hdb'/>
+ <readonly/>
+ </disk>
+ ...
+ </devices>
+ """
+
+ root = ET.fromstring(xml_str)
+ device = root.find('devices')
+
+ disk = ET.SubElement(device, 'disk')
+ disk.set('type', 'file')
+ disk.set('device', 'cdrom')
+
+ driver = ET.SubElement(disk, 'driver')
+ driver.set('name', 'qemu')
+ driver.set('type', 'raw')
+
+ source = ET.SubElement(disk, 'source')
+ source.set('file', file_path)
+
+ target = ET.SubElement(disk, 'target')
+ target.set('dev', 'hdb')
+
+ ET.SubElement(disk, 'readonly')
+ return ET.tostring(root)
+
+ @staticmethod
+ def gen_cdrom_image(connection, file_path, vm_name, vm_user, key_filename):
+ """Generate ISO image for CD-ROM """
+
+ user_config = [" - name: {user_name}",
+ " ssh_authorized_keys:",
+ " - {pub_key_str}"]
+ if vm_user != "root":
+ user_config.append(" sudo: ALL=(ALL) NOPASSWD:ALL")
+
+ meta_data = "/tmp/meta-data"
+ user_data = "/tmp/user-data"
+ with open(".".join([key_filename, "pub"]), "r") as pub_key_file:
+ pub_key_str = pub_key_file.read().rstrip()
+ user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=vm_user)
+
+ cmd_lst = [
+ "touch %s" % meta_data,
+ USER_DATA_TEMPLATE.format(user_file=user_data, host=vm_name, user_config=user_conf),
+ "genisoimage -output {0} -volid cidata -joliet -r {1} {2}".format(file_path,
+ meta_data,
+ user_data),
+ "rm {0} {1}".format(meta_data, user_data),
+ ]
+ for cmd in cmd_lst:
+ LOG.info(cmd)
+ status, _, error = connection.execute(cmd)
+ if status:
+ raise exceptions.LibvirtQemuImageCreateError(error=error)
+
class StandaloneContextHelper(object):
""" This class handles all the common code for standalone
@@ -331,7 +410,7 @@ class StandaloneContextHelper(object):
@staticmethod
def install_req_libs(connection, extra_pkgs=None):
extra_pkgs = extra_pkgs or []
- pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
+ pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping", "genisoimage"]
pkgs.extend(extra_pkgs)
cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
for pkg in pkgs:
@@ -457,6 +536,25 @@ class StandaloneContextHelper(object):
node["ip"] = ip
return nodes
+ @classmethod
+ def check_update_key(cls, connection, node, vm_name, id_name, cdrom_img):
+ # Generate public/private keys if private key file is not provided
+ user_name = node.get('user')
+ if not user_name:
+ node['user'] = 'root'
+ user_name = node.get('user')
+ if not node.get('key_filename'):
+ key_filename = ''.join(
+ [constants.YARDSTICK_ROOT_PATH,
+ 'yardstick/resources/files/yardstick_key-',
+ id_name])
+ ssh.SSH.gen_keys(key_filename)
+ node['key_filename'] = key_filename
+ # Update image with public key
+ key_filename = node.get('key_filename')
+ Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename)
+ return node
+
class Server(object):
""" This class handles geting vnf nodes
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index 88ad598c3..5891f798e 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -394,13 +394,14 @@ class OvsDpdkContext(base.Context):
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_ovs_%d.xml' % index
- vm_name = "vm_%d" % index
+ vm_name = "vm-%d" % index
+ cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
# 1. Check and delete VM if already exists
model.Libvirt.check_if_vm_exists_and_delete(vm_name,
self.connection)
xml_str, mac = model.Libvirt.build_vm_xml(
- self.connection, self.vm_flavor, vm_name, index)
+ self.connection, self.vm_flavor, vm_name, index, cdrom_img)
# 2: Cleanup already available VMs
for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
@@ -411,16 +412,24 @@ class OvsDpdkContext(base.Context):
model.Libvirt.write_file(cfg, xml_str)
self.connection.put(cfg, cfg)
+ node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+ self.networks,
+ self.host_mgmt.get('ip'),
+ key, vnf, mac)
+ # Generate public/private keys if password or private key file is not provided
+ node = model.StandaloneContextHelper.check_update_key(self.connection,
+ node,
+ vm_name,
+ self.name,
+ cdrom_img)
+
+ # store vnf node details
+ nodes.append(node)
+
# NOTE: launch through libvirt
LOG.info("virsh create ...")
model.Libvirt.virsh_create_vm(self.connection, cfg)
self.vm_names.append(vm_name)
- # build vnf node details
- nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor,
- self.networks,
- self.host_mgmt.get('ip'),
- key, vnf, mac))
-
return nodes
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 3da12a9a8..8d410b2f3 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -225,13 +225,14 @@ class SriovContext(base.Context):
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_sriov_%s.xml' % str(index)
- vm_name = "vm_%s" % str(index)
+ vm_name = "vm-%s" % str(index)
+ cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
# 1. Check and delete VM if already exists
model.Libvirt.check_if_vm_exists_and_delete(vm_name,
self.connection)
xml_str, mac = model.Libvirt.build_vm_xml(
- self.connection, self.vm_flavor, vm_name, index)
+ self.connection, self.vm_flavor, vm_name, index, cdrom_img)
# 2: Cleanup already available VMs
network_ports = collections.OrderedDict(
@@ -243,17 +244,26 @@ class SriovContext(base.Context):
model.Libvirt.write_file(cfg, xml_str)
self.connection.put(cfg, cfg)
+ node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+ self.networks,
+ self.host_mgmt.get('ip'),
+ key, vnf, mac)
+ # Generate public/private keys if password or private key file is not provided
+ node = model.StandaloneContextHelper.check_update_key(self.connection,
+ node,
+ vm_name,
+ self.name,
+ cdrom_img)
+
+ # store vnf node details
+ nodes.append(node)
+
# NOTE: launch through libvirt
LOG.info("virsh create ...")
model.Libvirt.virsh_create_vm(self.connection, cfg)
self.vm_names.append(vm_name)
- # build vnf node details
- nodes.append(self.vnf_node.generate_vnf_instance(
- self.vm_flavor, self.networks, self.host_mgmt.get('ip'),
- key, vnf, mac))
-
return nodes
def _get_vf_data(self, value, vfmac, pfif):
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 53abd586b..4c79a4931 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -34,6 +34,8 @@ class BaremetalAttacker(BaseAttacker):
__attacker_type__ = 'bare-metal-down'
def setup(self):
+ # bare-metal-down must always be recovered, even when the SLA passes
+ self.mandatory = True
LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d67a16b98..7871cc918 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -63,6 +63,7 @@ class BaseAttacker(object):
self.data = {}
self.setup_done = False
self.intermediate_variables = {}
+ self.mandatory = False
@staticmethod
def get_attacker_cls(attacker_cfg):
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 7f976fdbc..fdfe7cbbe 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -88,9 +88,9 @@ class ServiceHA(base.Scenario):
def teardown(self):
"""scenario teardown"""
- # only recover when sla not pass
- if not self.sla_pass:
- for attacker in self.attackers:
+ # recover when mandatory or sla not pass
+ for attacker in self.attackers:
+ if attacker.mandatory or not self.sla_pass:
attacker.recover()
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index f0b2361d6..8093cd2d2 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -8,15 +8,16 @@
##############################################################################
from __future__ import absolute_import
-import os
import logging
+import os
import time
-import requests
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios import base
+
LOG = logging.getLogger(__name__)
@@ -43,12 +44,6 @@ class StorPerf(base.Scenario):
wr: 100% Write, random access
rw: 70% Read / 30% write, random access
- nossd (Optional):
- Do not perform SSD style preconditioning.
-
- nowarm (Optional):
- Do not perform a warmup prior to measurements.
-
report = [job_id] (Optional):
Query the status of the supplied job_id and report on metrics.
If a workload is supplied, will report on only that subset.
@@ -79,10 +74,13 @@ class StorPerf(base.Scenario):
setup_query_content = jsonutils.loads(
setup_query.content)
- if setup_query_content["stack_created"]:
- self.setup_done = True
+ if ("stack_created" in setup_query_content and
+ setup_query_content["stack_created"]):
LOG.debug("stack_created: %s",
setup_query_content["stack_created"])
+ return True
+
+ return False
def setup(self):
"""Set the configuration."""
@@ -111,9 +109,13 @@ class StorPerf(base.Scenario):
elif setup_res.status_code == 200:
LOG.info("stack_id: %s", setup_res_content["stack_id"])
- while not self.setup_done:
- self._query_setup_state()
- time.sleep(self.query_interval)
+ while not self._query_setup_state():
+ time.sleep(self.query_interval)
+
+ # We do not want to load the results of the disk initialization,
+ # so it is not added to the results here.
+ self.initialize_disks()
+ self.setup_done = True
def _query_job_state(self, job_id):
"""Query the status of the supplied job_id and report on metrics"""
@@ -149,7 +151,8 @@ class StorPerf(base.Scenario):
if not self.setup_done:
self.setup()
- metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+ metadata = {"build_tag": "latest",
+ "test_case": "opnfv_yardstick_tc074"}
metadata_payload_dict = {"pod_name": "NODE_NAME",
"scenario_name": "DEPLOY_SCENARIO",
"version": "YARDSTICK_BRANCH"}
@@ -162,7 +165,9 @@ class StorPerf(base.Scenario):
job_args = {"metadata": metadata}
job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
- "target", "nossd", "nowarm", "workload"]
+ "target", "workload", "workloads",
+ "agent_count", "steady_state_samples"]
+ job_args["deadline"] = self.options["timeout"]
for job_argument in job_args_payload_list:
try:
@@ -170,8 +175,16 @@ class StorPerf(base.Scenario):
except KeyError:
pass
+ api_version = "v1.0"
+
+ if ("workloads" in job_args and
+ job_args["workloads"] is not None and
+ len(job_args["workloads"]) > 0):
+ api_version = "v2.0"
+
LOG.info("Starting a job with parameters %s", job_args)
- job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
+ job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+ api_version),
json=job_args)
job_res_content = jsonutils.loads(job_res.content)
@@ -187,15 +200,6 @@ class StorPerf(base.Scenario):
self._query_job_state(job_id)
time.sleep(self.query_interval)
- terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
- self.target)
-
- if terminate_res.status_code != 200:
- terminate_res_content = jsonutils.loads(
- terminate_res.content)
- raise RuntimeError("Failed to start a job, error message:",
- terminate_res_content["message"])
-
# TODO: Support using ETA to polls for completion.
# Read ETA, next poll in 1/2 ETA time slot.
# If ETA is greater than the maximum allowed job time,
@@ -216,14 +220,46 @@ class StorPerf(base.Scenario):
result.update(result_res_content)
+ def initialize_disks(self):
+ """Fills the target with random data prior to executing workloads"""
+
+ job_args = {}
+ job_args_payload_list = ["target"]
+
+ for job_argument in job_args_payload_list:
+ try:
+ job_args[job_argument] = self.options[job_argument]
+ except KeyError:
+ pass
+
+ LOG.info("Starting initialization with parameters %s", job_args)
+ job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+ self.target, json=job_args)
+
+ job_res_content = jsonutils.loads(job_res.content)
+
+ if job_res.status_code != 200:
+ raise RuntimeError(
+ "Failed to start initialization job, error message:",
+ job_res_content["message"])
+ elif job_res.status_code == 200:
+ job_id = job_res_content["job_id"]
+ LOG.info("Started initialization as job id: %s...", job_id)
+
+ while not self.job_completed:
+ self._query_job_state(job_id)
+ time.sleep(self.query_interval)
+
+ self.job_completed = False
+
def teardown(self):
"""Deletes the agent configuration and the stack"""
- teardown_res = requests.delete('http://%s:5000/api/v1.0/\
- configurations' % self.target)
+ teardown_res = requests.delete(
+ 'http://%s:5000/api/v1.0/configurations' % self.target)
if teardown_res.status_code == 400:
teardown_res_content = jsonutils.loads(
- teardown_res.content)
+ teardown_res.json_data)
raise RuntimeError("Failed to reset environment, error message:",
teardown_res_content['message'])
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index b39a0af9c..10c1f3f27 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -79,6 +79,10 @@ class FunctionNotImplemented(YardstickException):
'"%(class_name)" class.')
+class InvalidType(YardstickException):
+ message = 'Type "%(type_to_convert)s" is not valid'
+
+
class InfluxDBConfigurationMissing(YardstickException):
message = ('InfluxDB configuration is not available. Add "influxdb" as '
'a dispatcher and the configuration section')
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index c019cd264..31885c073 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -21,6 +21,7 @@ import importlib
import ipaddress
import logging
import os
+import pydoc
import random
import re
import signal
@@ -578,3 +579,24 @@ def send_socket_command(host, port, command):
finally:
sock.close()
return ret
+
+
+def safe_cast(value, type_to_convert, default_value):
+ """Convert value to type, in case of error return default_value
+
+ :param value: value to convert
+ :param type_to_convert: type to convert to, given as a type object or as a string name
+ :param default_value: default value to return
+ :return: converted value or default_value
+ """
+ if isinstance(type_to_convert, type):
+ _type = type_to_convert
+ else:
+ _type = pydoc.locate(type_to_convert)
+ if not _type:
+ raise exceptions.InvalidType(type_to_convert=type_to_convert)
+
+ try:
+ return _type(value)
+ except ValueError:
+ return default_value
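safe_cast() is consumed further down by the IXIA resource helper to coerce Store-Forward latency counters, which are not guaranteed to be numeric, into ints with a fallback of 0. A few illustrative calls:

from yardstick.common import utils

utils.safe_cast("42", int, 0)            # -> 42
utils.safe_cast("", int, 0)              # -> 0 (ValueError swallowed, default returned)
utils.safe_cast("3.5", "float", 0.0)     # -> 3.5 (type given by name via pydoc.locate)
utils.safe_cast("1", "no_such_type", 0)  # raises exceptions.InvalidType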
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index a8f950b7b..4fbceea9b 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -44,6 +44,7 @@ class TrafficProfileConfig(object):
self.lower_bound = tprofile.get('lower_bound')
self.upper_bound = tprofile.get('upper_bound')
self.step_interval = tprofile.get('step_interval')
+ self.enable_latency = tprofile.get('enable_latency', False)
def _parse_rate(self, rate):
"""Parse traffic profile rate
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index ccaf3ded3..461604fb8 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -168,12 +168,8 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
[samples[iface]['in_packets'] for iface in samples])
out_packets_sum = sum(
[samples[iface]['out_packets'] for iface in samples])
- rx_throughput = sum(
- [samples[iface]['RxThroughput'] for iface in samples])
- rx_throughput = round(float(rx_throughput), 2)
- tx_throughput = sum(
- [samples[iface]['TxThroughput'] for iface in samples])
- tx_throughput = round(float(tx_throughput), 2)
+ rx_throughput = round(float(in_packets_sum) / duration, 3)
+ tx_throughput = round(float(out_packets_sum) / duration, 3)
packet_drop = abs(out_packets_sum - in_packets_sum)
try:
@@ -183,10 +179,6 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
except ZeroDivisionError:
LOG.info('No traffic is flowing')
- samples['TxThroughput'] = tx_throughput
- samples['RxThroughput'] = rx_throughput
- samples['DropPercentage'] = drop_percent
-
if first_run:
completed = True if drop_percent <= tolerance else False
if (first_run and
@@ -200,4 +192,21 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
else:
completed = True
+ latency_ns_avg = float(
+ sum([samples[iface]['Store-Forward_Avg_latency_ns']
+ for iface in samples])) / num_ifaces
+ latency_ns_min = float(
+ sum([samples[iface]['Store-Forward_Min_latency_ns']
+ for iface in samples])) / num_ifaces
+ latency_ns_max = float(
+ sum([samples[iface]['Store-Forward_Max_latency_ns']
+ for iface in samples])) / num_ifaces
+
+ samples['TxThroughput'] = tx_throughput
+ samples['RxThroughput'] = rx_throughput
+ samples['DropPercentage'] = drop_percent
+ samples['latency_ns_avg'] = latency_ns_avg
+ samples['latency_ns_min'] = latency_ns_min
+ samples['latency_ns_max'] = latency_ns_max
+
return completed, samples
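The reworked get_drop_percentage() now derives throughput from raw packet counts over the configured duration and averages the Store-Forward latencies across ports instead of summing per-port rate counters. A small worked example with two fake ports (numbers illustrative):

duration = 30.0
samples = {
    "xe0": {"in_packets": 1000, "out_packets": 1100,
            "Store-Forward_Avg_latency_ns": 2000},
    "xe1": {"in_packets": 2000, "out_packets": 2100,
            "Store-Forward_Avg_latency_ns": 4000},
}
in_sum = sum(p["in_packets"] for p in samples.values())        # 3000
out_sum = sum(p["out_packets"] for p in samples.values())      # 3200
rx_tput = round(float(in_sum) / duration, 3)                   # 100.0 pps
tx_tput = round(float(out_sum) / duration, 3)                  # 106.667 pps
drop_pct = abs(out_sum - in_sum) * 100.0 / out_sum             # 6.25 %
lat_avg = (2000 + 4000) / 2.0                                  # 3000.0 ns over 2 ports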
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 506a880e0..16a0411ec 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -25,6 +25,14 @@ from yardstick.common import constants as overall_constants
LOG = logging.getLogger(__name__)
+STATUS_SUCCESS = "Success"
+STATUS_FAIL = "Failure"
+STATUS_RESULT = "Result"
+STEP_CONFIRM = "Confirm retry"
+STEP_INCREASE_LOWER = "Increase lower"
+STEP_DECREASE_LOWER = "Decrease lower"
+STEP_DECREASE_UPPER = "Decrease upper"
+
class ProxBinSearchProfile(ProxProfile):
"""
@@ -85,18 +93,16 @@ class ProxBinSearchProfile(ProxProfile):
# success, the binary search will complete on an integer multiple
# of the precision, rather than on a fraction of it.
- theor_max_thruput = actual_max_thruput = 0
+ theor_max_thruput = 0
result_samples = {}
- # Store one time only value in influxdb
- single_samples = {
+ test_data = {
"test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
"test_precision": self.params["traffic_profile"]["test_precision"],
"tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
"duration": duration
}
- self.queue.put(single_samples)
self.prev_time = time.time()
# throughput and packet loss from the most recent successful test
@@ -110,85 +116,88 @@ class ProxBinSearchProfile(ProxProfile):
neg_retry = 0
total_retry = 0
- LOG.info("Checking MAX %s MIN %s TEST %s",
- self.current_upper, self.lower_bound, test_value)
+ LOG.info("Checking MAX %s MIN %s TEST %s", self.current_upper,
+ self.lower_bound, test_value)
+
while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
total_retry = total_retry + 1
+
result, port_samples = self._profile_helper.run_test(pkt_size, duration,
test_value,
self.tolerated_loss,
line_speed)
- if (total_retry > (ok_retry * 3)) and (ok_retry is not 0):
- LOG.info("Failure.!! .. RETRY EXCEEDED ... decrease lower bound")
+ if (total_retry > (ok_retry * 3)) and (ok_retry is not 0):
+ status = STATUS_FAIL
+ next_step = STEP_DECREASE_LOWER
successful_pkt_loss = result.pkt_loss
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
-
self.current_upper = test_value
neg_retry = total_retry
elif result.success:
if (pos_retry < ok_retry) and (ok_retry is not 0):
- neg_retry = 0
- LOG.info("Success! ... confirm retry")
-
+ status = STATUS_SUCCESS
+ next_step = STEP_CONFIRM
successful_pkt_loss = result.pkt_loss
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
-
+ neg_retry = 0
else:
- LOG.info("Success! Increasing lower bound")
+ status = STATUS_SUCCESS
+ next_step = STEP_INCREASE_LOWER
self.current_lower = test_value
-
successful_pkt_loss = result.pkt_loss
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
-
- # store results with success tag in influxdb
- success_samples = \
- {'Success_' + key: value for key, value in samples.items()}
-
- success_samples["Success_rx_total"] = int(result.rx_total)
- success_samples["Success_tx_total"] = int(result.tx_total)
- success_samples["Success_can_be_lost"] = int(result.can_be_lost)
- success_samples["Success_drop_total"] = int(result.drop_total)
- success_samples["Success_RxThroughput"] = samples["RxThroughput"]
- success_samples["Success_RxThroughput_gbps"] = \
- (samples["RxThroughput"] / 1000) * ((pkt_size + 20)* 8)
- LOG.info(">>>##>>Collect SUCCESS TG KPIs %s %s",
- datetime.datetime.now(), success_samples)
- self.queue.put(success_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
-
- # Store Actual throughput for result samples
- actual_max_thruput = success_samples["Success_RxThroughput"]
pos_retry = pos_retry + 1
else:
if (neg_retry < ok_retry) and (ok_retry is not 0):
-
+ status = STATUS_FAIL
+ next_step = STEP_CONFIRM
pos_retry = 0
- LOG.info("failure! ... confirm retry")
else:
- LOG.info("Failure... Decreasing upper bound")
+ status = STATUS_FAIL
+ next_step = STEP_DECREASE_UPPER
self.current_upper = test_value
neg_retry = neg_retry + 1
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ LOG.info(
+ "Status = '%s' Next_Step = '%s'", status, next_step)
+
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
if theor_max_thruput < samples["TxThroughput"]:
theor_max_thruput = samples['TxThroughput']
- self.queue.put({'theor_max_throughput': theor_max_thruput})
-
- LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+ samples['theor_max_throughput'] = theor_max_thruput
+
+ samples["rx_total"] = int(result.rx_total)
+ samples["tx_total"] = int(result.tx_total)
+ samples["can_be_lost"] = int(result.can_be_lost)
+ samples["drop_total"] = int(result.drop_total)
+ samples["RxThroughput_gbps"] = \
+ (samples["RxThroughput"] / 1000) * ((pkt_size + 20) * 8)
+ samples['Status'] = status
+ samples['Next_Step'] = next_step
samples["MAX_Rate"] = self.current_upper
samples["MIN_Rate"] = self.current_lower
samples["Test_Rate"] = test_value
samples["Step_Id"] = step_id
samples["Confirmation_Retry"] = total_retry
+
+ samples.update(test_data)
+
+ if status == STATUS_SUCCESS and next_step == STEP_INCREASE_LOWER:
+ # Store success samples for result samples
+ result_samples = samples
+
+ LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+
self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
- LOG.info(">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
- pkt_size, theor_max_thruput, actual_max_thruput)
- result_samples["Result_pktSize"] = pkt_size
- result_samples["Result_theor_max_throughput"] = theor_max_thruput
- result_samples["Result_Actual_throughput"] = actual_max_thruput
+ LOG.info(
+ ">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
+ pkt_size, theor_max_thruput, result_samples.get("RxThroughput", 0))
+ result_samples["Status"] = STATUS_RESULT
+ result_samples["Next_Step"] = ""
+ result_samples["Actual_throughput"] = result_samples.get("RxThroughput", 0)
+ result_samples["theor_max_throughput"] = theor_max_thruput
self.queue.put(result_samples)
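Underneath the new status/next-step bookkeeping, the profile is still a binary search over offered rate; each iteration now emits one sample tagged with Status/Next_Step instead of separate 'Success_*' records, and the last successful sample becomes the final 'Result' record. A bare-bones sketch of the bound updates (run_test is a hypothetical stand-in for _profile_helper.run_test(); the real code also snaps test values to multiples of test_precision and honours the confirmation retries):

lower, upper = 0.0, 100.0                  # percent of line rate
precision = 1.0
run_test = lambda rate: rate <= 37.5       # pretend the DUT tops out at 37.5%

while upper - lower > precision:
    test_value = lower + (upper - lower) / 2.0
    if run_test(test_value):               # Status='Success', Next_Step='Increase lower'
        lower = test_value
    else:                                  # Status='Failure', Next_Step='Decrease upper'
        upper = test_value

print(lower)                               # converges on the highest passing rate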
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index b54fc575f..987029373 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -118,7 +118,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
ports.append(port_num)
port_pg_id.add_port(port_num)
profile = self._create_profile(profile_data,
- self.rate, port_pg_id)
+ self.rate, port_pg_id,
+ self.config.enable_latency)
self.generator.client.add_streams(profile, ports=[port_num])
self.generator.client.start(ports=ports,
@@ -126,7 +127,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
force=True)
return ports, port_pg_id
- def _create_profile(self, profile_data, rate, port_pg_id):
+ def _create_profile(self, profile_data, rate, port_pg_id, enable_latency):
"""Create a STL profile (list of streams) for a port"""
streams = []
for packet_name in profile_data:
@@ -134,7 +135,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
get('outer_l2', {}).get('framesize'))
imix_data = self._create_imix_data(imix)
self._create_vm(profile_data[packet_name])
- _streams = self._create_streams(imix_data, rate, port_pg_id)
+ _streams = self._create_streams(imix_data, rate, port_pg_id,
+ enable_latency)
streams.extend(_streams)
return trex_stl_streams.STLProfile(streams)
@@ -213,7 +215,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
return trex_stl_packet_builder_scapy.STLPktBuilder(
pkt=base_pkt / pad, vm=self.trex_vm)
- def _create_streams(self, imix_data, rate, port_pg_id):
+ def _create_streams(self, imix_data, rate, port_pg_id, enable_latency):
"""Create a list of streams per packet size
The STL TX mode speed of the generated streams will depend on the frame
@@ -237,7 +239,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
in imix_data.items() if float(weight) > 0):
packet = self._create_single_packet(size)
pg_id = port_pg_id.increase_pg_id()
- stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ stl_flow = (trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id) if
+ enable_latency else None)
mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
streams.append(trex_stl_client.STLStream(
packet=packet, flow_stats=stl_flow, mode=mode))
@@ -247,19 +250,16 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
correlated_traffic):
"""Calculate the drop percentage and run the traffic"""
completed = False
- tx_rate_fps = 0
- rx_rate_fps = 0
- for sample in samples:
- tx_rate_fps += sum(
- port['tx_throughput_fps'] for port in sample.values())
- rx_rate_fps += sum(
- port['rx_throughput_fps'] for port in sample.values())
- tx_rate_fps = round(float(tx_rate_fps) / len(samples), 2)
- rx_rate_fps = round(float(rx_rate_fps) / len(samples), 2)
-
- # TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
- out_packets = sum(port['out_packets'] for port in samples[-1].values())
- in_packets = sum(port['in_packets'] for port in samples[-1].values())
+ out_pkt_end = sum(port['out_packets'] for port in samples[-1].values())
+ in_pkt_end = sum(port['in_packets'] for port in samples[-1].values())
+ out_pkt_ini = sum(port['out_packets'] for port in samples[0].values())
+ in_pkt_ini = sum(port['in_packets'] for port in samples[0].values())
+ time_diff = (list(samples[-1].values())[0]['timestamp'] -
+ list(samples[0].values())[0]['timestamp']).total_seconds()
+ out_packets = out_pkt_end - out_pkt_ini
+ in_packets = in_pkt_end - in_pkt_ini
+ tx_rate_fps = float(out_packets) / time_diff
+ rx_rate_fps = float(in_packets) / time_diff
drop_percent = 100.0
# https://tools.ietf.org/html/rfc2544#section-26.3
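On the TRex side, rates are now computed from the first and last poll of the measurement window using the per-port timestamps added in tg_trex.py below, rather than averaging instantaneous *_throughput_fps readings. A worked sketch with two polls taken 2 seconds apart:

import datetime

t0 = datetime.datetime(2018, 1, 1, 0, 0, 0)
t1 = t0 + datetime.timedelta(seconds=2)
samples = [
    {"xe0": {"out_packets": 1000, "in_packets": 990, "timestamp": t0}},
    {"xe0": {"out_packets": 5000, "in_packets": 4950, "timestamp": t1}},
]

out_pkts = (sum(p["out_packets"] for p in samples[-1].values()) -
            sum(p["out_packets"] for p in samples[0].values()))      # 4000
in_pkts = (sum(p["in_packets"] for p in samples[-1].values()) -
           sum(p["in_packets"] for p in samples[0].values()))        # 3960
time_diff = (list(samples[-1].values())[0]["timestamp"] -
             list(samples[0].values())[0]["timestamp"]).total_seconds()  # 2.0

tx_rate_fps = float(out_pkts) / time_diff        # 2000.0
rx_rate_fps = float(in_pkts) / time_diff         # 1980.0
drop_percent = 100.0 * (out_pkts - in_pkts) / out_pkts   # 1.0, compared to the tolerance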
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
index 854319a21..d12c42ec8 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
@@ -12,9 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
import logging
+import copy
from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
@@ -32,7 +31,9 @@ class ProxTrafficGen(SampleVNFTrafficGen):
def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
resource_helper_type=None):
- # don't call superclass, use custom wrapper of ProxApproxVnf
+ vnfd_cpy = copy.deepcopy(vnfd)
+ super(ProxTrafficGen, self).__init__(name, vnfd_cpy, task_id)
+
self._vnf_wrapper = ProxApproxVnf(
name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
self.bin_path = get_nsb_option('bin_path', '')
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 4d3bc2ce5..94ab06980 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -60,7 +60,7 @@ class IxiaResourceHelper(ClientResourceHelper):
def stop_collect(self):
self._terminated.value = 1
- def generate_samples(self, ports, key=None):
+ def generate_samples(self, ports, duration):
stats = self.get_stats()
samples = {}
@@ -70,27 +70,23 @@ class IxiaResourceHelper(ClientResourceHelper):
try:
# reverse lookup port name from port_num so the stats dict is descriptive
intf = self.vnfd_helper.find_interface_by_port(port_num)
- port_name = intf["name"]
+ port_name = intf['name']
+ avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num]
+ min_latency = stats['Store-Forward_Min_latency_ns'][port_num]
+ max_latency = stats['Store-Forward_Max_latency_ns'][port_num]
samples[port_name] = {
- "rx_throughput_kps": float(stats["Rx_Rate_Kbps"][port_num]),
- "tx_throughput_kps": float(stats["Tx_Rate_Kbps"][port_num]),
- "rx_throughput_mbps": float(stats["Rx_Rate_Mbps"][port_num]),
- "tx_throughput_mbps": float(stats["Tx_Rate_Mbps"][port_num]),
- "in_packets": int(stats["Valid_Frames_Rx"][port_num]),
- "out_packets": int(stats["Frames_Tx"][port_num]),
- # NOTE(ralonsoh): we need to make the traffic injection
- # time variable.
- "RxThroughput": int(stats["Valid_Frames_Rx"][port_num]) / 30,
- "TxThroughput": int(stats["Frames_Tx"][port_num]) / 30,
+ 'rx_throughput_kps': float(stats['Rx_Rate_Kbps'][port_num]),
+ 'tx_throughput_kps': float(stats['Tx_Rate_Kbps'][port_num]),
+ 'rx_throughput_mbps': float(stats['Rx_Rate_Mbps'][port_num]),
+ 'tx_throughput_mbps': float(stats['Tx_Rate_Mbps'][port_num]),
+ 'in_packets': int(stats['Valid_Frames_Rx'][port_num]),
+ 'out_packets': int(stats['Frames_Tx'][port_num]),
+ 'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration,
+ 'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration,
+ 'Store-Forward_Avg_latency_ns': utils.safe_cast(avg_latency, int, 0),
+ 'Store-Forward_Min_latency_ns': utils.safe_cast(min_latency, int, 0),
+ 'Store-Forward_Max_latency_ns': utils.safe_cast(max_latency, int, 0)
}
- if key:
- avg_latency = stats["Store-Forward_Avg_latency_ns"][port_num]
- min_latency = stats["Store-Forward_Min_latency_ns"][port_num]
- max_latency = stats["Store-Forward_Max_latency_ns"][port_num]
- samples[port_name][key] = \
- {"Store-Forward_Avg_latency_ns": avg_latency,
- "Store-Forward_Min_latency_ns": min_latency,
- "Store-Forward_Max_latency_ns": max_latency}
except IndexError:
pass
@@ -129,13 +125,11 @@ class IxiaResourceHelper(ClientResourceHelper):
self, self.client, mac)
self.client_started.value = 1
# pylint: disable=unnecessary-lambda
- utils.wait_until_true(lambda: self.client.is_traffic_stopped())
- samples = self.generate_samples(traffic_profile.ports)
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped(),
+ timeout=traffic_profile.config.duration * 2)
+ samples = self.generate_samples(traffic_profile.ports,
+ traffic_profile.config.duration)
- # NOTE(ralonsoh): the traffic injection duration is fixed to 30
- # seconds. This parameter is configurable and must be retrieved
- # from the traffic_profile.full_profile information.
- # Every flow must have the same duration.
completed, samples = traffic_profile.get_drop_percentage(
samples, min_tol, max_tol, first_run=first_run)
self._queue.put(samples)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
index 58b73488b..4296da84c 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -11,8 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex acts as traffic generation and vnf definitions based on IETS Spec """
+import datetime
import logging
import os
@@ -167,6 +167,7 @@ class TrexResourceHelper(ClientResourceHelper):
def _get_samples(self, ports, port_pg_id=None):
stats = self.get_stats(ports)
+ timestamp = datetime.datetime.now()
samples = {}
for pname in (intf['name'] for intf in self.vnfd_helper.interfaces):
port_num = self.vnfd_helper.port_num(pname)
@@ -178,6 +179,7 @@ class TrexResourceHelper(ClientResourceHelper):
'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)),
'in_packets': int(port_stats.get('ipackets', 0)),
'out_packets': int(port_stats.get('opackets', 0)),
+ 'timestamp': timestamp
}
pg_id_list = port_pg_id.get_pg_ids(port_num)
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
index 8ad581918..371e4ef36 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
@@ -46,6 +46,16 @@ XML_SAMPLE_INTERFACE = """<?xml version="1.0"?>
class ModelLibvirtTestCase(unittest.TestCase):
+ XML_STR = model.VM_TEMPLATE.format(
+ vm_name="vm_name",
+ random_uuid=uuid.uuid4(),
+ mac_addr="00:01:02:03:04:05",
+ memory=2048, vcpu=2, cpu=2,
+ numa_cpus=0 - 10,
+ socket=1, threads=1,
+ vm_image="/var/lib/libvirt/images/yardstick-nsb-image.img",
+ cpuset=2 - 10, cputune='')
+
def setUp(self):
self.pci_address_str = '0001:04:03.2'
self.pci_address = utils.PciAddress(self.pci_address_str)
@@ -66,34 +76,34 @@ class ModelLibvirtTestCase(unittest.TestCase):
ssh_mock.execute = mock.Mock(return_value=(0, "a", ""))
ssh.return_value = ssh_mock
# NOTE(ralonsoh): this test doesn't cover function execution.
- model.Libvirt.check_if_vm_exists_and_delete("vm_0", ssh_mock)
+ model.Libvirt.check_if_vm_exists_and_delete('vm-0', ssh_mock)
def test_virsh_create_vm(self):
self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
- model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0')
- self.mock_ssh.execute.assert_called_once_with('virsh create vm_0')
+ model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm-0')
+ self.mock_ssh.execute.assert_called_once_with('virsh create vm-0')
def test_virsh_create_vm_error(self):
self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_create'))
with self.assertRaises(exceptions.LibvirtCreateError) as exc:
- model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0')
+ model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm-0')
self.assertEqual('Error creating the virtual machine. Error: '
'error_create.', str(exc.exception))
- self.mock_ssh.execute.assert_called_once_with('virsh create vm_0')
+ self.mock_ssh.execute.assert_called_once_with('virsh create vm-0')
def test_virsh_destroy_vm(self):
self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
- model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh)
- self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0')
+ model.Libvirt.virsh_destroy_vm('vm-0', self.mock_ssh)
+ self.mock_ssh.execute.assert_called_once_with('virsh destroy vm-0')
@mock.patch.object(model, 'LOG')
def test_virsh_destroy_vm_error(self, mock_logger):
self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_destroy'))
mock_logger.warning = mock.Mock()
- model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh)
+ model.Libvirt.virsh_destroy_vm('vm-0', self.mock_ssh)
mock_logger.warning.assert_called_once_with(
- 'Error destroying VM %s. Error: %s', 'vm_0', 'error_destroy')
- self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0')
+ 'Error destroying VM %s. Error: %s', 'vm-0', 'error_destroy')
+ self.mock_ssh.execute.assert_called_once_with('virsh destroy vm-0')
def test_add_interface_address(self):
xml = ElementTree.ElementTree(
@@ -171,6 +181,56 @@ class ModelLibvirtTestCase(unittest.TestCase):
self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[1],
interface_address.get('function'))
+ def test_add_cdrom(self):
+ xml_input = copy.deepcopy(XML_SAMPLE)
+ xml_output = model.Libvirt.add_cdrom('/var/lib/libvirt/images/data.img', xml_input)
+
+ root = ElementTree.fromstring(xml_output)
+ et_out = ElementTree.ElementTree(element=root)
+ disk = et_out.find('devices').find('disk')
+ self.assertEqual('file', disk.get('type'))
+ self.assertEqual('cdrom', disk.get('device'))
+ driver = disk.find('driver')
+ self.assertEqual('qemu', driver.get('name'))
+ self.assertEqual('raw', driver.get('type'))
+ source = disk.find('source')
+ self.assertEqual('/var/lib/libvirt/images/data.img', source.get('file'))
+ target = disk.find('target')
+ self.assertEqual('hdb', target.get('dev'))
+ self.assertIsNotNone(disk.find('readonly'))
+
+ def test_gen_cdrom_image(self):
+ self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
+ root = ElementTree.fromstring(self.XML_STR)
+ hostname = root.find('name').text
+ meta_data = "/tmp/meta-data"
+ user_data = "/tmp/user-data"
+ file_path = "/tmp/cdrom-0.img"
+ key_filename = "id_rsa"
+ pub_key_str = "KEY"
+ user = 'root'
+ user_config = [" - name: {user_name}",
+ " ssh_authorized_keys:",
+ " - {pub_key_str}"]
+
+ user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=user)
+ with mock.patch('six.moves.builtins.open', mock.mock_open(read_data=pub_key_str),
+ create=True) as mock_file:
+ with open(key_filename, "r") as h:
+ result = h.read()
+ model.Libvirt.gen_cdrom_image(self.mock_ssh, file_path, hostname, user, key_filename)
+ mock_file.assert_called_with(".".join([key_filename, "pub"]), "r")
+ self.assertEqual(result, pub_key_str)
+
+ self.mock_ssh.execute.assert_has_calls([
+ mock.call("touch %s" % meta_data),
+ mock.call(model.USER_DATA_TEMPLATE.format(user_file=user_data, host=hostname,
+ user_config=user_conf)),
+ mock.call("genisoimage -output {0} -volid cidata"
+ " -joliet -r {1} {2}".format(file_path, meta_data, user_data)),
+ mock.call("rm {0} {1}".format(meta_data, user_data))
+ ])
+
def test_create_snapshot_qemu(self):
self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
index = 1
@@ -211,6 +271,19 @@ class ModelLibvirtTestCase(unittest.TestCase):
self.mock_ssh.put_file.assert_called_once_with(base_image,
'/tmp/base_image')
+ @mock.patch.object(model.Libvirt, 'gen_cdrom_image')
+ def test_check_update_key(self, mock_gen_cdrom_image):
+ node = {'user': 'defuser', 'key_filename': '/home/ubuntu/id_rsa'}
+ cdrom_img = "/var/lib/libvirt/images/data.img"
+ id_name = 'fake_name'
+ key_filename = node.get('key_filename')
+ root = ElementTree.fromstring(self.XML_STR)
+ hostname = root.find('name').text
+ model.StandaloneContextHelper.check_update_key(self.mock_ssh, node, hostname, id_name,
+ cdrom_img)
+ mock_gen_cdrom_image.assert_called_once_with(self.mock_ssh, cdrom_img, hostname,
+ node.get('user'), key_filename)
+
@mock.patch.object(os, 'access', return_value=False)
def test_create_snapshot_qemu_no_image_local(self, mock_os_access):
self.mock_ssh.execute = mock.Mock(side_effect=[(0, 0, 0), (1, 0, 0)])
@@ -253,18 +326,20 @@ class ModelLibvirtTestCase(unittest.TestCase):
mac = model.StandaloneContextHelper.get_mac_address(0x00)
_uuid = uuid.uuid4()
connection = mock.Mock()
+ cdrom_img = '/tmp/cdrom-0.img'
with mock.patch.object(model.StandaloneContextHelper,
'get_mac_address', return_value=mac) as \
mock_get_mac_address, \
mock.patch.object(uuid, 'uuid4', return_value=_uuid):
xml_out, mac = model.Libvirt.build_vm_xml(
- connection, flavor, 'vm_name', 100)
+ connection, flavor, 'vm_name', 100, cdrom_img)
xml_ref = model.VM_TEMPLATE.format(vm_name='vm_name',
random_uuid=_uuid, mac_addr=mac, memory='1024', vcpu='8', cpu='4',
numa_cpus='0-7', socket='3', threads='2',
vm_image='qemu_image', cpuset='4,5', cputune='cool')
- self.assertEqual(xml_ref, xml_out)
+ xml_ref = model.Libvirt.add_cdrom(cdrom_img, xml_ref)
+ self.assertEqual(xml_out, xml_ref)
mock_get_mac_address.assert_called_once_with(0x00)
mock_create_snapshot_qemu.assert_called_once_with(
connection, 100, 'images')
@@ -296,6 +371,7 @@ class ModelLibvirtTestCase(unittest.TestCase):
status = model.Libvirt.pin_vcpu_for_perf(ssh_mock, 4)
self.assertIsNotNone(status)
+
class StandaloneContextHelperTestCase(unittest.TestCase):
NODE_SAMPLE = "nodes_sample.yaml"
@@ -463,7 +539,7 @@ class ServerTestCase(unittest.TestCase):
}
}
status = self.server.generate_vnf_instance(
- {}, self.NETWORKS, '1.1.1.1/24', 'vm_0', vnf, '00:00:00:00:00:01')
+ {}, self.NETWORKS, '1.1.1.1/24', 'vm-0', vnf, '00:00:00:00:00:01')
self.assertIsNotNone(status)
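Note: test_gen_cdrom_image above asserts the exact SSH command sequence the helper must issue. A minimal sketch consistent with those assertions is shown here for orientation only; user_data_template stands in for model.USER_DATA_TEMPLATE, and anything beyond the asserted calls is an assumption, not the real model.Libvirt.gen_cdrom_image body.

# Sketch only: reproduces the command sequence asserted in
# test_gen_cdrom_image; not the actual model.Libvirt implementation.
import os

META_DATA = "/tmp/meta-data"
USER_DATA = "/tmp/user-data"


def gen_cdrom_image_sketch(connection, file_path, hostname, user,
                           key_filename, user_data_template):
    # Read the public key that cloud-init should authorize for `user`.
    with open(".".join([key_filename, "pub"]), "r") as pub_key_file:
        pub_key = pub_key_file.read().rstrip()
    # cloud-config user entry; indentation is elided in this listing.
    user_config = os.linesep.join([
        " - name: {user_name}",
        "   ssh_authorized_keys:",
        "     - {pub_key_str}",
    ]).format(user_name=user, pub_key_str=pub_key)
    # Build the cloud-init seed ISO (volume id "cidata") on the target host
    # and remove the temporary meta-data/user-data files afterwards.
    connection.execute("touch %s" % META_DATA)
    connection.execute(user_data_template.format(
        user_file=USER_DATA, host=hostname, user_config=user_config))
    connection.execute("genisoimage -output {0} -volid cidata"
                       " -joliet -r {1} {2}".format(file_path, META_DATA,
                                                    USER_DATA))
    connection.execute("rm {0} {1}".format(META_DATA, USER_DATA))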
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index 69779d3e0..1a2407575 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -231,8 +231,8 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test_undeploy(self, mock_libvirt):
self.ovs_dpdk.vm_deploy = True
self.ovs_dpdk.connection = mock.Mock()
- self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
- self.ovs_dpdk.drivers = ['vm_0', 'vm_1']
+ self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
+ self.ovs_dpdk.drivers = ['vm-0', 'vm-1']
self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.undeploy()
@@ -370,7 +370,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
ssh.return_value = ssh_mock
self.ovs_dpdk.vm_deploy = True
self.ovs_dpdk.connection = ssh_mock
- self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+ self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
self.ovs_dpdk.drivers = []
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.helper.get_mac_address = mock.Mock(return_value="")
@@ -381,7 +381,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test__enable_interfaces(self, mock_add_ovs_interface):
self.ovs_dpdk.vm_deploy = True
self.ovs_dpdk.connection = mock.Mock()
- self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+ self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
self.ovs_dpdk.drivers = []
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.ovs_properties = {'vpath': 'fake_path'}
@@ -391,15 +391,16 @@ class OvsDpdkContextTestCase(unittest.TestCase):
'fake_path', 0, self.NETWORKS['private_0']['vpci'],
self.NETWORKS['private_0']['mac'], 'test')
+ @mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
@mock.patch.object(model.Libvirt, 'write_file')
@mock.patch.object(model.Libvirt, 'build_vm_xml')
@mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
@mock.patch.object(model.Libvirt, 'virsh_create_vm')
- def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists,
- mock_build_xml, mock_write_file):
+ def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists, mock_build_xml,
+ mock_write_file, mock_check_update_key):
self.ovs_dpdk.vm_deploy = True
self.ovs_dpdk.connection = mock.Mock()
- self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+ self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
self.ovs_dpdk.drivers = []
self.ovs_dpdk.servers = {
'vnf_0': {
@@ -413,23 +414,32 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.host_mgmt = {}
self.ovs_dpdk.flavor = {}
+ self.ovs_dpdk.file_path = '/var/lib/libvirt/images/cdrom-0.img'
self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
- xml_str = mock.Mock()
+ self.ovs_dpdk._name_task_id = 'fake_name'
+ xml_str = 'vm-0'
mock_build_xml.return_value = (xml_str, '00:00:00:00:00:01')
self.ovs_dpdk._enable_interfaces = mock.Mock(return_value=xml_str)
vnf_instance = mock.Mock()
+ vnf_instance_2 = mock.Mock()
+ mock_check_update_key.return_value = vnf_instance_2
self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(
return_value=vnf_instance)
- self.assertEqual([vnf_instance],
+ self.assertEqual([vnf_instance_2],
self.ovs_dpdk.setup_ovs_dpdk_context())
mock_create_vm.assert_called_once_with(
self.ovs_dpdk.connection, '/tmp/vm_ovs_0.xml')
mock_check_if_exists.assert_called_once_with(
- 'vm_0', self.ovs_dpdk.connection)
+ 'vm-0', self.ovs_dpdk.connection)
mock_build_xml.assert_called_once_with(
- self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm_0', 0)
+ self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm-0', 0, self.ovs_dpdk.file_path)
mock_write_file.assert_called_once_with('/tmp/vm_ovs_0.xml', xml_str)
+ mock_check_update_key.assert_called_once_with(self.ovs_dpdk.connection,
+ vnf_instance,
+ xml_str,
+ self.ovs_dpdk._name_task_id,
+ self.ovs_dpdk.file_path)
@mock.patch.object(io, 'BytesIO')
def test__check_hugepages(self, mock_bytesio):
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index 74c31569c..ae8e95f9a 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -113,8 +113,8 @@ class SriovContextTestCase(unittest.TestCase):
self.sriov.vm_deploy = True
self.sriov.connection = mock_ssh
- self.sriov.vm_names = ['vm_0', 'vm_1']
- self.sriov.drivers = ['vm_0', 'vm_1']
+ self.sriov.vm_names = ['vm-0', 'vm-1']
+ self.sriov.drivers = ['vm-0', 'vm-1']
self.assertIsNone(self.sriov.undeploy())
def _get_file_abspath(self, filename):
@@ -254,7 +254,7 @@ class SriovContextTestCase(unittest.TestCase):
ssh.return_value = ssh_mock
self.sriov.vm_deploy = True
self.sriov.connection = ssh_mock
- self.sriov.vm_names = ['vm_0', 'vm_1']
+ self.sriov.vm_names = ['vm-0', 'vm-1']
self.sriov.drivers = []
self.sriov.networks = self.NETWORKS
self.sriov.helper.get_mac_address = mock.Mock(return_value="")
@@ -267,7 +267,7 @@ class SriovContextTestCase(unittest.TestCase):
def test__enable_interfaces(self, mock_add_sriov, mock_ssh):
self.sriov.vm_deploy = True
self.sriov.connection = mock_ssh
- self.sriov.vm_names = ['vm_0', 'vm_1']
+ self.sriov.vm_names = ['vm-0', 'vm-1']
self.sriov.drivers = []
self.sriov.networks = self.NETWORKS
self.assertEqual(
@@ -276,12 +276,13 @@ class SriovContextTestCase(unittest.TestCase):
mock_add_sriov.assert_called_once_with(
'0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test')
+ @mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
@mock.patch.object(model.Libvirt, 'build_vm_xml')
@mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
@mock.patch.object(model.Libvirt, 'write_file')
@mock.patch.object(model.Libvirt, 'virsh_create_vm')
- def test_setup_sriov_context(self, mock_create_vm, mock_write_file,
- mock_check, mock_build_vm_xml):
+ def test_setup_sriov_context(self, mock_create_vm, mock_write_file, mock_check,
+ mock_build_vm_xml, mock_check_update_key):
self.sriov.servers = {
'vnf_0': {
'network_ports': {
@@ -297,24 +298,29 @@ class SriovContextTestCase(unittest.TestCase):
self.sriov.vm_flavor = 'flavor'
self.sriov.networks = 'networks'
self.sriov.configure_nics_for_sriov = mock.Mock()
+ self.sriov._name_task_id = 'fake_name'
cfg = '/tmp/vm_sriov_0.xml'
- vm_name = 'vm_0'
+ vm_name = 'vm-0'
xml_out = mock.Mock()
mock_build_vm_xml.return_value = (xml_out, '00:00:00:00:00:01')
+ mock_check_update_key.return_value = 'node_2'
+ cdrom_img = '/var/lib/libvirt/images/cdrom-0.img'
with mock.patch.object(self.sriov, 'vnf_node') as mock_vnf_node, \
mock.patch.object(self.sriov, '_enable_interfaces') as \
mock_enable_interfaces:
mock_enable_interfaces.return_value = 'out_xml'
mock_vnf_node.generate_vnf_instance = mock.Mock(
- return_value='node')
+ return_value='node_1')
nodes_out = self.sriov.setup_sriov_context()
- self.assertEqual(['node'], nodes_out)
+ mock_check_update_key.assert_called_once_with(connection, 'node_1', vm_name,
+ self.sriov._name_task_id, cdrom_img)
+ self.assertEqual(['node_2'], nodes_out)
mock_vnf_node.generate_vnf_instance.assert_called_once_with(
'flavor', 'networks', '1.2.3.4', 'vnf_0',
self.sriov.servers['vnf_0'], '00:00:00:00:00:01')
mock_build_vm_xml.assert_called_once_with(
- connection, 'flavor', vm_name, 0)
+ connection, 'flavor', vm_name, 0, cdrom_img)
mock_create_vm.assert_called_once_with(connection, cfg)
mock_check.assert_called_once_with(vm_name, connection)
mock_write_file.assert_called_once_with(cfg, 'out_xml')
@@ -332,7 +338,7 @@ class SriovContextTestCase(unittest.TestCase):
ssh.return_value = ssh_mock
self.sriov.vm_deploy = True
self.sriov.connection = ssh_mock
- self.sriov.vm_names = ['vm_0', 'vm_1']
+ self.sriov.vm_names = ['vm-0', 'vm-1']
self.sriov.drivers = []
self.sriov.servers = {
'vnf_0': {
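Note: both standalone-context setup tests (OVS-DPDK above and SR-IOV here) exercise the same extended flow: build_vm_xml now receives the cloud-init CDROM path, and the node produced by generate_vnf_instance is passed through StandaloneContextHelper.check_update_key, whose return value is what ends up in the context's node list. A rough sketch of that flow, with the call arguments taken from the assertions (everything else, including the _enable_interfaces step passed in as a callable, is an assumption):

# Sketch of the per-VM flow that test_setup_sriov_context and
# test_setup_ovs_dpdk_context pin down; the real contexts differ in detail.
from yardstick.benchmark.contexts.standalone import model


def deploy_one_vm_sketch(connection, vm_flavor, networks, vm_name, index,
                         cfg, cdrom_img, name_task_id, vnf_node, server,
                         mgmt_ip, vnf_name, enable_interfaces):
    xml_out, mac = model.Libvirt.build_vm_xml(
        connection, vm_flavor, vm_name, index, cdrom_img)
    # SR-IOV / OVS port wiring happens before the XML is written (both tests
    # mock this step out); `enable_interfaces` is a placeholder callable.
    xml_out = enable_interfaces(index, xml_out)
    model.Libvirt.check_if_vm_exists_and_delete(vm_name, connection)
    model.Libvirt.write_file(cfg, xml_out)
    model.Libvirt.virsh_create_vm(connection, cfg)
    node = vnf_node.generate_vnf_instance(
        vm_flavor, networks, mgmt_ip, vnf_name, server, mac)
    # The node handed back to the caller is whatever check_update_key returns
    # (it may inject a per-task SSH key via the cloud-init CDROM image).
    return model.StandaloneContextHelper.check_update_key(
        connection, node, vm_name, name_task_id, cdrom_img)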
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
new file mode 100644
index 000000000..74f86983b
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+
+class BaseAttackerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.attacker_cfg = {
+ 'fault_type': 'test-attacker',
+ 'action_parameter': {'process_name': 'nova_api'},
+ 'rollback_parameter': {'process_name': 'nova_api'},
+ 'key': 'stop-service',
+ 'attack_key': 'stop-service',
+ 'host': 'node1',
+ }
+ self.base_attacker = baseattacker.BaseAttacker({}, {})
+
+ def test__init__(self):
+ self.assertEqual(self.base_attacker.data, {})
+ self.assertFalse(self.base_attacker.mandatory)
+ self.assertEqual(self.base_attacker.intermediate_variables, {})
+ self.assertFalse(self.base_attacker.mandatory)
+
+ def test_get_attacker_cls(self):
+ with self.assertRaises(RuntimeError):
+ baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
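Note: the new base-attacker test pins only two behaviours: a freshly constructed BaseAttacker starts with empty data and intermediate_variables and mandatory=False, and get_attacker_cls raises RuntimeError for an unregistered fault_type. A minimal sketch consistent with that contract (the real lookup mechanism is not shown in this diff and works differently):

# Sketch only: the shape of a lookup that satisfies test_get_attacker_cls.
ATTACKER_REGISTRY = {}  # hypothetical mapping: fault_type -> attacker class


def get_attacker_cls_sketch(attacker_cfg):
    attacker_type = attacker_cfg['fault_type']
    try:
        return ATTACKER_REGISTRY[attacker_type]
    except KeyError:
        raise RuntimeError("No such attacker_type %s" % attacker_type)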
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index ec0e5973c..d61fa67c7 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -109,6 +109,23 @@ class ServicehaTestCase(unittest.TestCase):
ret = {}
p.run(ret)
attacker = mock.Mock()
+ attacker.mandatory = False
p.attackers = [attacker]
p.teardown()
attacker.recover.assert_not_called()
+
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_teardown_when_mandatory(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+ p.setup()
+ self.assertTrue(p.setup_done)
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+ ret = {}
+ p.run(ret)
+ attacker = mock.Mock()
+ attacker.mandatory = True
+ p.attackers = [attacker]
+ p.teardown()
+ attacker.recover.assert_called_once()
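Note: taken together with the existing teardown test above, the new case fixes the intended teardown contract: an attacker is rolled back only when its mandatory flag is set. A sketch of teardown logic that satisfies both assertions (recover not called for mandatory=False, called exactly once for mandatory=True):

# Sketch of the ServiceHA.teardown behaviour these two tests pin down;
# the real scenario class handles more state than shown here.
def teardown_sketch(attackers):
    for attacker in attackers:
        if attacker.mandatory:
            attacker.recover()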
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 5844746ab..2ba53cb93 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -11,18 +11,18 @@
from __future__ import absolute_import
+import json
import unittest
import mock
from oslo_serialization import jsonutils
+import requests
from yardstick.benchmark.scenarios.storage import storperf
# pylint: disable=unused-argument
# disable this for now because I keep forgetting mock patch arg ordering
-
-
def mocked_requests_config_post(*args, **kwargs):
class MockResponseConfigPost(object):
@@ -32,10 +32,24 @@ def mocked_requests_config_post(*args, **kwargs):
return MockResponseConfigPost(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "false"}',
+ '"stack_created": false}',
200)
+def mocked_requests_config_post_fail(*args, **kwargs):
+ class MockResponseConfigPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigPost(
+ '{"message": "ERROR: Parameter \'public_network\' is invalid: ' +
+ 'Error validating value \'foo\': Unable to find network with ' +
+ 'name or id \'foo\'"}',
+ 400)
+
+
def mocked_requests_config_get(*args, **kwargs):
class MockResponseConfigGet(object):
@@ -45,10 +59,47 @@ def mocked_requests_config_get(*args, **kwargs):
return MockResponseConfigGet(
'{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
- '"stack_created": "true"}',
+ '"stack_created": true}',
200)
+def mocked_requests_config_get_not_created(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{"stack_id": "",'
+ '"stack_created": false}',
+ 200)
+
+
+def mocked_requests_config_get_no_payload(*args, **kwargs):
+ class MockResponseConfigGet(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseConfigGet(
+ '{}',
+ 200)
+
+
+def mocked_requests_initialize_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_get(*args, **kwargs):
class MockResponseJobGet(object):
@@ -73,6 +124,18 @@ def mocked_requests_job_post(*args, **kwargs):
"d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
+def mocked_requests_job_post_fail(*args, **kwargs):
+ class MockResponseJobPost(object):
+
+ def __init__(self, json_data, status_code):
+ self.content = json_data
+ self.status_code = status_code
+
+ return MockResponseJobPost(
+ '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+ 400)
+
+
def mocked_requests_job_delete(*args, **kwargs):
class MockResponseJobDelete(object):
@@ -100,10 +163,7 @@ def mocked_requests_delete_failed(*args, **kwargs):
self.json_data = json_data
self.status_code = status_code
- if args[0] == "http://172.16.0.137:5000/api/v1.0/configurations":
- return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
-
- return MockResponseDeleteFailed('{}', 404)
+ return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
class StorPerfTestCase(unittest.TestCase):
@@ -119,11 +179,14 @@ class StorPerfTestCase(unittest.TestCase):
self.result = {}
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_config_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_config_get)
- def test_successful_setup(self, mock_post, mock_get):
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup(self, mock_get, mock_post):
+ mock_post.side_effect = [mocked_requests_config_post(),
+ mocked_requests_job_post()]
+ mock_get.side_effect = [mocked_requests_config_get(),
+ mocked_requests_job_get()]
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -146,14 +209,47 @@ class StorPerfTestCase(unittest.TestCase):
self.assertTrue(s.setup_done)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
- side_effect=mocked_requests_job_post)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
- side_effect=mocked_requests_job_get)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_job_delete)
- def test_successful_run(self, mock_post, mock_get, mock_delete):
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_unsuccessful(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_not_created
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'get')
+ def test_query_setup_state_no_payload(self, mock_get):
+ mock_get.side_effect = mocked_requests_config_get_no_payload
+ args = {
+ "options": {}
+ }
+ s = storperf.StorPerf(args, self.ctx)
+ result = s._query_setup_state()
+ self.assertFalse(result)
+
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(requests, 'get')
+ def test_setup_config_post_failed(self, mock_get, mock_post):
+ mock_post.side_effect = mocked_requests_config_post_fail
+
+ args = {
+ "options": {
+ "public_network": "foo"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.setup)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v1_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -165,6 +261,74 @@ class StorPerfTestCase(unittest.TestCase):
"query_interval": 0,
"timeout": 60
}
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ "workload": "rs",
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ sample_output = '{"Status": "Completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+
+ expected_result = jsonutils.loads(sample_output)
+
+ s.run(self.result)
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/jobs',
+ json=jsonutils.loads(json.dumps(expected_post)))
+
+ self.assertEqual(self.result, expected_result)
+
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_v2_successful(self, mock_post, mock_get):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
args = {
"options": options
@@ -179,13 +343,126 @@ class StorPerfTestCase(unittest.TestCase):
expected_result = jsonutils.loads(sample_output)
s.run(self.result)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
self.assertEqual(self.result, expected_result)
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete)
- def test_successful_teardown(self, mock_delete):
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_run_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ options = {
+ "agent_count": 8,
+ "public_network": 'ext-net',
+ "volume_size": 10,
+ "block_sizes": 4096,
+ "queue_depths": 4,
+ "workloads": {
+ "read_sequential": {
+ "rw": "rs"
+ }
+ },
+ "StorPerf_ip": "192.168.23.2",
+ "query_interval": 0,
+ "timeout": 60
+ }
+ expected_post = {
+ 'metadata': {
+ 'build_tag': 'latest',
+ 'test_case': 'opnfv_yardstick_tc074'
+ },
+ 'deadline': 60,
+ 'block_sizes': 4096,
+ 'queue_depths': 4,
+ 'workloads': {
+ 'read_sequential': {
+ 'rw': 'rs'
+ }
+ },
+ 'agent_count': 8
+ }
+
+ args = {
+ "options": options
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+ s.setup_done = True
+
+ self.assertRaises(RuntimeError, s.run, self.ctx)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v2.0/jobs',
+ json=expected_post)
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ @mock.patch.object(storperf.StorPerf, 'setup')
+ def test_run_calls_setup(self, mock_setup, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ 'timeout': 60,
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.run(self.result)
+
+ mock_setup.assert_called_once()
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_job_post
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ s.initialize_disks()
+
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch('time.sleep')
+ @mock.patch.object(requests, 'get')
+ @mock.patch.object(requests, 'post')
+ def test_initialize_disks_post_failed(self, mock_post, mock_get, _):
+ mock_post.side_effect = mocked_requests_initialize_post_fail
+ mock_get.side_effect = mocked_requests_job_get
+
+ args = {
+ "options": {
+ "StorPerf_ip": "192.168.23.2"
+ }
+ }
+
+ s = storperf.StorPerf(args, self.ctx)
+
+ self.assertRaises(RuntimeError, s.initialize_disks)
+ mock_post.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/initializations',
+ json={})
+
+ @mock.patch.object(requests, 'delete')
+ def test_teardown(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_job_delete
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -207,11 +484,12 @@ class StorPerfTestCase(unittest.TestCase):
s.teardown()
self.assertFalse(s.setup_done)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
- @mock.patch(
- 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_delete_failed)
- def test_failed_teardown(self, mock_delete):
+ @mock.patch.object(requests, 'delete')
+ def test_teardown_request_delete_failed(self, mock_delete):
+ mock_delete.side_effect = mocked_requests_delete_failed
options = {
"agent_count": 8,
"public_network": 'ext-net',
@@ -230,4 +508,6 @@ class StorPerfTestCase(unittest.TestCase):
s = storperf.StorPerf(args, self.ctx)
- self.assertRaises(AssertionError, s.teardown(), self.result)
+ self.assertRaises(RuntimeError, s.teardown)
+ mock_delete.assert_called_once_with(
+ 'http://192.168.23.2:5000/api/v1.0/configurations')
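Note: the reworked StorPerf tests encode the REST endpoints and failure handling the scenario is expected to use: jobs are posted to /api/v1.0/jobs or /api/v2.0/jobs depending on whether a nested "workloads" mapping is supplied, initialize_disks posts an empty body to /api/v1.0/initializations, teardown issues a DELETE against /api/v1.0/configurations, and a 400-style response surfaces as RuntimeError. A condensed sketch of that request pattern (URLs taken from the assertions, control flow assumed):

# Sketch of the StorPerf REST calls asserted in this module; payload
# construction and error handling in the real scenario are more involved.
import requests
from oslo_serialization import jsonutils


def run_job_sketch(storperf_ip, job_payload, use_v2_api):
    version = "v2.0" if use_v2_api else "v1.0"
    url = "http://%s:5000/api/%s/jobs" % (storperf_ip, version)
    res = requests.post(url, json=job_payload)
    if res.status_code != 200:
        raise RuntimeError("Failed to start a job: %s" % res.content)
    return jsonutils.loads(res.content)


def initialize_disks_sketch(storperf_ip):
    url = "http://%s:5000/api/v1.0/initializations" % storperf_ip
    res = requests.post(url, json={})
    if res.status_code != 200:
        raise RuntimeError("Failed to initialize disks: %s" % res.content)


def teardown_sketch(storperf_ip):
    url = "http://%s:5000/api/v1.0/configurations" % storperf_ip
    res = requests.delete(url)
    if res.status_code != 200:
        raise RuntimeError("Teardown failed: %s" % res.content)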
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index ef4142148..3cf6c4d05 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -1391,3 +1391,19 @@ class GetPortIPTestCase(unittest.TestCase):
def test_return_value(self):
self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
+
+
+class SafeCastTestCase(unittest.TestCase):
+
+ def test_correct_type_int(self):
+ self.assertEqual(35, utils.safe_cast('35', int, 0))
+
+ def test_correct_int_as_string(self):
+ self.assertEqual(25, utils.safe_cast('25', 'int', 0))
+
+ def test_incorrect_type_as_string(self):
+ with self.assertRaises(exceptions.InvalidType):
+ utils.safe_cast('100', 'intt', 0)
+
+ def test_default_value(self):
+ self.assertEqual(0, utils.safe_cast('', 'int', 0))
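Note: the new safe_cast tests pin down the helper's contract: the target type may be given either as a type object or as its name, a failed conversion returns the supplied default, and an unknown type name raises exceptions.InvalidType. A sketch matching those four cases (assumed; the real yardstick.common.utils.safe_cast resolves type names differently and raises InvalidType):

# Sketch only: behaviour required by the safe_cast tests above.
_TYPE_NAMES = {'int': int, 'float': float, 'str': str}


def safe_cast_sketch(value, type_to_convert, default_value):
    if isinstance(type_to_convert, type):
        _type = type_to_convert
    else:
        _type = _TYPE_NAMES.get(type_to_convert)
        if _type is None:
            # the real helper raises yardstick.common.exceptions.InvalidType
            raise ValueError("unknown type name: %s" % type_to_convert)
    try:
        return _type(value)
    except ValueError:
        return default_value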
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 6f76eb77c..0759ecebd 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -575,87 +575,110 @@ class TestIXIARFC2544Profile(unittest.TestCase):
def test_get_drop_percentage_completed(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007,
+ 'Store-Forward_Avg_latency_ns': 23,
+ 'Store-Forward_Min_latency_ns': 13,
+ 'Store-Forward_Max_latency_ns': 28}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
+ self.assertEqual(21.5, samples['latency_ns_avg'])
+ self.assertEqual(14.0, samples['latency_ns_min'])
+ self.assertEqual(26.5, samples['latency_ns_max'])
def test_get_drop_percentage_over_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 0.05)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_under_drop_percentage(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.min_rate)
@mock.patch.object(ixia_rfc2544.LOG, 'info')
def test_get_drop_percentage_not_flow(self, *args):
samples = {'iface_name_1':
- {'RxThroughput': 0, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 0},
+ {'in_packets': 1000, 'out_packets': 0,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25},
'iface_name_2':
- {'RxThroughput': 0, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 0}
+ {'in_packets': 1005, 'out_packets': 0,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
rfc2544_profile.rate = 1000
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0.2, 1)
self.assertFalse(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(0, samples['RxThroughput'])
+ self.assertEqual(0.0, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(100, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_first_run(self):
samples = {'iface_name_1':
- {'RxThroughput': 10, 'TxThroughput': 10,
- 'in_packets': 1000, 'out_packets': 1000},
+ {'in_packets': 1000, 'out_packets': 1000,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25},
'iface_name_2':
- {'RxThroughput': 11, 'TxThroughput': 13,
- 'in_packets': 1005, 'out_packets': 1007}
+ {'in_packets': 1005, 'out_packets': 1007,
+ 'Store-Forward_Avg_latency_ns': 20,
+ 'Store-Forward_Min_latency_ns': 15,
+ 'Store-Forward_Max_latency_ns': 25}
}
rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
completed, samples = rfc2544_profile.get_drop_percentage(
samples, 0, 1, first_run=True)
self.assertTrue(completed)
- self.assertEqual(23.0, samples['TxThroughput'])
- self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(66.9, samples['TxThroughput'])
+ self.assertEqual(66.833, samples['RxThroughput'])
self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(33.45, rfc2544_profile.rate)
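Note: the revised expectations above are derived from raw packet counts rather than the per-port throughput fields: throughput is packets over the test window (a 30 s duration is assumed here, since it is configured outside this module but implied by 2007 packets giving 66.9 pps), drop percentage is the lost fraction of transmitted packets, and the latency fields are plain averages of the per-port store-and-forward values. A quick numeric check:

# Verifies the expected values used in the updated assertions.
out_packets = 1000 + 1007
in_packets = 1000 + 1005
duration = 30.0  # assumed test window, see note above
print(round(out_packets / duration, 3))                  # 66.9   -> TxThroughput
print(round(in_packets / duration, 3))                   # 66.833 -> RxThroughput
print(round((out_packets - in_packets) * 100.0 / out_packets, 6))  # 0.099651
print((20 + 23) / 2.0, (15 + 13) / 2.0, (25 + 28) / 2.0)  # 21.5 14.0 26.5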
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index c062308e8..c09903377 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -71,38 +71,43 @@ class TestProxBinSearchProfile(unittest.TestCase):
self.assertEqual(len(runs), 77)
# Result Samples inc theor_max
- result_tuple = {'Result_Actual_throughput': 5e-07,
- 'Result_theor_max_throughput': 7.5e-07,
- 'Result_pktSize': 200}
-
- profile.queue.put.assert_called_with(result_tuple)
-
- success_result_tuple = {"Success_CurrentDropPackets": 0.5,
- "Success_DropPackets": 0.5,
- "Success_LatencyAvg": 5.3,
- "Success_LatencyMax": 5.2,
- "Success_LatencyMin": 5.1,
- "Success_PktSize": 200,
- "Success_RxThroughput": 7.5e-07,
- "Success_Throughput": 7.5e-07,
- "Success_TxThroughput": 0.00012340000000000002}
+ result_tuple = {'Actual_throughput': 5e-07,
+ 'theor_max_throughput': 7.5e-07,
+ 'PktSize': 200,
+ 'Status': 'Result'}
+
+ test_results = profile.queue.put.call_args[0]
+ for k in result_tuple:
+ self.assertEqual(result_tuple[k], test_results[0][k])
+
+ success_result_tuple = {"CurrentDropPackets": 0.5,
+ "DropPackets": 0.5,
+ "LatencyAvg": 5.3,
+ "LatencyMax": 5.2,
+ "LatencyMin": 5.1,
+ "PktSize": 200,
+ "RxThroughput": 7.5e-07,
+ "Throughput": 7.5e-07,
+ "TxThroughput": 0.00012340000000000002,
+ "Status": 'Success'}
calls = profile.queue.put(success_result_tuple)
profile.queue.put.assert_has_calls(calls)
- success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
- "Success_DropPackets": 0.5,
- "Success_LatencyAvg": 5.3,
- "Success_LatencyMax": 5.2,
- "Success_LatencyMin": 5.1,
- "Success_PktSize": 200,
- "Success_RxThroughput": 7.5e-07,
- "Success_Throughput": 7.5e-07,
- "Success_TxThroughput": 123.4,
- "Success_can_be_lost": 409600,
- "Success_drop_total": 20480,
- "Success_rx_total": 4075520,
- "Success_tx_total": 4096000}
+ success_result_tuple2 = {"CurrentDropPackets": 0.5,
+ "DropPackets": 0.5,
+ "LatencyAvg": 5.3,
+ "LatencyMax": 5.2,
+ "LatencyMin": 5.1,
+ "PktSize": 200,
+ "RxThroughput": 7.5e-07,
+ "Throughput": 7.5e-07,
+ "TxThroughput": 123.4,
+ "can_be_lost": 409600,
+ "drop_total": 20480,
+ "rx_total": 4075520,
+ "tx_total": 4096000,
+ "Status": 'Success'}
calls = profile.queue.put(success_result_tuple2)
profile.queue.put.assert_has_calls(calls)
@@ -183,17 +188,16 @@ class TestProxBinSearchProfile(unittest.TestCase):
# Result Samples
- result_tuple = {'Result_Actual_throughput': 0, "Result_theor_max_throughput": 0,
- "Result_pktSize": 200}
+ result_tuple = {'Actual_throughput': 0, 'theor_max_throughput': 0,
+ "Status": 'Result', "Next_Step": ''}
profile.queue.put.assert_called_with(result_tuple)
# Check for success_ tuple (None expected)
calls = profile.queue.put.mock_calls
for call in calls:
for call_detail in call[1]:
- for k in call_detail:
- if "Success_" in k:
- self.assertRaises(AttributeError)
+ if call_detail["Status"] == 'Success':
+ self.assertRaises(AttributeError)
def test_execute_4(self):
@@ -237,38 +241,43 @@ class TestProxBinSearchProfile(unittest.TestCase):
self.assertEqual(len(runs), 7)
# Result Samples inc theor_max
- result_tuple = {'Result_Actual_throughput': 5e-07,
- 'Result_theor_max_throughput': 7.5e-07,
- 'Result_pktSize': 200}
-
- profile.queue.put.assert_called_with(result_tuple)
-
- success_result_tuple = {"Success_CurrentDropPackets": 0.5,
- "Success_DropPackets": 0.5,
- "Success_LatencyAvg": 5.3,
- "Success_LatencyMax": 5.2,
- "Success_LatencyMin": 5.1,
- "Success_PktSize": 200,
- "Success_RxThroughput": 7.5e-07,
- "Success_Throughput": 7.5e-07,
- "Success_TxThroughput": 0.00012340000000000002}
+ result_tuple = {'Actual_throughput': 5e-07,
+ 'theor_max_throughput': 7.5e-07,
+ 'PktSize': 200,
+ "Status": 'Result'}
+
+ test_results = profile.queue.put.call_args[0]
+ for k in result_tuple:
+ self.assertEqual(result_tuple[k], test_results[0][k])
+
+ success_result_tuple = {"CurrentDropPackets": 0.5,
+ "DropPackets": 0.5,
+ "LatencyAvg": 5.3,
+ "LatencyMax": 5.2,
+ "LatencyMin": 5.1,
+ "PktSize": 200,
+ "RxThroughput": 7.5e-07,
+ "Throughput": 7.5e-07,
+ "TxThroughput": 0.00012340000000000002,
+ "Status": 'Success'}
calls = profile.queue.put(success_result_tuple)
profile.queue.put.assert_has_calls(calls)
- success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
- "Success_DropPackets": 0.5,
- "Success_LatencyAvg": 5.3,
- "Success_LatencyMax": 5.2,
- "Success_LatencyMin": 5.1,
- "Success_PktSize": 200,
- "Success_RxThroughput": 7.5e-07,
- "Success_Throughput": 7.5e-07,
- "Success_TxThroughput": 123.4,
- "Success_can_be_lost": 409600,
- "Success_drop_total": 20480,
- "Success_rx_total": 4075520,
- "Success_tx_total": 4096000}
+ success_result_tuple2 = {"CurrentDropPackets": 0.5,
+ "DropPackets": 0.5,
+ "LatencyAvg": 5.3,
+ "LatencyMax": 5.2,
+ "LatencyMin": 5.1,
+ "PktSize": 200,
+ "RxThroughput": 7.5e-07,
+ "Throughput": 7.5e-07,
+ "TxThroughput": 123.4,
+ "can_be_lost": 409600,
+ "drop_total": 20480,
+ "rx_total": 4075520,
+ "tx_total": 4096000,
+ "Status": 'Success'}
calls = profile.queue.put(success_result_tuple2)
profile.queue.put.assert_has_calls(calls)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index 2e0331e8e..cfeebaa3a 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+import datetime
+import mock
from trex_stl_lib import api as Pkt
from trex_stl_lib import trex_stl_client
from trex_stl_lib import trex_stl_packet_builder_scapy
@@ -102,10 +103,10 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
mock_create_profile:
rfc2544_profile.execute_traffic(traffic_generator=mock_generator)
mock_create_profile.assert_has_calls([
- mock.call('profile1', rfc2544_profile.rate, mock.ANY),
- mock.call('profile1', rfc2544_profile.rate, mock.ANY),
- mock.call('profile2', rfc2544_profile.rate, mock.ANY),
- mock.call('profile2', rfc2544_profile.rate, mock.ANY)])
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY, False),
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY, False),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY, False),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY, False)])
mock_generator.client.add_streams.assert_has_calls([
mock.call(mock.ANY, ports=[10]),
mock.call(mock.ANY, ports=[20]),
@@ -129,13 +130,14 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
mock_create_streams:
mock_create_imix.return_value = 'imix_data'
mock_create_streams.return_value = ['stream1']
- rfc2544_profile._create_profile(profile_data, rate, port_pg_id)
+ rfc2544_profile._create_profile(profile_data, rate, port_pg_id,
+ True)
mock_create_imix.assert_called_once_with('imix_info')
mock_create_vm.assert_called_once_with(
{'outer_l2': {'framesize': 'imix_info'}})
mock_create_streams.assert_called_once_with('imix_data', 100,
- port_pg_id)
+ port_pg_id, True)
mock_stl_profile.assert_called_once_with(['stream1'])
def test__create_imix_data(self):
@@ -208,7 +210,7 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
with mock.patch.object(rfc2544_profile, '_create_single_packet'):
output = rfc2544_profile._create_streams(imix_data, rate,
- port_pg_id)
+ port_pg_id, True)
self.assertEqual(['stream1', 'stream2'], output)
mock_latency.assert_has_calls([
mock.call(pg_id=1), mock.call(pg_id=2)])
@@ -219,34 +221,38 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
def test_get_drop_percentage(self):
rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
samples = [
- {'xe1': {'tx_throughput_fps': 100,
+ {'xe1': {'tx_throughput_fps': 110,
'rx_throughput_fps': 101,
- 'out_packets': 2000,
- 'in_packets': 2010},
- 'xe2': {'tx_throughput_fps': 200,
+ 'out_packets': 2100,
+ 'in_packets': 2010,
+ 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 1)},
+ 'xe2': {'tx_throughput_fps': 210,
'rx_throughput_fps': 201,
- 'out_packets': 4000,
- 'in_packets': 4010}},
- {'xe1': {'tx_throughput_fps': 106,
+ 'out_packets': 4100,
+ 'in_packets': 4010,
+ 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 1)}},
+ {'xe1': {'tx_throughput_fps': 156,
'rx_throughput_fps': 108,
- 'out_packets': 2031,
+ 'out_packets': 2110,
'in_packets': 2040,
- 'latency': 'Latency1'},
- 'xe2': {'tx_throughput_fps': 203,
+ 'latency': 'Latency1',
+ 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 31)},
+ 'xe2': {'tx_throughput_fps': 253,
'rx_throughput_fps': 215,
- 'out_packets': 4025,
- 'in_packets': 4040,
- 'latency': 'Latency2'}}
+ 'out_packets': 4150,
+ 'in_packets': 4010,
+ 'latency': 'Latency2',
+ 'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 31)}}
]
completed, output = rfc2544_profile.get_drop_percentage(
samples, 0, 0, False)
- expected = {'DropPercentage': 0.3963,
+ expected = {'DropPercentage': 50.0,
'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
- 'RxThroughput': 312.5,
- 'TxThroughput': 304.5,
- 'CurrentDropPercentage': 0.3963,
+ 'RxThroughput': 1000000.0,
+ 'TxThroughput': 2000000.0,
+ 'CurrentDropPercentage': 50.0,
'Rate': 100.0,
- 'Throughput': 312.5}
+ 'Throughput': 1000000.0}
self.assertEqual(expected, output)
self.assertFalse(completed)
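Note: the new expected values appear to be computed from the added per-sample timestamps rather than the reported *_throughput_fps fields: 30 µs elapse between the two samples, during which 60 packets are sent (10 on xe1, 50 on xe2) and 30 are received (30 on xe1, 0 on xe2). A quick check of the arithmetic:

# Verifies TxThroughput/RxThroughput/DropPercentage expected above.
import datetime

delta = (datetime.datetime(2000, 1, 1, 1, 1, 1, 31)
         - datetime.datetime(2000, 1, 1, 1, 1, 1, 1)).total_seconds()  # 3e-05 s
tx = (2110 - 2100) + (4150 - 4100)   # 60 packets sent in the interval
rx = (2040 - 2010) + (4010 - 4010)   # 30 packets received in the interval
print(tx / delta)                    # 2000000.0 -> TxThroughput
print(rx / delta)                    # 1000000.0 -> RxThroughput
print((tx - rx) * 100.0 / tx)        # 50.0      -> DropPercentage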
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
index 5ad182f22..a7e61da0f 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
@@ -317,6 +317,7 @@ class TestProxTrafficGen(unittest.TestCase):
prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id')
self.assertIsNone(prox_traffic_gen._tg_process)
self.assertIsNone(prox_traffic_gen._traffic_process)
+ self.assertIsNone(prox_traffic_gen._mq_producer)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
@mock.patch(SSH_HELPER)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index ddb63242e..ec0e6aa6d 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -18,6 +18,7 @@ import mock
import six
import unittest
+from yardstick.common import utils
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -57,6 +58,7 @@ class TestIxiaResourceHelper(unittest.TestCase):
def test_run_traffic(self):
mock_tprofile = mock.Mock()
+ mock_tprofile.config.duration = 10
mock_tprofile.get_drop_percentage.return_value = True, 'fake_samples'
ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
ixia_rhelper.rfc_helper = mock.Mock()
@@ -64,7 +66,8 @@ class TestIxiaResourceHelper(unittest.TestCase):
ixia_rhelper.vnfd_helper.port_pairs.all_ports = []
with mock.patch.object(ixia_rhelper, 'generate_samples'), \
mock.patch.object(ixia_rhelper, '_build_ports'), \
- mock.patch.object(ixia_rhelper, '_initialize_client'):
+ mock.patch.object(ixia_rhelper, '_initialize_client'), \
+ mock.patch.object(utils, 'wait_until_true'):
ixia_rhelper.run_traffic(mock_tprofile)
self.assertEqual('fake_samples', ixia_rhelper._queue.get())