-rw-r--r--  ansible/roles/install_yardstick/tasks/main.yml | 6
-rw-r--r--  dashboard/opnfv_yardstick_tc058.json | 2
-rw-r--r--  docker/Dockerfile.aarch64.patch | 26
-rw-r--r--  samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml | 39
-rw-r--r--  samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml | 171
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml | 8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml | 8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml | 9
-rw-r--r--  samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml | 21
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml | 47
-rwxr-xr-x  tests/ci/load_images.sh | 16
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py | 30
-rw-r--r--  yardstick/benchmark/scenarios/lib/check_value.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 50
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py | 34
-rw-r--r--  yardstick/common/ansible_common.py | 36
-rw-r--r--  yardstick/common/constants.py | 1
-rw-r--r--  yardstick/common/exceptions.py | 18
-rw-r--r--  yardstick/common/kubernetes_utils.py | 60
-rw-r--r--  yardstick/common/utils.py | 53
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py | 141
-rw-r--r--  yardstick/network_services/pipeline.py | 11
-rw-r--r--  yardstick/network_services/traffic_profile/__init__.py | 1
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 54
-rw-r--r--  yardstick/network_services/traffic_profile/pktgen.py | 61
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 16
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py | 17
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 3
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_pktgen.py | 103
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py | 3
-rw-r--r--  yardstick/orchestrator/heat.py | 10
-rw-r--r--  yardstick/orchestrator/kubernetes.py | 4
-rw-r--r--  yardstick/service/environment.py | 10
-rw-r--r--  yardstick/ssh.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_testcase.py | 14
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_arithmetic.py | 220
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_duration.py | 276
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py | 64
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py | 764
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 197
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 56
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py | 39
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 34
-rw-r--r--  yardstick/tests/unit/common/test_ansible_common.py | 205
-rw-r--r--  yardstick/tests/unit/common/test_kubernetes_utils.py | 163
-rw-r--r--  yardstick/tests/unit/common/test_template_format.py | 13
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 88
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py | 400
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_base.py | 9
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 32
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py | 63
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 7
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py | 4
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 5
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py | 79
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py | 5
-rw-r--r--  yardstick/tests/unit/orchestrator/test_kubernetes.py | 5
-rw-r--r--  yardstick/tests/unit/service/test_environment.py | 19
-rw-r--r--  yardstick/tests/unit/test_ssh.py | 23
64 files changed, 2617 insertions, 1294 deletions
diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml
index 0975efaea..203acc3e5 100644
--- a/ansible/roles/install_yardstick/tasks/main.yml
+++ b/ansible/roles/install_yardstick/tasks/main.yml
@@ -41,7 +41,7 @@
pip:
requirements: "{{ yardstick_dir }}/requirements.txt"
virtualenv: "{{ yardstick_dir }}/virtualenv"
- async: 600
+ async: 900
poll: 0
register: pip_installer
when: virtual_environment == True
@@ -49,7 +49,7 @@
- name: Install Yardstick requirements
pip:
requirements: "{{ yardstick_dir }}/requirements.txt"
- async: 600
+ async: 900
poll: 0
register: pip_installer
when: virtual_environment == False
@@ -59,7 +59,7 @@
jid: "{{ pip_installer.ansible_job_id }}"
register: job_result
until: job_result.finished
- retries: 150
+ retries: 180
- name: Install Yardstick code (venv)
pip:
diff --git a/dashboard/opnfv_yardstick_tc058.json b/dashboard/opnfv_yardstick_tc058.json
index 55b5a5f33..ed2a1750c 100644
--- a/dashboard/opnfv_yardstick_tc058.json
+++ b/dashboard/opnfv_yardstick_tc058.json
@@ -6,7 +6,7 @@
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
- "id": 33,
+ "id": null,
"links": [],
"refresh": "1m",
"rows": [
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index 472310f96..2e2808ed1 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -1,14 +1,14 @@
-From: ting wu <ting.wu@enea.com>
-Date: Tue, 8 May 2018 14:02:52 +0200
-Subject: [PATCH][PATCH] Patch for Yardstick AARCH64 Docker file
+From: Cristina Pauna <cristina.pauna@enea.com>
+Date: Mon, 23 Jul 2018 15:16:59 +0300
+Subject: [PATCH] Patch for Yardstick AARCH64 Docker file
-Signed-off-by: ting wu <ting.wu@enea.com>
+Signed-off-by: Cristina Pauna <cristina.pauna@enea.com>
---
- docker/Dockerfile | 11 ++++++-----
- 1 file changed, 6 insertions(+), 5 deletions(-)
+ docker/Dockerfile | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 71ce6b58..952d0f78 100644
+index 71ce6b58..fce7c116 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,9 +7,9 @@
@@ -33,7 +33,17 @@ index 71ce6b58..952d0f78 100644
RUN easy_install -U setuptools==30.0.0
RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.12.0 python-heatclient==1.11.0 ansible==2.5.5
-@@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+@@ -40,7 +41,8 @@ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${Y
+ RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
+ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
+
+-RUN ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
++RUN sed -i -e '/- configure_gui/d' ${YARDSTICK_REPO_DIR}/ansible/install.yaml && \
++ ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+
+ RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
+
+@@ -48,8 +50,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
# nginx=5000, rabbitmq=5672
EXPOSE 5000 5672
diff --git a/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml b/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml
new file mode 100644
index 000000000..81323e71c
--- /dev/null
+++ b/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: cmts-tg-topology
+ name: cmts-tg-topology
+ short-name: cmts-tg-topology
+ description: cmts-tg-topology
+ constituent-vnfd:
+ - member-vnf-index: '1'
+ vnfd-id-ref: tg__0
+ VNF model: ../../vnf_descriptors/tg_pktgen.yaml
+ - member-vnf-index: '2'
+ vnfd-id-ref: vnf__0
+ VNF model: ../../vnf_descriptors/tg_pktgen.yaml
+
+ vld: []
+# - id: uplink
+# name: tg__0 to vnf__0 link 1
+# type: ELAN
+# vnfd-connection-point-ref:
+# - member-vnf-index-ref: '1'
+# vnfd-connection-point-ref: sriov01
+# vnfd-id-ref: tg__0
+# - member-vnf-index-ref: '2'
+# vnfd-connection-point-ref: sriov01
+# vnfd-id-ref: vnf__0
diff --git a/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml b/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml
new file mode 100644
index 000000000..cab8bb885
--- /dev/null
+++ b/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml
@@ -0,0 +1,171 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/pktgen_throughput.yaml
+ topology: cmts-tg-topology.yaml
+ nodes:
+ tg__0: trafficgen-k8syardstick
+ vnf__0: vnf-k8syardstick
+ options: {}
+ runner:
+ type: IterationIPC
+ iterations: 10
+ interval: 15
+ timeout: 10000
+context:
+ name: k8syardstick
+ type: Kubernetes
+
+ servers:
+ vnf:
+ containers:
+ - image: si-docker.ir.intel.com/vcmts-ubuntu/vcmts-pktgen-uepi
+ args: ["/opt/bin/cmk isolate --conf-dir=/etc/cmk --socket-id=0 --pool=dataplane /vcmts/setup.sh anga_mac_1_ds.pcap ds"]
+ env:
+ - name: LUA_PATH
+ value: "/vcmts/Pktgen.lua"
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ resources:
+ requests:
+ pod.alpha.kubernetes.io/opaque-int-resource-cmk: "1"
+ ports:
+ - containerPort: 22022
+ volumeMounts:
+ - name: hugepages
+ mountPath: /dev/hugepages
+ - name: sysfs
+ mountPath: /sys
+ - name: sriov
+ mountPath: /sriov-cni
+ - name: host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+ - name: cmk-conf-dir
+ mountPath: /etc/cmk
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: true
+
+ node_ports:
+ - name: lua # Lower case alphanumeric characters or '-'
+ port: 22022
+ networks:
+ - flannel
+ - sriov01
+ volumes:
+ - name: hugepages
+ hostPath:
+ path: /dev/hugepages
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: sriov
+ hostPath:
+ path: /var/lib/cni/sriov
+ - name: cmk-install-dir
+ hostPath:
+ path: /opt/bin
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: cmk-conf-dir
+ hostPath:
+ path: /etc/cmk
+
+ trafficgen:
+ containers:
+ - image: si-docker.ir.intel.com/vcmts-ubuntu/vcmts-pktgen-uepi
+ args: ["/opt/bin/cmk isolate --conf-dir=/etc/cmk --socket-id=0 --pool=dataplane /vcmts/setup.sh anga_mac_1_ds.pcap ds"]
+ env:
+ - name: LUA_PATH
+ value: "/vcmts/Pktgen.lua"
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ resources:
+ requests:
+ pod.alpha.kubernetes.io/opaque-int-resource-cmk: "1"
+ ports:
+ - containerPort: 22022
+ volumeMounts:
+ - name: hugepages
+ mountPath: /dev/hugepages
+ - name: sysfs
+ mountPath: /sys
+ - name: sriov
+ mountPath: /sriov-cni
+ - name: host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+ - name: cmk-conf-dir
+ mountPath: /etc/cmk
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: true
+
+ node_ports:
+ - name: lua # Lower case alphanumeric characters or '-'
+ port: 22022
+ networks:
+ - flannel
+ - sriov01
+ volumes:
+ - name: hugepages
+ hostPath:
+ path: /dev/hugepages
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: sriov
+ hostPath:
+ path: /var/lib/cni/sriov
+ - name: cmk-install-dir
+ hostPath:
+ path: /opt/bin
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: cmk-conf-dir
+ hostPath:
+ path: /etc/cmk
+
+ networks:
+ flannel:
+ args: '[{ "delegate": { "isDefaultGateway": true }}]'
+ plugin: flannel
+ sriov01:
+ plugin: sriov
+ args: '[{"if0": "ens802f0",
+ "if0name": "net0",
+ "dpdk": {
+ "kernel_driver": "i40evf",
+ "dpdk_driver": "igb_uio",
+ "dpdk_tool": "/opt/nsb_bin/dpdk-devbind.py"}
+ }]'
+ sriov02:
+ plugin: sriov
+ args: '[{"if0": "ens802f0",
+ "if0name": "net0",
+ "dpdk": {
+ "kernel_driver": "i40evf",
+ "dpdk_driver": "igb_uio",
+ "dpdk_tool": "/opt/nsb_bin/dpdk-devbind.py"}
+ }]'
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index b34672907..507491446 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -52,12 +52,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -83,12 +85,14 @@ downlink_0:
dstip4: "{{get(flow, 'flow.public_ip_0', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -111,12 +115,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -142,9 +148,11 @@ downlink_1:
dstip4: "{{get(flow, 'flow.public_ip_1', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 513aefb40..3cbd7cd62 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -50,12 +50,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -76,12 +78,14 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_0', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -102,12 +106,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -128,9 +134,11 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_1', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index aad751549..edff3612e 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -72,6 +72,7 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -79,6 +80,7 @@ uplink_0:
srcport: "{{get(flow, 'flow.src_port_0', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
id: 2
ipv4:
@@ -97,6 +99,7 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -104,6 +107,7 @@ downlink_0:
srcport: "{{get(flow, 'flow.dst_port_0', '0') }}"
dstport: "{{get(flow, 'flow.src_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
id: 3
ipv4:
@@ -131,6 +135,8 @@ uplink_1:
proto: "tcp"
srcip4: "{{get(flow, 'flow.srcip_1', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dstip_1', '192.16.0.0-192.16.0.31') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -138,6 +144,7 @@ uplink_1:
srcport: "{{get(flow, 'flow.src_port_1', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
id: 4
ipv4:
@@ -156,6 +163,7 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_1', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -163,3 +171,4 @@ downlink_1:
srcport: "{{get(flow, 'flow.dst_port_1', '0') }}"
dstport: "{{get(flow, 'flow.src_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml b/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml
new file mode 100644
index 000000000..e222e1d8c
--- /dev/null
+++ b/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml
@@ -0,0 +1,21 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+schema: "nsb:traffic_profile:0.1"
+
+name: pktgen
+description: Traffic profile to run throughput tests
+traffic_profile:
+ traffic_type: PktgenTrafficProfile
+ duration: 15
diff --git a/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml b/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml
new file mode 100644
index 000000000..17e631652
--- /dev/null
+++ b/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml
@@ -0,0 +1,47 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: PktgenTrafficGen # NSB class mapping
+ name: pktgen_tg
+ short-name: pktgen_tg
+ description: Pktgen DPDK traffic generator
+ mgmt-interface:
+ vdu-id: pktgen
+ {% if ip is defined %}
+ ip: '{{ ip }}'
+ {% endif %}
+ {% if service_ports is defined and service_ports %}
+ service_ports:
+ {% for port in service_ports %}
+ - port: "{{ port['port']|int }}"
+ node_port: "{{ port['node_port']|int }}"
+ target_port: "{{ port['target_port']|int }}"
+ {% endfor %}
+ {% endif %}
+
+ vdu:
+ - id: pktgen_tg
+ name: pktgen_tg
+ description: Pktgen DPDK traffic generator
+
+ benchmark:
+ kpi:
+ - rx_throughput_fps
+ - tx_throughput_fps
+ - tx_throughput_mbps
+ - rx_throughput_mbps
+ - in_packets
+ - out_packets
diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 1e1591ce3..7a86abb4e 100755
--- a/tests/ci/load_images.sh
+++ b/tests/ci/load_images.sh
@@ -29,11 +29,6 @@ if [ "${INSTALLER_TYPE}" == 'fuel' ]; then
fi
export YARD_IMG_ARCH
-HW_FW_TYPE=""
-if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
- HW_FW_TYPE=uefi
-fi
-export HW_FW_TYPE
UCA_HOST="cloud-images.ubuntu.com"
if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
@@ -104,18 +99,12 @@ load_yardstick_image()
echo
echo "========== Loading yardstick cloud image =========="
EXTRA_PARAMS=""
- if [[ "${YARD_IMG_ARCH}" == "arm64" ]]; then
- EXTRA_PARAMS="--property hw_video_model=vga"
- fi
# VPP requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-fdio-"* ]]; then
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- if [[ -n "${HW_FW_TYPE}" ]]; then
- EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
- fi
if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
output=$(eval openstack ${SECURE} image create \
@@ -165,7 +154,7 @@ load_cirros_image()
if [[ "${YARD_IMG_ARCH}" == "arm64" ]]; then
CIRROS_IMAGE_VERSION="cirros-d161201"
CIRROS_IMAGE_PATH="/home/opnfv/images/cirros-d161201-aarch64-disk.img"
- EXTRA_PARAMS="--property hw_video_model=vga --property short_id=ubuntu16.04"
+ EXTRA_PARAMS="--property short_id=ubuntu16.04"
else
CIRROS_IMAGE_VERSION="cirros-0.3.5"
CIRROS_IMAGE_PATH="/home/opnfv/images/cirros-0.3.5-x86_64-disk.img"
@@ -184,9 +173,6 @@ load_cirros_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- if [[ -n "${HW_FW_TYPE}" ]]; then
- EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
- fi
output=$(openstack ${SECURE} image create \
--disk-format qcow2 \
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 7534c4ea5..e1553c72b 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -110,7 +110,7 @@ class KubernetesContext(ctx_base.Context):
self._delete_rc(rc)
def _delete_rc(self, rc):
- k8s_utils.delete_replication_controller(rc)
+ k8s_utils.delete_replication_controller(rc, skip_codes=[404])
def _delete_pods(self):
for pod in self.template.pods:
@@ -159,7 +159,7 @@ class KubernetesContext(ctx_base.Context):
k8s_utils.create_config_map(self.ssh_key, {'authorized_keys': key})
def _delete_ssh_key(self):
- k8s_utils.delete_config_map(self.ssh_key)
+ k8s_utils.delete_config_map(self.ssh_key, skip_codes=[404])
utils.remove_file(self.key_path)
utils.remove_file(self.public_key_path)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 979e3ab14..53abd586b 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -23,7 +23,7 @@ def _execute_shell_command(command, stdin=None):
output = []
try:
output = subprocess.check_output(command, stdin=stdin, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -49,8 +49,7 @@ class BaremetalAttacker(BaseAttacker):
LOG.debug("jump_host ip:%s user:%s", jump_host['ip'], jump_host['user'])
self.jump_connection = ssh.SSH.from_node(
jump_host,
- # why do we allow pwd for password?
- defaults={"user": "root", "password": jump_host.get("pwd")}
+ defaults={"user": "root", "password": jump_host.get("password")}
)
self.jump_connection.wait(timeout=600)
LOG.debug("ssh jump host success!")
@@ -59,7 +58,7 @@ class BaremetalAttacker(BaseAttacker):
self.ipmi_ip = host.get("ipmi_ip", None)
self.ipmi_user = host.get("ipmi_user", "root")
- self.ipmi_pwd = host.get("ipmi_pwd", None)
+ self.ipmi_pwd = host.get("ipmi_password", None)
self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
self.check_script = self.get_script_fullpath(
@@ -107,26 +106,3 @@ class BaremetalAttacker(BaseAttacker):
else:
_execute_shell_command(cmd, stdin=stdin_file)
LOG.info("Recover fault END")
-
-
-def _test(): # pragma: no cover
- host = {
- "ipmi_ip": "10.20.0.5",
- "ipmi_user": "root",
- "ipmi_pwd": "123456",
- "ip": "10.20.0.5",
- "user": "root",
- "key_filename": "/root/.ssh/id_rsa"
- }
- context = {"node1": host}
- attacker_cfg = {
- 'fault_type': 'bear-metal-down',
- 'host': 'node1',
- }
- ins = BaremetalAttacker(attacker_cfg, context)
- ins.setup()
- ins.inject_fault()
-
-
-if __name__ == '__main__': # pragma: no cover
- _test()
diff --git a/yardstick/benchmark/scenarios/lib/check_value.py b/yardstick/benchmark/scenarios/lib/check_value.py
index 759076068..4c9b27df4 100644
--- a/yardstick/benchmark/scenarios/lib/check_value.py
+++ b/yardstick/benchmark/scenarios/lib/check_value.py
@@ -13,6 +13,7 @@ from __future__ import absolute_import
import logging
from yardstick.benchmark.scenarios import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -34,24 +35,18 @@ class CheckValue(base.Scenario):
self.context_cfg = context_cfg
self.options = self.scenario_cfg['options']
- def run(self, result):
+ def run(self, _):
"""execute the test"""
op = self.options.get("operator")
LOG.debug("options=%s", self.options)
value1 = str(self.options.get("value1"))
value2 = str(self.options.get("value2"))
+ if (op == "eq" and value1 != value2) or (op == "ne" and
+ value1 == value2):
+ raise y_exc.ValueCheckError(
+ value1=value1, operator=op, value2=value2)
check_result = "PASS"
- if op == "eq" and value1 != value2:
- LOG.info("value1=%s, value2=%s, error: should equal!!!", value1,
- value2)
- check_result = "FAIL"
- assert value1 == value2, "Error %s!=%s" % (value1, value2)
- elif op == "ne" and value1 == value2:
- LOG.info("value1=%s, value2=%s, error: should not equal!!!",
- value1, value2)
- check_result = "FAIL"
- assert value1 != value2, "Error %s==%s" % (value1, value2)
LOG.info("Check result is %s", check_result)
keys = self.scenario_cfg.get('output', '').split()
values = [check_result]
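The rewritten run() above no longer asserts on a failed comparison; an unmet "eq"/"ne" constraint now raises the ValueCheckError added to yardstick.common.exceptions in this change. A minimal usage sketch of the new behaviour, driving the scenario directly and assuming the base YardstickException expands its message template from the keyword arguments, as the other exceptions in this tree do:

    from yardstick.benchmark.scenarios.lib import check_value
    from yardstick.common import exceptions as y_exc

    scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 2},
                    'output': 'check_result'}
    scenario = check_value.CheckValue(scenario_cfg, context_cfg={})
    try:
        scenario.run({})
    except y_exc.ValueCheckError as exc:
        # expected, assuming the usual template expansion:
        # Constraint "1 eq 2" does not hold
        print(exc)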
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 7a11d3e76..10f10d4e6 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -64,37 +64,36 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self._mq_ids = []
def _get_ip_flow_range(self, ip_start_range):
+ """Retrieve a CIDR's first and last viable IPs
- # IP range is specified as 'x.x.x.x-y.y.y.y'
+ :param ip_start_range: could be the IP range itself or a dictionary
+ with the host name and the port.
+ :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
+ """
if isinstance(ip_start_range, six.string_types):
return ip_start_range
- node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
+ node_name, range_or_interface = next(iter(ip_start_range.items()),
+ (None, '0.0.0.0'))
if node_name is None:
- # we are manually specifying the range
- ip_addr_range = range_or_interface
+ return range_or_interface
+
+ node = self.context_cfg['nodes'].get(node_name, {})
+ interface = node.get('interfaces', {}).get(range_or_interface)
+ if interface:
+ ip = interface['local_ip']
+ mask = interface['netmask']
else:
- node = self.context_cfg["nodes"].get(node_name, {})
- try:
- # the ip_range is the interface name
- interface = node.get("interfaces", {})[range_or_interface]
- except KeyError:
- ip = "0.0.0.0"
- mask = "255.255.255.0"
- else:
- ip = interface["local_ip"]
- # we can't default these values, they must both exist to be valid
- mask = interface["netmask"]
-
- ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
- hosts = list(ipaddr.hosts())
- if len(hosts) > 2:
- # skip the first host in case of gateway
- ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
- else:
- LOG.warning("Only single IP in range %s", ipaddr)
- # fall back to single IP range
- ip_addr_range = ip
+ ip = '0.0.0.0'
+ mask = '255.255.255.0'
+
+ ipaddr = ipaddress.ip_network(
+ six.text_type('{}/{}'.format(ip, mask)), strict=False)
+ if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
+ ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
+ else:
+ LOG.warning('Only single IP in range %s', ipaddr)
+ ip_addr_range = ip
return ip_addr_range
def _get_traffic_flow(self):
@@ -119,6 +118,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
flow["dst_port_{}".format(index)] = dst_port
flow["count"] = fflow["count"]
+ flow["seed"] = fflow["seed"]
except KeyError:
flow = {}
return {"flow": flow}
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 2b3474070..8344b1595 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -193,22 +193,19 @@ class Vsperf(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
- reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ reader = csv.DictReader(stdout.split('\r\n'), strict=True)
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
# sla check; go through all defined SLAs and check if values measured
# by VSPERF are higher then those defined by SLAs
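The result parsing above now reads result.csv with a strict DictReader and tolerates missing data: next(reader) raises StopIteration when there is no data row, and the scenario swallows that instead of failing. A small illustration of the same pattern outside the scenario (hypothetical helper name):

    import csv

    def csv_first_row(stdout):
        # mirrors the result.csv handling above
        reader = csv.DictReader(stdout.split('\r\n'), strict=True)
        try:
            return dict(next(reader))
        except StopIteration:  # header-only or empty output
            return {}

    print(csv_first_row('a,b\r\n1,2'))  # {'a': '1', 'b': '2'}
    print(csv_first_row(''))            # {}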
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 27bf40dcb..d5c8a3bfe 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -205,22 +205,17 @@ class VsperfDPDK(base.Scenario):
self.client.send_command(cmd)
else:
cmd = "cat ~/.testpmd.macaddr.port1"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port1_mac = stdout
+
cmd = "cat ~/.testpmd.macaddr.port2"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port2_mac = stdout
cmd = "screen -d -m sudo -E bash ~/testpmd_vsperf.sh %s %s" % \
(self.moongen_port1_mac, self.moongen_port2_mac)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
time.sleep(1)
@@ -245,7 +240,7 @@ class VsperfDPDK(base.Scenario):
self.setup()
# remove results from previous tests
- self.client.execute("rm -rf /tmp/results*")
+ self.client.run("rm -rf /tmp/results*", raise_on_error=False)
# get vsperf options
options = self.scenario_cfg['options']
@@ -291,9 +286,7 @@ class VsperfDPDK(base.Scenario):
cmd = "sshpass -p yardstick ssh-copy-id -o StrictHostKeyChecking=no " \
"root@%s -p 22" % (self.moongen_host_ip)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# execute vsperf
cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
@@ -302,22 +295,19 @@ class VsperfDPDK(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
result['nrFlows'] = multistream
# sla check; go through all defined SLAs and check if values measured
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index ca5a110e2..dee7044a5 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
import cgitb
import collections
import contextlib as cl
@@ -23,11 +21,11 @@ import os
from collections import Mapping, MutableMapping, Iterable, Callable, deque
from functools import partial
from itertools import chain
-from subprocess import CalledProcessError, Popen, PIPE
-from tempfile import NamedTemporaryFile
+import subprocess
+import tempfile
import six
-import six.moves.configparser as ConfigParser
+from six.moves import configparser
import yaml
from six import StringIO
from chainmap import ChainMap
@@ -134,10 +132,9 @@ class CustomTemporaryFile(object):
else:
self.data_types = self.DEFAULT_DATA_TYPES
# must open "w+" so unicode is encoded correctly
- self.creator = partial(NamedTemporaryFile, mode="w+", delete=False,
- dir=directory,
- prefix=prefix,
- suffix=self.suffix)
+ self.creator = partial(
+ tempfile.NamedTemporaryFile, mode="w+", delete=False,
+ dir=directory, prefix=prefix, suffix=self.suffix)
def make_context(self, data, write_func, descriptor='data'):
return TempfileContext(data, write_func, descriptor, self.data_types,
@@ -191,8 +188,8 @@ class FileNameGenerator(object):
if not prefix.endswith('_'):
prefix += '_'
- temp_file = NamedTemporaryFile(delete=False, dir=directory,
- prefix=prefix, suffix=suffix)
+ temp_file = tempfile.NamedTemporaryFile(delete=False, dir=directory,
+ prefix=prefix, suffix=suffix)
with cl.closing(temp_file):
return temp_file.name
@@ -474,7 +471,7 @@ class AnsibleCommon(object):
prefix = '_'.join([self.prefix, prefix, 'inventory'])
ini_temp_file = IniMapTemporaryFile(directory=directory, prefix=prefix)
- inventory_config = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory_config = configparser.ConfigParser(allow_no_value=True)
# disable default lowercasing
inventory_config.optionxform = str
return ini_temp_file.make_context(self.inventory_dict, write_func,
@@ -510,7 +507,7 @@ class AnsibleCommon(object):
return timeout
def _generate_ansible_cfg(self, directory):
- parser = ConfigParser.ConfigParser()
+ parser = configparser.ConfigParser()
parser.add_section('defaults')
parser.set('defaults', 'host_key_checking', 'False')
@@ -541,12 +538,12 @@ class AnsibleCommon(object):
cmd = ['ansible', 'all', '-m', 'setup', '-i',
inventory_path, '--tree', sut_dir]
- proc = Popen(cmd, stdout=PIPE, cwd=directory)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=directory)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
def _gen_sut_info_dict(self, sut_dir):
sut_info = {}
@@ -617,12 +614,13 @@ class AnsibleCommon(object):
# 'timeout': timeout / 2,
})
with Timer() as timer:
- proc = Popen(cmd, stdout=PIPE, **exec_args)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ **exec_args)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
timeout -= timer.total_seconds()
cmd.remove("--syntax-check")
@@ -632,10 +630,10 @@ class AnsibleCommon(object):
# TODO: add timeout support of use subprocess32 backport
# 'timeout': timeout,
})
- proc = Popen(cmd, stdout=PIPE, **exec_args)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, **exec_args)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
return output
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 4ed40f8af..3d775d48e 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -175,3 +175,4 @@ SCOPE_CLUSTER = 'Cluster'
# VNF definition
SSH_PORT = 22
+LUA_PORT = 22022
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 2e6cbdd7b..b39a0af9c 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -88,6 +88,10 @@ class YardstickBannedModuleImported(YardstickException):
message = 'Module "%(module)s" cannnot be imported. Reason: "%(reason)s"'
+class IXIAUnsupportedProtocol(YardstickException):
+ message = 'Protocol "%(protocol)s" is not supported in IXIA'
+
+
class PayloadMissingAttributes(YardstickException):
message = ('Error instantiating a Payload class, missing attributes: '
'%(missing_attributes)s')
@@ -212,6 +216,10 @@ class WaitTimeout(YardstickException):
message = 'Wait timeout while waiting for condition'
+class PktgenActionError(YardstickException):
+ message = 'Error in "%(action)s" action'
+
+
class KubernetesApiException(YardstickException):
message = ('Kubernetes API errors. Action: %(action)s, '
'resource: %(resource)s')
@@ -398,5 +406,13 @@ class AclMissingActionArguments(YardstickException):
'[action=%(action_name)s parameter=%(action_param)s]')
-class AclUknownActionTemplate(YardstickException):
+class AclUnknownActionTemplate(YardstickException):
message = 'No ACL CLI template found for "%(action_name)s" action'
+
+
+class InvalidMacAddress(YardstickException):
+ message = 'Mac address "%(mac_address)s" is invalid'
+
+
+class ValueCheckError(YardstickException):
+ message = 'Constraint "%(value1)s %(operator)s %(value2)s" does not hold'
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index f5b0443ea..323f13abb 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -121,8 +121,10 @@ def create_replication_controller(template,
def delete_replication_controller(name,
namespace='default',
wait=False,
- **kwargs): # pragma: no cover
+ skip_codes=None,
+ **kwargs):
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -131,9 +133,12 @@ def delete_replication_controller(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete replication controller failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='ReplicationController')
def delete_pod(name,
@@ -196,8 +201,10 @@ def create_config_map(name,
def delete_config_map(name,
namespace='default',
wait=False,
- **kwargs): # pragma: no cover
+ skip_codes=None,
+ **kwargs):
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -206,9 +213,12 @@ def delete_config_map(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete config map failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='ConfigMap')
def create_custom_resource_definition(body):
@@ -253,10 +263,30 @@ def get_custom_resource_definition(kind):
action='delete', resource='CustomResourceDefinition')
-def create_network(scope, group, version, plural, body, namespace='default'):
+def get_network(scope, group, version, plural, name, namespace='default'):
api = get_custom_objects_api()
try:
if scope == consts.SCOPE_CLUSTER:
+ network = api.get_cluster_custom_object(group, version, plural, name)
+ else:
+ network = api.get_namespaced_custom_object(
+ group, version, namespace, plural, name)
+ except ApiException as e:
+ if e.status in [404]:
+ return
+ else:
+ raise exceptions.KubernetesApiException(
+ action='get', resource='Custom Object: Network')
+ return network
+
+
+def create_network(scope, group, version, plural, body, name, namespace='default'):
+ api = get_custom_objects_api()
+ if get_network(scope, group, version, plural, name, namespace):
+ logging.info('Network %s already exists', name)
+ return
+ try:
+ if scope == consts.SCOPE_CLUSTER:
api.create_cluster_custom_object(group, version, plural, body)
else:
api.create_namespaced_custom_object(
@@ -266,7 +296,8 @@ def create_network(scope, group, version, plural, body, namespace='default'):
action='create', resource='Custom Object: Network')
-def delete_network(scope, group, version, plural, name, namespace='default'):
+def delete_network(scope, group, version, plural, name, namespace='default', skip_codes=None):
+ skip_codes = [] if not skip_codes else skip_codes
api = get_custom_objects_api()
try:
if scope == consts.SCOPE_CLUSTER:
@@ -274,9 +305,12 @@ def delete_network(scope, group, version, plural, name, namespace='default'):
else:
api.delete_namespaced_custom_object(
group, version, namespace, plural, name, {})
- except ApiException:
- raise exceptions.KubernetesApiException(
- action='delete', resource='Custom Object: Network')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Custom Object: Network')
def get_pod_list(namespace='default'): # pragma: no cover
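The delete helpers above now accept a skip_codes list so that expected API errors, typically 404 when the object is already gone during context teardown, are logged rather than turned into KubernetesApiException. The pattern in isolation, with generic names rather than the real Kubernetes client calls:

    import logging

    LOG = logging.getLogger(__name__)

    class ApiError(Exception):
        # stand-in for kubernetes.client.rest.ApiException
        def __init__(self, status, reason):
            super(ApiError, self).__init__(reason)
            self.status = status
            self.reason = reason

    def delete_resource(delete_call, skip_codes=None):
        skip_codes = [] if not skip_codes else skip_codes
        try:
            delete_call()
        except ApiError as e:
            if e.status in skip_codes:
                LOG.info(e.reason)  # e.g. "Not Found" on idempotent teardown
            else:
                raise

    def _already_gone():
        raise ApiError(404, 'Not Found')

    delete_resource(_already_gone, skip_codes=[404])  # logged, not re-raised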
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 85cecc714..c019cd264 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -28,6 +28,7 @@ import socket
import subprocess
import sys
import time
+import threading
import six
from flask import jsonify
@@ -193,20 +194,16 @@ def parse_ini_file(path):
def get_port_mac(sshclient, port):
cmd = "ifconfig |grep HWaddr |grep %s |awk '{print $5}' " % port
- status, stdout, stderr = sshclient.execute(cmd)
+ _, stdout, _ = sshclient.execute(cmd, raise_on_error=True)
- if status:
- raise RuntimeError(stderr)
return stdout.rstrip()
def get_port_ip(sshclient, port):
cmd = "ifconfig %s |grep 'inet addr' |awk '{print $2}' " \
"|cut -d ':' -f2 " % port
- status, stdout, stderr = sshclient.execute(cmd)
+ _, stdout, _ = sshclient.execute(cmd, raise_on_error=True)
- if status:
- raise RuntimeError(stderr)
return stdout.rstrip()
@@ -281,11 +278,19 @@ def get_free_port(ip):
def mac_address_to_hex_list(mac):
- octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
- assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)
+ try:
+ octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
+ except ValueError:
+ raise exceptions.InvalidMacAddress(mac_address=mac)
+ if len(octets) != 6 or any(len(octet) != 4 for octet in octets):
+ raise exceptions.InvalidMacAddress(mac_address=mac)
return octets
+def make_ipv4_address(ip_addr):
+ return ipaddress.IPv4Address(six.text_type(ip_addr))
+
+
def safe_ip_address(ip_addr):
""" get ip address version v6 or v4 """
try:
@@ -335,6 +340,14 @@ def ip_to_hex(ip_addr, separator=''):
return separator.join('{:02x}'.format(octet) for octet in address.packed)
+def get_mask_from_ip_range(ip_low, ip_high):
+ _ip_low = ipaddress.ip_address(ip_low)
+ _ip_high = ipaddress.ip_address(ip_high)
+ _ip_low_int = int(_ip_low)
+ _ip_high_int = int(_ip_high)
+ return _ip_high.max_prefixlen - (_ip_high_int ^ _ip_low_int).bit_length()
+
+
def try_int(s, *args):
"""Convert to integer if possible."""
try:
@@ -467,6 +480,9 @@ class Timer(object):
def __del__(self): # pragma: no cover
signal.alarm(0)
+ def delta_time_sec(self):
+ return (datetime.datetime.now() - self.start).total_seconds()
+
def read_meminfo(ssh_client):
"""Read "/proc/meminfo" file and parse all keys and values"""
@@ -513,17 +529,30 @@ def open_relative_file(path, task_path):
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
"""Wait until callable predicate is evaluated as True
+ When in a thread different from the main one, Timer(timeout) will fail
+ because signal is not handled. In this case the elapsed time is polled instead.
+
:param predicate: (func) callable deciding whether waiting should continue
:param timeout: (int) timeout in seconds how long should function wait
:param sleep: (int) polling interval for results in seconds
:param exception: exception instance to raise on timeout. If None is passed
(default) then WaitTimeout exception is raised.
"""
- try:
- with Timer(timeout=timeout):
- while not predicate():
+ if isinstance(threading.current_thread(), threading._MainThread):
+ try:
+ with Timer(timeout=timeout):
+ while not predicate():
+ time.sleep(sleep)
+ except exceptions.TimerTimeout:
+ if exception and issubclass(exception, Exception):
+ raise exception # pylint: disable=raising-bad-type
+ raise exceptions.WaitTimeout
+ else:
+ with Timer() as timer:
+ while timer.delta_time_sec() < timeout:
+ if predicate():
+ return
time.sleep(sleep)
- except exceptions.TimerTimeout:
if exception and issubclass(exception, Exception):
raise exception # pylint: disable=raising-bad-type
raise exceptions.WaitTimeout
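get_mask_from_ip_range() above turns an address range into a prefix length: XOR-ing the two endpoints leaves only the bits that differ, and subtracting that bit length from the maximum prefix length gives the covering mask. Worked examples of the same arithmetic (hypothetical standalone copy):

    import ipaddress

    def mask_from_ip_range(ip_low, ip_high):
        # same arithmetic as utils.get_mask_from_ip_range() above
        low = ipaddress.ip_address(ip_low)
        high = ipaddress.ip_address(ip_high)
        # bits that differ between the endpoints are the host bits
        return high.max_prefixlen - (int(high) ^ int(low)).bit_length()

    print(mask_from_ip_range(u'1.1.1.1', u'1.1.1.254'))            # 24
    print(mask_from_ip_range(u'192.168.0.0', u'192.168.255.255'))  # 16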
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index cee768dba..8274ff9ce 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import ipaddress
import logging
import IxNetwork
@@ -33,12 +34,21 @@ PROTO_UDP = 'udp'
PROTO_TCP = 'tcp'
PROTO_VLAN = 'vlan'
-IP_VERSION_4_MASK = '0.0.0.255'
-IP_VERSION_6_MASK = '0:0:0:0:0:0:0:ff'
+SINGLE_VALUE = "singleValue"
+
+S_VLAN = 0
+C_VLAN = 1
+
+ETHER_TYPE_802_1ad = '0x88a8'
+
+IP_VERSION_4_MASK = 24
+IP_VERSION_6_MASK = 64
TRAFFIC_STATUS_STARTED = 'started'
TRAFFIC_STATUS_STOPPED = 'stopped'
+SUPPORTED_PROTO = [PROTO_UDP]
+
# NOTE(ralonsoh): this pragma will be removed in the last patch of this series
class IxNextgen(object): # pragma: no cover
@@ -329,7 +339,7 @@ class IxNextgen(object): # pragma: no cover
'-valueType', 'singleValue')
self.ixnet.commit()
- def update_frame(self, traffic):
+ def update_frame(self, traffic, duration):
"""Update the L2 frame
This function updates the L2 frame options:
@@ -347,6 +357,7 @@ class IxNextgen(object): # pragma: no cover
:param traffic: list of traffic elements; each traffic element contains
the injection parameter for each flow group.
+ :param duration: (int) injection time in seconds.
"""
for traffic_param in traffic.values():
fg_id = str(traffic_param['id'])
@@ -355,7 +366,6 @@ class IxNextgen(object): # pragma: no cover
raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
type = traffic_param.get('traffic_type', 'fixedDuration')
- duration = traffic_param.get('duration', 30)
rate = traffic_param['rate']
rate_unit = (
'framesPerSecond' if traffic_param['rate_unit'] ==
@@ -364,10 +374,28 @@ class IxNextgen(object): # pragma: no cover
traffic_param['outer_l2']['framesize'])
srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01'))
dstmac = str(traffic_param.get('dstmac', '00:00:00:00:00:02'))
- # NOTE(ralonsoh): add QinQ tagging when
- # traffic_param['outer_l2']['QinQ'] exists.
- # s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
- # c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ if traffic_param['outer_l2']['QinQ']:
+ s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
+ c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'etherType')
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', ETHER_TYPE_802_1ad,
+ '-fieldValue', ETHER_TYPE_802_1ad,
+ '-valueType', SINGLE_VALUE)
+
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_VLAN, config_element + '/stack:"ethernet-1"')
+
+ self._update_vlan_tag(fg_id, s_vlan, S_VLAN)
+ self._update_vlan_tag(fg_id, c_vlan, C_VLAN)
self.ixnet.setMultiAttribute(
config_element + '/transmissionControl',
@@ -388,6 +416,27 @@ class IxNextgen(object): # pragma: no cover
self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
'sourceAddress', srcmac)
+ def _update_vlan_tag(self, fg_id, params, vlan=0):
+ field_to_param_map = {
+ 'vlanUserPriority': 'priority',
+ 'cfi': 'cfi',
+ 'vlanID': 'id'
+ }
+ for field, param in field_to_param_map.items():
+ value = params.get(param)
+ if value:
+ field_descriptor = self._get_field_in_stack_item(
+ self._get_stack_item(fg_id, PROTO_VLAN)[vlan],
+ field)
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-singleValue', value,
+ '-fieldValue', value,
+ '-valueType', SINGLE_VALUE)
+
+ self.ixnet.commit()
+
def _update_ipv4_address(self, ip_descriptor, field, ip_address, seed,
mask, count):
"""Set the IPv4 address in a config element stack IP field
@@ -397,15 +446,17 @@ class IxNextgen(object): # pragma: no cover
:param field: (str) field name, e.g.: scrIp, dstIp
:param ip_address: (str) IP address
:param seed: (int) seed length
- :param mask: (str) IP address mask
+ :param mask: (int) IP address mask length
:param count: (int) number of random IPs to generate
"""
field_descriptor = self._get_field_in_stack_item(ip_descriptor,
field)
+ random_mask = str(ipaddress.IPv4Address(
+ 2**(ipaddress.IPV4LENGTH - mask) - 1).compressed)
self.ixnet.setMultiAttribute(field_descriptor,
'-seed', seed,
'-fixedBits', ip_address,
- '-randomMask', mask,
+ '-randomMask', random_mask,
'-valueType', 'random',
'-countValue', count)
self.ixnet.commit()
@@ -424,15 +475,77 @@ class IxNextgen(object): # pragma: no cover
raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
count = traffic_param['outer_l3']['count']
- srcip4 = str(traffic_param['outer_l3']['srcip4'])
- dstip4 = str(traffic_param['outer_l3']['dstip4'])
+ srcip = str(traffic_param['outer_l3']['srcip'])
+ dstip = str(traffic_param['outer_l3']['dstip'])
+ seed = traffic_param['outer_l3']['seed']
+ srcmask = traffic_param['outer_l3']['srcmask'] or IP_VERSION_4_MASK
+ dstmask = traffic_param['outer_l3']['dstmask'] or IP_VERSION_4_MASK
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'srcIp', srcip4, 1, IP_VERSION_4_MASK, count)
+ 'srcIp', srcip, seed, srcmask, count)
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'dstIp', dstip4, 1, IP_VERSION_4_MASK, count)
+ 'dstIp', dstip, seed, dstmask, count)
+
+ def update_l4(self, traffic):
+ """Update the L4 headers
+
+ NOTE: Only UDP is currently supported
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ proto = traffic_param['outer_l3']['proto']
+ if proto not in SUPPORTED_PROTO:
+ raise exceptions.IXIAUnsupportedProtocol(protocol=proto)
+
+ count = traffic_param['outer_l4']['count']
+ seed = traffic_param['outer_l4']['seed']
+
+ srcport = traffic_param['outer_l4']['srcport']
+ srcmask = traffic_param['outer_l4']['srcportmask']
+
+ dstport = traffic_param['outer_l4']['dstport']
+ dstmask = traffic_param['outer_l4']['dstportmask']
+
+ if proto in SUPPORTED_PROTO:
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'srcPort', srcport, seed, srcmask, count)
+
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'dstPort', dstport, seed, dstmask, count)
+
+ def _update_udp_port(self, descriptor, field, value,
+ seed=1, mask=0, count=1):
+ """Set the UDP port in a config element stack UDP field
+
+ :param udp_descriptor: (str) UDP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"udp-3"
+ :param field: (str) field name, e.g.: scrPort, dstPort
+ :param value: (int) UDP port fixed bits
+ :param seed: (int) seed length
+ :param mask: (int) UDP port mask
+ :param count: (int) number of random ports to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(descriptor, field)
+
+ if mask == 0:
+ seed = count = 1
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-seed', seed,
+ '-fixedBits', value,
+ '-randomMask', mask,
+ '-valueType', 'random',
+ '-countValue', count)
+
+ self.ixnet.commit()
def _build_stats_map(self, view_obj, name_map):
return {data_yardstick: self.ixnet.execute(
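_update_ipv4_address() above now receives a prefix length and converts it into the dotted host-bit mask passed to IxNetwork's "random" value type: all host bits set to 1. The conversion on its own (hypothetical helper mirroring the expression in the patch):

    import ipaddress

    def random_mask(prefixlen):
        # e.g. /24 leaves 8 host bits -> 255 -> "0.0.0.255"
        host_bits = ipaddress.IPV4LENGTH - prefixlen
        return str(ipaddress.IPv4Address(2 ** host_bits - 1))

    print(random_mask(24))  # 0.0.0.255
    print(random_mask(16))  # 0.0.255.255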
diff --git a/yardstick/network_services/pipeline.py b/yardstick/network_services/pipeline.py
index d781ba0cd..7155480d4 100644
--- a/yardstick/network_services/pipeline.py
+++ b/yardstick/network_services/pipeline.py
@@ -18,6 +18,8 @@ import itertools
from six.moves import zip
+from yardstick.common import utils
+
FIREWALL_ADD_DEFAULT = "p {0} firewall add default 1"
FIREWALL_ADD_PRIO = """\
p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 6 0xFF port 0"""
@@ -59,8 +61,7 @@ class PipelineRules(object):
self.add_rule(FIREWALL_ADD_PRIO, ip)
def add_firewall_script(self, ip):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for i in range(256):
ip_addr[-2] = str(i)
@@ -87,8 +88,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_MPLS, ip, mac_addr, index)
def add_route_script(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for index in range(0, 256, 8):
ip_addr[-2] = str(index)
@@ -101,8 +101,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_QINQ, ip, mask, mac_addr, index)
def add_route_script2(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
mask = 24
for i in range(0, 256):
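A minimal stand-alone sketch of the validation that replaces the bare assert above, assuming utils.make_ipv4_address behaves like the stdlib ipaddress.IPv4Address constructor (raising on malformed input):

    import ipaddress

    def base_network_octets(ip):
        # Validate the address, then zero the last octet as the script builders do.
        octets = str(ipaddress.IPv4Address(ip)).split('.')  # raises ValueError if malformed
        octets[-1] = '0'
        return octets

    print(base_network_octets('10.0.1.37'))  # ['10', '0', '1', '0']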
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py
index 356b36bd9..a1b26a24d 100644
--- a/yardstick/network_services/traffic_profile/__init__.py
+++ b/yardstick/network_services/traffic_profile/__init__.py
@@ -27,6 +27,7 @@ def register_modules():
'yardstick.network_services.traffic_profile.prox_profile',
'yardstick.network_services.traffic_profile.prox_ramp',
'yardstick.network_services.traffic_profile.rfc2544',
+ 'yardstick.network_services.traffic_profile.pktgen',
]
for module in modules:
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 49bac27e4..26dc1fe04 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -14,6 +14,7 @@
import logging
+from yardstick.common import utils
from yardstick.network_services.traffic_profile import base as tp_base
from yardstick.network_services.traffic_profile import trex_traffic_profile
@@ -33,6 +34,21 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
self.rate = self.config.frame_rate
self.rate_unit = self.config.rate_unit
+ def _get_ip_and_mask(self, ip_range):
+ _ip_range = ip_range.split('-')
+ if len(_ip_range) == 1:
+ return _ip_range[0], None
+
+ mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
+ return _ip_range[0], mask
+
+ def _get_fixed_and_mask(self, port_range):
+ _port_range = str(port_range).split('-')
+ if len(_port_range) == 1:
+ return int(_port_range[0]), 0
+
+ return int(_port_range[0]), int(_port_range[1])
+
def _get_ixia_traffic_profile(self, profile_data, mac=None):
mac = {} if mac is None else mac
result = {}
@@ -50,14 +66,20 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
port_id = value.get('id', 1)
port_index = port_id - 1
- try:
- ip = value['outer_l3v6']
- except KeyError:
+
+ if value.get('outer_l3v4'):
ip = value['outer_l3v4']
src_key, dst_key = 'srcip4', 'dstip4'
else:
+ ip = value['outer_l3v6']
src_key, dst_key = 'srcip6', 'dstip6'
+ srcip, srcmask = self._get_ip_and_mask(ip[src_key])
+ dstip, dstmask = self._get_ip_and_mask(ip[dst_key])
+
+ outer_l4 = value.get('outer_l4')
+ src_port, src_port_mask = self._get_fixed_and_mask(outer_l4['srcport'])
+ dst_port, dst_port_mask = self._get_fixed_and_mask(outer_l4['dstport'])
result[traffickey] = {
'bidir': False,
'id': port_id,
@@ -66,6 +88,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'outer_l2': {
'framesize': value['outer_l2']['framesize'],
'framesPerSecond': True,
+ 'QinQ': value['outer_l2'].get('QinQ'),
'srcmac': mac['src_mac_{}'.format(port_index)],
'dstmac': mac['dst_mac_{}'.format(port_index)],
},
@@ -73,12 +96,23 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
'count': ip['count'],
'dscp': ip['dscp'],
'ttl': ip['ttl'],
- src_key: ip[src_key].split("-")[0],
- dst_key: ip[dst_key].split("-")[0],
+ 'seed': ip['seed'],
+ 'srcip': srcip,
+ 'dstip': dstip,
+ 'srcmask': srcmask,
+ 'dstmask': dstmask,
'type': key,
'proto': ip['proto'],
},
- 'outer_l4': value['outer_l4'],
+ 'outer_l4': {
+ 'srcport': src_port,
+ 'dstport': dst_port,
+ 'srcportmask': src_port_mask,
+ 'dstportmask': dst_port_mask,
+ 'count': outer_l4['count'],
+ 'seed': outer_l4['seed'],
+ }
+
}
except KeyError:
continue
@@ -86,8 +120,9 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
return result
def _ixia_traffic_generate(self, traffic, ixia_obj):
- ixia_obj.update_frame(traffic)
+ ixia_obj.update_frame(traffic, self.config.duration)
ixia_obj.update_ip_packet(traffic)
+ ixia_obj.update_l4(traffic)
ixia_obj.start_traffic()
def update_traffic_profile(self, traffic_generator):
@@ -123,11 +158,12 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
self._ixia_traffic_generate(traffic, ixia_obj)
return first_run
- def get_drop_percentage(self, samples, tol_min, tolerance, duration=30.0,
+ def get_drop_percentage(self, samples, tol_min, tolerance,
first_run=False):
completed = False
drop_percent = 100
num_ifaces = len(samples)
+ duration = self.config.duration
in_packets_sum = sum(
[samples[iface]['in_packets'] for iface in samples])
out_packets_sum = sum(
@@ -155,7 +191,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
completed = True if drop_percent <= tolerance else False
if (first_run and
self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
- self.rate = out_packets_sum / duration / num_ifaces
+ self.rate = float(out_packets_sum) / duration / num_ifaces
if drop_percent > tolerance:
self.max_rate = self.rate
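The two helpers added above split a 'start-end' range into the fixed value and the mask/upper bound handed to the IXIA API; a self-contained sketch of the port variant (same logic, outside the class):

    def get_fixed_and_mask(port_range):
        # '1000-1020' -> (1000, 1020); a single value such as 53 -> (53, 0).
        parts = str(port_range).split('-')
        if len(parts) == 1:
            return int(parts[0]), 0
        return int(parts[0]), int(parts[1])

    assert get_fixed_and_mask('1000-1020') == (1000, 1020)
    assert get_fixed_and_mask(53) == (53, 0)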
diff --git a/yardstick/network_services/traffic_profile/pktgen.py b/yardstick/network_services/traffic_profile/pktgen.py
new file mode 100644
index 000000000..30f81b794
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/pktgen.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+class PktgenTrafficProfile(tp_base.TrafficProfile):
+ """This class handles Pktgen Trex Traffic profile execution"""
+
+ def __init__(self, tp_config): # pragma: no cover
+ super(PktgenTrafficProfile, self).__init__(tp_config)
+ self._host = None
+ self._port = None
+
+ def init(self, host, port): # pragma: no cover
+ """Initialize control parameters
+
+ :param host: (str) ip or host name
+ :param port: (int) TCP socket port number for Lua commands
+ """
+ self._host = host
+ self._port = port
+
+ def start(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.start("0")') != 0:
+ raise exceptions.PktgenActionError(action='start')
+
+ def stop(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.stop("0")') != 0:
+ raise exceptions.PktgenActionError(action='stop')
+
+ def rate(self, rate):
+ command = 'pktgen.set("0", "rate", ' + str(rate) + ')'
+ if utils.send_socket_command(self._host, self._port, command) != 0:
+ raise exceptions.PktgenActionError(action='rate')
+
+ def clear_all_stats(self):
+ if utils.send_socket_command(self._host, self._port, 'clr') != 0:
+ raise exceptions.PktgenActionError(action='clear all stats')
+
+ def help(self):
+ if utils.send_socket_command(self._host, self._port, 'help') != 0:
+ raise exceptions.PktgenActionError(action='help')
+
+ def execute_traffic(self, *args, **kwargs): # pragma: no cover
+ pass
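The profile drives pktgen through its Lua TCP shell via utils.send_socket_command; a hedged sketch of such a helper using only the stdlib socket module (the real helper lives in yardstick.common.utils and may differ):

    import socket

    def send_socket_command(host, port, command):
        """Send one Lua command to pktgen; return 0 on success, 1 on socket errors."""
        ret = 0
        sock = None
        try:
            sock = socket.create_connection((host, port), timeout=5)
            sock.sendall('{}\r\n'.format(command).encode())
        except socket.error:
            ret = 1
        finally:
            if sock:
                sock.close()
        return ret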
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 9457096c8..506a880e0 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -88,11 +88,6 @@ class ProxBinSearchProfile(ProxProfile):
theor_max_thruput = actual_max_thruput = 0
result_samples = {}
- rate_samples = {}
- pos_retry = 0
- neg_retry = 0
- total_retry = 0
- ok_retry = 0
# Store one time only value in influxdb
single_samples = {
@@ -110,15 +105,11 @@ class ProxBinSearchProfile(ProxProfile):
"interface_speed_gbps", constants.NIC_GBPS_DEFAULT) * constants.ONE_GIGABIT_IN_BITS
ok_retry = traffic_gen.scenario_helper.scenario_cfg["runner"].get("confirmation", 0)
- for test_value in self.bounds_iterator(LOG):
+ for step_id, test_value in enumerate(self.bounds_iterator(LOG)):
pos_retry = 0
neg_retry = 0
total_retry = 0
- rate_samples["MAX_Rate"] = self.current_upper
- rate_samples["MIN_Rate"] = self.current_lower
- rate_samples["Test_Rate"] = test_value
- self.queue.put(rate_samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info("Checking MAX %s MIN %s TEST %s",
self.current_upper, self.lower_bound, test_value)
while (pos_retry <= ok_retry) and (neg_retry <= ok_retry):
@@ -188,6 +179,11 @@ class ProxBinSearchProfile(ProxProfile):
self.queue.put({'theor_max_throughput': theor_max_thruput})
LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples)
+ samples["MAX_Rate"] = self.current_upper
+ samples["MIN_Rate"] = self.current_lower
+ samples["Test_Rate"] = test_value
+ samples["Step_Id"] = step_id
+ samples["Confirmation_Retry"] = total_retry
self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT)
LOG.info(">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s",
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index c24e2f65a..b54fc575f 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -52,7 +52,7 @@ class PortPgIDMap(object):
self._port_pg_id_map[port] = []
def get_pg_ids(self, port):
- return self._port_pg_id_map.get(port)
+ return self._port_pg_id_map.get(port, [])
def increase_pg_id(self, port=None):
port = self._last_port if not port else port
@@ -70,7 +70,7 @@ class PortPgIDMap(object):
class RFC2544Profile(trex_traffic_profile.TrexProfile):
"""TRex RFC2544 traffic profile"""
- TOLERANCE_LIMIT = 0.05
+ TOLERANCE_LIMIT = 0.01
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
@@ -246,6 +246,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
def get_drop_percentage(self, samples, tol_low, tol_high,
correlated_traffic):
"""Calculate the drop percentage and run the traffic"""
+ completed = False
tx_rate_fps = 0
rx_rate_fps = 0
for sample in samples:
@@ -266,15 +267,15 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
drop_percent = round(
(float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- tol_high = tol_high if tol_high > self.TOLERANCE_LIMIT else tol_high
- tol_low = tol_low if tol_low > self.TOLERANCE_LIMIT else tol_low
+ tol_high = max(tol_high, self.TOLERANCE_LIMIT)
+ tol_low = min(tol_low, self.TOLERANCE_LIMIT)
if drop_percent > tol_high:
self.max_rate = self.rate
elif drop_percent < tol_low:
self.min_rate = self.rate
- # else:
- # NOTE(ralonsoh): the test should finish here
- # pass
+ else:
+ completed = True
+
last_rate = self.rate
self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
@@ -295,4 +296,4 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
'Rate': last_rate,
'Latency': latency
}
- return output
+ return completed, output
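A short worked sketch of one search step as get_drop_percentage() now performs it, with illustrative numbers; the tolerance window decides which bound moves before the offered rate is halved again:

    max_rate, min_rate, rate = 100.0, 0.0, 100.0
    tol_low, tol_high = 0.01, 0.05
    drop_percent = 12.3            # measured for the current rate

    completed = False
    if drop_percent > tol_high:    # too much loss: lower the upper bound
        max_rate = rate
    elif drop_percent < tol_low:   # comfortably loss-free: raise the lower bound
        min_rate = rate
    else:                          # inside the window: the search has converged
        completed = True

    last_rate = rate
    rate = round(float(max_rate + min_rate) / 2.0, 5)  # next offered rate: 50.0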
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index 8e9bc87e1..11a602472 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -197,7 +197,7 @@ class AclApproxSetupEnvSetupEnvHelper(DpdkVnfSetupEnvHelper):
# e.g.: {"fwd": {"port": 0}}
# format action CLI command and add it to the list
if action_name not in _action_template_map.keys():
- raise exceptions.AclUknownActionTemplate(
+ raise exceptions.AclUnknownActionTemplate(
action_name=action_name)
template = _action_template_map[action_name]
try:
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 3ef7c33c5..a09f2a7a9 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -426,7 +426,8 @@ class ClientResourceHelper(ResourceHelper):
iteration_index = 0
while self._terminated.value == 0:
iteration_index += 1
- self._run_traffic_once(traffic_profile)
+ if self._run_traffic_once(traffic_profile):
+ self._terminated.value = 1
mq_producer.tg_method_iteration(iteration_index)
self.client.stop(self.all_ports)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
new file mode 100644
index 000000000..9d452213f
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import multiprocessing
+import time
+import uuid
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PktgenTrafficGen(vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint):
+ """DPDK Pktgen traffic generator
+
+ Website: http://pktgen-dpdk.readthedocs.io/en/latest/index.html
+ """
+
+ TIMEOUT = 30
+
+ def __init__(self, name, vnfd, task_id):
+ vnf_base.GenericTrafficGen.__init__(self, name, vnfd, task_id)
+ self.queue = multiprocessing.Queue()
+ self._id = uuid.uuid1().int
+ self._mq_producer = self._setup_mq_producer(self._id)
+ vnf_base.GenericVNFEndpoint.__init__(self, self._id, [task_id],
+ self.queue)
+ self._consumer = vnf_base.GenericVNFConsumer([task_id], self)
+ self._consumer.start_rpc_server()
+ self._traffic_profile = None
+ self._node_ip = vnfd['mgmt-interface'].get('ip')
+ self._lua_node_port = self._get_lua_node_port(
+ vnfd['mgmt-interface'].get('service_ports', []))
+ self._rate = 1
+
+ def instantiate(self, scenario_cfg, context_cfg): # pragma: no cover
+ pass
+
+ def run_traffic(self, traffic_profile):
+ self._traffic_profile = traffic_profile
+ self._traffic_profile.init(self._node_ip, self._lua_node_port)
+ utils.wait_until_true(self._is_running, timeout=self.TIMEOUT,
+ sleep=2)
+
+ def terminate(self): # pragma: no cover
+ pass
+
+ def collect_kpi(self): # pragma: no cover
+ pass
+
+ def scale(self, flavor=''): # pragma: no cover
+ pass
+
+ def wait_for_instantiate(self): # pragma: no cover
+ pass
+
+ def runner_method_start_iteration(self, ctxt, **kwargs): # pragma: no cover
+ LOG.debug('Start method')
+ # NOTE(ralonsoh): 'rate' should be modified between iterations. The
+ # current implementation is just for testing.
+ self._rate += 1
+ self._traffic_profile.start()
+ self._traffic_profile.rate(self._rate)
+ time.sleep(4)
+ self._traffic_profile.stop()
+ self._mq_producer.tg_method_iteration(1, 1, {})
+
+ def runner_method_stop_iteration(self, ctxt, **kwargs): # pragma: no cover
+ LOG.debug('Stop method')
+
+ @staticmethod
+ def _get_lua_node_port(service_ports):
+ for port in (port for port in service_ports if
+ int(port['port']) == constants.LUA_PORT):
+ return int(port['node_port'])
+ # NOTE(ralonsoh): in case LUA port is not present, an exception should
+ # be raised.
+
+ def _is_running(self):
+ try:
+ self._traffic_profile.help()
+ return True
+ except exceptions.PktgenActionError:
+ return False
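An illustrative mgmt-interface 'service_ports' list as scanned by _get_lua_node_port() above; only the 'port'/'node_port' keys come from the code, the numbers are made up:

    service_ports = [
        {'port': 22022, 'node_port': 30022},
        {'port': constants.LUA_PORT, 'node_port': 30010},
    ]
    # _get_lua_node_port(service_ports) returns 30010, the node_port mapped to LUA_PORT.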
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index cdbb41485..7ecb12478 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -45,11 +45,12 @@ class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
time.sleep(self.SAMPLING_PERIOD)
traffic_profile.stop_traffic(self)
- output = traffic_profile.get_drop_percentage(
+ completed, output = traffic_profile.get_drop_percentage(
samples, self.rfc2544_helper.tolerance_low,
self.rfc2544_helper.tolerance_high,
self.rfc2544_helper.correlated_traffic)
self._queue.put(output)
+ return completed
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index e0c0db262..99a5760a3 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -227,14 +227,10 @@ name (i.e. %s).
def add_volume_attachment(self, server_name, volume_name, mountpoint=None):
"""add to the template an association of volume to instance"""
- log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ", server_name,
- volume_name)
-
+ log.debug("adding Cinder::VolumeAttachment server '%s' volume '%s' ",
+ server_name, volume_name)
name = "%s-%s" % (server_name, volume_name)
-
- volume_id = op_utils.get_volume_id(volume_name)
- if not volume_id:
- volume_id = {'get_resource': volume_name}
+ volume_id = {'get_resource': volume_name}
self.resources[name] = {
'type': 'OS::Cinder::VolumeAttachment',
'properties': {'instance_uuid': {'get_resource': server_name},
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 7b1450230..b0b93a3c2 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -437,11 +437,11 @@ class NetworkObject(object):
def create(self):
k8s_utils.create_network(self.scope, self.group, self.version,
- self.plural, self.template)
+ self.plural, self.template, self._name)
def delete(self):
k8s_utils.delete_network(self.scope, self.group, self.version,
- self.plural, self._name)
+ self.plural, self._name, skip_codes=[404])
class KubernetesTemplate(object):
diff --git a/yardstick/service/environment.py b/yardstick/service/environment.py
index 324589f79..d910e31e9 100644
--- a/yardstick/service/environment.py
+++ b/yardstick/service/environment.py
@@ -36,7 +36,7 @@ class Environment(Service):
return self._format_sut_info(sut_info)
- def _load_pod_info(self):
+ def _load_pod_info(self): # pragma: no cover
if self.pod is None:
raise MissingPodInfoError
@@ -51,10 +51,10 @@ class Environment(Service):
except (ValueError, KeyError):
raise UnsupportedPodFormatError
- def _format_sut_info(self, sut_info):
+ def _format_sut_info(self, sut_info): # pragma: no cover
return {k: self._format_node_info(v) for k, v in sut_info.items()}
- def _format_node_info(self, node_info):
+ def _format_node_info(self, node_info): # pragma: no cover
info = []
facts = node_info.get('ansible_facts', {})
@@ -93,9 +93,9 @@ class Environment(Service):
return info
- def _get_interface_info(self, facts, name):
+ def _get_interface_info(self, facts, name): # pragma: no cover
mac = facts.get('ansible_{}'.format(name), {}).get('macaddress')
return [name, mac] if mac else []
- def _get_device_info(self, name, info):
+ def _get_device_info(self, name, info): # pragma: no cover
return ['disk_{}'.format(name), info.get('size')]
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index e6a26ab6b..8bdc32c7c 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -499,9 +499,10 @@ class AutoConnectSSH(SSH):
""" Don't close anything, just force creation of a new client """
self._client = False
- def execute(self, cmd, stdin=None, timeout=3600):
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
self._connect()
- return super(AutoConnectSSH, self).execute(cmd, stdin, timeout)
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600,
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 35236637d..e1414c2ae 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -9,11 +9,13 @@
import copy
import io
+import logging
import os
import sys
import mock
import six
+from six.moves import builtins
import unittest
import uuid
@@ -322,9 +324,9 @@ class TaskTestCase(unittest.TestCase):
actual_result = t._parse_options(options)
self.assertEqual(expected_result, actual_result)
- @mock.patch('six.moves.builtins.open', side_effect=mock.mock_open())
+ @mock.patch.object(builtins, 'open', side_effect=mock.mock_open())
@mock.patch.object(task, 'utils')
- @mock.patch('logging.root')
+ @mock.patch.object(logging, 'root')
def test_set_log(self, mock_logging_root, *args):
task_obj = task.Task()
task_obj.task_id = 'task_id'
@@ -561,7 +563,8 @@ key2:
mock_open.assert_has_calls([mock.call('args_file'),
mock.call('task_file')])
- def test__render_task_error_arguments(self):
+ @mock.patch.object(builtins, 'print')
+ def test__render_task_error_arguments(self, *args):
with self.assertRaises(exceptions.TaskRenderArgumentError):
task.TaskParser('task_file')._render_task('value1="var3"', None)
diff --git a/yardstick/tests/unit/benchmark/core/test_testcase.py b/yardstick/tests/unit/benchmark/core/test_testcase.py
index 119465887..077848d77 100644
--- a/yardstick/tests/unit/benchmark/core/test_testcase.py
+++ b/yardstick/tests/unit/benchmark/core/test_testcase.py
@@ -7,28 +7,28 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.testcase
-
-from __future__ import absolute_import
-import unittest
+import mock
+from six.moves import builtins
from yardstick.benchmark.core import testcase
+from yardstick.tests.unit import base as ut_base
class Arg(object):
def __init__(self):
- self.casename = ('opnfv_yardstick_tc001',)
+ self.casename = ('opnfv_yardstick_tc001', )
-class TestcaseUT(unittest.TestCase):
+class TestcaseTestCase(ut_base.BaseUnitTestCase):
def test_list_all(self):
t = testcase.Testcase()
result = t.list_all("")
self.assertIsInstance(result, list)
- def test_show(self):
+ @mock.patch.object(builtins, 'print')
+ def test_show(self, *args):
t = testcase.Testcase()
casename = Arg()
result = t.show(casename)
diff --git a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
new file mode 100644
index 000000000..7b1e1e976
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
@@ -0,0 +1,220 @@
+##############################################################################
+# Copyright (c) 2018 Nokia and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+import os
+import time
+
+from yardstick.benchmark.runners import arithmetic
+
+
+class ArithmeticRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ def __init__(self):
+ self.count = 101
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ return self.count
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {
+ 'interval': 0,
+ 'iter_type': 'nested_for_loops',
+ 'iterators': [
+ {
+ 'name': 'stride',
+ 'start': 64,
+ 'stop': 128,
+ 'step': 64
+ },
+ {
+ 'name': 'size',
+ 'start': 500,
+ 'stop': 2000,
+ 'step': 500
+ }
+ ]
+ },
+ 'type': 'some_type'
+ }
+
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_process_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ @mock.patch.object(os, 'getpid')
+ @mock.patch.object(multiprocessing, 'Process')
+ def test__run_benchmark_called_with(self, mock_multiprocessing_process,
+ mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ runner = arithmetic.ArithmeticRunner({})
+ benchmark_cls = mock.Mock()
+ runner._run_benchmark(benchmark_cls, 'my_method', self.scenario_cfg,
+ {})
+ mock_multiprocessing_process.assert_called_once_with(
+ name='Arithmetic-some_type-101',
+ target=arithmetic._worker_process,
+ args=(runner.result_queue, benchmark_cls, 'my_method',
+ self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_nested_for_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 8)
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 8)
+ self.assertEqual(mock_time_sleep.call_count, 8)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_tuple_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 2)
+ self.assertEqual(self.benchmark.my_method.call_count, 2)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 2)
+ self.assertEqual(mock_time_sleep.call_count, 2)
+
+ def test__worker_process_stored_options_nested_for_loops(self):
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_stored_options_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 1000})
+
+ def test__worker_process_aborted_set_early(self):
+ aborted = multiprocessing.Event()
+ aborted.set()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ aborted, mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.scenario_cfg['options'], {})
+ self.benchmark.my_method.assert_not_called()
+
+ def test__worker_process_output_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103, 104, 105, 106, 107, 108, 109])
+
+ def test__worker_process_output_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103])
+
+ def test__worker_process_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
+
+ def test__worker_process_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
diff --git a/yardstick/tests/unit/benchmark/runner/test_duration.py b/yardstick/tests/unit/benchmark/runner/test_duration.py
index 21e3874b8..d4801ef2c 100644
--- a/yardstick/tests/unit/benchmark/runner/test_duration.py
+++ b/yardstick/tests/unit/benchmark/runner/test_duration.py
@@ -11,17 +11,50 @@ import mock
import unittest
import multiprocessing
import os
+import time
from yardstick.benchmark.runners import duration
+from yardstick.common import exceptions as y_exc
class DurationRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
+ BROAD_EXCEPTION_SIDE_EFFECT = 2
+
+ def __init__(self, side_effect=0):
+ self.count = 101
+ self.side_effect = side_effect
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ if self.side_effect == self.SLA_VALIDATION_ERROR_SIDE_EFFECT:
+ raise y_exc.SLAValidationError(case_name='My Case',
+ error_msg='my error message')
+ elif self.side_effect == self.BROAD_EXCEPTION_SIDE_EFFECT:
+ raise y_exc.YardstickException
+ return self.count
+
def setUp(self):
self.scenario_cfg = {
'runner': {'interval': 0, "duration": 0},
'type': 'some_type'
}
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ def _assert_defaults__worker_run_one_iteration(self):
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
@mock.patch.object(os, 'getpid')
@mock.patch.object(multiprocessing, 'Process')
def test__run_benchmark_called_with(self, mock_multiprocessing_process,
@@ -37,3 +70,246 @@ class DurationRunnerTest(unittest.TestCase):
target=duration._worker_process,
args=(runner.result_queue, benchmark_cls, 'my_method',
self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ def test__worker_process_called_with_cfg(self):
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_called_with_cfg_loop(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.call_count, 2)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ def test__worker_process_called_without_cfg(self):
+ scenario_cfg = {'runner': {}}
+ aborted = multiprocessing.Event()
+ aborted.set()
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ scenario_cfg, {}, aborted, mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(1)
+ self.benchmark.my_method.assert_called_once_with({})
+ self.benchmark.post_run_wait_time.assert_called_once_with(1)
+ self.benchmark.teardown.assert_called_once()
+
+ def test__worker_process_output_queue(self):
+ self.benchmark.my_method = mock.Mock(return_value='my_result')
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+ self.assertEqual(output_queue.get(), 'my_result')
+
+ def test__worker_process_output_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.01
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 101
+ while not output_queue.empty():
+ count += 1
+ self.assertEqual(output_queue.get(), count)
+
+ def test__worker_process_queue(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_queue_multiple_iterations(self):
+ self.scenario_cfg['runner']['duration'] = 0.5
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.assertGreater(self.benchmark.pre_run_wait_time.call_count, 2)
+ self.assertGreater(self.benchmark.my_method.count, 103)
+ self.assertGreater(self.benchmark.post_run_wait_time.call_count, 2)
+
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+
+ def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
+ self.scenario_cfg['sla'] = {}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
+ self.scenario_cfg['sla'] = {'action': 'assert'}
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.SLAValidationError)
+
+ with self.assertRaises(y_exc.SLAValidationError):
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.my_method.assert_called_once_with({})
+
+ def test__worker_process_queue_on_sla_validation_error_monitor(self):
+ self.scenario_cfg['sla'] = {'action': 'monitor'}
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertEqual(result['errors'], ('My Case SLA validation failed. '
+ 'Error: my error message',))
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_broad_exception(self):
+ self.benchmark.my_method = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ duration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
+
+ def test__worker_process_queue_on_broad_exception(self):
+ self.benchmark.my_method = self.MyMethod(
+ side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ duration._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.1)
+
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self.benchmark.pre_run_wait_time.assert_called_once_with(0)
+ self.benchmark.post_run_wait_time.assert_called_once_with(0)
+
+ result = queue.get()
+ self.assertGreater(result['timestamp'], timestamp)
+ self.assertNotEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': 102})
+ self.assertEqual(result['sequence'], 1)
+
+ def test__worker_process_benchmark_teardown_on_broad_exception(self):
+ self.benchmark.teardown = mock.Mock(
+ side_effect=y_exc.YardstickException)
+
+ with self.assertRaises(SystemExit) as raised:
+ duration._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ self.assertEqual(raised.exception.code, 1)
+ self._assert_defaults__worker_run_setup_and_teardown()
+ self._assert_defaults__worker_run_one_iteration()
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index d5c95a086..0f68753fd 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -48,7 +48,7 @@ class AttackerBaremetalTestCase(unittest.TestCase):
host = {
"ipmi_ip": "10.20.0.5",
"ipmi_user": "root",
- "ipmi_pwd": "123456",
+ "ipmi_password": "123456",
"ip": "10.20.0.5",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
@@ -77,7 +77,7 @@ class AttackerBaremetalTestCase(unittest.TestCase):
def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess):
self.attacker_cfg["jump_host"] = 'node1'
- self.context["node1"]["pwd"] = "123456"
+ self.context["node1"]["password"] = "123456"
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
index 7a2324b3d..b0488bacd 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
@@ -8,28 +8,56 @@
##############################################################################
import unittest
-from yardstick.benchmark.scenarios.lib.check_value import CheckValue
+from yardstick.benchmark.scenarios.lib import check_value
+from yardstick.common import exceptions as y_exc
+
class CheckValueTestCase(unittest.TestCase):
- def setUp(self):
- self.result = {}
+ def test_eq_pass(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 1}}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertEqual({}, result)
+
+ def test_ne_pass(self):
+ scenario_cfg = {'options': {'operator': 'ne',
+ 'value1': 1,
+ 'value2': 2}}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertEqual({}, result)
+
+ def test_result(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 1},
+ 'output': 'foo'}
+ obj = check_value.CheckValue(scenario_cfg, {})
+ result = obj.run({})
+
+ self.assertDictEqual(result, {'foo': 'PASS'})
- def test_check_value_eq(self):
- scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 2}}
- obj = CheckValue(scenario_cfg, {})
- self.assertRaises(AssertionError, obj.run, self.result)
- self.assertEqual({}, self.result)
+ def test_eq(self):
+ scenario_cfg = {'options': {'operator': 'eq',
+ 'value1': 1,
+ 'value2': 2}}
+ obj = check_value.CheckValue(scenario_cfg, {})
- def test_check_value_eq_pass(self):
- scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 1}}
- obj = CheckValue(scenario_cfg, {})
+ with self.assertRaises(y_exc.ValueCheckError):
+ result = obj.run({})
+ self.assertEqual({}, result)
- obj.run(self.result)
- self.assertEqual({}, self.result)
+ def test_ne(self):
+ scenario_cfg = {'options': {'operator': 'ne',
+ 'value1': 1,
+ 'value2': 1}}
+ obj = check_value.CheckValue(scenario_cfg, {})
- def test_check_value_ne(self):
- scenario_cfg = {'options': {'operator': 'ne', 'value1': 1, 'value2': 1}}
- obj = CheckValue(scenario_cfg, {})
- self.assertRaises(AssertionError, obj.run, self.result)
- self.assertEqual({}, self.result)
+ with self.assertRaises(y_exc.ValueCheckError):
+ result = obj.run({})
+ self.assertEqual({}, result)
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 4016f5055..5761e2403 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -9,18 +9,22 @@
import mock
import unittest
+import logging
from oslo_serialization import jsonutils
+from yardstick import ssh
from yardstick.benchmark.scenarios.networking import pktgen
from yardstick.common import exceptions as y_exc
-@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
+logging.disable(logging.CRITICAL)
+
+
class PktgenTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
'host': {
'ip': '172.16.0.137',
'user': 'root',
@@ -33,636 +37,416 @@ class PktgenTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138'
}
}
+ self.scenario_cfg = {
+ 'options': {'packetsize': 60}
+ }
- def test_pktgen_successful_setup(self, mock_ssh):
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.setup()
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_SSH.from_node().run.return_value = 0
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.assertIsNotNone(p.server)
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ self.addCleanup(self._stop_mock)
- def test_pktgen_successful_iptables_setup(self, mock_ssh):
+ self.scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ self.scenario.setup()
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+
+ def test_setup_successful(self):
+ self.assertIsNotNone(self.scenario.server)
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
- p._iptables_setup()
+ def test_iptables_setup_successful(self):
+ self.scenario.number_of_ports = 10
+ self.scenario._iptables_setup()
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"sudo iptables -F; "
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP"
% 1010, timeout=60)
- def test_pktgen_unsuccessful_iptables_setup(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
-
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
-
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._iptables_setup)
+ def test_iptables_setup_unsuccessful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
- def test_pktgen_successful_iptables_get_result(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._iptables_setup()
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ def test_iptables_get_result_successful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().execute.return_value = (0, '150000', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '150000', '')
- result = p._iptables_get_result()
- expected_result = 150000
- self.assertEqual(result, expected_result)
+ result = self.scenario._iptables_get_result()
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(result, 150000)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo iptables -L INPUT -vnx |"
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'"
% 1010, raise_on_error=True)
- def test_pktgen_unsuccessful_iptables_get_result(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
-
- p = pktgen.Pktgen(args, self.ctx)
+ def test_iptables_get_result_unsuccessful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._iptables_get_result()
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._iptables_get_result)
+ def test_run_successful_no_sla(self):
+ self.scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110,
+ "ppm": 3179})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_successful_no_sla(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
result = {}
+ self.scenario.run(result)
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_successful_sla(self, mock_ssh):
+ def test_run_successful_sla(self):
+ self.scenario_cfg['sla'] = {'max_ppm': 10000}
+ scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110,
+ "ppm": 3179})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 10000}
- }
result = {}
+ scenario.run(result)
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_unsuccessful_sla(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 1000}
- }
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
+ def test_run_unsuccessful_sla(self):
+ self.scenario_cfg['sla'] = {'max_ppm': 1000}
+ scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SLAValidationError):
+ scenario.run({})
- p._iptables_get_result = mock.Mock(return_value=149300)
+ def test_run_ssh_error_not_caught(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(y_exc.SLAValidationError, p.run, result)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario.run({})
- def test_pktgen_unsuccessful_script_error(self, mock_ssh):
+ def test_get_vnic_driver_name(self):
+ self.mock_SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
+ vnic_driver_name = self.scenario._get_vnic_driver_name()
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 1000}
- }
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p.run, result)
-
- def test_pktgen_get_vnic_driver_name(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
-
- vnic_driver_name = p._get_vnic_driver_name()
self.assertEqual(vnic_driver_name, 'ixgbevf')
- def test_pktgen_unsuccessful_get_vnic_driver_name(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._get_vnic_driver_name)
+ def test_get_vnic_driver_name_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_get_sriov_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_vnic_driver_name()
- p.queue_number = p._get_sriov_queue_number()
- self.assertEqual(p.queue_number, 2)
+ def test_get_sriov_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '2', '')
- def test_pktgen_unsuccessful_get_sriov_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ self.scenario.queue_number = self.scenario._get_sriov_queue_number()
+ self.assertEqual(self.scenario.queue_number, 2)
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_get_sriov_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._get_sriov_queue_number)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_sriov_queue_number()
- def test_pktgen_get_available_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_get_available_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '4', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
-
- self.assertEqual(p._get_available_queue_number(), 4)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(self.scenario._get_available_queue_number(), 4)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | head -1 |"
"awk '{printf $2}'", raise_on_error=True)
- def test_pktgen_unsuccessful_get_available_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_get_available_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._get_available_queue_number)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_available_queue_number()
- def test_pktgen_get_usable_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_get_usable_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
- self.assertEqual(p._get_usable_queue_number(), 1)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(self.scenario._get_usable_queue_number(), 1)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | tail -1 |"
"awk '{printf $2}'", raise_on_error=True)
- def test_pktgen_unsuccessful_get_usable_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_get_usable_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_usable_queue_number()
- self.assertRaises(y_exc.SSHError, p._get_usable_queue_number)
+ def test_enable_ovs_multiqueue(self):
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
+ self.scenario.queue_number = self.scenario._enable_ovs_multiqueue()
- def test_pktgen_enable_ovs_multiqueue(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+ self.assertEqual(self.scenario.queue_number, 4)
+ self.mock_SSH.from_node().run.assert_has_calls(
+ (mock.call("sudo ethtool -L eth0 combined 4"),
+ mock.call("sudo ethtool -L eth0 combined 4")))
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
+ def test_enable_ovs_multiqueue_1q(self):
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=1)
+ self.scenario.queue_number = self.scenario._enable_ovs_multiqueue()
- p.queue_number = p._enable_ovs_multiqueue()
- self.assertEqual(p.queue_number, 4)
-
- def test_pktgen_enable_ovs_multiqueue_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.mock_SSH.from_node().run.assert_not_called()
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_enable_ovs_multiqueue_unsuccessful(self):
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=1)
-
- p.queue_number = p._enable_ovs_multiqueue()
- self.assertEqual(p.queue_number, 1)
-
- def test_pktgen_unsuccessful_enable_ovs_multiqueue(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._enable_ovs_multiqueue()
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
+ def test_setup_irqmapping_ovs(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_ovs(4)
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
-
- self.assertRaises(y_exc.SSHError, p._enable_ovs_multiqueue)
-
- def test_pktgen_setup_irqmapping_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
-
- p._setup_irqmapping_ovs(4)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 8 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_setup_irqmapping_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ def test_setup_irqmapping_ovs_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_ovs(1)
- p._setup_irqmapping_ovs(1)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_unsuccessful_setup_irqmapping_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 4)
+ def test_setup_irqmapping_ovs_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_unsuccessful_setup_irqmapping_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_ovs(4)
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 1)
-
- def test_pktgen_setup_irqmapping_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ def test_setup_irqmapping_ovs_1q_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_ovs(1)
- p._setup_irqmapping_sriov(2)
+ def test_setup_irqmapping_sriov(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 2 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_setup_irqmapping_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ def test_setup_irqmapping_sriov_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_sriov(1)
- p._setup_irqmapping_sriov(1)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_unsuccessful_setup_irqmapping_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 2)
+ def test_setup_irqmapping_sriov_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_unsuccessful_setup_irqmapping_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_setup_irqmapping_sriov_1q_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 1)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_sriov(1)
- def test_pktgen_is_irqbalance_disabled(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_is_irqbalance_disabled(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-
- result = p._is_irqbalance_disabled()
- self.assertFalse(result)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertFalse(self.scenario._is_irqbalance_disabled())
+ self.mock_SSH.from_node().execute.assert_called_with(
"grep ENABLED /etc/default/irqbalance", raise_on_error=True)
- def test_pktgen_unsuccessful_is_irqbalance_disabled(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._is_irqbalance_disabled)
-
- def test_pktgen_disable_irqbalance(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ def test_is_irqbalance_disabled_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().run.return_value = (0, '', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._is_irqbalance_disabled()
- p._disable_irqbalance()
+ def test_disable_irqbalance(self):
+ self.scenario._disable_irqbalance()
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"sudo service irqbalance disable")
- def test_pktgen_unsuccessful_disable_irqbalance(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._disable_irqbalance)
-
- def test_pktgen_multiqueue_setup_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+ def test_disable_irqbalance_unsuccessful(self):
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._disable_irqbalance()
- p.multiqueue_setup()
+ def test_multiqueue_setup_ovs(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '4', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(
+ return_value="virtio_net")
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
- self.assertEqual(p.queue_number, 4)
+ self.scenario.multiqueue_setup()
- def test_pktgen_multiqueue_setup_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.assertEqual(self.scenario.queue_number, 4)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_multiqueue_setup_ovs_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(
+ return_value="virtio_net")
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=1)
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=1)
+ self.scenario.multiqueue_setup()
- p.multiqueue_setup()
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- self.assertEqual(p.queue_number, 1)
+ def test_multiqueue_setup_sriov(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '2', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- def test_pktgen_multiqueue_setup_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.scenario.multiqueue_setup()
- mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+ self.assertEqual(self.scenario.queue_number, 2)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ def test_multiqueue_setup_sriov_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- p.multiqueue_setup()
+ self.scenario.multiqueue_setup()
- self.assertEqual(p.queue_number, 2)
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- def test_pktgen_multiqueue_setup_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
-
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
-
- p.multiqueue_setup()
-
- self.assertEqual(p.queue_number, 1)
-
- def test_pktgen_run_with_setup_done(self, mock_ssh):
- args = {
+ def test_run_with_setup_done(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
'sla': {
- 'max_ppm': 1}}
- result = {}
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ 'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.server = self.mock_SSH.from_node()
+ scenario.client = self.mock_SSH.from_node()
+ scenario.setup_done = True
+ scenario.multiqueue_setup_done = True
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.setup_done = True
- p.multiqueue_setup_done = True
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_run_with_ovs_multiqueque(self, mock_ssh):
- args = {
+ def test_run_with_ovs_multiqueue(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
- 'sla': {
- 'max_ppm': 1}}
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
- p._enable_ovs_multiqueue = mock.Mock(return_value=4)
- p._setup_irqmapping_ovs = mock.Mock()
- p._iptables_get_result = mock.Mock(return_value=149300)
+ 'sla': {'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ scenario._get_available_queue_number = mock.Mock(return_value=4)
+ scenario._enable_ovs_multiqueue = mock.Mock(return_value=4)
+ scenario._setup_irqmapping_ovs = mock.Mock()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_run_with_sriov_multiqueque(self, mock_ssh):
- args = {
+ def test_run_with_sriov_multiqueue(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
- 'sla': {
- 'max_ppm': 1}}
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
+ 'sla': {'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ scenario._get_sriov_queue_number = mock.Mock(return_value=2)
+ scenario._setup_irqmapping_sriov = mock.Mock()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- p._get_sriov_queue_number = mock.Mock(return_value=2)
- p._setup_irqmapping_sriov = mock.Mock()
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index bcd417830..70cd8ad40 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -9,16 +9,22 @@
import mock
import unittest
+import time
+import logging
import yardstick.common.utils as utils
+from yardstick import ssh
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
from yardstick.common import exceptions as y_exc
+logging.disable(logging.CRITICAL)
+
+
class PktgenDPDKLatencyTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
'host': {
'ip': '172.16.0.137',
'user': 'root',
@@ -31,165 +37,100 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138'
}
}
-
- self._mock_ssh = mock.patch(
- 'yardstick.benchmark.scenarios.networking.pktgen_dpdk.ssh')
- self.mock_ssh = self._mock_ssh.start()
- self._mock_time = mock.patch(
- 'yardstick.benchmark.scenarios.networking.pktgen_dpdk.time')
- self.mock_time = self._mock_time.start()
-
- self.addCleanup(self._stop_mock)
-
- def _stop_mock(self):
- self._mock_ssh.stop()
- self._mock_time.stop()
-
- def test_pktgen_dpdk_successful_setup(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.setup()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.assertIsNotNone(p.server)
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- def test_pktgen_dpdk_successful_get_port_ip(self):
-
- args = {
- 'options': {'packetsize': 60},
+ self.scenario_cfg = {
+ 'options': {'packetsize': 60}
}
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- utils.get_port_ip(p.server, "eth1")
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
- self.mock_ssh.SSH.from_node().execute.assert_called_with(
- "ifconfig eth1 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ")
-
- def test_pktgen_dpdk_unsuccessful_get_port_ip(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self._mock_utils_get_port_ip = mock.patch.object(utils, 'get_port_ip')
+ self.mock_utils_get_port_ip = self._mock_utils_get_port_ip.start()
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, utils.get_port_ip, p.server, "eth1")
+ self._mock_utils_get_port_mac = mock.patch.object(utils,
+ 'get_port_mac')
+ self.mock_utils_get_port_mac = self._mock_utils_get_port_mac.start()
- def test_pktgen_dpdk_successful_get_port_mac(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self.addCleanup(self._stop_mock)
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
+ self.scenario.server = self.mock_SSH.from_node()
+ self.scenario.client = self.mock_SSH.from_node()
- utils.get_port_mac(p.server, "eth1")
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+ self._mock_time_sleep.stop()
+ self._mock_utils_get_port_ip.stop()
+ self._mock_utils_get_port_mac.stop()
- self.mock_ssh.SSH.from_node().execute.assert_called_with(
- "ifconfig |grep HWaddr |grep eth1 |awk '{print $5}' ")
+ def test_setup(self):
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
+ scenario.setup()
- def test_pktgen_dpdk_unsuccessful_get_port_mac(self):
+ self.assertIsNotNone(scenario.server)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
- args = {
- 'options': {'packetsize': 60},
- }
+ def test_run_get_port_ip_command(self):
+ self.scenario.run({})
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self.mock_utils_get_port_ip.assert_has_calls(
+ [mock.call(self.scenario.server, 'ens4'),
+ mock.call(self.scenario.server, 'ens5')])
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, utils.get_port_mac, p.server, "eth1")
+ def test_get_port_mac_command(self):
+ self.scenario.run({})
- def test_pktgen_dpdk_successful_no_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
-
- result = {}
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ self.mock_utils_get_port_mac.assert_has_calls(
+ [mock.call(self.scenario.server, 'ens5'),
+ mock.call(self.scenario.server, 'ens4'),
+ mock.call(self.scenario.server, 'ens5')])
+ def test_run_no_sla(self):
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.run(result)
+ result = {}
+ self.scenario.run(result)
# with python 3 we get float, might be due python division changes
# AssertionError: {'avg_latency': 132.33333333333334} != {
# 'avg_latency': 132}
delta = result['avg_latency'] - 132
self.assertLessEqual(delta, 1)
- def test_pktgen_dpdk_successful_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
- result = {}
-
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ def test_run_sla(self):
+ self.scenario_cfg['sla'] = {'max_latency': 100}
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
sample_output = '100\n100\n100\n100\n100\n100\n100\n100\n100\n100\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
-
- self.assertEqual(result, {"avg_latency": 100})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_dpdk_unsuccessful_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
result = {}
+ scenario.run(result)
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ self.assertEqual(result, {"avg_latency": 100})
- p.server = self.mock_ssh.SSH.from_node()
- p.client = self.mock_ssh.SSH.from_node()
+ def test_run_sla_error(self):
+ self.scenario_cfg['sla'] = {'max_latency': 100}
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(y_exc.SLAValidationError, p.run, result)
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_dpdk_run_unsuccessful_get_port_mac(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
- result = {}
-
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
-
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, result)
-
- def test_pktgen_dpdk_run_unsuccessful_script_error(self):
- args = {
- 'options': {'packetsize': 60}
- }
+ with self.assertRaises(y_exc.SLAValidationError):
+ scenario.run({})
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ def test_run_last_command_raise_on_error(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.mock_ssh.SSH.from_node().execute.side_effect = ((0, '', ''),
- (0, '', ''),
- (0, '', ''),
- (0, '', ''),
- (0, '', ''),
- y_exc.SSHError)
- self.assertRaises(y_exc.SSHError, p.run, {})
- self.assertEqual(self.mock_ssh.SSH.from_node().execute.call_count, 6)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario.run({})
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index cdb91f66d..49578b383 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -358,35 +358,49 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.assertIsNotNone(self.topology)
def test__get_ip_flow_range_string(self):
- self.scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
result = '152.16.100.2-152.16.100.254'
self.assertEqual(result, self.s._get_ip_flow_range(
'152.16.100.2-152.16.100.254'))
- def test__get_ip_flow_range(self):
- self.scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
- result = '152.16.100.2-152.16.100.254'
- self.assertEqual(result, self.s._get_ip_flow_range({"tg__1": 'xe0'}))
+ def test__get_ip_flow_range_no_nodes(self):
+ self.assertEqual('0.0.0.0', self.s._get_ip_flow_range({}))
- @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.ipaddress')
- def test__get_ip_flow_range_no_node_data(self, mock_ipaddress):
- scenario_cfg = deepcopy(self.scenario_cfg)
- scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
+ def test__get_ip_flow_range_no_node_data(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {}
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('0.0.0.2-0.0.0.254', result)
- mock_ipaddress.ip_network.return_value = ipaddr = mock.Mock()
- ipaddr.hosts.return_value = []
+ def test__get_ip_flow_range_ipv4(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '192.168.1.15',
+ 'netmask': '255.255.255.128'}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('192.168.1.2-192.168.1.126', result)
- expected = '0.0.0.0'
- result = self.s._get_ip_flow_range({"tg__2": 'xe0'})
- self.assertEqual(result, expected)
+ def test__get_ip_flow_range_ipv4_mask_30(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '192.168.1.15', 'netmask': 30}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('192.168.1.15', result)
- def test__get_ip_flow_range_no_nodes(self):
- expected = '0.0.0.0'
- result = self.s._get_ip_flow_range({})
- self.assertEqual(result, expected)
+ def test__get_ip_flow_range_ipv6(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '2001::11', 'netmask': 64}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('2001::2-2001::ffff:ffff:ffff:fffe', result)
def test___get_traffic_flow(self):
self.scenario_cfg["traffic_options"]["flow"] = \
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index a606543e5..a1c27f5fb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -54,7 +54,8 @@ class VsperfTestCase(unittest.TestCase):
self._mock_SSH = mock.patch.object(ssh, 'SSH')
self.mock_SSH = self._mock_SSH.start()
- self.mock_SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
self.mock_subprocess_call = self._mock_subprocess_call.start()
@@ -104,40 +105,23 @@ class VsperfTestCase(unittest.TestCase):
def test_run_ok(self):
self.scenario.setup()
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
result = {}
self.scenario.run(result)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
def test_run_ok_setup_not_done(self):
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
result = {}
self.scenario.run(result)
self.assertTrue(self.scenario.setup_done)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_run_failed_vsperf_execution(self):
- self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
- (1, '', ''))
+ def test_run_ssh_command_call_counts(self):
+ self.scenario.run({})
- with self.assertRaises(RuntimeError):
- self.scenario.run({})
self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
-
- def test_run_failed_csv_report(self):
- self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
- (0, '', ''),
- (1, '', ''))
-
- with self.assertRaises(RuntimeError):
- self.scenario.run({})
- self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
+ self.mock_SSH.from_node().run.assert_called_once()
def test_run_sla_fail(self):
self.mock_SSH.from_node().execute.return_value = (
@@ -160,14 +144,21 @@ class VsperfTestCase(unittest.TestCase):
self.assertTrue('throughput_rx_fps was not collected by VSPERF'
in str(raised.exception))
+ def test_run_faulty_result_csv(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'faulty output not csv', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+ in str(raised.exception))
+
def test_run_sla_fail_metric_not_defined_in_sla(self):
del self.scenario_cfg['sla']['throughput_rx_fps']
scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
scenario.setup()
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
with self.assertRaises(y_exc.SLAValidationError) as raised:
scenario.run({})
self.assertTrue('throughput_rx_fps is not defined in SLA'
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index b305fc93b..8bbe6911e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -116,15 +116,6 @@ class VsperfDPDKTestCase(unittest.TestCase):
self.assertTrue(self.scenario._is_dpdk_setup())
self.assertTrue(self.scenario.dpdk_setup_done)
- @mock.patch.object(time, 'sleep')
- def test_dpdk_setup_runtime_error(self, *args):
- self.assertIsNotNone(self.scenario.client)
- self.mock_ssh.from_node().execute.return_value = (1, '', '')
- self.assertTrue(self.scenario.setup_done)
-
- self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
-
- @mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
def test_run_ok(self, *args):
# run() specific mocks
@@ -135,17 +126,6 @@ class VsperfDPDKTestCase(unittest.TestCase):
self.scenario.run(result)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_run_failed_vsperf_execution(self):
- self.mock_ssh.from_node().execute.return_value = (1, '', '')
-
- self.assertRaises(RuntimeError, self.scenario.run, {})
-
- def test_run_falied_csv_report(self):
- # run() specific mocks
- self.mock_ssh.from_node().execute.return_value = (1, '', '')
-
- self.assertRaises(RuntimeError, self.scenario.run, {})
-
@mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
def test_vsperf_run_sla_fail(self, *args):
@@ -173,6 +153,20 @@ class VsperfDPDKTestCase(unittest.TestCase):
@mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_metric_not_collected_faulty_csv(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.from_node().execute.return_value = (
+ 0, 'faulty output not csv', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps was not collected by VSPERF',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
self.scenario.setup()
diff --git a/yardstick/tests/unit/common/test_ansible_common.py b/yardstick/tests/unit/common/test_ansible_common.py
index 48d8a60c8..bf82f6288 100644
--- a/yardstick/tests/unit/common/test_ansible_common.py
+++ b/yardstick/tests/unit/common/test_ansible_common.py
@@ -12,28 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from __future__ import absolute_import
-
-import os
-import tempfile
+import collections
import shutil
-from collections import defaultdict
+import subprocess
+import tempfile
import mock
-import unittest
-
-from six.moves.configparser import ConfigParser
-from six.moves import StringIO
+from six import moves
+from six.moves import configparser
from yardstick.common import ansible_common
+from yardstick.tests.unit import base as ut_base
-PREFIX = 'yardstick.common.ansible_common'
+class OverwriteDictTestCase(ut_base.BaseUnitTestCase):
-class OverwriteDictTestCase(unittest.TestCase):
def test_overwrite_dict_cfg(self):
- c = ConfigParser(allow_no_value=True)
+ c = configparser.ConfigParser(allow_no_value=True)
d = {
"section_a": "empty_value",
"section_b": {"key_c": "Val_d", "key_d": "VAL_D"},
@@ -43,86 +38,78 @@ class OverwriteDictTestCase(unittest.TestCase):
# Python3 and Python2 convert empty values into None or ''
# we don't really care but we need to compare correctly for unittest
self.assertTrue(c.has_option("section_a", "empty_value"))
- self.assertEqual(sorted(c.items("section_b")), [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
+ self.assertEqual(sorted(c.items("section_b")),
+ [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
self.assertTrue(c.has_option("section_c", "key_c"))
self.assertTrue(c.has_option("section_c", "key_d"))
-class FilenameGeneratorTestCase(unittest.TestCase):
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
+class FilenameGeneratorTestCase(ut_base.BaseUnitTestCase):
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
def test__handle_existing_file(self, _):
- ansible_common.FileNameGenerator._handle_existing_file("/dev/null")
+ ansible_common.FileNameGenerator._handle_existing_file('/dev/null')
def test_get_generator_from_file(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "", "")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', '', '')
def test_get_generator_from_file_middle(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "",
- "null")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', '', 'null')
def test_get_generator_from_file_prefix(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "null",
- "middle")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', 'null', 'middle')
-class AnsibleNodeTestCase(unittest.TestCase):
- def test_ansible_node(self):
- ansible_common.AnsibleNode()
+class AnsibleNodeTestCase(ut_base.BaseUnitTestCase):
def test_ansible_node_len(self):
- a = ansible_common.AnsibleNode()
- len(a)
+ self.assertEqual(0, len(ansible_common.AnsibleNode()))
def test_ansible_node_repr(self):
- a = ansible_common.AnsibleNode()
- repr(a)
+ self.assertEqual('AnsibleNode<{}>', repr(ansible_common.AnsibleNode()))
def test_ansible_node_iter(self):
- a = ansible_common.AnsibleNode()
- for _ in a:
- pass
+ node = ansible_common.AnsibleNode(data={'a': 1, 'b': 2, 'c': 3})
+ for key in node:
+ self.assertIn(key, ('a', 'b', 'c'))
def test_is_role(self):
- a = ansible_common.AnsibleNode()
- self.assertFalse(a.is_role("", default="foo"))
+ node = ansible_common.AnsibleNode()
+ self.assertFalse(node.is_role('', default='foo'))
def test_ansible_node_get_tuple(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- self.assertEqual(a.get_tuple(), ('name', a))
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertEqual(node.get_tuple(), ('name', node))
def test_gen_inventory_line(self):
- a = ansible_common.AnsibleNode(defaultdict(str))
+ a = ansible_common.AnsibleNode(collections.defaultdict(str))
self.assertEqual(a.gen_inventory_line(), "")
def test_ansible_node_delitem(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- del a['name']
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertEqual(1, len(node))
+ del node['name']
+ self.assertEqual(0, len(node))
def test_ansible_node_getattr(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- self.assertIsNone(getattr(a, "nosuch", None))
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertIsNone(getattr(node, 'nosuch', None))
-class AnsibleNodeDictTestCase(unittest.TestCase):
- def test_ansible_node_dict(self):
- n = ansible_common.AnsibleNode
- ansible_common.AnsibleNodeDict(n, {})
+class AnsibleNodeDictTestCase(ut_base.BaseUnitTestCase):
def test_ansible_node_dict_len(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
- len(a)
+ self.assertEqual(0, len(a))
def test_ansible_node_dict_repr(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
- repr(a)
-
- def test_ansible_node_dict_iter(self):
- n = ansible_common.AnsibleNode
- a = ansible_common.AnsibleNodeDict(n, {})
- for _ in a:
- pass
+ self.assertEqual('{}', repr(a))
def test_ansible_node_dict_get(self):
n = ansible_common.AnsibleNode
@@ -144,12 +131,15 @@ class AnsibleNodeDictTestCase(unittest.TestCase):
["name ansible_ssh_pass=PASS ansible_user=user"])
-class AnsibleCommonTestCase(unittest.TestCase):
- def test_get_timeouts(self):
- self.assertAlmostEqual(ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
+class AnsibleCommonTestCase(ut_base.BaseUnitTestCase):
- def test__init__(self):
- ansible_common.AnsibleCommon({})
+ @staticmethod
+ def _delete_tmpdir(dir):
+ shutil.rmtree(dir)
+
+ def test_get_timeouts(self):
+ self.assertAlmostEqual(
+ ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
def test_reset(self):
a = ansible_common.AnsibleCommon({})
@@ -184,81 +174,68 @@ class AnsibleCommonTestCase(unittest.TestCase):
a.deploy_dir = "d"
self.assertEqual(a.deploy_dir, "d")
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_playbook_file_list(self, _):
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_playbook_file_list(self, *args):
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a._gen_ansible_playbook_file(["a"], d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_inventory_file(self, _, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a._gen_ansible_playbook_file(["a"], d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_inventory_file(self, *args):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon(nodes)
- a.gen_inventory_ini_dict()
- inv_context = a._gen_ansible_inventory_file(d)
- with inv_context:
- c = StringIO()
- inv_context.write_func(c)
- self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_playbook_file_list_multiple(self, _, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon(nodes)
+ a.gen_inventory_ini_dict()
+ inv_context = a._gen_ansible_inventory_file(d)
+ with inv_context:
+ c = moves.StringIO()
+ inv_context.write_func(c)
+ self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_playbook_file_list_multiple(self, *args):
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a._gen_ansible_playbook_file(["a", "b"], d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.Popen'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test_do_install_tmp_dir(self, _, mock_popen, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a._gen_ansible_playbook_file(["a", "b"], d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(subprocess, 'Popen')
+ @mock.patch.object(moves.builtins, 'open')
+ def test_do_install_tmp_dir(self, _, mock_popen, *args):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a.do_install('', d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.Popen'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test_execute_ansible_check(self, _, mock_popen, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a.do_install('', d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ @mock.patch.object(subprocess, 'Popen')
+ def test_execute_ansible_check(self, mock_popen, *args):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a.execute_ansible('', d, ansible_check=True, verbose=True)
- finally:
- os.rmdir(d)
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a.execute_ansible('', d, ansible_check=True, verbose=True)
def test_get_sut_info(self):
d = tempfile.mkdtemp()
a = ansible_common.AnsibleCommon({})
- try:
+ self.addCleanup(self._delete_tmpdir, d)
+ with mock.patch.object(a, '_exec_get_sut_info_cmd'):
a.get_sut_info(d)
- finally:
- shutil.rmtree(d)
def test_get_sut_info_not_exist(self):
a = ansible_common.AnsibleCommon({})
- try:
+ with self.assertRaises(OSError):
a.get_sut_info('/hello/world')
- except OSError:
- pass
diff --git a/yardstick/tests/unit/common/test_kubernetes_utils.py b/yardstick/tests/unit/common/test_kubernetes_utils.py
index 42aa9f7e0..ba6b5f388 100644
--- a/yardstick/tests/unit/common/test_kubernetes_utils.py
+++ b/yardstick/tests/unit/common/test_kubernetes_utils.py
@@ -176,7 +176,7 @@ class GetCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
kubernetes_utils.get_custom_resource_definition('kind')
-class CreateNetworkTestCase(base.BaseUnitTestCase):
+class GetNetworkTestCase(base.BaseUnitTestCase):
@mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
def test_execute_correct(self, mock_get_api):
mock_api = mock.Mock()
@@ -184,29 +184,97 @@ class CreateNetworkTestCase(base.BaseUnitTestCase):
group = 'group.com'
version = mock.Mock()
plural = 'networks'
+ name = 'net_one'
+
+ kubernetes_utils.get_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ mock_api.get_cluster_custom_object.assert_called_once_with(
+ group, version, plural, name)
+
+ mock_api.reset_mock()
+ kubernetes_utils.get_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ mock_api.get_namespaced_custom_object.assert_called_once_with(
+ group, version, 'default', plural, name)
+
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.get_cluster_custom_object.side_effect = rest.ApiException(404)
+ mock_api.get_namespaced_custom_object.side_effect = rest.ApiException(404)
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ name = 'net_one'
+
+ network_obj = kubernetes_utils.get_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ self.assertIsNone(network_obj)
+
+ mock_api.reset_mock()
+ network_obj = kubernetes_utils.get_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ self.assertIsNone(network_obj)
+
+
+class CreateNetworkTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_execute_correct(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = None
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
body = mock.Mock()
+ name = 'net_one'
kubernetes_utils.create_network(
- constants.SCOPE_CLUSTER, group, version, plural, body)
+ constants.SCOPE_CLUSTER, group, version, plural, body, name)
mock_api.create_cluster_custom_object.assert_called_once_with(
group, version, plural, body)
mock_api.reset_mock()
kubernetes_utils.create_network(
- constants.SCOPE_NAMESPACED, group, version, plural, body)
+ constants.SCOPE_NAMESPACED, group, version, plural, body, name)
mock_api.create_namespaced_custom_object.assert_called_once_with(
group, version, 'default', plural, body)
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_network_already_created(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = mock.Mock()
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ body = mock.Mock()
+ name = 'net_one'
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_CLUSTER, group, version, plural, body, name)
+ mock_api.create_cluster_custom_object.assert_not_called()
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, body, name)
+ mock_api.create_namespaced_custom_object.assert_not_called()
@mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
- def test_execute_exception(self, mock_get_api):
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_execute_exception(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = None
mock_api = mock.Mock()
mock_api.create_cluster_custom_object.side_effect = rest.ApiException
mock_get_api.return_value = mock_api
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.create_network(
constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
- mock.ANY)
+ mock.ANY, mock.ANY)
class DeleteNetworkTestCase(base.BaseUnitTestCase):
@@ -240,6 +308,19 @@ class DeleteNetworkTestCase(base.BaseUnitTestCase):
constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
mock.ANY)
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_cluster_custom_object.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_network(
+ constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
+ mock.ANY, skip_codes=[404])
+
+ mock_log.info.assert_called_once()
+
class DeletePodTestCase(base.BaseUnitTestCase):
@mock.patch.object(kubernetes_utils, 'get_core_api')
@@ -260,8 +341,9 @@ class DeletePodTestCase(base.BaseUnitTestCase):
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
+ @mock.patch.object(kubernetes_utils, 'LOG')
@mock.patch.object(kubernetes_utils, 'get_core_api')
- def test_execute_skip_exception(self, mock_get_api):
+ def test_execute_skip_exception(self, mock_get_api, *args):
mock_api = mock.Mock()
mock_api.delete_namespaced_pod.side_effect = rest.ApiException(status=404)
@@ -289,12 +371,77 @@ class DeleteServiceTestCase(base.BaseUnitTestCase):
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.delete_service(mock.ANY, skip_codes=[404])
- @mock.patch.object(kubernetes_utils, 'get_core_api')
@mock.patch.object(kubernetes_utils, 'LOG')
- def test_execute_skip_exception(self, mock_log, mock_get_api):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_skip_exception(self, mock_get_api, *args):
mock_api = mock.Mock()
mock_api.delete_namespaced_service.side_effect = rest.ApiException(status=404)
mock_get_api.return_value = mock_api
kubernetes_utils.delete_service(mock.ANY, skip_codes=[404])
+
+
+class DeleteReplicationControllerTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_replication_controller(
+ "name", "default", body=None)
+
+ mock_api.delete_namespaced_replication_controller.assert_called_once_with(
+ "name", "default", None)
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_replication_controller.side_effect = (
+ rest.ApiException(status=200)
+ )
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_replication_controller(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_replication_controller.side_effect = (
+ rest.ApiException(status=404)
+ )
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_replication_controller(mock.ANY, skip_codes=[404])
+
+ mock_log.info.assert_called_once()
+
+
+class DeleteConfigMapTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_config_map("name", body=None)
+ mock_api.delete_namespaced_config_map.assert_called_once_with(
+ "name", "default", None
+ )
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_config_map.side_effect = rest.ApiException(status=200)
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_config_map(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_config_map.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_config_map(mock.ANY, skip_codes=[404])
mock_log.info.assert_called_once()
diff --git a/yardstick/tests/unit/common/test_template_format.py b/yardstick/tests/unit/common/test_template_format.py
index 56253efbc..6e4827e16 100644
--- a/yardstick/tests/unit/common/test_template_format.py
+++ b/yardstick/tests/unit/common/test_template_format.py
@@ -22,16 +22,21 @@ from yardstick.common import template_format
class TemplateFormatTestCase(unittest.TestCase):
- def test_parse_to_value_exception(self):
+ def test_parse_scanner(self):
- # TODO(elfoley): Don't hide the error that occurs in
- # template_format.parse
- # TODO(elfoley): Separate these tests; one per error type
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = yaml.scanner.ScannerError()
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_parser(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = yaml.parser.ParserError()
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
+
+ def test_parse_reader(self):
+
+ with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = \
yaml.reader.ReaderError('', '', '', '', '')
self.assertRaises(ValueError, template_format.parse, 'FOOBAR')
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 446afdd38..ef4142148 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -12,12 +12,14 @@ import errno
import importlib
import ipaddress
from itertools import product, chain
-import mock
import os
-import six
-from six.moves import configparser
import socket
import time
+import threading
+
+import mock
+import six
+from six.moves import configparser
import unittest
import yardstick
@@ -194,6 +196,14 @@ class TestMacAddressToHex(ut_base.BaseUnitTestCase):
self.assertEqual(utils.mac_address_to_hex_list("ea:3e:e1:9a:99:e8"),
['0xea', '0x3e', '0xe1', '0x9a', '0x99', '0xe8'])
+ def test_mac_address_to_hex_list_too_short_mac(self):
+ with self.assertRaises(exceptions.InvalidMacAddress):
+ utils.mac_address_to_hex_list("ea:3e:e1:9a")
+
+ def test_mac_address_to_hex_list_no_int_mac(self):
+ with self.assertRaises(exceptions.InvalidMacAddress):
+ utils.mac_address_to_hex_list("invalid_mac")
+
class TranslateToStrTestCase(ut_base.BaseUnitTestCase):
@@ -1113,6 +1123,19 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
u'123:4567:89ab:cdef:123:4567:89ab:cdef/129',
]
+ def test_make_ipv4_address(self):
+ for addr in self.GOOD_IP_V4_ADDRESS_STR_LIST:
+ # test with no mask
+ expected = ipaddress.IPv4Address(addr)
+ self.assertEqual(utils.make_ipv4_address(addr), expected, addr)
+
+ def test_make_ipv4_address_error(self):
+ addr_list = self.INVALID_IP_ADDRESS_STR_LIST +\
+ self.GOOD_IP_V6_ADDRESS_STR_LIST
+ for addr in addr_list:
+ self.assertRaises(Exception, utils.make_ipv4_address, addr)
+
+
def test_safe_ip_address(self):
addr_list = self.GOOD_IP_V4_ADDRESS_STR_LIST
for addr in addr_list:
@@ -1196,6 +1219,20 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
for value in chain(value_iter, self.INVALID_IP_ADDRESS_STR_LIST):
self.assertEqual(utils.ip_to_hex(value), value)
+ def test_get_mask_from_ip_range_ipv4(self):
+ ip_str = '1.1.1.1'
+ for mask in range(8, 30):
+ ip = ipaddress.ip_network(ip_str + '/' + str(mask), strict=False)
+ result = utils.get_mask_from_ip_range(ip[2], ip[-2])
+ self.assertEqual(mask, result)
+
+ def test_get_mask_from_ip_range_ipv6(self):
+ ip_str = '2001::1'
+ for mask in range(8, 120):
+ ip = ipaddress.ip_network(ip_str + '/' + str(mask), strict=False)
+ result = utils.get_mask_from_ip_range(ip[2], ip[-2])
+ self.assertEqual(mask, result)
+
class SafeDecodeUtf8TestCase(ut_base.BaseUnitTestCase):
@@ -1263,6 +1300,10 @@ class TimerTestCase(ut_base.BaseUnitTestCase):
time.sleep(1.1)
self.assertEqual(2, len(iterations))
+ def test_delta_time_sec(self):
+ with utils.Timer() as timer:
+ self.assertIsInstance(timer.delta_time_sec(), float)
+
class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
@@ -1284,6 +1325,15 @@ class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
utils.wait_until_true(lambda: False, timeout=1, sleep=1,
exception=MyTimeoutException))
+ def _run_thread(self):
+ with self.assertRaises(exceptions.WaitTimeout):
+ utils.wait_until_true(lambda: False, timeout=1, sleep=1)
+
+ def test_timeout_no_main_thread(self):
+ new_thread = threading.Thread(target=self._run_thread)
+ new_thread.start()
+ new_thread.join(timeout=3)
+
class SendSocketCommandTestCase(unittest.TestCase):
@@ -1309,3 +1359,35 @@ class SendSocketCommandTestCase(unittest.TestCase):
mock_socket_obj.connect_ex.assert_called_once_with(('host', 22))
mock_socket_obj.sendall.assert_called_once_with(six.b('command'))
mock_socket_obj.close.assert_called_once()
+
+
+class GetPortMacTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ssh_client = mock.Mock()
+ self.ssh_client.execute.return_value = (0, 'foo ', '')
+
+ def test_ssh_client_execute_called(self):
+ utils.get_port_mac(self.ssh_client, 99)
+ self.ssh_client.execute.assert_called_once_with(
+ "ifconfig |grep HWaddr |grep 99 |awk '{print $5}' ",
+ raise_on_error=True)
+
+ def test_return_value(self):
+ self.assertEqual('foo', utils.get_port_mac(self.ssh_client, 99))
+
+
+class GetPortIPTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ssh_client = mock.Mock()
+ self.ssh_client.execute.return_value = (0, 'foo ', '')
+
+ def test_ssh_client_execute_called(self):
+ utils.get_port_ip(self.ssh_client, 99)
+ self.ssh_client.execute.assert_called_once_with(
+ "ifconfig 99 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ",
+ raise_on_error=True)
+
+ def test_return_value(self):
+ self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
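
Among the utilities covered above, get_mask_from_ip_range is the least obvious; the sketch below shows the prefix-length arithmetic the two loop tests imply (the formula is inferred from the expected values, not copied from utils.py):

import ipaddress


def get_mask_from_ip_range(ip_low, ip_high):
    """Smallest prefix length whose host bits cover both ends of the range."""
    low = ipaddress.ip_address(ip_low)
    high = ipaddress.ip_address(ip_high)
    # bits that differ between the two ends must all be host bits
    return low.max_prefixlen - (int(high) ^ int(low)).bit_length()


# e.g. get_mask_from_ip_range('1.1.1.2', '1.1.1.254') == 24, matching
# test_get_mask_from_ip_range_ipv4 for a /24 network.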
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index afa4cc3dc..e078d70ad 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -16,6 +16,8 @@ import mock
import IxNetwork
import unittest
+from copy import deepcopy
+
from yardstick.common import exceptions
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
@@ -31,34 +33,27 @@ TRAFFIC_PARAMETERS = {
'rate': 10000.5,
'rate_unit': 'fps',
'outer_l2': {
- 'framesize': {'64B': '25', '256B': '75'}
+ 'framesize': {'64B': '25', '256B': '75'},
+ 'QinQ': None
},
'outer_l3': {
'count': 512,
+ 'seed': 1,
'dscp': 0,
- 'dstip4': '152.16.40.20',
'proto': 'udp',
- 'srcip4': '152.16.100.20',
- 'ttl': 32
- },
- 'outer_l3v4': {
- 'dscp': 0,
- 'dstip4': '152.16.40.20',
- 'proto': 'udp',
- 'srcip4': '152.16.100.20',
- 'ttl': 32
- },
- 'outer_l3v6': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
+ 'ttl': 32,
+ 'dstip': '152.16.40.20',
+ 'srcip': '152.16.100.20',
+ 'dstmask': 24,
+ 'srcmask': 24
},
'outer_l4': {
- 'dstport': '2001',
- 'srcport': '1234'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 2001,
+ 'srcport': 1234,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
},
@@ -69,35 +64,27 @@ TRAFFIC_PARAMETERS = {
'rate': 75.2,
'rate_unit': '%',
'outer_l2': {
- 'framesize': {'128B': '35', '1024B': '65'}
+ 'framesize': {'128B': '35', '1024B': '65'},
+ 'QinQ': None
},
'outer_l3': {
'count': 1024,
+ 'seed': 1,
'dscp': 0,
- 'dstip4': '152.16.100.20',
'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
- },
- 'outer_l3v4': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
- },
- 'outer_l3v6': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
+ 'ttl': 32,
+ 'dstip': '2001::10',
+ 'srcip': '2021::10',
+ 'dstmask': 64,
+ 'srcmask': 64
},
'outer_l4': {
- 'dstport': '1234',
- 'srcport': '2001'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 1234,
+ 'srcport': 2001,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
}
@@ -110,6 +97,8 @@ class TestIxNextgen(unittest.TestCase):
self.ixnet = mock.Mock()
self.ixnet.execute = mock.Mock()
self.ixnet.getRoot.return_value = 'my_root'
+ self.ixnet_gen = ixnet_api.IxNextgen()
+ self.ixnet_gen._ixnet = self.ixnet
def test_get_config(self):
tg_cfg = {
@@ -147,64 +136,50 @@ class TestIxNextgen(unittest.TestCase):
self.assertEqual(result, expected)
def test__get_config_element_by_flow_group_name(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
- ['fg_01']]
- ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
- output = ixnet_gen._get_config_element_by_flow_group_name(
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ self.ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
+ output = self.ixnet_gen._get_config_element_by_flow_group_name(
'flow_group_01')
self.assertEqual('traffic_item/configElement:flow_group_01', output)
def test__get_config_element_by_flow_group_name_no_match(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
- ['fg_01']]
- ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
- output = ixnet_gen._get_config_element_by_flow_group_name(
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ self.ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
+ output = self.ixnet_gen._get_config_element_by_flow_group_name(
'flow_group_01')
self.assertIsNone(output)
def test__get_stack_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
+ self.ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name') as \
+ self.ixnet_gen, '_get_config_element_by_flow_group_name') as \
mock_get_cfg_element:
mock_get_cfg_element.return_value = 'cfg_element'
- output = ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
+ output = self.ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
self.assertEqual(['tcp1', 'tcp2'], output)
def test__get_stack_item_no_config_element(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
+ self.ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
def test__get_field_in_stack_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
- output = ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
+ self.ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ output = self.ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
self.assertEqual('field2', output)
def test__get_field_in_stack_item_no_field_present(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ self.ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
with self.assertRaises(exceptions.IxNetworkFieldNotPresentInStackItem):
- ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
+ self.ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
def test__parse_framesize(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
framesize = {'64B': '75', '512b': '25'}
- output = ixnet_gen._parse_framesize(framesize)
+ output = self.ixnet_gen._parse_framesize(framesize)
self.assertEqual(2, len(output))
self.assertIn([64, 64, 75], output)
self.assertIn([512, 512, 25], output)
@@ -212,55 +187,44 @@ class TestIxNextgen(unittest.TestCase):
@mock.patch.object(IxNetwork, 'IxNet')
def test_connect(self, mock_ixnet):
mock_ixnet.return_value = self.ixnet
- ixnet_gen = ixnet_api.IxNextgen()
- with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+ with mock.patch.object(self.ixnet_gen, 'get_config') as mock_config:
mock_config.return_value = {'machine': 'machine_fake',
'port': 'port_fake',
'version': 12345}
- ixnet_gen.connect(mock.ANY)
+ self.ixnet_gen.connect(mock.ANY)
self.ixnet.connect.assert_called_once_with(
'machine_fake', '-port', 'port_fake', '-version', '12345')
mock_config.assert_called_once()
def test_connect_invalid_config_no_machine(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'port': 'port_fake',
'version': '12345'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_invalid_config_no_port(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'machine': 'machine_fake',
'version': '12345'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_invalid_config_no_version(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'machine': 'machine_fake',
'port': 'port_fake'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_no_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet_gen.get_config = mock.Mock(return_value={})
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_clear_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.clear_config()
+ self.ixnet_gen.clear_config()
self.ixnet.execute.assert_called_once_with('newConfig')
@mock.patch.object(ixnet_api, 'log')
@@ -270,11 +234,9 @@ class TestIxNextgen(unittest.TestCase):
'chassis': '1.1.1.1',
'cards': ['1', '2'],
'ports': ['2', '2']}
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = config
+ self.ixnet_gen._cfg = config
- self.assertIsNone(ixnet_gen.assign_ports())
+ self.assertIsNone(self.ixnet_gen.assign_ports())
self.assertEqual(self.ixnet.execute.call_count, 2)
self.assertEqual(self.ixnet.commit.call_count, 4)
self.assertEqual(self.ixnet.getAttribute.call_count, 2)
@@ -286,25 +248,19 @@ class TestIxNextgen(unittest.TestCase):
'chassis': '1.1.1.1',
'cards': ['1', '2'],
'ports': ['3', '4']}
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = config
- ixnet_gen.assign_ports()
+ self.ixnet_gen._cfg = config
+ self.ixnet_gen.assign_ports()
mock_log.warning.assert_called()
def test_assign_ports_no_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = {}
- self.assertRaises(KeyError, ixnet_gen.assign_ports)
+ self.ixnet_gen._cfg = {}
+ self.assertRaises(KeyError, self.ixnet_gen.assign_ports)
def test__create_traffic_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
self.ixnet.add.return_value = 'my_new_traffic_item'
self.ixnet.remapIds.return_value = ['my_traffic_item_id']
- ixnet_gen._create_traffic_item()
+ self.ixnet_gen._create_traffic_item()
self.ixnet.add.assert_called_once_with(
'my_root/traffic', 'trafficItem')
self.ixnet.setMultiAttribute.assert_called_once_with(
@@ -315,41 +271,35 @@ class TestIxNextgen(unittest.TestCase):
'-trackBy', 'trafficGroupId0')
def test__create_flow_groups(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
- ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
- ixnet_gen._create_flow_groups()
- ixnet_gen.ixnet.add.assert_has_calls([
+ self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
+ self.ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
+ self.ixnet_gen._create_flow_groups()
+ self.ixnet_gen.ixnet.add.assert_has_calls([
mock.call('traffic_item', 'endpointSet'),
mock.call('traffic_item', 'endpointSet')])
- ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
mock.call('endp1', '-name', '1', '-sources', ['1/protocols'],
'-destinations', ['2/protocols']),
mock.call('endp2', '-name', '2', '-sources', ['2/protocols'],
'-destinations', ['1/protocols'])])
def test__append_protocol_to_stack(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
+ self.ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
self.ixnet.execute.assert_called_with(
'append', 'prev_element',
'my_root/traffic/protocolTemplate:"my_protocol"')
def test__setup_config_elements(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
+ self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
['cfg_element']]
- with mock.patch.object(ixnet_gen, '_append_procotol_to_stack') as \
+ with mock.patch.object(self.ixnet_gen, '_append_procotol_to_stack') as \
mock_append_proto:
- ixnet_gen._setup_config_elements()
+ self.ixnet_gen._setup_config_elements()
mock_append_proto.assert_has_calls([
mock.call(ixnet_api.PROTO_UDP, 'cfg_element/stack:"ethernet-1"'),
mock.call(ixnet_api.PROTO_IPV4, 'cfg_element/stack:"ethernet-1"')])
- ixnet_gen.ixnet.setAttribute.assert_has_calls([
+ self.ixnet_gen.ixnet.setAttribute.assert_has_calls([
mock.call('cfg_element/frameRateDistribution', '-portDistribution',
'splitRateEvenly'),
mock.call('cfg_element/frameRateDistribution',
@@ -361,156 +311,214 @@ class TestIxNextgen(unittest.TestCase):
def test_create_traffic_model(self, mock__setup_config_elements,
mock__create_flow_groups,
mock__create_traffic_item):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.create_traffic_model()
+ self.ixnet_gen.create_traffic_model()
mock__create_traffic_item.assert_called_once()
mock__create_flow_groups.assert_called_once()
mock__setup_config_elements.assert_called_once()
def test__update_frame_mac(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_get_field_in_stack_item') as \
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item') as \
mock_get_field:
mock_get_field.return_value = 'field_descriptor'
- ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
+ self.ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
mock_get_field.assert_called_once_with('ethernet_descriptor', 'field')
- ixnet_gen.ixnet.setMultiAttribute(
+ self.ixnet_gen.ixnet.setMultiAttribute(
'field_descriptor', '-singleValue', 'mac', '-fieldValue', 'mac',
'-valueType', 'singleValue')
- ixnet_gen.ixnet.commit.assert_called_once()
+ self.ixnet_gen.ixnet.commit.assert_called_once()
def test_update_frame(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value='cfg_element'), \
- mock.patch.object(ixnet_gen, '_update_frame_mac') as \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac') as \
mock_update_frame, \
- mock.patch.object(ixnet_gen, '_get_stack_item') as \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item') as \
mock_get_stack_item:
mock_get_stack_item.side_effect = [['item1'], ['item2'],
['item3'], ['item4']]
- ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 50)
- self.assertEqual(6, len(ixnet_gen.ixnet.setMultiAttribute.mock_calls))
+ self.assertEqual(6, len(self.ixnet_gen.ixnet.setMultiAttribute.mock_calls))
self.assertEqual(4, len(mock_update_frame.mock_calls))
- ixnet_gen.ixnet.setMultiAttribute.assert_has_calls(
- [mock.call('cfg_element/frameRate', '-rate', 10000.5,
- '-type', 'framesPerSecond'),
- mock.call('cfg_element/frameRate', '-rate', 75.2, '-type',
- 'percentLineRate')],
- any_order=True)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('cfg_element/transmissionControl',
+ '-type', 'continuous', '-duration', 50)
+ ])
+
+ def test_update_frame_qinq(self):
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='cfg_element'), \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac'),\
+ mock.patch.object(self.ixnet_gen, '_get_stack_item',
+ return_value='item'), \
+ mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field'):
+
+ traffic_parameters = deepcopy(TRAFFIC_PARAMETERS)
+ traffic_parameters[UPLINK]['outer_l2']['QinQ'] = {
+ 'S-VLAN': {'id': 128,
+ 'priority': 1,
+ 'cfi': 0},
+ 'C-VLAN': {'id': 512,
+ 'priority': 0,
+ 'cfi': 2}
+ }
+
+ self.ixnet_gen.update_frame(traffic_parameters, 50)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('field', '-auto', 'false', '-singleValue', '0x88a8',
+ '-fieldValue', '0x88a8', '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 1,
+ '-fieldValue', 1, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 128,
+ '-fieldValue', 128, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 512,
+ '-fieldValue', 512, '-valueType', 'singleValue'),
+ mock.call('field', '-auto', 'false', '-singleValue', 2,
+ '-fieldValue', 2, '-valueType', 'singleValue')
+ ], any_order=True)
def test_update_frame_flow_not_present(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 40)
def test_get_statistics(self):
- ixnet_gen = ixnet_api.IxNextgen()
port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
- with mock.patch.object(ixnet_gen, '_build_stats_map') as \
+ with mock.patch.object(self.ixnet_gen, '_build_stats_map') as \
mock_build_stats:
- ixnet_gen.get_statistics()
+ self.ixnet_gen.get_statistics()
mock_build_stats.assert_has_calls([
- mock.call(port_statistics, ixnet_gen.PORT_STATS_NAME_MAP),
- mock.call(flow_statistics, ixnet_gen.LATENCY_NAME_MAP)])
+ mock.call(port_statistics, self.ixnet_gen.PORT_STATS_NAME_MAP),
+ mock.call(flow_statistics, self.ixnet_gen.LATENCY_NAME_MAP)])
def test__update_ipv4_address(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_get_field_in_stack_item',
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
return_value='field_desc'):
- ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
- 100, '255.255.255.0', 25)
- ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ self.ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
+ 100, 26, 25)
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
'field_desc', '-seed', 100, '-fixedBits', '192.168.1.1',
- '-randomMask', '255.255.255.0', '-valueType', 'random',
+ '-randomMask', '0.0.0.63', '-valueType', 'random',
'-countValue', 25)
+ def test__update_udp_port(self):
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_udp_port(mock.ANY, mock.ANY, 1234,
+ 2, 0, 2)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc',
+ '-auto', 'false',
+ '-seed', 1,
+ '-fixedBits', 1234,
+ '-randomMask', 0,
+ '-valueType', 'random',
+ '-countValue', 1)
+
def test_update_ip_packet(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_update_ipv4_address') as \
+ with mock.patch.object(self.ixnet_gen, '_update_ipv4_address') as \
mock_update_add, \
- mock.patch.object(ixnet_gen, '_get_stack_item'), \
- mock.patch.object(ixnet_gen,
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
'_get_config_element_by_flow_group_name', return_value='celm'):
- ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
self.assertEqual(4, len(mock_update_add.mock_calls))
def test_update_ip_packet_exception_no_config_element(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen,
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+
+ def test_update_l4(self):
+ with mock.patch.object(self.ixnet_gen, '_update_udp_port') as \
+ mock_update_udp, \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name', return_value='celm'):
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ self.assertEqual(4, len(mock_update_udp.mock_calls))
+
+ def test_update_l4_exception_no_config_element(self):
+ with mock.patch.object(self.ixnet_gen,
'_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ def test_update_l4_exception_no_supported_proto(self):
+ traffic_parameters = {
+ UPLINK: {
+ 'id': 1,
+ 'outer_l3': {
+ 'proto': 'unsupported',
+ },
+ },
+ }
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='celm'):
+ with self.assertRaises(exceptions.IXIAUnsupportedProtocol):
+ self.ixnet_gen.update_l4(traffic_parameters)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic(self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'stopped', 'started', 'started', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_traffic_running(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'started', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(4, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_wait_for_traffic_to_stop(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'started', 'started', 'started', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(4, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_wait_for_traffic_start(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'stopped', 'stopped', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
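
One detail worth calling out from test__update_ipv4_address: the prefix length passed in (26) is turned into the wildcard handed to IxNetwork as '-randomMask' ('0.0.0.63'). The arithmetic as a standalone sketch, with an illustrative function name:

import ipaddress


def prefix_to_random_mask(prefixlen, max_prefixlen=32):
    """Wildcard of the host bits left free by a prefix length."""
    host_bits = max_prefixlen - prefixlen
    return str(ipaddress.ip_address((1 << host_bits) - 1))


assert prefix_to_random_mask(26) == '0.0.0.63'  # value asserted in the test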
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
index 2a366fc94..0dc3e0579 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_base.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
@@ -80,12 +80,17 @@ class TrafficProfileConfigTestCase(unittest.TestCase):
tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
tp_config_obj = base.TrafficProfileConfig(tp_config)
self.assertEqual({'64B': 100}, tp_config_obj.packet_sizes)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
+ tp_config_obj.duration)
+
+ def test__init_set_duration(self):
+ tp_config = {'traffic_profile': {'duration': 15}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
self.assertEqual(base.TrafficProfileConfig.DEFAULT_SCHEMA,
tp_config_obj.schema)
self.assertEqual(float(base.TrafficProfileConfig.DEFAULT_FRAME_RATE),
tp_config_obj.frame_rate)
- self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
- tp_config_obj.duration)
+ self.assertEqual(15, tp_config_obj.duration)
def test__parse_rate(self):
tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 4ea19a121..6f76eb77c 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -446,6 +446,38 @@ class TestIXIARFC2544Profile(unittest.TestCase):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(t_profile_data)
self.assertEqual(12345678, r_f_c2544_profile.rate)
+ def test__get_ip_and_mask_range(self):
+ ip_range = '1.2.0.2-1.2.255.254'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ ip, mask = r_f_c2544_profile._get_ip_and_mask(ip_range)
+ self.assertEqual('1.2.0.2', ip)
+ self.assertEqual(16, mask)
+
+ def test__get_ip_and_mask_single(self):
+ ip_range = '192.168.1.10'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ ip, mask = r_f_c2544_profile._get_ip_and_mask(ip_range)
+ self.assertEqual('192.168.1.10', ip)
+ self.assertIsNone(mask)
+
+ def test__get_fixed_and_mask_range(self):
+ fixed_mask = '8-48'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(8, fixed)
+ self.assertEqual(48, mask)
+
+ def test__get_fixed_and_mask_single(self):
+ fixed_mask = 1234
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(1234, fixed)
+ self.assertEqual(0, mask)
+
def test__get_ixia_traffic_profile_default_args(self):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
self.TRAFFIC_PROFILE)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py b/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py
new file mode 100644
index 000000000..08542b4f1
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import pktgen
+from yardstick.tests.unit import base as ut_base
+
+
+class PktgenTrafficProfileTestCase(ut_base.BaseUnitTestCase):
+
+ def setUp(self):
+ self._tp_config = {'traffic_profile': {}}
+ self._host = 'localhost'
+ self._port = '12345'
+ self.tp = pktgen.PktgenTrafficProfile(self._tp_config)
+ self.tp.init(self._host, self._port)
+ self._mock_send_socket_command = mock.patch.object(
+ utils, 'send_socket_command', return_value=0)
+ self.mock_send_socket_command = self._mock_send_socket_command.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_send_socket_command.stop()
+
+ def test_start(self):
+ self.tp.start()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'pktgen.start("0")')
+
+ def test_stop(self):
+ self.tp.stop()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'pktgen.stop("0")')
+
+ def test_rate(self):
+ rate = 75
+ self.tp.rate(rate)
+ command = 'pktgen.set("0", "rate", 75)'
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, command)
+
+ def test_clear_all_stats(self):
+ self.tp.clear_all_stats()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'clr')
+
+ def test_help(self):
+ self.tp.help()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'help')
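
The commands asserted above are plain strings written to pktgen's Lua command socket. A compact sketch of the traffic-profile side, assuming utils.send_socket_command(host, port, command) is the only transport (class name illustrative):

from yardstick.common import utils


class PktgenLuaClient(object):
    """Illustrative wrapper around pktgen's Lua command socket."""

    def __init__(self, host, port):
        self._host = host
        self._port = port

    def _send(self, command):
        return utils.send_socket_command(self._host, self._port, command)

    def start(self):
        return self._send('pktgen.start("0")')

    def stop(self):
        return self._send('pktgen.stop("0")')

    def rate(self, percentage):
        return self._send('pktgen.set("0", "rate", %s)' % percentage)

    def clear_all_stats(self):
        return self._send('clr')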
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index 0cf93f9ae..2e0331e8e 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -238,15 +238,17 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
'in_packets': 4040,
'latency': 'Latency2'}}
]
- output = rfc2544_profile.get_drop_percentage(samples, 0, 0, False)
+ completed, output = rfc2544_profile.get_drop_percentage(
+ samples, 0, 0, False)
expected = {'DropPercentage': 0.3963,
'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
'RxThroughput': 312.5,
'TxThroughput': 304.5,
'CurrentDropPercentage': 0.3963,
- 'Rate': 100,
+ 'Rate': 100.0,
'Throughput': 312.5}
self.assertEqual(expected, output)
+ self.assertFalse(completed)
class PortPgIDMapTestCase(base.BaseUnitTestCase):
@@ -266,6 +268,7 @@ class PortPgIDMapTestCase(base.BaseUnitTestCase):
port_pg_id_map.increase_pg_id()
self.assertEqual([1, 2], port_pg_id_map.get_pg_ids(10))
self.assertEqual([3], port_pg_id_map.get_pg_ids(20))
+ self.assertEqual([], port_pg_id_map.get_pg_ids(30))
def test_increase_pg_id_no_port(self):
port_pg_id_map = rfc2544.PortPgIDMap()
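
The added assertion for port 30 documents that an unmapped port yields an empty list rather than a KeyError. A minimal sketch of that lookup (hypothetical class body, reduced to the behaviour the test pins down):

class PortPgIDMapSketch(object):
    """Hypothetical reduction of rfc2544.PortPgIDMap to the tested lookup."""

    def __init__(self):
        self._port_pg_id_map = {}

    def add_port(self, port):
        self._port_pg_id_map.setdefault(port, [])

    def get_pg_ids(self, port):
        return self._port_pg_id_map.get(port, [])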
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index 01fc19aa0..69a5fb484 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -450,8 +450,8 @@ class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase):
# duplicate config and add invalid action
acl_config = copy.deepcopy(self.ACL_CONFIG)
acl_config['access-list-entries'][0]["actions"].append({"xnat": {}})
- self.assertRaises(exceptions.AclUknownActionTemplate,
- setup_helper.get_flows_config, acl_config)
+ self.assertRaises(exceptions.AclUnknownActionTemplate,
+ setup_helper.get_flows_config, acl_config)
@mock.patch.object(AclApproxSetupEnvSetupEnvHelper, 'get_default_flows')
def test_get_flows_config_invalid_action_param(self, get_default_flows):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index c35d2db35..4a1d8c30e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1091,7 +1091,8 @@ class TestClientResourceHelper(unittest.TestCase):
self.assertIs(client_resource_helper._connect(client), client)
@mock.patch.object(ClientResourceHelper, '_build_ports')
- @mock.patch.object(ClientResourceHelper, '_run_traffic_once')
+ @mock.patch.object(ClientResourceHelper, '_run_traffic_once',
+ return_value=(True, mock.ANY))
def test_run_traffic(self, mock_run_traffic_once, mock_build_ports):
client_resource_helper = ClientResourceHelper(mock.Mock())
client = mock.Mock()
@@ -1103,7 +1104,7 @@ class TestClientResourceHelper(unittest.TestCase):
as mock_terminated:
mock_connect.return_value = client
type(mock_terminated).value = mock.PropertyMock(
- side_effect=[0, 1, lambda x: x])
+ side_effect=[0, 1, 1, lambda x: x])
client_resource_helper.run_traffic(traffic_profile, mq_producer)
mock_build_ports.assert_called_once()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py
new file mode 100644
index 000000000..d341b970b
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+
+import mock
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+from yardstick.network_services.vnf_generic.vnf import tg_pktgen
+from yardstick.tests.unit import base as ut_base
+
+
+class PktgenTrafficGenTestCase(ut_base.BaseUnitTestCase):
+
+ SERVICE_PORTS = [{'port': constants.LUA_PORT,
+ 'node_port': '34501'}]
+ VNFD = {'mgmt-interface': {'ip': '1.2.3.4',
+ 'service_ports': SERVICE_PORTS},
+ 'vdu': [{'external-interface': 'interface'}],
+ 'benchmark': {'kpi': 'fake_kpi'}
+ }
+
+ def setUp(self):
+ self._id = uuid.uuid1().int
+ self._mock_vnf_consumer = mock.patch.object(vnf_base,
+ 'GenericVNFConsumer')
+ self.mock_vnf_consumer = self._mock_vnf_consumer.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_vnf_consumer.stop()
+
+ def test__init(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ self.assertTrue(isinstance(tg, (vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint)))
+
+ def test_run_traffic(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ mock_tp = mock.Mock()
+ with mock.patch.object(tg, '_is_running', return_value=True):
+ tg.run_traffic(mock_tp)
+
+ mock_tp.init.assert_called_once_with(tg._node_ip, tg._lua_node_port)
+
+ def test__get_lua_node_port(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ service_ports = [{'port': constants.LUA_PORT,
+ 'node_port': '12345'}]
+ self.assertEqual(12345, tg._get_lua_node_port(service_ports))
+
+ def test__get_lua_node_port_no_lua_port(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ service_ports = [{'port': '333'}]
+ self.assertIsNone(tg._get_lua_node_port(service_ports))
+
+ def test__is_running(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ with mock.patch.object(tg, '_traffic_profile'):
+ self.assertTrue(tg._is_running())
+
+ def test__is_running_exception(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ with mock.patch.object(tg, '_traffic_profile') as mock_tp:
+ mock_tp.help.side_effect = exceptions.PktgenActionError()
+ self.assertFalse(tg._is_running())
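
test__get_lua_node_port pins the mapping from a Kubernetes service-port list to the node port used for the Lua socket. A sketch of that lookup under the fixture's assumptions (constants.LUA_PORT identifies the pktgen entry, node_port arrives as a string):

def get_lua_node_port(service_ports, lua_port):
    """Return the node port exposing 'lua_port', or None if not published."""
    for port in (service_ports or []):
        if int(port['port']) == int(lua_port):
            return int(port['node_port'])
    return None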
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 6aba41006..a5b9f258e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -30,13 +30,14 @@ class TestTrexRfcResouceHelper(unittest.TestCase):
mock_traffic_profile.config.duration = 3
mock_traffic_profile.execute_traffic.return_value = ('fake_ports',
'port_pg_id_map')
- mock_traffic_profile.get_drop_percentage.return_value = 'percentage'
+ mock_traffic_profile.get_drop_percentage.return_value = (True,
+ 'percentage')
rfc_rh = tg_rfc2544_trex.TrexRfcResourceHelper(mock_setup_helper)
rfc_rh.TRANSIENT_PERIOD = 0
rfc_rh.rfc2544_helper = mock.Mock()
with mock.patch.object(rfc_rh, '_get_samples') as mock_get_samples:
- rfc_rh._run_traffic_once(mock_traffic_profile)
+ self.assertTrue(rfc_rh._run_traffic_once(mock_traffic_profile))
mock_traffic_profile.execute_traffic.assert_called_once_with(rfc_rh)
mock_traffic_profile.stop_traffic.assert_called_once_with(rfc_rh)
diff --git a/yardstick/tests/unit/orchestrator/test_kubernetes.py b/yardstick/tests/unit/orchestrator/test_kubernetes.py
index 5a6f8c6a0..2d5c4a26f 100644
--- a/yardstick/tests/unit/orchestrator/test_kubernetes.py
+++ b/yardstick/tests/unit/orchestrator/test_kubernetes.py
@@ -529,9 +529,10 @@ class NetworkObjectTestCase(base.BaseUnitTestCase):
net_obj._version = 'version'
net_obj._plural = 'plural'
net_obj._template = 'template'
+ net_obj._name = 'fake_name'
net_obj.create()
mock_create_network.assert_called_once_with(
- 'scope', 'group', 'version', 'plural', 'template')
+ 'scope', 'group', 'version', 'plural', 'template', 'fake_name')
@mock.patch.object(kubernetes_utils, 'delete_network')
def test_delete(self, mock_delete_network):
@@ -543,7 +544,7 @@ class NetworkObjectTestCase(base.BaseUnitTestCase):
net_obj._name = 'name'
net_obj.delete()
mock_delete_network.assert_called_once_with(
- 'scope', 'group', 'version', 'plural', 'name')
+ 'scope', 'group', 'version', 'plural', 'name', skip_codes=[404])
class ServiceNodePortObjectTestCase(base.BaseUnitTestCase):
diff --git a/yardstick/tests/unit/service/test_environment.py b/yardstick/tests/unit/service/test_environment.py
index be4882e30..779e6eaa0 100644
--- a/yardstick/tests/unit/service/test_environment.py
+++ b/yardstick/tests/unit/service/test_environment.py
@@ -9,9 +9,8 @@
import mock
-from yardstick.common.exceptions import UnsupportedPodFormatError
-from yardstick.service.environment import Environment
-from yardstick.service.environment import AnsibleCommon
+from yardstick.common import exceptions
+from yardstick.service import environment
from yardstick.tests.unit import base as ut_base
@@ -31,15 +30,17 @@ class EnvironmentTestCase(ut_base.BaseUnitTestCase):
]
}
- with mock.patch.object(AnsibleCommon, 'gen_inventory_ini_dict'), \
- mock.patch.object(AnsibleCommon, 'get_sut_info',
- return_value={'node1': {}}):
- env = Environment(pod=pod_info)
+ with mock.patch.object(environment.AnsibleCommon,
+ 'gen_inventory_ini_dict'), \
+ mock.patch.object(environment.AnsibleCommon, 'get_sut_info',
+ return_value={'node1': {}}), \
+ mock.patch.object(environment.Environment, '_format_sut_info'):
+ env = environment.Environment(pod=pod_info)
env.get_sut_info()
def test_get_sut_info_pod_str(self):
pod_info = 'nodes'
- env = Environment(pod=pod_info)
- with self.assertRaises(UnsupportedPodFormatError):
+ env = environment.Environment(pod=pod_info)
+ with self.assertRaises(exceptions.UnsupportedPodFormatError):
env.get_sut_info()
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index b727e821d..71929f1a2 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -617,3 +617,26 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.put_file('a', 'b')
mock_put_sftp.assert_called_once()
+
+ def test_execute(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(return_value=0)
+ exit_code, _, _ = auto_connect_ssh.execute('')
+ self.assertEqual(exit_code, 0)
+
+ def _mock_run(self, *args, **kwargs):
+ if args[0] == 'ls':
+ if kwargs.get('raise_on_error'):
+ raise exceptions.SSHError(error_msg='Command error')
+ return 1
+ return 0
+
+ def test_execute_command_error(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(side_effect=self._mock_run)
+ self.assertRaises(exceptions.SSHError, auto_connect_ssh.execute, 'ls',
+ raise_on_error=True)
+ exit_code, _, _ = auto_connect_ssh.execute('ls')
+ self.assertNotEqual(exit_code, 0)
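
For context on the two new AutoConnectSSH tests: execute() is expected to return a (status, stdout, stderr) triple whose status comes from run(), and to forward raise_on_error so a failing command can either raise SSHError or simply report a non-zero code. A generic sketch of that contract (buffer handling assumed, not copied from yardstick/ssh.py):

import six


class SSHExecuteSketch(object):
    """Illustrative execute()/run() split; run() is what the tests mock."""

    def run(self, cmd, stdout=None, stderr=None, raise_on_error=True):
        raise NotImplementedError('real class runs cmd over the SSH channel')

    def execute(self, cmd, raise_on_error=False):
        stdout, stderr = six.StringIO(), six.StringIO()
        status = self.run(cmd, stdout=stdout, stderr=stderr,
                          raise_on_error=raise_on_error)
        return status, stdout.getvalue(), stderr.getvalue()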