-rw-r--r--  ansible/roles/install_yardstick/tasks/main.yml | 6
-rw-r--r--  dashboard/opnfv_yardstick_tc058.json | 2
-rw-r--r--  docker/k8s/Dockerfile | 39
-rwxr-xr-x  docs/testing/developer/devguide/devguide.rst | 26
-rw-r--r--  samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml | 39
-rw-r--r--  samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml | 171
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml | 8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml | 8
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml | 9
-rw-r--r--  samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml | 21
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml | 47
-rwxr-xr-x  tests/ci/load_images.sh | 16
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 50
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py | 34
-rw-r--r--  yardstick/common/ansible_common.py | 36
-rw-r--r--  yardstick/common/constants.py | 1
-rw-r--r--  yardstick/common/exceptions.py | 20
-rw-r--r--  yardstick/common/kubernetes_utils.py | 85
-rw-r--r--  yardstick/common/utils.py | 53
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py | 98
-rw-r--r--  yardstick/network_services/pipeline.py | 11
-rw-r--r--  yardstick/network_services/traffic_profile/__init__.py | 1
-rw-r--r--  yardstick/network_services/traffic_profile/base.py | 32
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 83
-rw-r--r--  yardstick/network_services/traffic_profile/pktgen.py | 61
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_pktgen.py | 103
-rw-r--r--  yardstick/orchestrator/kubernetes.py | 8
-rw-r--r--  yardstick/service/environment.py | 10
-rw-r--r--  yardstick/ssh.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_testcase.py | 14
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_arithmetic.py | 220
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py | 764
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 197
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 56
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py | 39
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 146
-rw-r--r--  yardstick/tests/unit/common/test_ansible_common.py | 205
-rw-r--r--  yardstick/tests/unit/common/test_kubernetes_utils.py | 207
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 88
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py | 352
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_base.py | 30
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 40
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py | 63
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 1
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py | 4
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py | 79
-rw-r--r--  yardstick/tests/unit/orchestrator/test_kubernetes.py | 8
-rw-r--r--  yardstick/tests/unit/service/test_environment.py | 19
-rw-r--r--  yardstick/tests/unit/test_ssh.py | 23
54 files changed, 2362 insertions(+), 1310 deletions(-)
diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml
index 973b2b027..203acc3e5 100644
--- a/ansible/roles/install_yardstick/tasks/main.yml
+++ b/ansible/roles/install_yardstick/tasks/main.yml
@@ -41,7 +41,7 @@
pip:
requirements: "{{ yardstick_dir }}/requirements.txt"
virtualenv: "{{ yardstick_dir }}/virtualenv"
- async: 300
+ async: 900
poll: 0
register: pip_installer
when: virtual_environment == True
@@ -49,7 +49,7 @@
- name: Install Yardstick requirements
pip:
requirements: "{{ yardstick_dir }}/requirements.txt"
- async: 300
+ async: 900
poll: 0
register: pip_installer
when: virtual_environment == False
@@ -59,7 +59,7 @@
jid: "{{ pip_installer.ansible_job_id }}"
register: job_result
until: job_result.finished
- retries: 100
+ retries: 180
- name: Install Yardstick code (venv)
pip:
diff --git a/dashboard/opnfv_yardstick_tc058.json b/dashboard/opnfv_yardstick_tc058.json
index 55b5a5f33..ed2a1750c 100644
--- a/dashboard/opnfv_yardstick_tc058.json
+++ b/dashboard/opnfv_yardstick_tc058.json
@@ -6,7 +6,7 @@
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
- "id": 33,
+ "id": null,
"links": [],
"refresh": "1m",
"rows": [
diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile
new file mode 100644
index 000000000..2f8d9b161
--- /dev/null
+++ b/docker/k8s/Dockerfile
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+FROM ubuntu:16.04
+
+LABEL image=opnfv/yardstick-image-k8s
+
+ARG BRANCH=master
+
+# GIT repo directory
+ENV CLONE_DEST="/opt/tempT"
+
+RUN apt-get update && apt-get install -y \
+ git bc bonnie++ fio gcc iperf3 ethtool \
+ iproute2 linux-tools-common linux-tools-generic \
+ lmbench make netperf patch perl rt-tests stress \
+ sysstat iputils-ping openssh-server sudo && \
+ apt-get -y autoremove && apt-get clean
+
+RUN rm -rf -- ${CLONE_DEST}
+RUN git clone https://github.com/kdlucas/byte-unixbench.git ${CLONE_DEST}
+RUN mkdir -p ${CLONE_DEST}/UnixBench/
+
+RUN git clone https://github.com/beefyamoeba5/ramspeed.git ${CLONE_DEST}/RAMspeed
+WORKDIR ${CLONE_DEST}/RAMspeed/ramspeed-2.6.0
+RUN mkdir -p ${CLONE_DEST}/RAMspeed/ramspeed-2.6.0/temp
+RUN bash build.sh
+
+RUN git clone https://github.com/beefyamoeba5/cachestat.git ${CLONE_DEST}/Cachestat
+
+WORKDIR /
+
+CMD /bin/bash
diff --git a/docs/testing/developer/devguide/devguide.rst b/docs/testing/developer/devguide/devguide.rst
index dbe92b846..91f2c2148 100755
--- a/docs/testing/developer/devguide/devguide.rst
+++ b/docs/testing/developer/devguide/devguide.rst
@@ -540,6 +540,32 @@ The final step consists in pushing the newly modified commit to Gerrit::
git review
+Backporting changes to stable branches
+--------------------------------------
+During the release cycle, when master and the ``stable/<release>`` branch have
+diverged, it may be necessary to backport (cherry-pick) changes to the
+``stable/<release>`` branch once they have merged to master.
+These changes should be identified by the committers reviewing the patch.
+Changes should be backported **as soon as possible** after merging of the
+original code.
+
+.. note::
+  Besides the commit and review process below, the Jira ticket must be
+  updated to add dual release versions and indicate that the change is to
+  be backported.
+
+The process for backporting is as follows:
+
+* Committer A merges a change to master (process for normal changes).
+* Committer A cherry-picks the change to ``stable/<release>`` branch (if the
+ bug has been identified for backporting).
+* The original author should review the code and verify that it still works
+ (and give a ``+1``).
+* Committer B reviews the change, gives a ``+2`` and merges to
+ ``stable/<release>``.
+
+A backported change needs a ``+1`` and a ``+2`` from a committer who didn’t
+propose the change (i.e. minimum 3 people involved).
+
Plugins
-------
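For the backporting workflow described in the section above, a possible command-line sequence is sketched below (the branch name and change SHA are placeholders; the cherry-pick can equally be created from the Gerrit UI):

    git fetch origin
    git checkout -b backport/<topic> origin/stable/<release>
    git cherry-pick -x <sha-of-the-merged-change>
    git review stable/<release>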
diff --git a/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml b/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml
new file mode 100644
index 000000000..81323e71c
--- /dev/null
+++ b/samples/vnf_samples/nsut/cmts/cmts-tg-topology.yaml
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+nsd:nsd-catalog:
+ nsd:
+ - id: cmts-tg-topology
+ name: cmts-tg-topology
+ short-name: cmts-tg-topology
+ description: cmts-tg-topology
+ constituent-vnfd:
+ - member-vnf-index: '1'
+ vnfd-id-ref: tg__0
+ VNF model: ../../vnf_descriptors/tg_pktgen.yaml
+ - member-vnf-index: '2'
+ vnfd-id-ref: vnf__0
+ VNF model: ../../vnf_descriptors/tg_pktgen.yaml
+
+ vld: []
+# - id: uplink
+# name: tg__0 to vnf__0 link 1
+# type: ELAN
+# vnfd-connection-point-ref:
+# - member-vnf-index-ref: '1'
+# vnfd-connection-point-ref: sriov01
+# vnfd-id-ref: tg__0
+# - member-vnf-index-ref: '2'
+# vnfd-connection-point-ref: sriov01
+# vnfd-id-ref: vnf__0
diff --git a/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml b/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml
new file mode 100644
index 000000000..cab8bb885
--- /dev/null
+++ b/samples/vnf_samples/nsut/cmts/tc_k8s_pktgen_01.yaml
@@ -0,0 +1,171 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/pktgen_throughput.yaml
+ topology: cmts-tg-topology.yaml
+ nodes:
+ tg__0: trafficgen-k8syardstick
+ vnf__0: vnf-k8syardstick
+ options: {}
+ runner:
+ type: IterationIPC
+ iterations: 10
+ interval: 15
+ timeout: 10000
+context:
+ name: k8syardstick
+ type: Kubernetes
+
+ servers:
+ vnf:
+ containers:
+ - image: si-docker.ir.intel.com/vcmts-ubuntu/vcmts-pktgen-uepi
+ args: ["/opt/bin/cmk isolate --conf-dir=/etc/cmk --socket-id=0 --pool=dataplane /vcmts/setup.sh anga_mac_1_ds.pcap ds"]
+ env:
+ - name: LUA_PATH
+ value: "/vcmts/Pktgen.lua"
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ resources:
+ requests:
+ pod.alpha.kubernetes.io/opaque-int-resource-cmk: "1"
+ ports:
+ - containerPort: 22022
+ volumeMounts:
+ - name: hugepages
+ mountPath: /dev/hugepages
+ - name: sysfs
+ mountPath: /sys
+ - name: sriov
+ mountPath: /sriov-cni
+ - name: host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+ - name: cmk-conf-dir
+ mountPath: /etc/cmk
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: true
+
+ node_ports:
+ - name: lua # Lower case alphanumeric characters or '-'
+ port: 22022
+ networks:
+ - flannel
+ - sriov01
+ volumes:
+ - name: hugepages
+ hostPath:
+ path: /dev/hugepages
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: sriov
+ hostPath:
+ path: /var/lib/cni/sriov
+ - name: cmk-install-dir
+ hostPath:
+ path: /opt/bin
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: cmk-conf-dir
+ hostPath:
+ path: /etc/cmk
+
+ trafficgen:
+ containers:
+ - image: si-docker.ir.intel.com/vcmts-ubuntu/vcmts-pktgen-uepi
+ args: ["/opt/bin/cmk isolate --conf-dir=/etc/cmk --socket-id=0 --pool=dataplane /vcmts/setup.sh anga_mac_1_ds.pcap ds"]
+ env:
+ - name: LUA_PATH
+ value: "/vcmts/Pktgen.lua"
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ resources:
+ requests:
+ pod.alpha.kubernetes.io/opaque-int-resource-cmk: "1"
+ ports:
+ - containerPort: 22022
+ volumeMounts:
+ - name: hugepages
+ mountPath: /dev/hugepages
+ - name: sysfs
+ mountPath: /sys
+ - name: sriov
+ mountPath: /sriov-cni
+ - name: host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+ - name: cmk-conf-dir
+ mountPath: /etc/cmk
+ securityContext:
+ allowPrivilegeEscalation: true
+ privileged: true
+
+ node_ports:
+ - name: lua # Lower case alphanumeric characters or '-'
+ port: 22022
+ networks:
+ - flannel
+ - sriov01
+ volumes:
+ - name: hugepages
+ hostPath:
+ path: /dev/hugepages
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: sriov
+ hostPath:
+ path: /var/lib/cni/sriov
+ - name: cmk-install-dir
+ hostPath:
+ path: /opt/bin
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: cmk-conf-dir
+ hostPath:
+ path: /etc/cmk
+
+ networks:
+ flannel:
+ args: '[{ "delegate": { "isDefaultGateway": true }}]'
+ plugin: flannel
+ sriov01:
+ plugin: sriov
+ args: '[{"if0": "ens802f0",
+ "if0name": "net0",
+ "dpdk": {
+ "kernel_driver": "i40evf",
+ "dpdk_driver": "igb_uio",
+ "dpdk_tool": "/opt/nsb_bin/dpdk-devbind.py"}
+ }]'
+ sriov02:
+ plugin: sriov
+ args: '[{"if0": "ens802f0",
+ "if0name": "net0",
+ "dpdk": {
+ "kernel_driver": "i40evf",
+ "dpdk_driver": "igb_uio",
+ "dpdk_tool": "/opt/nsb_bin/dpdk-devbind.py"}
+ }]'
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index b34672907..507491446 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -52,12 +52,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -83,12 +85,14 @@ downlink_0:
dstip4: "{{get(flow, 'flow.public_ip_0', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -111,12 +115,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -142,9 +148,11 @@ downlink_1:
dstip4: "{{get(flow, 'flow.public_ip_1', '90.90.1.1-90.105.255.255') }}"
{% endif %}
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
index 513aefb40..3cbd7cd62 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
@@ -50,12 +50,14 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
ipv4:
id: 2
@@ -76,12 +78,14 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_0', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_0', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
ipv4:
id: 3
@@ -102,12 +106,14 @@ uplink_1:
srcip4: "{{get(flow, 'flow.src_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.src_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
ipv4:
id: 4
@@ -128,9 +134,11 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '1.1.1.1-1.15.255.255') }}"
dstip4: "{{get(flow, 'flow.public_ip_1', '10.0.2.1-10.0.2.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 0
outer_l4:
srcport: "{{get(flow, 'flow.dst_port_1', '1234') }}"
dstport: "{{get(flow, 'flow.src_port_1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index aad751549..edff3612e 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -72,6 +72,7 @@ uplink_0:
srcip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -79,6 +80,7 @@ uplink_0:
srcport: "{{get(flow, 'flow.src_port_0', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_0:
id: 2
ipv4:
@@ -97,6 +99,7 @@ downlink_0:
srcip4: "{{get(flow, 'flow.dst_ip_0', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_0', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -104,6 +107,7 @@ downlink_0:
srcport: "{{get(flow, 'flow.dst_port_0', '0') }}"
dstport: "{{get(flow, 'flow.src_port_0', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
uplink_1:
id: 3
ipv4:
@@ -131,6 +135,8 @@ uplink_1:
proto: "tcp"
srcip4: "{{get(flow, 'flow.srcip_1', '192.168.0.0-192.168.255.255') }}"
dstip4: "{{get(flow, 'flow.dstip_1', '192.16.0.0-192.16.0.31') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -138,6 +144,7 @@ uplink_1:
srcport: "{{get(flow, 'flow.src_port_1', '0') }}"
dstport: "{{get(flow, 'flow.dst_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
downlink_1:
id: 4
ipv4:
@@ -156,6 +163,7 @@ downlink_1:
srcip4: "{{get(flow, 'flow.dst_ip_1', '192.16.0.0-192.16.0.31') }}"
dstip4: "{{get(flow, 'flow.src_ip_1', '192.168.0.0-192.168.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
ttl: 32
dscp: 32
@@ -163,3 +171,4 @@ downlink_1:
srcport: "{{get(flow, 'flow.dst_port_1', '0') }}"
dstport: "{{get(flow, 'flow.src_port_1', '0') }}"
count: "{{get(flow, 'flow.count', '1') }}"
+ seed: "{{get(flow, 'flow.seed', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml b/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml
new file mode 100644
index 000000000..e222e1d8c
--- /dev/null
+++ b/samples/vnf_samples/traffic_profiles/pktgen_throughput.yaml
@@ -0,0 +1,21 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+schema: "nsb:traffic_profile:0.1"
+
+name: pktgen
+description: Traffic profile to run throughput tests
+traffic_profile:
+ traffic_type: PktgenTrafficProfile
+ duration: 15
diff --git a/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml b/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml
new file mode 100644
index 000000000..17e631652
--- /dev/null
+++ b/samples/vnf_samples/vnf_descriptors/tg_pktgen.yaml
@@ -0,0 +1,47 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: PktgenTrafficGen # NSB class mapping
+ name: pktgen_tg
+ short-name: pktgen_tg
+ description: Pktgen DPDK traffic generator
+ mgmt-interface:
+ vdu-id: pktgen
+ {% if ip is defined %}
+ ip: '{{ ip }}'
+ {% endif %}
+ {% if service_ports is defined and service_ports %}
+ service_ports:
+ {% for port in service_ports %}
+ - port: "{{ port['port']|int }}"
+ node_port: "{{ port['node_port']|int }}"
+ target_port: "{{ port['target_port']|int }}"
+ {% endfor %}
+ {% endif %}
+
+ vdu:
+ - id: pktgen_tg
+ name: pktgen_tg
+ description: Pktgen DPDK traffic generator
+
+ benchmark:
+ kpi:
+ - rx_throughput_fps
+ - tx_throughput_fps
+ - tx_throughput_mbps
+ - rx_throughput_mbps
+ - in_packets
+ - out_packets
diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 1e1591ce3..7a86abb4e 100755
--- a/tests/ci/load_images.sh
+++ b/tests/ci/load_images.sh
@@ -29,11 +29,6 @@ if [ "${INSTALLER_TYPE}" == 'fuel' ]; then
fi
export YARD_IMG_ARCH
-HW_FW_TYPE=""
-if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
- HW_FW_TYPE=uefi
-fi
-export HW_FW_TYPE
UCA_HOST="cloud-images.ubuntu.com"
if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
@@ -104,18 +99,12 @@ load_yardstick_image()
echo
echo "========== Loading yardstick cloud image =========="
EXTRA_PARAMS=""
- if [[ "${YARD_IMG_ARCH}" == "arm64" ]]; then
- EXTRA_PARAMS="--property hw_video_model=vga"
- fi
# VPP requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-fdio-"* ]]; then
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- if [[ -n "${HW_FW_TYPE}" ]]; then
- EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
- fi
if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
output=$(eval openstack ${SECURE} image create \
@@ -165,7 +154,7 @@ load_cirros_image()
if [[ "${YARD_IMG_ARCH}" == "arm64" ]]; then
CIRROS_IMAGE_VERSION="cirros-d161201"
CIRROS_IMAGE_PATH="/home/opnfv/images/cirros-d161201-aarch64-disk.img"
- EXTRA_PARAMS="--property hw_video_model=vga --property short_id=ubuntu16.04"
+ EXTRA_PARAMS="--property short_id=ubuntu16.04"
else
CIRROS_IMAGE_VERSION="cirros-0.3.5"
CIRROS_IMAGE_PATH="/home/opnfv/images/cirros-0.3.5-x86_64-disk.img"
@@ -184,9 +173,6 @@ load_cirros_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- if [[ -n "${HW_FW_TYPE}" ]]; then
- EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
- fi
output=$(openstack ${SECURE} image create \
--disk-format qcow2 \
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 7534c4ea5..e1553c72b 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -110,7 +110,7 @@ class KubernetesContext(ctx_base.Context):
self._delete_rc(rc)
def _delete_rc(self, rc):
- k8s_utils.delete_replication_controller(rc)
+ k8s_utils.delete_replication_controller(rc, skip_codes=[404])
def _delete_pods(self):
for pod in self.template.pods:
@@ -159,7 +159,7 @@ class KubernetesContext(ctx_base.Context):
k8s_utils.create_config_map(self.ssh_key, {'authorized_keys': key})
def _delete_ssh_key(self):
- k8s_utils.delete_config_map(self.ssh_key)
+ k8s_utils.delete_config_map(self.ssh_key, skip_codes=[404])
utils.remove_file(self.key_path)
utils.remove_file(self.public_key_path)
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 7a11d3e76..10f10d4e6 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -64,37 +64,36 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self._mq_ids = []
def _get_ip_flow_range(self, ip_start_range):
+ """Retrieve a CIDR first and last viable IPs
- # IP range is specified as 'x.x.x.x-y.y.y.y'
+ :param ip_start_range: could be the IP range itself or a dictionary
+ with the host name and the port.
+ :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
+ """
if isinstance(ip_start_range, six.string_types):
return ip_start_range
- node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
+ node_name, range_or_interface = next(iter(ip_start_range.items()),
+ (None, '0.0.0.0'))
if node_name is None:
- # we are manually specifying the range
- ip_addr_range = range_or_interface
+ return range_or_interface
+
+ node = self.context_cfg['nodes'].get(node_name, {})
+ interface = node.get('interfaces', {}).get(range_or_interface)
+ if interface:
+ ip = interface['local_ip']
+ mask = interface['netmask']
else:
- node = self.context_cfg["nodes"].get(node_name, {})
- try:
- # the ip_range is the interface name
- interface = node.get("interfaces", {})[range_or_interface]
- except KeyError:
- ip = "0.0.0.0"
- mask = "255.255.255.0"
- else:
- ip = interface["local_ip"]
- # we can't default these values, they must both exist to be valid
- mask = interface["netmask"]
-
- ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
- hosts = list(ipaddr.hosts())
- if len(hosts) > 2:
- # skip the first host in case of gateway
- ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
- else:
- LOG.warning("Only single IP in range %s", ipaddr)
- # fall back to single IP range
- ip_addr_range = ip
+ ip = '0.0.0.0'
+ mask = '255.255.255.0'
+
+ ipaddr = ipaddress.ip_network(
+ six.text_type('{}/{}'.format(ip, mask)), strict=False)
+ if ipaddr.prefixlen + 2 < ipaddr.max_prefixlen:
+ ip_addr_range = '{}-{}'.format(ipaddr[2], ipaddr[-2])
+ else:
+ LOG.warning('Only single IP in range %s', ipaddr)
+ ip_addr_range = ip
return ip_addr_range
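As a standalone illustration of the range calculation above (plain Python 3 ipaddress; the interface values are only examples), the rewritten helper effectively does the following:

    import ipaddress

    ip, mask = '10.0.0.5', '255.255.255.0'
    net = ipaddress.ip_network('{}/{}'.format(ip, mask), strict=False)
    if net.prefixlen + 2 < net.max_prefixlen:
        # net[0] is the network address and net[1] is left for a gateway
        ip_addr_range = '{}-{}'.format(net[2], net[-2])
    else:
        ip_addr_range = ip
    print(ip_addr_range)  # 10.0.0.2-10.0.0.254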
def _get_traffic_flow(self):
@@ -119,6 +118,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
flow["dst_port_{}".format(index)] = dst_port
flow["count"] = fflow["count"]
+ flow["seed"] = fflow["seed"]
except KeyError:
flow = {}
return {"flow": flow}
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 2b3474070..8344b1595 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -193,22 +193,19 @@ class Vsperf(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
- reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ reader = csv.DictReader(stdout.split('\r\n'), strict=True)
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
# sla check; go through all defined SLAs and check if values measured
# by VSPERF are higher then those defined by SLAs
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 27bf40dcb..d5c8a3bfe 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -205,22 +205,17 @@ class VsperfDPDK(base.Scenario):
self.client.send_command(cmd)
else:
cmd = "cat ~/.testpmd.macaddr.port1"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port1_mac = stdout
+
cmd = "cat ~/.testpmd.macaddr.port2"
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
self.tgen_port2_mac = stdout
cmd = "screen -d -m sudo -E bash ~/testpmd_vsperf.sh %s %s" % \
(self.moongen_port1_mac, self.moongen_port2_mac)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
time.sleep(1)
@@ -245,7 +240,7 @@ class VsperfDPDK(base.Scenario):
self.setup()
# remove results from previous tests
- self.client.execute("rm -rf /tmp/results*")
+ self.client.run("rm -rf /tmp/results*", raise_on_error=False)
# get vsperf options
options = self.scenario_cfg['options']
@@ -291,9 +286,7 @@ class VsperfDPDK(base.Scenario):
cmd = "sshpass -p yardstick ssh-copy-id -o StrictHostKeyChecking=no " \
"root@%s -p 22" % (self.moongen_host_ip)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# execute vsperf
cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf ; "
@@ -302,22 +295,19 @@ class VsperfDPDK(base.Scenario):
cmd += "--conf-file ~/vsperf.conf "
cmd += "--test-params=\"%s\"" % (';'.join(test_params))
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ self.client.run(cmd)
# get test results
cmd = "cat /tmp/results*/result.csv"
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
-
- if status:
- raise RuntimeError(stderr)
+ _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
# convert result.csv to JSON format
reader = csv.DictReader(stdout.split('\r\n'))
- result.update(next(reader))
+ try:
+ result.update(next(reader))
+ except StopIteration:
+ pass
result['nrFlows'] = multistream
# sla check; go through all defined SLAs and check if values measured
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index ca5a110e2..dee7044a5 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
import cgitb
import collections
import contextlib as cl
@@ -23,11 +21,11 @@ import os
from collections import Mapping, MutableMapping, Iterable, Callable, deque
from functools import partial
from itertools import chain
-from subprocess import CalledProcessError, Popen, PIPE
-from tempfile import NamedTemporaryFile
+import subprocess
+import tempfile
import six
-import six.moves.configparser as ConfigParser
+from six.moves import configparser
import yaml
from six import StringIO
from chainmap import ChainMap
@@ -134,10 +132,9 @@ class CustomTemporaryFile(object):
else:
self.data_types = self.DEFAULT_DATA_TYPES
# must open "w+" so unicode is encoded correctly
- self.creator = partial(NamedTemporaryFile, mode="w+", delete=False,
- dir=directory,
- prefix=prefix,
- suffix=self.suffix)
+ self.creator = partial(
+ tempfile.NamedTemporaryFile, mode="w+", delete=False,
+ dir=directory, prefix=prefix, suffix=self.suffix)
def make_context(self, data, write_func, descriptor='data'):
return TempfileContext(data, write_func, descriptor, self.data_types,
@@ -191,8 +188,8 @@ class FileNameGenerator(object):
if not prefix.endswith('_'):
prefix += '_'
- temp_file = NamedTemporaryFile(delete=False, dir=directory,
- prefix=prefix, suffix=suffix)
+ temp_file = tempfile.NamedTemporaryFile(delete=False, dir=directory,
+ prefix=prefix, suffix=suffix)
with cl.closing(temp_file):
return temp_file.name
@@ -474,7 +471,7 @@ class AnsibleCommon(object):
prefix = '_'.join([self.prefix, prefix, 'inventory'])
ini_temp_file = IniMapTemporaryFile(directory=directory, prefix=prefix)
- inventory_config = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory_config = configparser.ConfigParser(allow_no_value=True)
# disable default lowercasing
inventory_config.optionxform = str
return ini_temp_file.make_context(self.inventory_dict, write_func,
@@ -510,7 +507,7 @@ class AnsibleCommon(object):
return timeout
def _generate_ansible_cfg(self, directory):
- parser = ConfigParser.ConfigParser()
+ parser = configparser.ConfigParser()
parser.add_section('defaults')
parser.set('defaults', 'host_key_checking', 'False')
@@ -541,12 +538,12 @@ class AnsibleCommon(object):
cmd = ['ansible', 'all', '-m', 'setup', '-i',
inventory_path, '--tree', sut_dir]
- proc = Popen(cmd, stdout=PIPE, cwd=directory)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=directory)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
def _gen_sut_info_dict(self, sut_dir):
sut_info = {}
@@ -617,12 +614,13 @@ class AnsibleCommon(object):
# 'timeout': timeout / 2,
})
with Timer() as timer:
- proc = Popen(cmd, stdout=PIPE, **exec_args)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ **exec_args)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
timeout -= timer.total_seconds()
cmd.remove("--syntax-check")
@@ -632,10 +630,10 @@ class AnsibleCommon(object):
# TODO: add timeout support of use subprocess32 backport
# 'timeout': timeout,
})
- proc = Popen(cmd, stdout=PIPE, **exec_args)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, **exec_args)
output, _ = proc.communicate()
retcode = proc.wait()
LOG.debug("exit status = %s", retcode)
if retcode != 0:
- raise CalledProcessError(retcode, cmd, output)
+ raise subprocess.CalledProcessError(retcode, cmd, output)
return output
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 4ed40f8af..3d775d48e 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -175,3 +175,4 @@ SCOPE_CLUSTER = 'Cluster'
# VNF definition
SSH_PORT = 22
+LUA_PORT = 22022
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index cbb294989..9fbe19949 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -85,17 +85,19 @@ class InfluxDBConfigurationMissing(YardstickException):
class YardstickBannedModuleImported(YardstickException):
- # pragma: no cover
message = 'Module "%(module)s" cannnot be imported. Reason: "%(reason)s"'
+class IXIAUnsupportedProtocol(YardstickException):
+ message = 'Protocol "%(protocol)s" is not supported in IXIA'
+
+
class PayloadMissingAttributes(YardstickException):
message = ('Error instantiating a Payload class, missing attributes: '
'%(missing_attributes)s')
class HeatTemplateError(YardstickException):
- """Error in Heat during the stack deployment"""
message = ('Error in Heat during the creation of the OpenStack stack '
'"%(stack_name)s"')
@@ -108,6 +110,10 @@ class TrafficProfileNotImplemented(YardstickException):
message = 'No implementation for traffic profile %(profile_class)s.'
+class TrafficProfileRate(YardstickException):
+ message = 'Traffic profile rate must be "<number>[fps|%]"'
+
+
class DPDKSetupDriverError(YardstickException):
message = '"igb_uio" driver is not loaded'
@@ -210,6 +216,10 @@ class WaitTimeout(YardstickException):
message = 'Wait timeout while waiting for condition'
+class PktgenActionError(YardstickException):
+ message = 'Error in "%(action)s" action'
+
+
class KubernetesApiException(YardstickException):
message = ('Kubernetes API errors. Action: %(action)s, '
'resource: %(resource)s')
@@ -396,5 +406,9 @@ class AclMissingActionArguments(YardstickException):
'[action=%(action_name)s parameter=%(action_param)s]')
-class AclUknownActionTemplate(YardstickException):
+class AclUnknownActionTemplate(YardstickException):
message = 'No ACL CLI template found for "%(action_name)s" action'
+
+
+class InvalidMacAddress(YardstickException):
+ message = 'Mac address "%(mac_address)s" is invalid'
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index 35e590f2b..323f13abb 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -75,15 +75,18 @@ def create_service(template,
raise
-def delete_service(name,
- namespace='default',
- **kwargs): # pragma: no cover
+def delete_service(name, namespace='default', skip_codes=None, **kwargs):
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
try:
body = client.V1DeleteOptions()
core_v1_api.delete_namespaced_service(name, namespace, body, **kwargs)
- except ApiException:
- LOG.exception('Delete Service failed')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Service')
def get_service_list(namespace='default', **kwargs):
@@ -118,8 +121,10 @@ def create_replication_controller(template,
def delete_replication_controller(name,
namespace='default',
wait=False,
- **kwargs): # pragma: no cover
+ skip_codes=None,
+ **kwargs):
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -128,9 +133,12 @@ def delete_replication_controller(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete replication controller failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='ReplicationController')
def delete_pod(name,
@@ -193,8 +201,10 @@ def create_config_map(name,
def delete_config_map(name,
namespace='default',
wait=False,
- **kwargs): # pragma: no cover
+ skip_codes=None,
+ **kwargs):
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -203,9 +213,12 @@ def delete_config_map(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete config map failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='ConfigMap')
def create_custom_resource_definition(body):
@@ -223,14 +236,18 @@ def create_custom_resource_definition(body):
action='create', resource='CustomResourceDefinition')
-def delete_custom_resource_definition(name):
+def delete_custom_resource_definition(name, skip_codes=None):
+ skip_codes = [] if not skip_codes else skip_codes
api = get_extensions_v1beta_api()
body_obj = client.V1DeleteOptions()
try:
api.delete_custom_resource_definition(name, body_obj)
- except ApiException:
- raise exceptions.KubernetesApiException(
- action='delete', resource='CustomResourceDefinition')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='CustomResourceDefinition')
def get_custom_resource_definition(kind):
@@ -246,8 +263,28 @@ def get_custom_resource_definition(kind):
action='delete', resource='CustomResourceDefinition')
-def create_network(scope, group, version, plural, body, namespace='default'):
+def get_network(scope, group, version, plural, name, namespace='default'):
+ api = get_custom_objects_api()
+ try:
+ if scope == consts.SCOPE_CLUSTER:
+ network = api.get_cluster_custom_object(group, version, plural, name)
+ else:
+ network = api.get_namespaced_custom_object(
+ group, version, namespace, plural, name)
+ except ApiException as e:
+ if e.status in [404]:
+ return
+ else:
+ raise exceptions.KubernetesApiException(
+ action='get', resource='Custom Object: Network')
+ return network
+
+
+def create_network(scope, group, version, plural, body, name, namespace='default'):
api = get_custom_objects_api()
+ if get_network(scope, group, version, plural, name, namespace):
+ LOG.info('Network %s already exists', name)
+ return
try:
if scope == consts.SCOPE_CLUSTER:
api.create_cluster_custom_object(group, version, plural, body)
@@ -259,7 +296,8 @@ def create_network(scope, group, version, plural, body, namespace='default'):
action='create', resource='Custom Object: Network')
-def delete_network(scope, group, version, plural, name, namespace='default'):
+def delete_network(scope, group, version, plural, name, namespace='default', skip_codes=None):
+ skip_codes = [] if not skip_codes else skip_codes
api = get_custom_objects_api()
try:
if scope == consts.SCOPE_CLUSTER:
@@ -267,9 +305,12 @@ def delete_network(scope, group, version, plural, name, namespace='default'):
else:
api.delete_namespaced_custom_object(
group, version, namespace, plural, name, {})
- except ApiException:
- raise exceptions.KubernetesApiException(
- action='delete', resource='Custom Object: Network')
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Custom Object: Network')
def get_pod_list(namespace='default'): # pragma: no cover
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 85cecc714..c019cd264 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -28,6 +28,7 @@ import socket
import subprocess
import sys
import time
+import threading
import six
from flask import jsonify
@@ -193,20 +194,16 @@ def parse_ini_file(path):
def get_port_mac(sshclient, port):
cmd = "ifconfig |grep HWaddr |grep %s |awk '{print $5}' " % port
- status, stdout, stderr = sshclient.execute(cmd)
+ _, stdout, _ = sshclient.execute(cmd, raise_on_error=True)
- if status:
- raise RuntimeError(stderr)
return stdout.rstrip()
def get_port_ip(sshclient, port):
cmd = "ifconfig %s |grep 'inet addr' |awk '{print $2}' " \
"|cut -d ':' -f2 " % port
- status, stdout, stderr = sshclient.execute(cmd)
+ _, stdout, _ = sshclient.execute(cmd, raise_on_error=True)
- if status:
- raise RuntimeError(stderr)
return stdout.rstrip()
@@ -281,11 +278,19 @@ def get_free_port(ip):
def mac_address_to_hex_list(mac):
- octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
- assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)
+ try:
+ octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
+ except ValueError:
+ raise exceptions.InvalidMacAddress(mac_address=mac)
+ if len(octets) != 6 or any(len(octet) != 4 for octet in octets):
+ raise exceptions.InvalidMacAddress(mac_address=mac)
return octets
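A quick standalone check of the conversion performed above (the address is illustrative); malformed input such as 'de:ad' now raises InvalidMacAddress instead of tripping an assert:

    mac = '00:de:ad:be:ef:01'
    octets = ['0x{:02x}'.format(int(elem, 16)) for elem in mac.split(':')]
    print(octets)  # ['0x00', '0xde', '0xad', '0xbe', '0xef', '0x01']
    assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)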
+def make_ipv4_address(ip_addr):
+ return ipaddress.IPv4Address(six.text_type(ip_addr))
+
+
def safe_ip_address(ip_addr):
""" get ip address version v6 or v4 """
try:
@@ -335,6 +340,14 @@ def ip_to_hex(ip_addr, separator=''):
return separator.join('{:02x}'.format(octet) for octet in address.packed)
+def get_mask_from_ip_range(ip_low, ip_high):
+ _ip_low = ipaddress.ip_address(ip_low)
+ _ip_high = ipaddress.ip_address(ip_high)
+ _ip_low_int = int(_ip_low)
+ _ip_high_int = int(_ip_high)
+ return _ip_high.max_prefixlen - (_ip_high_int ^ _ip_low_int).bit_length()
+
+
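A worked example of the new mask helper, using the default '1.1.1.1-1.15.255.255' range from the IXIA sample profiles earlier in this patch:

    import ipaddress

    low = int(ipaddress.ip_address('1.1.1.1'))
    high = int(ipaddress.ip_address('1.15.255.255'))
    # 20 low-order bits differ between the two addresses, so the shared
    # prefix length is 32 - 20 = 12.
    print(32 - (high ^ low).bit_length())  # 12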
def try_int(s, *args):
"""Convert to integer if possible."""
try:
@@ -467,6 +480,9 @@ class Timer(object):
def __del__(self): # pragma: no cover
signal.alarm(0)
+ def delta_time_sec(self):
+ return (datetime.datetime.now() - self.start).total_seconds()
+
def read_meminfo(ssh_client):
"""Read "/proc/meminfo" file and parse all keys and values"""
@@ -513,17 +529,30 @@ def open_relative_file(path, task_path):
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
"""Wait until callable predicate is evaluated as True
+ When called from a thread other than the main one, Timer(timeout) cannot
+ be used because the signal module only works in the main thread; in that
+ case the timeout is enforced by polling the elapsed time instead.
+
:param predicate: (func) callable deciding whether waiting should continue
:param timeout: (int) timeout in seconds how long should function wait
:param sleep: (int) polling interval for results in seconds
:param exception: exception instance to raise on timeout. If None is passed
(default) then WaitTimeout exception is raised.
"""
- try:
- with Timer(timeout=timeout):
- while not predicate():
+ if isinstance(threading.current_thread(), threading._MainThread):
+ try:
+ with Timer(timeout=timeout):
+ while not predicate():
+ time.sleep(sleep)
+ except exceptions.TimerTimeout:
+ if exception and issubclass(exception, Exception):
+ raise exception # pylint: disable=raising-bad-type
+ raise exceptions.WaitTimeout
+ else:
+ with Timer() as timer:
+ while timer.delta_time_sec() < timeout:
+ if predicate():
+ return
time.sleep(sleep)
- except exceptions.TimerTimeout:
if exception and issubclass(exception, Exception):
raise exception # pylint: disable=raising-bad-type
raise exceptions.WaitTimeout
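A minimal usage sketch of the thread-aware wait (assuming the yardstick package is importable; the flag and timings are illustrative):

    import threading

    from yardstick.common import utils

    flag = {'ready': False}

    def _worker():
        # Runs outside the main thread, so the helper polls the elapsed time
        # instead of relying on signal.alarm().
        utils.wait_until_true(lambda: flag['ready'], timeout=10, sleep=1)

    worker = threading.Thread(target=_worker)
    worker.start()
    flag['ready'] = True
    worker.join()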
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index 74deeecb5..82f406d08 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -12,12 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import ipaddress
import logging
import IxNetwork
from yardstick.common import exceptions
from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
log = logging.getLogger(__name__)
@@ -32,12 +34,14 @@ PROTO_UDP = 'udp'
PROTO_TCP = 'tcp'
PROTO_VLAN = 'vlan'
-IP_VERSION_4_MASK = '0.0.0.255'
-IP_VERSION_6_MASK = '0:0:0:0:0:0:0:ff'
+IP_VERSION_4_MASK = 24
+IP_VERSION_6_MASK = 64
TRAFFIC_STATUS_STARTED = 'started'
TRAFFIC_STATUS_STOPPED = 'stopped'
+SUPPORTED_PROTO = [PROTO_UDP]
+
# NOTE(ralonsoh): this pragma will be removed in the last patch of this series
class IxNextgen(object): # pragma: no cover
@@ -328,7 +332,7 @@ class IxNextgen(object): # pragma: no cover
'-valueType', 'singleValue')
self.ixnet.commit()
- def update_frame(self, traffic):
+ def update_frame(self, traffic, duration):
"""Update the L2 frame
This function updates the L2 frame options:
@@ -336,8 +340,8 @@ class IxNextgen(object): # pragma: no cover
- Duration: in case of traffic_type="fixedDuration", amount of seconds
to inject traffic.
- Rate: in frames per seconds or percentage.
- - Type of rate: "framesPerSecond" ("bitsPerSecond" and
- "percentLineRate" no used)
+ - Type of rate: "framesPerSecond" or "percentLineRate" ("bitsPerSecond"
+ not used)
- Frame size: custom IMIX [1] definition; a list of packet size in
bytes and the weight. E.g.:
[[64, 64, 10], [128, 128, 15], [512, 512, 5]]
@@ -346,6 +350,7 @@ class IxNextgen(object): # pragma: no cover
:param traffic: list of traffic elements; each traffic element contains
the injection parameter for each flow group.
+ :param duration: (int) injection time in seconds.
"""
for traffic_param in traffic.values():
fg_id = str(traffic_param['id'])
@@ -355,7 +360,10 @@ class IxNextgen(object): # pragma: no cover
type = traffic_param.get('traffic_type', 'fixedDuration')
duration = traffic_param.get('duration', 30)
- rate = traffic_param['iload']
+ rate = traffic_param['rate']
+ rate_unit = (
+ 'framesPerSecond' if traffic_param['rate_unit'] ==
+ tp_base.TrafficProfileConfig.RATE_FPS else 'percentLineRate')
weighted_range_pairs = self._parse_framesize(
traffic_param['outer_l2']['framesize'])
srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01'))
@@ -370,7 +378,7 @@ class IxNextgen(object): # pragma: no cover
'-type', type, '-duration', duration)
self.ixnet.setMultiAttribute(
config_element + '/frameRate',
- '-rate', rate, '-type', 'framesPerSecond')
+ '-rate', rate, '-type', rate_unit)
self.ixnet.setMultiAttribute(
config_element + '/frameSize',
'-type', 'weightedPairs',
@@ -393,15 +401,17 @@ class IxNextgen(object): # pragma: no cover
:param field: (str) field name, e.g.: scrIp, dstIp
:param ip_address: (str) IP address
:param seed: (int) seed length
- :param mask: (str) IP address mask
+ :param mask: (int) IP address mask length
:param count: (int) number of random IPs to generate
"""
field_descriptor = self._get_field_in_stack_item(ip_descriptor,
field)
+ random_mask = str(ipaddress.IPv4Address(
+ 2**(ipaddress.IPV4LENGTH - mask) - 1).compressed)
self.ixnet.setMultiAttribute(field_descriptor,
'-seed', seed,
'-fixedBits', ip_address,
- '-randomMask', mask,
+ '-randomMask', random_mask,
'-valueType', 'random',
'-countValue', count)
self.ixnet.commit()
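For reference, a standalone sketch of how the prefix length passed as 'mask' is expanded into the wildcard string handed to IxNetwork (the prefix lengths are illustrative):

    import ipaddress

    for prefix in (24, 12):
        random_mask = ipaddress.IPv4Address(
            2 ** (ipaddress.IPV4LENGTH - prefix) - 1).compressed
        print(prefix, random_mask)  # 24 -> 0.0.0.255, 12 -> 0.15.255.255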
@@ -420,15 +430,77 @@ class IxNextgen(object): # pragma: no cover
raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
count = traffic_param['outer_l3']['count']
- srcip4 = str(traffic_param['outer_l3']['srcip4'])
- dstip4 = str(traffic_param['outer_l3']['dstip4'])
+ srcip = str(traffic_param['outer_l3']['srcip'])
+ dstip = str(traffic_param['outer_l3']['dstip'])
+ seed = traffic_param['outer_l3']['seed']
+ srcmask = traffic_param['outer_l3']['srcmask'] or IP_VERSION_4_MASK
+ dstmask = traffic_param['outer_l3']['dstmask'] or IP_VERSION_4_MASK
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'srcIp', srcip4, 1, IP_VERSION_4_MASK, count)
+ 'srcIp', srcip, seed, srcmask, count)
self._update_ipv4_address(
self._get_stack_item(fg_id, PROTO_IPV4)[0],
- 'dstIp', dstip4, 1, IP_VERSION_4_MASK, count)
+ 'dstIp', dstip, seed, dstmask, count)
+
+ def update_l4(self, traffic):
+ """Update the L4 headers
+
+ NOTE: Only UDP is currently supported
+ :param traffic: list of traffic elements; each traffic element contains
+ the injection parameter for each flow group
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ proto = traffic_param['outer_l3']['proto']
+ if proto not in SUPPORTED_PROTO:
+ raise exceptions.IXIAUnsupportedProtocol(protocol=proto)
+
+ count = traffic_param['outer_l4']['count']
+ seed = traffic_param['outer_l4']['seed']
+
+ srcport = traffic_param['outer_l4']['srcport']
+ srcmask = traffic_param['outer_l4']['srcportmask']
+
+ dstport = traffic_param['outer_l4']['dstport']
+ dstmask = traffic_param['outer_l4']['dstportmask']
+
+ if proto in SUPPORTED_PROTO:
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'srcPort', srcport, seed, srcmask, count)
+
+ self._update_udp_port(self._get_stack_item(fg_id, proto)[0],
+ 'dstPort', dstport, seed, dstmask, count)
+
+ def _update_udp_port(self, descriptor, field, value,
+ seed=1, mask=0, count=1):
+ """Set the UDP port in a config element stack UDP field
+
+ :param descriptor: (str) UDP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"udp-3"
+ :param field: (str) field name, e.g.: srcPort, dstPort
+ :param value: (int) UDP port fixed bits
+ :param seed: (int) seed length
+ :param mask: (int) UDP port mask
+ :param count: (int) number of random ports to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(descriptor, field)
+
+ if mask == 0:
+ seed = count = 1
+
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-auto', 'false',
+ '-seed', seed,
+ '-fixedBits', value,
+ '-randomMask', mask,
+ '-valueType', 'random',
+ '-countValue', count)
+
+ self.ixnet.commit()
def _build_stats_map(self, view_obj, name_map):
return {data_yardstick: self.ixnet.execute(
diff --git a/yardstick/network_services/pipeline.py b/yardstick/network_services/pipeline.py
index d781ba0cd..7155480d4 100644
--- a/yardstick/network_services/pipeline.py
+++ b/yardstick/network_services/pipeline.py
@@ -18,6 +18,8 @@ import itertools
from six.moves import zip
+from yardstick.common import utils
+
FIREWALL_ADD_DEFAULT = "p {0} firewall add default 1"
FIREWALL_ADD_PRIO = """\
p {0} firewall add priority 1 ipv4 {1} 24 0.0.0.0 0 0 65535 0 65535 6 0xFF port 0"""
@@ -59,8 +61,7 @@ class PipelineRules(object):
self.add_rule(FIREWALL_ADD_PRIO, ip)
def add_firewall_script(self, ip):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for i in range(256):
ip_addr[-2] = str(i)
@@ -87,8 +88,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_MPLS, ip, mac_addr, index)
def add_route_script(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
for index in range(0, 256, 8):
ip_addr[-2] = str(index)
@@ -101,8 +101,7 @@ class PipelineRules(object):
self.add_rule(ROUTE_ADD_ETHER_QINQ, ip, mask, mac_addr, index)
def add_route_script2(self, ip, mac_addr):
- ip_addr = ip.split('.')
- assert len(ip_addr) == 4
+ ip_addr = str(utils.make_ipv4_address(ip)).split('.')
ip_addr[-1] = '0'
mask = 24
for i in range(0, 256):
diff --git a/yardstick/network_services/traffic_profile/__init__.py b/yardstick/network_services/traffic_profile/__init__.py
index 356b36bd9..a1b26a24d 100644
--- a/yardstick/network_services/traffic_profile/__init__.py
+++ b/yardstick/network_services/traffic_profile/__init__.py
@@ -27,6 +27,7 @@ def register_modules():
'yardstick.network_services.traffic_profile.prox_profile',
'yardstick.network_services.traffic_profile.prox_ramp',
'yardstick.network_services.traffic_profile.rfc2544',
+ 'yardstick.network_services.traffic_profile.pktgen',
]
for module in modules:
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index f4b5b178c..a8f950b7b 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import re
+
from yardstick.common import exceptions
from yardstick.common import utils
@@ -21,10 +23,12 @@ class TrafficProfileConfig(object):
This object will parse and validate the traffic profile information.
"""
-
DEFAULT_SCHEMA = 'nsb:traffic_profile:0.1'
- DEFAULT_FRAME_RATE = 100
+ DEFAULT_FRAME_RATE = '100'
DEFAULT_DURATION = 30
+ RATE_FPS = 'fps'
+ RATE_PERCENTAGE = '%'
+ RATE_REGEX = re.compile(r'([0-9]*\.[0-9]+|[0-9]+)\s*(fps|%)*(.*)')
def __init__(self, tp_config):
self.schema = tp_config.get('schema', self.DEFAULT_SCHEMA)
@@ -32,7 +36,8 @@ class TrafficProfileConfig(object):
self.description = tp_config.get('description')
tprofile = tp_config['traffic_profile']
self.traffic_type = tprofile.get('traffic_type')
- self.frame_rate = tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE)
+ self.frame_rate, self.rate_unit = self._parse_rate(
+ tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE))
self.test_precision = tprofile.get('test_precision')
self.packet_sizes = tprofile.get('packet_sizes')
self.duration = tprofile.get('duration', self.DEFAULT_DURATION)
@@ -40,6 +45,27 @@ class TrafficProfileConfig(object):
self.upper_bound = tprofile.get('upper_bound')
self.step_interval = tprofile.get('step_interval')
+ def _parse_rate(self, rate):
+ """Parse traffic profile rate
+
+ The line rate can be defined in fps or as a percentage of the maximum
+ line rate:
+ - frame_rate = 5000 (by default, unit is 'fps')
+ - frame_rate = 5000fps
+ - frame_rate = 25%
+
+ :param rate: (string, int) line rate in fps or %
+ :return: (tuple: float, string) line rate number and unit
+ """
+ match = self.RATE_REGEX.match(str(rate))
+ if not match:
+ raise exceptions.TrafficProfileRate()
+ rate = float(match.group(1))
+ unit = match.group(2) if match.group(2) else self.RATE_FPS
+ if match.group(3):
+ raise exceptions.TrafficProfileRate()
+ return rate, unit
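+
+ # Illustrative examples (derived from RATE_REGEX and the code above):
+ # _parse_rate(100) -> (100.0, 'fps')
+ # _parse_rate('25%') -> (25.0, '%')
+ # _parse_rate('10 Gbps') -> raises TrafficProfileRate (unrecognised unit)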
+
class TrafficProfile(object):
"""
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 39336785e..2086273e6 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -14,25 +14,43 @@
import logging
-from yardstick.network_services.traffic_profile.trex_traffic_profile import \
- TrexProfile
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import trex_traffic_profile
+
LOG = logging.getLogger(__name__)
-class IXIARFC2544Profile(TrexProfile):
+class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
UPLINK = 'uplink'
DOWNLINK = 'downlink'
+ DROP_PERCENT_ROUND = 6
+ RATE_ROUND = 5
def __init__(self, yaml_data):
super(IXIARFC2544Profile, self).__init__(yaml_data)
self.rate = self.config.frame_rate
+ self.rate_unit = self.config.rate_unit
- def _get_ixia_traffic_profile(self, profile_data, mac=None):
- if mac is None:
- mac = {}
+ def _get_ip_and_mask(self, ip_range):
+ _ip_range = ip_range.split('-')
+ if len(_ip_range) == 1:
+ return _ip_range[0], None
+
+ mask = utils.get_mask_from_ip_range(_ip_range[0], _ip_range[1])
+ return _ip_range[0], mask
+
+ def _get_fixed_and_mask(self, port_range):
+ _port_range = str(port_range).split('-')
+ if len(_port_range) == 1:
+ return int(_port_range[0]), 0
+ return int(_port_range[0]), int(_port_range[1])
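+
+ # Illustrative behaviour of the two helpers above:
+ # _get_fixed_and_mask('1000-1512') -> (1000, 1512)
+ # _get_fixed_and_mask(80) -> (80, 0)
+ # _get_ip_and_mask('10.0.0.1') -> ('10.0.0.1', None); for a range such as
+ # '10.0.0.1-10.0.0.255' the mask is computed by utils.get_mask_from_ip_range.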
+
+ def _get_ixia_traffic_profile(self, profile_data, mac=None):
+ mac = {} if mac is None else mac
result = {}
for traffickey, values in profile_data.items():
if not traffickey.startswith((self.UPLINK, self.DOWNLINK)):
@@ -48,18 +66,25 @@ class IXIARFC2544Profile(TrexProfile):
port_id = value.get('id', 1)
port_index = port_id - 1
- try:
- ip = value['outer_l3v6']
- except KeyError:
+
+ if value.get('outer_l3v4'):
ip = value['outer_l3v4']
src_key, dst_key = 'srcip4', 'dstip4'
else:
+ ip = value['outer_l3v6']
src_key, dst_key = 'srcip6', 'dstip6'
+ srcip, srcmask = self._get_ip_and_mask(ip[src_key])
+ dstip, dstmask = self._get_ip_and_mask(ip[dst_key])
+
+ outer_l4 = value.get('outer_l4')
+ src_port, src_port_mask = self._get_fixed_and_mask(outer_l4['srcport'])
+ dst_port, dst_port_mask = self._get_fixed_and_mask(outer_l4['dstport'])
result[traffickey] = {
'bidir': False,
- 'iload': '100',
'id': port_id,
+ 'rate': self.rate,
+ 'rate_unit': self.rate_unit,
'outer_l2': {
'framesize': value['outer_l2']['framesize'],
'framesPerSecond': True,
@@ -70,12 +95,23 @@ class IXIARFC2544Profile(TrexProfile):
'count': ip['count'],
'dscp': ip['dscp'],
'ttl': ip['ttl'],
- src_key: ip[src_key].split("-")[0],
- dst_key: ip[dst_key].split("-")[0],
+ 'seed': ip['seed'],
+ 'srcip': srcip,
+ 'dstip': dstip,
+ 'srcmask': srcmask,
+ 'dstmask': dstmask,
'type': key,
'proto': ip['proto'],
},
- 'outer_l4': value['outer_l4'],
+ 'outer_l4': {
+ 'srcport': src_port,
+ 'dstport': dst_port,
+ 'srcportmask': src_port_mask,
+ 'dstportmask': dst_port_mask,
+ 'count': outer_l4['count'],
+ 'seed': outer_l4['seed'],
+ }
}
except KeyError:
continue
@@ -83,11 +119,9 @@ class IXIARFC2544Profile(TrexProfile):
return result
def _ixia_traffic_generate(self, traffic, ixia_obj):
- for key, value in traffic.items():
- if key.startswith((self.UPLINK, self.DOWNLINK)):
- value['iload'] = str(self.rate)
- ixia_obj.update_frame(traffic)
+ ixia_obj.update_frame(traffic, self.config.duration)
ixia_obj.update_ip_packet(traffic)
+ ixia_obj.update_l4(traffic)
ixia_obj.start_traffic()
def update_traffic_profile(self, traffic_generator):
@@ -114,19 +148,21 @@ class IXIARFC2544Profile(TrexProfile):
self.pg_id = 0
self.update_traffic_profile(traffic_generator)
self.max_rate = self.rate
- self.min_rate = 0
+ self.min_rate = 0.0
else:
- self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 2)
+ self.rate = round(float(self.max_rate + self.min_rate) / 2.0,
+ self.RATE_ROUND)
traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
self._ixia_traffic_generate(traffic, ixia_obj)
return first_run
- def get_drop_percentage(self, samples, tol_min, tolerance, duration=30.0,
+ def get_drop_percentage(self, samples, tol_min, tolerance,
first_run=False):
completed = False
drop_percent = 100
num_ifaces = len(samples)
+ duration = self.config.duration
in_packets_sum = sum(
[samples[iface]['in_packets'] for iface in samples])
out_packets_sum = sum(
@@ -141,7 +177,8 @@ class IXIARFC2544Profile(TrexProfile):
try:
drop_percent = round(
- (packet_drop / float(out_packets_sum)) * 100, 2)
+ (packet_drop / float(out_packets_sum)) * 100,
+ self.DROP_PERCENT_ROUND)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
@@ -150,8 +187,10 @@ class IXIARFC2544Profile(TrexProfile):
samples['DropPercentage'] = drop_percent
if first_run:
- self.rate = out_packets_sum / duration / num_ifaces
completed = True if drop_percent <= tolerance else False
+ if (first_run and
+ self.rate_unit == tp_base.TrafficProfileConfig.RATE_FPS):
+ self.rate = float(out_packets_sum) / duration / num_ifaces
if drop_percent > tolerance:
self.max_rate = self.rate
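+ # Illustrative numbers: with 15 packets dropped out of 1000 sent,
+ # drop_percent is 1.5; when that exceeds the tolerance the current rate
+ # becomes the new max_rate and the next rate is bisected between
+ # min_rate and max_rate (see the rate calculation above).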
diff --git a/yardstick/network_services/traffic_profile/pktgen.py b/yardstick/network_services/traffic_profile/pktgen.py
new file mode 100644
index 000000000..30f81b794
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/pktgen.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+class PktgenTrafficProfile(tp_base.TrafficProfile):
+ """This class handles Pktgen Trex Traffic profile execution"""
+
+ def __init__(self, tp_config): # pragma: no cover
+ super(PktgenTrafficProfile, self).__init__(tp_config)
+ self._host = None
+ self._port = None
+
+ def init(self, host, port): # pragma: no cover
+ """Initialize control parameters
+
+ :param host: (str) IP address or host name
+ :param port: (int) TCP socket port number for Lua commands
+ """
+ self._host = host
+ self._port = port
+
+ def start(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.start("0")') != 0:
+ raise exceptions.PktgenActionError(action='start')
+
+ def stop(self):
+ if utils.send_socket_command(self._host, self._port,
+ 'pktgen.stop("0")') != 0:
+ raise exceptions.PktgenActionError(action='stop')
+
+ def rate(self, rate):
+ command = 'pktgen.set("0", "rate", ' + str(rate) + ')'
+ if utils.send_socket_command(self._host, self._port, command) != 0:
+ raise exceptions.PktgenActionError(action='rate')
+
+ def clear_all_stats(self):
+ if utils.send_socket_command(self._host, self._port, 'clr') != 0:
+ raise exceptions.PktgenActionError(action='clear all stats')
+
+ def help(self):
+ if utils.send_socket_command(self._host, self._port, 'help') != 0:
+ raise exceptions.PktgenActionError(action='help')
+
+ def execute_traffic(self, *args, **kwargs): # pragma: no cover
+ pass
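+
+
+# Usage sketch (illustrative only; the host and port values are examples,
+# not defaults): the traffic generator passes the pktgen node address and
+# the Lua socket port to init() and then drives the traffic, e.g.:
+#
+#   profile = PktgenTrafficProfile(tp_config)
+#   profile.init('10.0.0.5', 30033)   # node IP, exposed Lua socket port
+#   profile.clear_all_stats()
+#   profile.rate(50)                  # pktgen.set("0", "rate", 50)
+#   profile.start()                   # pktgen.start("0")
+#   ...
+#   profile.stop()                    # pktgen.stop("0")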
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index c24e2f65a..0e1dbd592 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -52,7 +52,7 @@ class PortPgIDMap(object):
self._port_pg_id_map[port] = []
def get_pg_ids(self, port):
- return self._port_pg_id_map.get(port)
+ return self._port_pg_id_map.get(port, [])
def increase_pg_id(self, port=None):
port = self._last_port if not port else port
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index 8e9bc87e1..11a602472 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -197,7 +197,7 @@ class AclApproxSetupEnvSetupEnvHelper(DpdkVnfSetupEnvHelper):
# e.g.: {"fwd": {"port": 0}}
# format action CLI command and add it to the list
if action_name not in _action_template_map.keys():
- raise exceptions.AclUknownActionTemplate(
+ raise exceptions.AclUnknownActionTemplate(
action_name=action_name)
template = _action_template_map[action_name]
try:
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
new file mode 100644
index 000000000..9d452213f
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_pktgen.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import multiprocessing
+import time
+import uuid
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PktgenTrafficGen(vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint):
+ """DPDK Pktgen traffic generator
+
+ Website: http://pktgen-dpdk.readthedocs.io/en/latest/index.html
+ """
+
+ TIMEOUT = 30
+
+ def __init__(self, name, vnfd, task_id):
+ vnf_base.GenericTrafficGen.__init__(self, name, vnfd, task_id)
+ self.queue = multiprocessing.Queue()
+ self._id = uuid.uuid1().int
+ self._mq_producer = self._setup_mq_producer(self._id)
+ vnf_base.GenericVNFEndpoint.__init__(self, self._id, [task_id],
+ self.queue)
+ self._consumer = vnf_base.GenericVNFConsumer([task_id], self)
+ self._consumer.start_rpc_server()
+ self._traffic_profile = None
+ self._node_ip = vnfd['mgmt-interface'].get('ip')
+ self._lua_node_port = self._get_lua_node_port(
+ vnfd['mgmt-interface'].get('service_ports', []))
+ self._rate = 1
+
+ def instantiate(self, scenario_cfg, context_cfg): # pragma: no cover
+ pass
+
+ def run_traffic(self, traffic_profile):
+ self._traffic_profile = traffic_profile
+ self._traffic_profile.init(self._node_ip, self._lua_node_port)
+ utils.wait_until_true(self._is_running, timeout=self.TIMEOUT,
+ sleep=2)
+
+ def terminate(self): # pragma: no cover
+ pass
+
+ def collect_kpi(self): # pragma: no cover
+ pass
+
+ def scale(self, flavor=''): # pragma: no cover
+ pass
+
+ def wait_for_instantiate(self): # pragma: no cover
+ pass
+
+ def runner_method_start_iteration(self, ctxt, **kwargs):
+ LOG.debug('Start method')
+ # NOTE(ralonsoh): 'rate' should be modified between iterations. The
+ # current implementation is just for testing.
+ self._rate += 1
+ self._traffic_profile.start()
+ self._traffic_profile.rate(self._rate)
+ time.sleep(4)
+ self._traffic_profile.stop()
+ self._mq_producer.tg_method_iteration(1, 1, {})
+
+ def runner_method_stop_iteration(self, ctxt, **kwargs): # pragma: no cover
+ LOG.debug('Stop method')
+
+ @staticmethod
+ def _get_lua_node_port(service_ports):
+ for port in (port for port in service_ports if
+ int(port['port']) == constants.LUA_PORT):
+ return int(port['node_port'])
+ # NOTE(ralonsoh): if the Lua port is not present, an exception should
+ # be raised.
+
+ def _is_running(self):
+ try:
+ self._traffic_profile.help()
+ return True
+ except exceptions.PktgenActionError:
+ return False
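+
+
+# Illustrative example of the service-port lookup above (values are
+# hypothetical): given
+#   service_ports = [{'port': str(constants.LUA_PORT), 'node_port': '30033'}]
+# _get_lua_node_port(service_ports) returns 30033, the node port that is
+# forwarded to the pktgen Lua socket and passed to
+# PktgenTrafficProfile.init() in run_traffic().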
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 8d9fc41c9..b0b93a3c2 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -308,7 +308,7 @@ class ServiceNodePortObject(object):
k8s_utils.create_service(self.template)
def delete(self):
- k8s_utils.delete_service(self._name)
+ k8s_utils.delete_service(self._name, skip_codes=[404])
class CustomResourceDefinitionObject(object):
@@ -349,7 +349,7 @@ class CustomResourceDefinitionObject(object):
k8s_utils.create_custom_resource_definition(self._template)
def delete(self):
- k8s_utils.delete_custom_resource_definition(self._name)
+ k8s_utils.delete_custom_resource_definition(self._name, skip_codes=[404])
class NetworkObject(object):
@@ -437,11 +437,11 @@ class NetworkObject(object):
def create(self):
k8s_utils.create_network(self.scope, self.group, self.version,
- self.plural, self.template)
+ self.plural, self.template, self._name)
def delete(self):
k8s_utils.delete_network(self.scope, self.group, self.version,
- self.plural, self._name)
+ self.plural, self._name, skip_codes=[404])
class KubernetesTemplate(object):
diff --git a/yardstick/service/environment.py b/yardstick/service/environment.py
index 324589f79..d910e31e9 100644
--- a/yardstick/service/environment.py
+++ b/yardstick/service/environment.py
@@ -36,7 +36,7 @@ class Environment(Service):
return self._format_sut_info(sut_info)
- def _load_pod_info(self):
+ def _load_pod_info(self): # pragma: no cover
if self.pod is None:
raise MissingPodInfoError
@@ -51,10 +51,10 @@ class Environment(Service):
except (ValueError, KeyError):
raise UnsupportedPodFormatError
- def _format_sut_info(self, sut_info):
+ def _format_sut_info(self, sut_info): # pragma: no cover
return {k: self._format_node_info(v) for k, v in sut_info.items()}
- def _format_node_info(self, node_info):
+ def _format_node_info(self, node_info): # pragma: no cover
info = []
facts = node_info.get('ansible_facts', {})
@@ -93,9 +93,9 @@ class Environment(Service):
return info
- def _get_interface_info(self, facts, name):
+ def _get_interface_info(self, facts, name): # pragma: no cover
mac = facts.get('ansible_{}'.format(name), {}).get('macaddress')
return [name, mac] if mac else []
- def _get_device_info(self, name, info):
+ def _get_device_info(self, name, info): # pragma: no cover
return ['disk_{}'.format(name), info.get('size')]
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index e6a26ab6b..8bdc32c7c 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -499,9 +499,10 @@ class AutoConnectSSH(SSH):
""" Don't close anything, just force creation of a new client """
self._client = False
- def execute(self, cmd, stdin=None, timeout=3600):
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
self._connect()
- return super(AutoConnectSSH, self).execute(cmd, stdin, timeout)
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
def run(self, cmd, stdin=None, stdout=None, stderr=None,
raise_on_error=True, timeout=3600,
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 35236637d..e1414c2ae 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -9,11 +9,13 @@
import copy
import io
+import logging
import os
import sys
import mock
import six
+from six.moves import builtins
import unittest
import uuid
@@ -322,9 +324,9 @@ class TaskTestCase(unittest.TestCase):
actual_result = t._parse_options(options)
self.assertEqual(expected_result, actual_result)
- @mock.patch('six.moves.builtins.open', side_effect=mock.mock_open())
+ @mock.patch.object(builtins, 'open', side_effect=mock.mock_open())
@mock.patch.object(task, 'utils')
- @mock.patch('logging.root')
+ @mock.patch.object(logging, 'root')
def test_set_log(self, mock_logging_root, *args):
task_obj = task.Task()
task_obj.task_id = 'task_id'
@@ -561,7 +563,8 @@ key2:
mock_open.assert_has_calls([mock.call('args_file'),
mock.call('task_file')])
- def test__render_task_error_arguments(self):
+ @mock.patch.object(builtins, 'print')
+ def test__render_task_error_arguments(self, *args):
with self.assertRaises(exceptions.TaskRenderArgumentError):
task.TaskParser('task_file')._render_task('value1="var3"', None)
diff --git a/yardstick/tests/unit/benchmark/core/test_testcase.py b/yardstick/tests/unit/benchmark/core/test_testcase.py
index 119465887..077848d77 100644
--- a/yardstick/tests/unit/benchmark/core/test_testcase.py
+++ b/yardstick/tests/unit/benchmark/core/test_testcase.py
@@ -7,28 +7,28 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.cmd.commands.testcase
-
-from __future__ import absolute_import
-import unittest
+import mock
+from six.moves import builtins
from yardstick.benchmark.core import testcase
+from yardstick.tests.unit import base as ut_base
class Arg(object):
def __init__(self):
- self.casename = ('opnfv_yardstick_tc001',)
+ self.casename = ('opnfv_yardstick_tc001', )
-class TestcaseUT(unittest.TestCase):
+class TestcaseTestCase(ut_base.BaseUnitTestCase):
def test_list_all(self):
t = testcase.Testcase()
result = t.list_all("")
self.assertIsInstance(result, list)
- def test_show(self):
+ @mock.patch.object(builtins, 'print')
+ def test_show(self, *args):
t = testcase.Testcase()
casename = Arg()
result = t.show(casename)
diff --git a/yardstick/tests/unit/benchmark/runner/test_arithmetic.py b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
new file mode 100644
index 000000000..7b1e1e976
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_arithmetic.py
@@ -0,0 +1,220 @@
+##############################################################################
+# Copyright (c) 2018 Nokia and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import unittest
+import multiprocessing
+import os
+import time
+
+from yardstick.benchmark.runners import arithmetic
+
+
+class ArithmeticRunnerTest(unittest.TestCase):
+ class MyMethod(object):
+ def __init__(self):
+ self.count = 101
+
+ def __call__(self, data):
+ self.count += 1
+ data['my_key'] = self.count
+ return self.count
+
+ def setUp(self):
+ self.scenario_cfg = {
+ 'runner': {
+ 'interval': 0,
+ 'iter_type': 'nested_for_loops',
+ 'iterators': [
+ {
+ 'name': 'stride',
+ 'start': 64,
+ 'stop': 128,
+ 'step': 64
+ },
+ {
+ 'name': 'size',
+ 'start': 500,
+ 'stop': 2000,
+ 'step': 500
+ }
+ ]
+ },
+ 'type': 'some_type'
+ }
+
+ self.benchmark = mock.Mock()
+ self.benchmark_cls = mock.Mock(return_value=self.benchmark)
+
+ def _assert_defaults__worker_process_run_setup_and_teardown(self):
+ self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
+ self.benchmark.setup.assert_called_once()
+ self.benchmark.teardown.assert_called_once()
+
+ @mock.patch.object(os, 'getpid')
+ @mock.patch.object(multiprocessing, 'Process')
+ def test__run_benchmark_called_with(self, mock_multiprocessing_process,
+ mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ runner = arithmetic.ArithmeticRunner({})
+ benchmark_cls = mock.Mock()
+ runner._run_benchmark(benchmark_cls, 'my_method', self.scenario_cfg,
+ {})
+ mock_multiprocessing_process.assert_called_once_with(
+ name='Arithmetic-some_type-101',
+ target=arithmetic._worker_process,
+ args=(runner.result_queue, benchmark_cls, 'my_method',
+ self.scenario_cfg, {}, runner.aborted, runner.output_queue))
+
+ @mock.patch.object(os, 'getpid')
+ def test__worker_process_runner_id(self, mock_os_getpid):
+ mock_os_getpid.return_value = 101
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_nested_for_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 8)
+ self.assertEqual(self.benchmark.my_method.call_count, 8)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 8)
+ self.assertEqual(mock_time_sleep.call_count, 8)
+
+ @mock.patch.object(time, 'sleep')
+ def test__worker_process_calls_tuple_loops(self, mock_time_sleep):
+ self.scenario_cfg['runner']['interval'] = 99
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.benchmark.my_method.assert_has_calls([mock.call({})] * 2)
+ self.assertEqual(self.benchmark.my_method.call_count, 2)
+ mock_time_sleep.assert_has_calls([mock.call(99)] * 2)
+ self.assertEqual(mock_time_sleep.call_count, 2)
+
+ def test__worker_process_stored_options_nested_for_loops(self):
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 2000})
+
+ def test__worker_process_stored_options_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+
+ self.assertDictEqual(self.scenario_cfg['options'],
+ {'stride': 128, 'size': 1000})
+
+ def test__worker_process_aborted_set_early(self):
+ aborted = multiprocessing.Event()
+ aborted.set()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ aborted, mock.Mock())
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.scenario_cfg['options'], {})
+ self.benchmark.my_method.assert_not_called()
+
+ def test__worker_process_output_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103, 104, 105, 106, 107, 108, 109])
+
+ def test__worker_process_output_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ output_queue = multiprocessing.Queue()
+ arithmetic._worker_process(mock.Mock(), self.benchmark_cls,
+ 'my_method', self.scenario_cfg, {},
+ multiprocessing.Event(), output_queue)
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ result = []
+ while not output_queue.empty():
+ result.append(output_queue.get())
+ self.assertListEqual(result, [102, 103])
+
+ def test__worker_process_queue_nested_for_loops(self):
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 109)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
+
+ def test__worker_process_queue_tuple_loops(self):
+ self.scenario_cfg['runner']['iter_type'] = 'tuple_loops'
+ self.benchmark.my_method = self.MyMethod()
+
+ queue = multiprocessing.Queue()
+ timestamp = time.time()
+ arithmetic._worker_process(queue, self.benchmark_cls, 'my_method',
+ self.scenario_cfg, {},
+ multiprocessing.Event(), mock.Mock())
+ time.sleep(0.01)
+
+ self._assert_defaults__worker_process_run_setup_and_teardown()
+ self.assertEqual(self.benchmark.my_method.count, 103)
+ count = 0
+ while not queue.empty():
+ count += 1
+ result = queue.get()
+ self.assertEqual(result['errors'], '')
+ self.assertEqual(result['data'], {'my_key': count + 101})
+ self.assertEqual(result['sequence'], count)
+ self.assertGreater(result['timestamp'], timestamp)
+ timestamp = result['timestamp']
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 4016f5055..5761e2403 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -9,18 +9,22 @@
import mock
import unittest
+import logging
from oslo_serialization import jsonutils
+from yardstick import ssh
from yardstick.benchmark.scenarios.networking import pktgen
from yardstick.common import exceptions as y_exc
-@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
+logging.disable(logging.CRITICAL)
+
+
class PktgenTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
'host': {
'ip': '172.16.0.137',
'user': 'root',
@@ -33,636 +37,416 @@ class PktgenTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138'
}
}
+ self.scenario_cfg = {
+ 'options': {'packetsize': 60}
+ }
- def test_pktgen_successful_setup(self, mock_ssh):
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.setup()
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_SSH.from_node().run.return_value = 0
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.assertIsNotNone(p.server)
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ self.addCleanup(self._stop_mock)
- def test_pktgen_successful_iptables_setup(self, mock_ssh):
+ self.scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ self.scenario.setup()
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+
+ def test_setup_successful(self):
+ self.assertIsNotNone(self.scenario.server)
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
- p._iptables_setup()
+ def test_iptables_setup_successful(self):
+ self.scenario.number_of_ports = 10
+ self.scenario._iptables_setup()
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"sudo iptables -F; "
"sudo iptables -A INPUT -p udp --dport 1000:%s -j DROP"
% 1010, timeout=60)
- def test_pktgen_unsuccessful_iptables_setup(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
-
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
-
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._iptables_setup)
+ def test_iptables_setup_unsuccessful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
- def test_pktgen_successful_iptables_get_result(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._iptables_setup()
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ def test_iptables_get_result_successful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().execute.return_value = (0, '150000', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '150000', '')
- result = p._iptables_get_result()
- expected_result = 150000
- self.assertEqual(result, expected_result)
+ result = self.scenario._iptables_get_result()
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(result, 150000)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo iptables -L INPUT -vnx |"
"awk '/dpts:1000:%s/ {{printf \"%%s\", $1}}'"
% 1010, raise_on_error=True)
- def test_pktgen_unsuccessful_iptables_get_result(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
-
- p = pktgen.Pktgen(args, self.ctx)
+ def test_iptables_get_result_unsuccessful(self):
+ self.scenario.number_of_ports = 10
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- p.server = mock_ssh.SSH.from_node()
- p.number_of_ports = args['options']['number_of_ports']
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._iptables_get_result()
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._iptables_get_result)
+ def test_run_successful_no_sla(self):
+ self.scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110,
+ "ppm": 3179})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_successful_no_sla(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- }
result = {}
+ self.scenario.run(result)
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_successful_sla(self, mock_ssh):
+ def test_run_successful_sla(self):
+ self.scenario_cfg['sla'] = {'max_ppm': 10000}
+ scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110,
+ "ppm": 3179})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 10000}
- }
result = {}
+ scenario.run(result)
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_unsuccessful_sla(self, mock_ssh):
-
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 1000}
- }
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
+ def test_run_unsuccessful_sla(self):
+ self.scenario_cfg['sla'] = {'max_ppm': 1000}
+ scenario = pktgen.Pktgen(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149776,
+ "packetsize": 60,
+ "flows": 110})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SLAValidationError):
+ scenario.run({})
- p._iptables_get_result = mock.Mock(return_value=149300)
+ def test_run_ssh_error_not_caught(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(y_exc.SLAValidationError, p.run, result)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario.run({})
- def test_pktgen_unsuccessful_script_error(self, mock_ssh):
+ def test_get_vnic_driver_name(self):
+ self.mock_SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
+ vnic_driver_name = self.scenario._get_vnic_driver_name()
- args = {
- 'options': {'packetsize': 60, 'number_of_ports': 10},
- 'sla': {'max_ppm': 1000}
- }
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p.run, result)
-
- def test_pktgen_get_vnic_driver_name(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
-
- vnic_driver_name = p._get_vnic_driver_name()
self.assertEqual(vnic_driver_name, 'ixgbevf')
- def test_pktgen_unsuccessful_get_vnic_driver_name(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._get_vnic_driver_name)
+ def test_get_vnic_driver_name_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_get_sriov_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_vnic_driver_name()
- p.queue_number = p._get_sriov_queue_number()
- self.assertEqual(p.queue_number, 2)
+ def test_get_sriov_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '2', '')
- def test_pktgen_unsuccessful_get_sriov_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ self.scenario.queue_number = self.scenario._get_sriov_queue_number()
+ self.assertEqual(self.scenario.queue_number, 2)
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_get_sriov_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._get_sriov_queue_number)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_sriov_queue_number()
- def test_pktgen_get_available_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_get_available_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '4', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
-
- self.assertEqual(p._get_available_queue_number(), 4)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(self.scenario._get_available_queue_number(), 4)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | head -1 |"
"awk '{printf $2}'", raise_on_error=True)
- def test_pktgen_unsuccessful_get_available_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_get_available_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._get_available_queue_number)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_available_queue_number()
- def test_pktgen_get_usable_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_get_usable_queue_number(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
- self.assertEqual(p._get_usable_queue_number(), 1)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertEqual(self.scenario._get_usable_queue_number(), 1)
+ self.mock_SSH.from_node().execute.assert_called_with(
"sudo ethtool -l eth0 | grep Combined | tail -1 |"
"awk '{printf $2}'", raise_on_error=True)
- def test_pktgen_unsuccessful_get_usable_queue_number(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_get_usable_queue_number_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._get_usable_queue_number()
- self.assertRaises(y_exc.SSHError, p._get_usable_queue_number)
+ def test_enable_ovs_multiqueue(self):
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
+ self.scenario.queue_number = self.scenario._enable_ovs_multiqueue()
- def test_pktgen_enable_ovs_multiqueue(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+ self.assertEqual(self.scenario.queue_number, 4)
+ self.mock_SSH.from_node().run.assert_has_calls(
+ (mock.call("sudo ethtool -L eth0 combined 4"),
+ mock.call("sudo ethtool -L eth0 combined 4")))
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
+ def test_enable_ovs_multiqueue_1q(self):
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=1)
+ self.scenario.queue_number = self.scenario._enable_ovs_multiqueue()
- p.queue_number = p._enable_ovs_multiqueue()
- self.assertEqual(p.queue_number, 4)
-
- def test_pktgen_enable_ovs_multiqueue_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.mock_SSH.from_node().run.assert_not_called()
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_enable_ovs_multiqueue_unsuccessful(self):
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=1)
-
- p.queue_number = p._enable_ovs_multiqueue()
- self.assertEqual(p.queue_number, 1)
-
- def test_pktgen_unsuccessful_enable_ovs_multiqueue(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._enable_ovs_multiqueue()
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
+ def test_setup_irqmapping_ovs(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_ovs(4)
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
-
- self.assertRaises(y_exc.SSHError, p._enable_ovs_multiqueue)
-
- def test_pktgen_setup_irqmapping_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
-
- p._setup_irqmapping_ovs(4)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 8 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_setup_irqmapping_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ def test_setup_irqmapping_ovs_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_ovs(1)
- p._setup_irqmapping_ovs(1)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_unsuccessful_setup_irqmapping_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 4)
+ def test_setup_irqmapping_ovs_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_unsuccessful_setup_irqmapping_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_ovs(4)
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_ovs, 1)
-
- def test_pktgen_setup_irqmapping_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ def test_setup_irqmapping_ovs_1q_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_ovs(1)
- p._setup_irqmapping_sriov(2)
+ def test_setup_irqmapping_sriov(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 2 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_setup_irqmapping_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+ def test_setup_irqmapping_sriov_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '10', '')
+ self.scenario._setup_irqmapping_sriov(1)
- p._setup_irqmapping_sriov(1)
-
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"echo 1 | sudo tee /proc/irq/10/smp_affinity")
- def test_pktgen_unsuccessful_setup_irqmapping_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 2)
+ def test_setup_irqmapping_sriov_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- def test_pktgen_unsuccessful_setup_irqmapping_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_sriov(2)
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
+ def test_setup_irqmapping_sriov_1q_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.assertRaises(y_exc.SSHError, p._setup_irqmapping_sriov, 1)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._setup_irqmapping_sriov(1)
- def test_pktgen_is_irqbalance_disabled(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
+ def test_is_irqbalance_disabled(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-
- result = p._is_irqbalance_disabled()
- self.assertFalse(result)
-
- mock_ssh.SSH.from_node().execute.assert_called_with(
+ self.assertFalse(self.scenario._is_irqbalance_disabled())
+ self.mock_SSH.from_node().execute.assert_called_with(
"grep ENABLED /etc/default/irqbalance", raise_on_error=True)
- def test_pktgen_unsuccessful_is_irqbalance_disabled(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._is_irqbalance_disabled)
-
- def test_pktgen_disable_irqbalance(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ def test_is_irqbalance_disabled_unsuccessful(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- mock_ssh.SSH.from_node().run.return_value = (0, '', '')
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._is_irqbalance_disabled()
- p._disable_irqbalance()
+ def test_disable_irqbalance(self):
+ self.scenario._disable_irqbalance()
- mock_ssh.SSH.from_node().run.assert_called_with(
+ self.mock_SSH.from_node().run.assert_called_with(
"sudo service irqbalance disable")
- def test_pktgen_unsuccessful_disable_irqbalance(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().run.side_effect = y_exc.SSHError
-
- self.assertRaises(y_exc.SSHError, p._disable_irqbalance)
-
- def test_pktgen_multiqueue_setup_ovs(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+ def test_disable_irqbalance_unsuccessful(self):
+ self.mock_SSH.from_node().run.side_effect = y_exc.SSHError
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario._disable_irqbalance()
- p.multiqueue_setup()
+ def test_multiqueue_setup_ovs(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '4', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(
+ return_value="virtio_net")
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=4)
- self.assertEqual(p.queue_number, 4)
+ self.scenario.multiqueue_setup()
- def test_pktgen_multiqueue_setup_ovs_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.assertEqual(self.scenario.queue_number, 4)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+ def test_multiqueue_setup_ovs_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(
+ return_value="virtio_net")
+ self.scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ self.scenario._get_available_queue_number = mock.Mock(return_value=1)
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=1)
+ self.scenario.multiqueue_setup()
- p.multiqueue_setup()
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- self.assertEqual(p.queue_number, 1)
+ def test_multiqueue_setup_sriov(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '2', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- def test_pktgen_multiqueue_setup_sriov(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ self.scenario.multiqueue_setup()
- mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+ self.assertEqual(self.scenario.queue_number, 2)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ def test_multiqueue_setup_sriov_1q(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '1', '')
+ self.scenario._is_irqbalance_disabled = mock.Mock(return_value=False)
+ self.scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- p.multiqueue_setup()
+ self.scenario.multiqueue_setup()
- self.assertEqual(p.queue_number, 2)
+ self.assertEqual(self.scenario.queue_number, 1)
+ self.assertTrue(self.scenario.multiqueue_setup_done)
- def test_pktgen_multiqueue_setup_sriov_1q(self, mock_ssh):
- args = {
- 'options': {'packetsize': 60, 'multiqueue': True},
- }
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
-
- p._is_irqbalance_disabled = mock.Mock(return_value=False)
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
-
- p.multiqueue_setup()
-
- self.assertEqual(p.queue_number, 1)
-
- def test_pktgen_run_with_setup_done(self, mock_ssh):
- args = {
+ def test_run_with_setup_done(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
'sla': {
- 'max_ppm': 1}}
- result = {}
- p = pktgen.Pktgen(args, self.ctx)
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
+ 'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.server = self.mock_SSH.from_node()
+ scenario.client = self.mock_SSH.from_node()
+ scenario.setup_done = True
+ scenario.multiqueue_setup_done = True
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.setup_done = True
- p.multiqueue_setup_done = True
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_run_with_ovs_multiqueque(self, mock_ssh):
- args = {
+ def test_run_with_ovs_multiqueue(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
- 'sla': {
- 'max_ppm': 1}}
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
-
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
- p._get_usable_queue_number = mock.Mock(return_value=1)
- p._get_available_queue_number = mock.Mock(return_value=4)
- p._enable_ovs_multiqueue = mock.Mock(return_value=4)
- p._setup_irqmapping_ovs = mock.Mock()
- p._iptables_get_result = mock.Mock(return_value=149300)
+ 'sla': {'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ scenario._get_usable_queue_number = mock.Mock(return_value=1)
+ scenario._get_available_queue_number = mock.Mock(return_value=4)
+ scenario._enable_ovs_multiqueue = mock.Mock(return_value=4)
+ scenario._setup_irqmapping_ovs = mock.Mock()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
- def test_pktgen_run_with_sriov_multiqueque(self, mock_ssh):
- args = {
+ def test_run_with_sriov_multiqueue(self):
+ scenario_cfg = {
'options': {
'packetsize': 60,
'number_of_ports': 10,
'duration': 20,
'multiqueue': True},
- 'sla': {
- 'max_ppm': 1}}
- result = {}
-
- p = pktgen.Pktgen(args, self.ctx)
+ 'sla': {'max_ppm': 1}
+ }
+ scenario = pktgen.Pktgen(scenario_cfg, self.context_cfg)
+ scenario.setup()
+ scenario._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ scenario._get_sriov_queue_number = mock.Mock(return_value=2)
+ scenario._setup_irqmapping_sriov = mock.Mock()
+ scenario._iptables_get_result = mock.Mock(return_value=149300)
+
+ sample_output = jsonutils.dumps({"packets_per_second": 9753,
+ "errors": 0,
+ "packets_sent": 149300,
+ "flows": 110,
+ "ppm": 0})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.server = mock_ssh.SSH.from_node()
- p.client = mock_ssh.SSH.from_node()
-
- p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
- p._get_sriov_queue_number = mock.Mock(return_value=2)
- p._setup_irqmapping_sriov = mock.Mock()
- p._iptables_get_result = mock.Mock(return_value=149300)
-
- sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110, "ppm": 0}'
- mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ result = {}
+ scenario.run(result)
- p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
expected_result["packetsize"] = 60
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index bcd417830..70cd8ad40 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -9,16 +9,22 @@
import mock
import unittest
+import time
+import logging
import yardstick.common.utils as utils
+from yardstick import ssh
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
from yardstick.common import exceptions as y_exc
+logging.disable(logging.CRITICAL)
+
+
class PktgenDPDKLatencyTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
'host': {
'ip': '172.16.0.137',
'user': 'root',
@@ -31,165 +37,100 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
'ipaddr': '172.16.0.138'
}
}
-
- self._mock_ssh = mock.patch(
- 'yardstick.benchmark.scenarios.networking.pktgen_dpdk.ssh')
- self.mock_ssh = self._mock_ssh.start()
- self._mock_time = mock.patch(
- 'yardstick.benchmark.scenarios.networking.pktgen_dpdk.time')
- self.mock_time = self._mock_time.start()
-
- self.addCleanup(self._stop_mock)
-
- def _stop_mock(self):
- self._mock_ssh.stop()
- self._mock_time.stop()
-
- def test_pktgen_dpdk_successful_setup(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.setup()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.assertIsNotNone(p.server)
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- def test_pktgen_dpdk_successful_get_port_ip(self):
-
- args = {
- 'options': {'packetsize': 60},
+ self.scenario_cfg = {
+ 'options': {'packetsize': 60}
}
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- utils.get_port_ip(p.server, "eth1")
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
- self.mock_ssh.SSH.from_node().execute.assert_called_with(
- "ifconfig eth1 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ")
-
- def test_pktgen_dpdk_unsuccessful_get_port_ip(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self._mock_utils_get_port_ip = mock.patch.object(utils, 'get_port_ip')
+ self.mock_utils_get_port_ip = self._mock_utils_get_port_ip.start()
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, utils.get_port_ip, p.server, "eth1")
+ self._mock_utils_get_port_mac = mock.patch.object(utils,
+ 'get_port_mac')
+ self.mock_utils_get_port_mac = self._mock_utils_get_port_mac.start()
- def test_pktgen_dpdk_successful_get_port_mac(self):
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
- args = {
- 'options': {'packetsize': 60},
- }
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self.addCleanup(self._stop_mock)
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
+ self.scenario.server = self.mock_SSH.from_node()
+ self.scenario.client = self.mock_SSH.from_node()
- utils.get_port_mac(p.server, "eth1")
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+ self._mock_time_sleep.stop()
+ self._mock_utils_get_port_ip.stop()
+ self._mock_utils_get_port_mac.stop()
- self.mock_ssh.SSH.from_node().execute.assert_called_with(
- "ifconfig |grep HWaddr |grep eth1 |awk '{print $5}' ")
+ def test_setup(self):
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
+ scenario.setup()
- def test_pktgen_dpdk_unsuccessful_get_port_mac(self):
+ self.assertIsNotNone(scenario.server)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
- args = {
- 'options': {'packetsize': 60},
- }
+ def test_run_get_port_ip_command(self):
+ self.scenario.run({})
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
- p.server = self.mock_ssh.SSH.from_node()
+ self.mock_utils_get_port_ip.assert_has_calls(
+ [mock.call(self.scenario.server, 'ens4'),
+ mock.call(self.scenario.server, 'ens5')])
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, utils.get_port_mac, p.server, "eth1")
+ def test_get_port_mac_command(self):
+ self.scenario.run({})
- def test_pktgen_dpdk_successful_no_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- }
-
- result = {}
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ self.mock_utils_get_port_mac.assert_has_calls(
+ [mock.call(self.scenario.server, 'ens5'),
+ mock.call(self.scenario.server, 'ens4'),
+ mock.call(self.scenario.server, 'ens5')])
+ def test_run_no_sla(self):
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- p.run(result)
+ result = {}
+ self.scenario.run(result)
# with python 3 we get float, might be due python division changes
# AssertionError: {'avg_latency': 132.33333333333334} != {
# 'avg_latency': 132}
delta = result['avg_latency'] - 132
self.assertLessEqual(delta, 1)
- def test_pktgen_dpdk_successful_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
- result = {}
-
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ def test_run_sla(self):
+ self.scenario_cfg['sla'] = {'max_latency': 100}
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
sample_output = '100\n100\n100\n100\n100\n100\n100\n100\n100\n100\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
-
- p.run(result)
-
- self.assertEqual(result, {"avg_latency": 100})
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_dpdk_unsuccessful_sla(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
result = {}
+ scenario.run(result)
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ self.assertEqual(result, {"avg_latency": 100})
- p.server = self.mock_ssh.SSH.from_node()
- p.client = self.mock_ssh.SSH.from_node()
+ def test_run_sla_error(self):
+ self.scenario_cfg['sla'] = {'max_latency': 100}
+ scenario = pktgen_dpdk.PktgenDPDKLatency(self.scenario_cfg,
+ self.context_cfg)
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
- self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(y_exc.SLAValidationError, p.run, result)
+ self.mock_SSH.from_node().execute.return_value = (0, sample_output, '')
- def test_pktgen_dpdk_run_unsuccessful_get_port_mac(self):
-
- args = {
- 'options': {'packetsize': 60},
- 'sla': {'max_latency': 100}
- }
- result = {}
-
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
-
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
- self.assertRaises(RuntimeError, p.run, result)
-
- def test_pktgen_dpdk_run_unsuccessful_script_error(self):
- args = {
- 'options': {'packetsize': 60}
- }
+ with self.assertRaises(y_exc.SLAValidationError):
+ scenario.run({})
- p = pktgen_dpdk.PktgenDPDKLatency(args, self.ctx)
+ def test_run_last_command_raise_on_error(self):
+ self.mock_SSH.from_node().execute.side_effect = y_exc.SSHError
- self.mock_ssh.SSH.from_node().execute.side_effect = ((0, '', ''),
- (0, '', ''),
- (0, '', ''),
- (0, '', ''),
- (0, '', ''),
- y_exc.SSHError)
- self.assertRaises(y_exc.SSHError, p.run, {})
- self.assertEqual(self.mock_ssh.SSH.from_node().execute.call_count, 6)
+ with self.assertRaises(y_exc.SSHError):
+ self.scenario.run({})
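
The pktgen_dpdk rewrite above starts every patcher in setUp() with mock.patch.object() and registers a single addCleanup() hook, instead of stopping patchers by hand in each test. A generic sketch of that pattern (the patched time.sleep is real stdlib; the test case itself is illustrative only):

    import time
    import unittest

    import mock


    class PatchInSetUpTestCase(unittest.TestCase):
        """Start patchers in setUp(), stop them all via addCleanup()."""

        def setUp(self):
            self._mock_sleep = mock.patch.object(time, 'sleep')
            self.mock_sleep = self._mock_sleep.start()
            # Runs after every test, even when the test fails or errors.
            self.addCleanup(self._stop_mocks)

        def _stop_mocks(self):
            self._mock_sleep.stop()

        def test_sleep_is_patched(self):
            time.sleep(3600)  # returns immediately; the real sleep is mocked
            self.mock_sleep.assert_called_once_with(3600)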
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index cdb91f66d..49578b383 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -358,35 +358,49 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.assertIsNotNone(self.topology)
def test__get_ip_flow_range_string(self):
- self.scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
result = '152.16.100.2-152.16.100.254'
self.assertEqual(result, self.s._get_ip_flow_range(
'152.16.100.2-152.16.100.254'))
- def test__get_ip_flow_range(self):
- self.scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
- result = '152.16.100.2-152.16.100.254'
- self.assertEqual(result, self.s._get_ip_flow_range({"tg__1": 'xe0'}))
+ def test__get_ip_flow_range_no_nodes(self):
+ self.assertEqual('0.0.0.0', self.s._get_ip_flow_range({}))
- @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.ipaddress')
- def test__get_ip_flow_range_no_node_data(self, mock_ipaddress):
- scenario_cfg = deepcopy(self.scenario_cfg)
- scenario_cfg["traffic_options"]["flow"] = \
- self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
+ def test__get_ip_flow_range_no_node_data(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {}
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('0.0.0.2-0.0.0.254', result)
- mock_ipaddress.ip_network.return_value = ipaddr = mock.Mock()
- ipaddr.hosts.return_value = []
+    def test__get_ip_flow_range_ipv4(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '192.168.1.15',
+ 'netmask': '255.255.255.128'}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('192.168.1.2-192.168.1.126', result)
- expected = '0.0.0.0'
- result = self.s._get_ip_flow_range({"tg__2": 'xe0'})
- self.assertEqual(result, expected)
+ def test__get_ip_flow_range_ipv4_mask_30(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '192.168.1.15', 'netmask': 30}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('192.168.1.15', result)
- def test__get_ip_flow_range_no_nodes(self):
- expected = '0.0.0.0'
- result = self.s._get_ip_flow_range({})
- self.assertEqual(result, expected)
+ def test__get_ip_flow_range_ipv6(self):
+ node_data = {'tg__0': 'xe0'}
+ self.s.context_cfg['nodes']['tg__0'] = {
+ 'interfaces': {
+ 'xe0': {'local_ip': '2001::11', 'netmask': 64}
+ }
+ }
+ result = self.s._get_ip_flow_range(node_data)
+ self.assertEqual('2001::2-2001::ffff:ffff:ffff:fffe', result)
def test___get_traffic_flow(self):
self.scenario_cfg["traffic_options"]["flow"] = \
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index a606543e5..a1c27f5fb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -54,7 +54,8 @@ class VsperfTestCase(unittest.TestCase):
self._mock_SSH = mock.patch.object(ssh, 'SSH')
self.mock_SSH = self._mock_SSH.start()
- self.mock_SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
self.mock_subprocess_call = self._mock_subprocess_call.start()
@@ -104,40 +105,23 @@ class VsperfTestCase(unittest.TestCase):
def test_run_ok(self):
self.scenario.setup()
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
result = {}
self.scenario.run(result)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
def test_run_ok_setup_not_done(self):
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
result = {}
self.scenario.run(result)
self.assertTrue(self.scenario.setup_done)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_run_failed_vsperf_execution(self):
- self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
- (1, '', ''))
+ def test_run_ssh_command_call_counts(self):
+ self.scenario.run({})
- with self.assertRaises(RuntimeError):
- self.scenario.run({})
self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
-
- def test_run_failed_csv_report(self):
- self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
- (0, '', ''),
- (1, '', ''))
-
- with self.assertRaises(RuntimeError):
- self.scenario.run({})
- self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
+ self.mock_SSH.from_node().run.assert_called_once()
def test_run_sla_fail(self):
self.mock_SSH.from_node().execute.return_value = (
@@ -160,14 +144,21 @@ class VsperfTestCase(unittest.TestCase):
self.assertTrue('throughput_rx_fps was not collected by VSPERF'
in str(raised.exception))
+ def test_run_faulty_result_csv(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'faulty output not csv', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+ in str(raised.exception))
+
def test_run_sla_fail_metric_not_defined_in_sla(self):
del self.scenario_cfg['sla']['throughput_rx_fps']
scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
scenario.setup()
- self.mock_SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
with self.assertRaises(y_exc.SLAValidationError) as raised:
scenario.run({})
self.assertTrue('throughput_rx_fps is not defined in SLA'
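
The vsperf tests above now receive a canned two-line CSV ('throughput_rx_fps\r\n14797660.000\r\n') from setUp() and add a 'faulty output not csv' case that must end in an SLA error because the metric is never collected. A hedged sketch of parsing such output into a result dict; the function name is an assumption and the real scenario code may parse it differently:

    def parse_vsperf_csv(stdout):
        """Map a 'header\r\nvalues\r\n' CSV string to a result dict."""
        lines = [line for line in stdout.splitlines() if line.strip()]
        if len(lines) < 2:
            # Faulty output: nothing collected, which later triggers the
            # "throughput_rx_fps was not collected by VSPERF" SLA error.
            return {}
        keys = lines[0].split(',')
        values = lines[1].split(',')
        if len(keys) != len(values):
            return {}
        return dict(zip(keys, values))


    assert parse_vsperf_csv('throughput_rx_fps\r\n14797660.000\r\n') == \
        {'throughput_rx_fps': '14797660.000'}
    assert parse_vsperf_csv('faulty output not csv') == {}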
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index db6f9cc89..8bbe6911e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -20,6 +20,8 @@ import unittest
from yardstick import exceptions as y_exc
from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+from yardstick.common import exceptions as y_exc
+from yardstick import ssh
class VsperfDPDKTestCase(unittest.TestCase):
@@ -56,80 +58,51 @@ class VsperfDPDKTestCase(unittest.TestCase):
'action': 'monitor',
}
}
-
- self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- self._mock_ssh = mock.patch.object(vsperf_dpdk, 'ssh')
+ self._mock_ssh = mock.patch.object(ssh, 'SSH')
self.mock_ssh = self._mock_ssh.start()
self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
self.mock_subprocess_call = self._mock_subprocess_call.start()
+ mock_call_obj = mock.Mock()
+ mock_call_obj.execute.return_value = None
+ self.mock_subprocess_call.return_value = mock_call_obj
+
self._mock_log_info = mock.patch.object(vsperf_dpdk.LOG, 'info')
self.mock_log_info = self._mock_log_info.start()
+
self.addCleanup(self._cleanup)
+ self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
+ self.scenario.setup()
+
def _cleanup(self):
self._mock_ssh.stop()
self._mock_subprocess_call.stop()
self._mock_log_info.stop()
def test_setup(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
self.assertIsNotNone(self.scenario.client)
self.assertTrue(self.scenario.setup_done)
def test_teardown(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
self.scenario.teardown()
self.assertFalse(self.scenario.setup_done)
def test_is_dpdk_setup_no(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
+ self.mock_ssh.from_node().execute.return_value = (0, 'dummy', '')
- result = self.scenario._is_dpdk_setup()
- self.assertFalse(result)
+ self.assertFalse(self.scenario._is_dpdk_setup())
def test_is_dpdk_setup_yes(self):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_ssh.from_node().execute.return_value = (0, '', '')
- result = self.scenario._is_dpdk_setup()
- self.assertTrue(result)
+ self.assertTrue(self.scenario._is_dpdk_setup())
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_first(self, *args):
- # setup() specific mocks
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
+ self.mock_ssh.from_node().execute.return_value = (0, 'dummy', '')
self.scenario.dpdk_setup()
self.assertFalse(self.scenario._is_dpdk_setup())
@@ -137,89 +110,26 @@ class VsperfDPDKTestCase(unittest.TestCase):
@mock.patch.object(time, 'sleep')
def test_dpdk_setup_next(self, *args):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
+ self.mock_ssh.from_node().execute.return_value = (0, '', '')
self.scenario.dpdk_setup()
self.assertTrue(self.scenario._is_dpdk_setup())
self.assertTrue(self.scenario.dpdk_setup_done)
- @mock.patch.object(time, 'sleep')
- def test_dpdk_setup_runtime_error(self, *args):
-
- # setup specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
- self.assertTrue(self.scenario.setup_done)
-
- self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
-
@mock.patch.object(subprocess, 'check_output')
- @mock.patch('time.sleep')
def test_run_ok(self, *args):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
# run() specific mocks
- self.mock_subprocess_call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (
+ self.mock_ssh.from_node().execute.return_value = (
0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
result = {}
self.scenario.run(result)
-
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_run_failed_vsperf_execution(self):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, self.scenario.run, result)
-
- def test_run_falied_csv_report(self):
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_subprocess_call().execute.return_value = None
-
- self.scenario.setup()
- self.assertIsNotNone(self.scenario.client)
- self.assertTrue(self.scenario.setup_done)
-
- # run() specific mocks
- self.mock_subprocess_call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, self.scenario.run, result)
-
@mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
def test_vsperf_run_sla_fail(self, *args):
- self.scenario.setup()
-
- self.mock_ssh.SSH.from_node().execute.return_value = (
+ self.mock_ssh.from_node().execute.return_value = (
0, 'throughput_rx_fps\r\n123456.000\r\n', '')
with self.assertRaises(y_exc.SLAValidationError) as raised:
@@ -232,10 +142,22 @@ class VsperfDPDKTestCase(unittest.TestCase):
@mock.patch.object(time, 'sleep')
@mock.patch.object(subprocess, 'check_output')
def test_vsperf_run_sla_fail_metric_not_collected(self, *args):
+ self.mock_ssh.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps was not collected by VSPERF',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_metric_not_collected_faulty_csv(self, *args):
self.scenario.setup()
- self.mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+ self.mock_ssh.from_node().execute.return_value = (
+ 0, 'faulty output not csv', '')
with self.assertRaises(y_exc.SLAValidationError) as raised:
self.scenario.run({})
@@ -249,7 +171,7 @@ class VsperfDPDKTestCase(unittest.TestCase):
del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
self.scenario.setup()
- self.mock_ssh.SSH.from_node().execute.return_value = (
+ self.mock_ssh.from_node().execute.return_value = (
0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
with self.assertRaises(y_exc.SLAValidationError) as raised:
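
The vsperf_dpdk tests above drop the long side_effect sequences in favour of a single execute.return_value configured per test. For reference, a short generic example of the three Mock idioms these tests rely on (not Yardstick code):

    import mock

    client = mock.Mock()

    # return_value: every call yields the same canned SSH result, as the
    # tests above now configure once in setUp().
    client.execute.return_value = (0, 'stdout', '')
    assert client.execute('cmd') == (0, 'stdout', '')

    # side_effect with an iterable: consecutive calls walk the sequence,
    # which is how the removed tests simulated a later command failing.
    client.execute.side_effect = [(0, '', ''), (1, '', 'boom')]
    assert client.execute('first') == (0, '', '')
    assert client.execute('second') == (1, '', 'boom')

    # side_effect with an exception class makes the call raise, as in
    # test_run_last_command_raise_on_error earlier in this patch.
    client.execute.side_effect = RuntimeError
    try:
        client.execute('third')
    except RuntimeError:
        pass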
diff --git a/yardstick/tests/unit/common/test_ansible_common.py b/yardstick/tests/unit/common/test_ansible_common.py
index 48d8a60c8..bf82f6288 100644
--- a/yardstick/tests/unit/common/test_ansible_common.py
+++ b/yardstick/tests/unit/common/test_ansible_common.py
@@ -12,28 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from __future__ import absolute_import
-
-import os
-import tempfile
+import collections
import shutil
-from collections import defaultdict
+import subprocess
+import tempfile
import mock
-import unittest
-
-from six.moves.configparser import ConfigParser
-from six.moves import StringIO
+from six import moves
+from six.moves import configparser
from yardstick.common import ansible_common
+from yardstick.tests.unit import base as ut_base
-PREFIX = 'yardstick.common.ansible_common'
+class OverwriteDictTestCase(ut_base.BaseUnitTestCase):
-class OverwriteDictTestCase(unittest.TestCase):
def test_overwrite_dict_cfg(self):
- c = ConfigParser(allow_no_value=True)
+ c = configparser.ConfigParser(allow_no_value=True)
d = {
"section_a": "empty_value",
"section_b": {"key_c": "Val_d", "key_d": "VAL_D"},
@@ -43,86 +38,78 @@ class OverwriteDictTestCase(unittest.TestCase):
# Python3 and Python2 convert empty values into None or ''
# we don't really care but we need to compare correctly for unittest
self.assertTrue(c.has_option("section_a", "empty_value"))
- self.assertEqual(sorted(c.items("section_b")), [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
+ self.assertEqual(sorted(c.items("section_b")),
+ [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
self.assertTrue(c.has_option("section_c", "key_c"))
self.assertTrue(c.has_option("section_c", "key_d"))
-class FilenameGeneratorTestCase(unittest.TestCase):
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
+class FilenameGeneratorTestCase(ut_base.BaseUnitTestCase):
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
def test__handle_existing_file(self, _):
- ansible_common.FileNameGenerator._handle_existing_file("/dev/null")
+ ansible_common.FileNameGenerator._handle_existing_file('/dev/null')
def test_get_generator_from_file(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "", "")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', '', '')
def test_get_generator_from_file_middle(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "",
- "null")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', '', 'null')
def test_get_generator_from_file_prefix(self):
- ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "null",
- "middle")
+ ansible_common.FileNameGenerator.get_generator_from_filename(
+ '/dev/null', '', 'null', 'middle')
-class AnsibleNodeTestCase(unittest.TestCase):
- def test_ansible_node(self):
- ansible_common.AnsibleNode()
+class AnsibleNodeTestCase(ut_base.BaseUnitTestCase):
def test_ansible_node_len(self):
- a = ansible_common.AnsibleNode()
- len(a)
+ self.assertEqual(0, len(ansible_common.AnsibleNode()))
def test_ansible_node_repr(self):
- a = ansible_common.AnsibleNode()
- repr(a)
+ self.assertEqual('AnsibleNode<{}>', repr(ansible_common.AnsibleNode()))
def test_ansible_node_iter(self):
- a = ansible_common.AnsibleNode()
- for _ in a:
- pass
+ node = ansible_common.AnsibleNode(data={'a': 1, 'b': 2, 'c': 3})
+ for key in node:
+ self.assertIn(key, ('a', 'b', 'c'))
def test_is_role(self):
- a = ansible_common.AnsibleNode()
- self.assertFalse(a.is_role("", default="foo"))
+ node = ansible_common.AnsibleNode()
+ self.assertFalse(node.is_role('', default='foo'))
def test_ansible_node_get_tuple(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- self.assertEqual(a.get_tuple(), ('name', a))
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertEqual(node.get_tuple(), ('name', node))
def test_gen_inventory_line(self):
- a = ansible_common.AnsibleNode(defaultdict(str))
+ a = ansible_common.AnsibleNode(collections.defaultdict(str))
self.assertEqual(a.gen_inventory_line(), "")
def test_ansible_node_delitem(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- del a['name']
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertEqual(1, len(node))
+ del node['name']
+ self.assertEqual(0, len(node))
def test_ansible_node_getattr(self):
- a = ansible_common.AnsibleNode({"name": "name"})
- self.assertIsNone(getattr(a, "nosuch", None))
+ node = ansible_common.AnsibleNode({'name': 'name'})
+ self.assertIsNone(getattr(node, 'nosuch', None))
-class AnsibleNodeDictTestCase(unittest.TestCase):
- def test_ansible_node_dict(self):
- n = ansible_common.AnsibleNode
- ansible_common.AnsibleNodeDict(n, {})
+class AnsibleNodeDictTestCase(ut_base.BaseUnitTestCase):
def test_ansible_node_dict_len(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
- len(a)
+ self.assertEqual(0, len(a))
def test_ansible_node_dict_repr(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
- repr(a)
-
- def test_ansible_node_dict_iter(self):
- n = ansible_common.AnsibleNode
- a = ansible_common.AnsibleNodeDict(n, {})
- for _ in a:
- pass
+ self.assertEqual('{}', repr(a))
def test_ansible_node_dict_get(self):
n = ansible_common.AnsibleNode
@@ -144,12 +131,15 @@ class AnsibleNodeDictTestCase(unittest.TestCase):
["name ansible_ssh_pass=PASS ansible_user=user"])
-class AnsibleCommonTestCase(unittest.TestCase):
- def test_get_timeouts(self):
- self.assertAlmostEqual(ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
+class AnsibleCommonTestCase(ut_base.BaseUnitTestCase):
- def test__init__(self):
- ansible_common.AnsibleCommon({})
+ @staticmethod
+ def _delete_tmpdir(dir):
+ shutil.rmtree(dir)
+
+ def test_get_timeouts(self):
+ self.assertAlmostEqual(
+ ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
def test_reset(self):
a = ansible_common.AnsibleCommon({})
@@ -184,81 +174,68 @@ class AnsibleCommonTestCase(unittest.TestCase):
a.deploy_dir = "d"
self.assertEqual(a.deploy_dir, "d")
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_playbook_file_list(self, _):
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_playbook_file_list(self, *args):
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a._gen_ansible_playbook_file(["a"], d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_inventory_file(self, _, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a._gen_ansible_playbook_file(["a"], d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_inventory_file(self, *args):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon(nodes)
- a.gen_inventory_ini_dict()
- inv_context = a._gen_ansible_inventory_file(d)
- with inv_context:
- c = StringIO()
- inv_context.write_func(c)
- self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test__gen_ansible_playbook_file_list_multiple(self, _, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon(nodes)
+ a.gen_inventory_ini_dict()
+ inv_context = a._gen_ansible_inventory_file(d)
+ with inv_context:
+ c = moves.StringIO()
+ inv_context.write_func(c)
+ self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ def test__gen_ansible_playbook_file_list_multiple(self, *args):
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a._gen_ansible_playbook_file(["a", "b"], d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.Popen'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test_do_install_tmp_dir(self, _, mock_popen, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a._gen_ansible_playbook_file(["a", "b"], d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(subprocess, 'Popen')
+ @mock.patch.object(moves.builtins, 'open')
+ def test_do_install_tmp_dir(self, _, mock_popen, *args):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a.do_install('', d)
- finally:
- os.rmdir(d)
-
- @mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
- @mock.patch('{}.Popen'.format(PREFIX))
- @mock.patch('{}.open'.format(PREFIX))
- def test_execute_ansible_check(self, _, mock_popen, __):
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a.do_install('', d)
+
+ @mock.patch.object(tempfile, 'NamedTemporaryFile')
+ @mock.patch.object(moves.builtins, 'open')
+ @mock.patch.object(subprocess, 'Popen')
+ def test_execute_ansible_check(self, mock_popen, *args):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
- try:
- a = ansible_common.AnsibleCommon({})
- a.execute_ansible('', d, ansible_check=True, verbose=True)
- finally:
- os.rmdir(d)
+ self.addCleanup(self._delete_tmpdir, d)
+ a = ansible_common.AnsibleCommon({})
+ a.execute_ansible('', d, ansible_check=True, verbose=True)
def test_get_sut_info(self):
d = tempfile.mkdtemp()
a = ansible_common.AnsibleCommon({})
- try:
+ self.addCleanup(self._delete_tmpdir, d)
+ with mock.patch.object(a, '_exec_get_sut_info_cmd'):
a.get_sut_info(d)
- finally:
- shutil.rmtree(d)
def test_get_sut_info_not_exist(self):
a = ansible_common.AnsibleCommon({})
- try:
+ with self.assertRaises(OSError):
a.get_sut_info('/hello/world')
- except OSError:
- pass
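
The ansible_common tests above replace try/finally blocks with self.addCleanup(self._delete_tmpdir, d), so temporary directories are removed even when an assertion fails mid-test. A compact, generic illustration of that cleanup pattern:

    import os
    import shutil
    import tempfile
    import unittest


    class TmpDirCleanupTestCase(unittest.TestCase):

        @staticmethod
        def _delete_tmpdir(path):
            shutil.rmtree(path)

        def test_uses_tmpdir(self):
            d = tempfile.mkdtemp()
            # Registered right after creation: runs on success, failure
            # or error, replacing the old try/finally blocks.
            self.addCleanup(self._delete_tmpdir, d)

            target = os.path.join(d, 'inventory.ini')
            with open(target, 'w') as f:
                f.write('[nodes]\n')
            self.assertTrue(os.path.isfile(target))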
diff --git a/yardstick/tests/unit/common/test_kubernetes_utils.py b/yardstick/tests/unit/common/test_kubernetes_utils.py
index bdc2c12d5..ba6b5f388 100644
--- a/yardstick/tests/unit/common/test_kubernetes_utils.py
+++ b/yardstick/tests/unit/common/test_kubernetes_utils.py
@@ -121,6 +121,23 @@ class DeleteCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
mock_delete_crd.delete_custom_resource_definition.\
assert_called_once_with('name', 'del_obj')
+ @mock.patch.object(client, 'V1DeleteOptions', return_value='del_obj')
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api, mock_delobj):
+ mock_delete_crd = mock.Mock()
+ mock_delete_crd.delete_custom_resource_definition.side_effect = rest.ApiException(
+ status=404)
+
+ mock_get_api.return_value = mock_delete_crd
+ kubernetes_utils.delete_custom_resource_definition('name', skip_codes=[404])
+
+ mock_delobj.assert_called_once()
+ mock_delete_crd.delete_custom_resource_definition.assert_called_once_with(
+ 'name', 'del_obj')
+
+ mock_log.info.assert_called_once()
+
class GetCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
@@ -159,7 +176,7 @@ class GetCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
kubernetes_utils.get_custom_resource_definition('kind')
-class CreateNetworkTestCase(base.BaseUnitTestCase):
+class GetNetworkTestCase(base.BaseUnitTestCase):
@mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
def test_execute_correct(self, mock_get_api):
mock_api = mock.Mock()
@@ -167,29 +184,97 @@ class CreateNetworkTestCase(base.BaseUnitTestCase):
group = 'group.com'
version = mock.Mock()
plural = 'networks'
+ name = 'net_one'
+
+ kubernetes_utils.get_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ mock_api.get_cluster_custom_object.assert_called_once_with(
+ group, version, plural, name)
+
+ mock_api.reset_mock()
+ kubernetes_utils.get_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ mock_api.get_namespaced_custom_object.assert_called_once_with(
+ group, version, 'default', plural, name)
+
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.get_cluster_custom_object.side_effect = rest.ApiException(404)
+ mock_api.get_namespaced_custom_object.side_effect = rest.ApiException(404)
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ name = 'net_one'
+
+ network_obj = kubernetes_utils.get_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ self.assertIsNone(network_obj)
+
+ mock_api.reset_mock()
+ network_obj = kubernetes_utils.get_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ self.assertIsNone(network_obj)
+
+
+class CreateNetworkTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_execute_correct(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = None
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
body = mock.Mock()
+ name = 'net_one'
kubernetes_utils.create_network(
- constants.SCOPE_CLUSTER, group, version, plural, body)
+ constants.SCOPE_CLUSTER, group, version, plural, body, name)
mock_api.create_cluster_custom_object.assert_called_once_with(
group, version, plural, body)
mock_api.reset_mock()
kubernetes_utils.create_network(
- constants.SCOPE_NAMESPACED, group, version, plural, body)
+ constants.SCOPE_NAMESPACED, group, version, plural, body, name)
mock_api.create_namespaced_custom_object.assert_called_once_with(
group, version, 'default', plural, body)
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_network_already_created(self, mock_get_net, mock_get_api):
+        mock_get_net.return_value = mock.Mock()
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ body = mock.Mock()
+ name = 'net_one'
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_CLUSTER, group, version, plural, body, name)
+ mock_api.create_cluster_custom_object.assert_not_called()
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, body, name)
+ mock_api.create_namespaced_custom_object.assert_not_called()
@mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
- def test_execute_exception(self, mock_get_api):
+ @mock.patch.object(kubernetes_utils, 'get_network')
+ def test_execute_exception(self, mock_get_net, mock_get_api):
+ mock_get_net.return_value = None
mock_api = mock.Mock()
mock_api.create_cluster_custom_object.side_effect = rest.ApiException
mock_get_api.return_value = mock_api
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.create_network(
constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
- mock.ANY)
+ mock.ANY, mock.ANY)
class DeleteNetworkTestCase(base.BaseUnitTestCase):
@@ -223,6 +308,19 @@ class DeleteNetworkTestCase(base.BaseUnitTestCase):
constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
mock.ANY)
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_cluster_custom_object.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_network(
+ constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
+ mock.ANY, skip_codes=[404])
+
+ mock_log.info.assert_called_once()
+
class DeletePodTestCase(base.BaseUnitTestCase):
@mock.patch.object(kubernetes_utils, 'get_core_api')
@@ -243,10 +341,107 @@ class DeletePodTestCase(base.BaseUnitTestCase):
with self.assertRaises(exceptions.KubernetesApiException):
kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
+ @mock.patch.object(kubernetes_utils, 'LOG')
@mock.patch.object(kubernetes_utils, 'get_core_api')
- def test_execute_skip_exception(self, mock_get_api):
+ def test_execute_skip_exception(self, mock_get_api, *args):
mock_api = mock.Mock()
mock_api.delete_namespaced_pod.side_effect = rest.ApiException(status=404)
mock_get_api.return_value = mock_api
kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
+
+
+class DeleteServiceTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(client, "V1DeleteOptions")
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api, mock_options):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ mock_options.return_value = None
+ kubernetes_utils.delete_service("name", "default", None)
+ mock_api.delete_namespaced_service.assert_called_once_with(
+ "name", 'default', None)
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_service.side_effect = rest.ApiException(status=200)
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_service(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_skip_exception(self, mock_get_api, *args):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_service.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_service(mock.ANY, skip_codes=[404])
+
+
+class DeleteReplicationControllerTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_replication_controller(
+ "name", "default", body=None)
+
+ mock_api.delete_namespaced_replication_controller.assert_called_once_with(
+ "name", "default", None)
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_replication_controller.side_effect = (
+ rest.ApiException(status=200)
+ )
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_replication_controller(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_replication_controller.side_effect = (
+ rest.ApiException(status=404)
+ )
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_replication_controller(mock.ANY, skip_codes=[404])
+
+ mock_log.info.assert_called_once()
+
+
+class DeleteConfigMapTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_config_map("name", body=None)
+ mock_api.delete_namespaced_config_map.assert_called_once_with(
+ "name", "default", None
+ )
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_config_map.side_effect = rest.ApiException(status=200)
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_config_map(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ @mock.patch.object(kubernetes_utils, 'LOG')
+ def test_execute_skip_exception(self, mock_log, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_config_map.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_config_map(mock.ANY, skip_codes=[404])
+ mock_log.info.assert_called_once()
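
The new delete_* tests above all exercise the same contract: an ApiException whose status is listed in skip_codes is logged via LOG.info() and swallowed, while any other status is re-raised as KubernetesApiException. A hedged sketch of that control flow; both exception classes and the delete_resource wrapper below are stand-ins for illustration, not the kubernetes client or Yardstick code:

    import logging

    LOG = logging.getLogger(__name__)


    class ApiException(Exception):
        """Stand-in for kubernetes.client.rest.ApiException."""
        def __init__(self, status=None):
            super(ApiException, self).__init__('status=%s' % status)
            self.status = status


    class KubernetesApiException(Exception):
        """Stand-in for the Yardstick wrapper exception."""


    def delete_resource(delete_call, skip_codes=None):
        """Run a delete API call, tolerating the listed HTTP status codes."""
        skip_codes = skip_codes or []
        try:
            delete_call()
        except ApiException as e:
            if e.status in skip_codes:
                # e.g. 404: the object is already gone, log and move on.
                LOG.info('API exception skipped: %s', e)
            else:
                raise KubernetesApiException(str(e))


    def _delete_missing_object():
        raise ApiException(status=404)


    delete_resource(_delete_missing_object, skip_codes=[404])   # swallowed
    try:
        delete_resource(_delete_missing_object, skip_codes=[])  # re-raised
    except KubernetesApiException:
        pass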
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 446afdd38..ef4142148 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -12,12 +12,14 @@ import errno
import importlib
import ipaddress
from itertools import product, chain
-import mock
import os
-import six
-from six.moves import configparser
import socket
import time
+import threading
+
+import mock
+import six
+from six.moves import configparser
import unittest
import yardstick
@@ -194,6 +196,14 @@ class TestMacAddressToHex(ut_base.BaseUnitTestCase):
self.assertEqual(utils.mac_address_to_hex_list("ea:3e:e1:9a:99:e8"),
['0xea', '0x3e', '0xe1', '0x9a', '0x99', '0xe8'])
+ def test_mac_address_to_hex_list_too_short_mac(self):
+ with self.assertRaises(exceptions.InvalidMacAddress):
+ utils.mac_address_to_hex_list("ea:3e:e1:9a")
+
+ def test_mac_address_to_hex_list_no_int_mac(self):
+ with self.assertRaises(exceptions.InvalidMacAddress):
+ utils.mac_address_to_hex_list("invalid_mac")
+
class TranslateToStrTestCase(ut_base.BaseUnitTestCase):
@@ -1113,6 +1123,19 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
u'123:4567:89ab:cdef:123:4567:89ab:cdef/129',
]
+ def test_make_ipv4_address(self):
+ for addr in self.GOOD_IP_V4_ADDRESS_STR_LIST:
+ # test with no mask
+ expected = ipaddress.IPv4Address(addr)
+ self.assertEqual(utils.make_ipv4_address(addr), expected, addr)
+
+ def test_make_ipv4_address_error(self):
+ addr_list = self.INVALID_IP_ADDRESS_STR_LIST +\
+ self.GOOD_IP_V6_ADDRESS_STR_LIST
+ for addr in addr_list:
+ self.assertRaises(Exception, utils.make_ipv4_address, addr)
+
+
def test_safe_ip_address(self):
addr_list = self.GOOD_IP_V4_ADDRESS_STR_LIST
for addr in addr_list:
@@ -1196,6 +1219,20 @@ class TestUtilsIpAddrMethods(ut_base.BaseUnitTestCase):
for value in chain(value_iter, self.INVALID_IP_ADDRESS_STR_LIST):
self.assertEqual(utils.ip_to_hex(value), value)
+ def test_get_mask_from_ip_range_ipv4(self):
+ ip_str = '1.1.1.1'
+ for mask in range(8, 30):
+ ip = ipaddress.ip_network(ip_str + '/' + str(mask), strict=False)
+ result = utils.get_mask_from_ip_range(ip[2], ip[-2])
+ self.assertEqual(mask, result)
+
+ def test_get_mask_from_ip_range_ipv6(self):
+ ip_str = '2001::1'
+ for mask in range(8, 120):
+ ip = ipaddress.ip_network(ip_str + '/' + str(mask), strict=False)
+ result = utils.get_mask_from_ip_range(ip[2], ip[-2])
+ self.assertEqual(mask, result)
+
class SafeDecodeUtf8TestCase(ut_base.BaseUnitTestCase):
@@ -1263,6 +1300,10 @@ class TimerTestCase(ut_base.BaseUnitTestCase):
time.sleep(1.1)
self.assertEqual(2, len(iterations))
+ def test_delta_time_sec(self):
+ with utils.Timer() as timer:
+ self.assertIsInstance(timer.delta_time_sec(), float)
+
class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
@@ -1284,6 +1325,15 @@ class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
utils.wait_until_true(lambda: False, timeout=1, sleep=1,
exception=MyTimeoutException))
+ def _run_thread(self):
+ with self.assertRaises(exceptions.WaitTimeout):
+ utils.wait_until_true(lambda: False, timeout=1, sleep=1)
+
+ def test_timeout_no_main_thread(self):
+ new_thread = threading.Thread(target=self._run_thread)
+ new_thread.start()
+ new_thread.join(timeout=3)
+
class SendSocketCommandTestCase(unittest.TestCase):
@@ -1309,3 +1359,35 @@ class SendSocketCommandTestCase(unittest.TestCase):
mock_socket_obj.connect_ex.assert_called_once_with(('host', 22))
mock_socket_obj.sendall.assert_called_once_with(six.b('command'))
mock_socket_obj.close.assert_called_once()
+
+
+class GetPortMacTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ssh_client = mock.Mock()
+ self.ssh_client.execute.return_value = (0, 'foo ', '')
+
+ def test_ssh_client_execute_called(self):
+ utils.get_port_mac(self.ssh_client, 99)
+ self.ssh_client.execute.assert_called_once_with(
+ "ifconfig |grep HWaddr |grep 99 |awk '{print $5}' ",
+ raise_on_error=True)
+
+ def test_return_value(self):
+ self.assertEqual('foo', utils.get_port_mac(self.ssh_client, 99))
+
+
+class GetPortIPTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ssh_client = mock.Mock()
+ self.ssh_client.execute.return_value = (0, 'foo ', '')
+
+ def test_ssh_client_execute_called(self):
+ utils.get_port_ip(self.ssh_client, 99)
+ self.ssh_client.execute.assert_called_once_with(
+ "ifconfig 99 |grep 'inet addr' |awk '{print $2}' |cut -d ':' -f2 ",
+ raise_on_error=True)
+
+ def test_return_value(self):
+ self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
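
The new test_get_mask_from_ip_range_* tests above feed utils.get_mask_from_ip_range() the third and second-to-last addresses of a network and expect the original prefix length back. One way to recover that value, reproducing the asserted results (a sketch; the actual yardstick.common.utils implementation may differ):

    import ipaddress


    def mask_from_ip_range(ip_start, ip_end):
        """Recover the prefix length spanning two addresses of one subnet."""
        diff = int(ip_start) ^ int(ip_end)          # differing (host) bits
        return ip_start.max_prefixlen - diff.bit_length()


    net4 = ipaddress.ip_network(u'1.1.1.1/24', strict=False)
    assert mask_from_ip_range(net4[2], net4[-2]) == 24

    net6 = ipaddress.ip_network(u'2001::1/64', strict=False)
    assert mask_from_ip_range(net6[2], net6[-2]) == 64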
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index 541855aa8..2416aee61 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -28,36 +28,29 @@ TRAFFIC_PARAMETERS = {
'id': 1,
'bidir': 'False',
'duration': 60,
- 'iload': '100',
+ 'rate': 10000.5,
+ 'rate_unit': 'fps',
'outer_l2': {
'framesize': {'64B': '25', '256B': '75'}
},
'outer_l3': {
'count': 512,
+ 'seed': 1,
'dscp': 0,
- 'dstip4': '152.16.40.20',
'proto': 'udp',
- 'srcip4': '152.16.100.20',
- 'ttl': 32
- },
- 'outer_l3v4': {
- 'dscp': 0,
- 'dstip4': '152.16.40.20',
- 'proto': 'udp',
- 'srcip4': '152.16.100.20',
- 'ttl': 32
- },
- 'outer_l3v6': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
+ 'ttl': 32,
+ 'dstip': '152.16.40.20',
+ 'srcip': '152.16.100.20',
+ 'dstmask': 24,
+ 'srcmask': 24
},
'outer_l4': {
- 'dstport': '2001',
- 'srcport': '1234'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 2001,
+ 'srcport': 1234,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
},
@@ -65,37 +58,29 @@ TRAFFIC_PARAMETERS = {
'id': 2,
'bidir': 'False',
'duration': 60,
- 'iload': '100',
+ 'rate': 75.2,
+ 'rate_unit': '%',
'outer_l2': {
'framesize': {'128B': '35', '1024B': '65'}
},
'outer_l3': {
'count': 1024,
+ 'seed': 1,
'dscp': 0,
- 'dstip4': '152.16.100.20',
'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
- },
- 'outer_l3v4': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
- },
- 'outer_l3v6': {
- 'count': 1024,
- 'dscp': 0,
- 'dstip4': '152.16.100.20',
- 'proto': 'udp',
- 'srcip4': '152.16.40.20',
- 'ttl': 32
+ 'ttl': 32,
+ 'dstip': '2001::10',
+ 'srcip': '2021::10',
+ 'dstmask': 64,
+ 'srcmask': 64
},
'outer_l4': {
- 'dstport': '1234',
- 'srcport': '2001'
+ 'seed': 1,
+ 'count': 1,
+ 'dstport': 1234,
+ 'srcport': 2001,
+ 'srcportmask': 0,
+ 'dstportmask': 0
},
'traffic_type': 'continuous'
}
@@ -108,6 +93,8 @@ class TestIxNextgen(unittest.TestCase):
self.ixnet = mock.Mock()
self.ixnet.execute = mock.Mock()
self.ixnet.getRoot.return_value = 'my_root'
+ self.ixnet_gen = ixnet_api.IxNextgen()
+ self.ixnet_gen._ixnet = self.ixnet
def test_get_config(self):
tg_cfg = {
@@ -145,64 +132,50 @@ class TestIxNextgen(unittest.TestCase):
self.assertEqual(result, expected)
def test__get_config_element_by_flow_group_name(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
- ['fg_01']]
- ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
- output = ixnet_gen._get_config_element_by_flow_group_name(
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ self.ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
+ output = self.ixnet_gen._get_config_element_by_flow_group_name(
'flow_group_01')
self.assertEqual('traffic_item/configElement:flow_group_01', output)
def test__get_config_element_by_flow_group_name_no_match(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
- ['fg_01']]
- ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
- output = ixnet_gen._get_config_element_by_flow_group_name(
+ self.ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ self.ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
+ output = self.ixnet_gen._get_config_element_by_flow_group_name(
'flow_group_01')
self.assertIsNone(output)
def test__get_stack_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
+ self.ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name') as \
+ self.ixnet_gen, '_get_config_element_by_flow_group_name') as \
mock_get_cfg_element:
mock_get_cfg_element.return_value = 'cfg_element'
- output = ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
+ output = self.ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
self.assertEqual(['tcp1', 'tcp2'], output)
def test__get_stack_item_no_config_element(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
+ self.ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
def test__get_field_in_stack_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
- output = ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
+ self.ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ output = self.ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
self.assertEqual('field2', output)
def test__get_field_in_stack_item_no_field_present(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ self.ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
with self.assertRaises(exceptions.IxNetworkFieldNotPresentInStackItem):
- ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
+ self.ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
def test__parse_framesize(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
framesize = {'64B': '75', '512b': '25'}
- output = ixnet_gen._parse_framesize(framesize)
+ output = self.ixnet_gen._parse_framesize(framesize)
self.assertEqual(2, len(output))
self.assertIn([64, 64, 75], output)
self.assertIn([512, 512, 25], output)
@@ -210,55 +183,44 @@ class TestIxNextgen(unittest.TestCase):
@mock.patch.object(IxNetwork, 'IxNet')
def test_connect(self, mock_ixnet):
mock_ixnet.return_value = self.ixnet
- ixnet_gen = ixnet_api.IxNextgen()
- with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+ with mock.patch.object(self.ixnet_gen, 'get_config') as mock_config:
mock_config.return_value = {'machine': 'machine_fake',
'port': 'port_fake',
'version': 12345}
- ixnet_gen.connect(mock.ANY)
+ self.ixnet_gen.connect(mock.ANY)
self.ixnet.connect.assert_called_once_with(
'machine_fake', '-port', 'port_fake', '-version', '12345')
mock_config.assert_called_once()
def test_connect_invalid_config_no_machine(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'port': 'port_fake',
'version': '12345'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_invalid_config_no_port(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'machine': 'machine_fake',
'version': '12345'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_invalid_config_no_version(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={
+ self.ixnet_gen.get_config = mock.Mock(return_value={
'machine': 'machine_fake',
'port': 'port_fake'})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_connect_no_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.get_config = mock.Mock(return_value={})
- self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet_gen.get_config = mock.Mock(return_value={})
+ self.assertRaises(KeyError, self.ixnet_gen.connect, mock.ANY)
self.ixnet.connect.assert_not_called()
def test_clear_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.clear_config()
+ self.ixnet_gen.clear_config()
self.ixnet.execute.assert_called_once_with('newConfig')
@mock.patch.object(ixnet_api, 'log')
@@ -268,11 +230,9 @@ class TestIxNextgen(unittest.TestCase):
'chassis': '1.1.1.1',
'cards': ['1', '2'],
'ports': ['2', '2']}
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = config
+ self.ixnet_gen._cfg = config
- self.assertIsNone(ixnet_gen.assign_ports())
+ self.assertIsNone(self.ixnet_gen.assign_ports())
self.assertEqual(self.ixnet.execute.call_count, 2)
self.assertEqual(self.ixnet.commit.call_count, 4)
self.assertEqual(self.ixnet.getAttribute.call_count, 2)
@@ -284,25 +244,19 @@ class TestIxNextgen(unittest.TestCase):
'chassis': '1.1.1.1',
'cards': ['1', '2'],
'ports': ['3', '4']}
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = config
- ixnet_gen.assign_ports()
+ self.ixnet_gen._cfg = config
+ self.ixnet_gen.assign_ports()
mock_log.warning.assert_called()
def test_assign_ports_no_config(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._cfg = {}
- self.assertRaises(KeyError, ixnet_gen.assign_ports)
+ self.ixnet_gen._cfg = {}
+ self.assertRaises(KeyError, self.ixnet_gen.assign_ports)
def test__create_traffic_item(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
self.ixnet.add.return_value = 'my_new_traffic_item'
self.ixnet.remapIds.return_value = ['my_traffic_item_id']
- ixnet_gen._create_traffic_item()
+ self.ixnet_gen._create_traffic_item()
self.ixnet.add.assert_called_once_with(
'my_root/traffic', 'trafficItem')
self.ixnet.setMultiAttribute.assert_called_once_with(
@@ -313,41 +267,35 @@ class TestIxNextgen(unittest.TestCase):
'-trackBy', 'trafficGroupId0')
def test__create_flow_groups(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
- ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
- ixnet_gen._create_flow_groups()
- ixnet_gen.ixnet.add.assert_has_calls([
+ self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
+ self.ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
+ self.ixnet_gen._create_flow_groups()
+ self.ixnet_gen.ixnet.add.assert_has_calls([
mock.call('traffic_item', 'endpointSet'),
mock.call('traffic_item', 'endpointSet')])
- ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
mock.call('endp1', '-name', '1', '-sources', ['1/protocols'],
'-destinations', ['2/protocols']),
mock.call('endp2', '-name', '2', '-sources', ['2/protocols'],
'-destinations', ['1/protocols'])])
def test__append_protocol_to_stack(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
+ self.ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
self.ixnet.execute.assert_called_with(
'append', 'prev_element',
'my_root/traffic/protocolTemplate:"my_protocol"')
def test__setup_config_elements(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
+ self.ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
['cfg_element']]
- with mock.patch.object(ixnet_gen, '_append_procotol_to_stack') as \
+ with mock.patch.object(self.ixnet_gen, '_append_procotol_to_stack') as \
mock_append_proto:
- ixnet_gen._setup_config_elements()
+ self.ixnet_gen._setup_config_elements()
mock_append_proto.assert_has_calls([
mock.call(ixnet_api.PROTO_UDP, 'cfg_element/stack:"ethernet-1"'),
mock.call(ixnet_api.PROTO_IPV4, 'cfg_element/stack:"ethernet-1"')])
- ixnet_gen.ixnet.setAttribute.assert_has_calls([
+ self.ixnet_gen.ixnet.setAttribute.assert_has_calls([
mock.call('cfg_element/frameRateDistribution', '-portDistribution',
'splitRateEvenly'),
mock.call('cfg_element/frameRateDistribution',
@@ -359,150 +307,174 @@ class TestIxNextgen(unittest.TestCase):
def test_create_traffic_model(self, mock__setup_config_elements,
mock__create_flow_groups,
mock__create_traffic_item):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen.create_traffic_model()
+ self.ixnet_gen.create_traffic_model()
mock__create_traffic_item.assert_called_once()
mock__create_flow_groups.assert_called_once()
mock__setup_config_elements.assert_called_once()
def test__update_frame_mac(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_get_field_in_stack_item') as \
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item') as \
mock_get_field:
mock_get_field.return_value = 'field_descriptor'
- ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
+ self.ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
mock_get_field.assert_called_once_with('ethernet_descriptor', 'field')
- ixnet_gen.ixnet.setMultiAttribute(
+ self.ixnet_gen.ixnet.setMultiAttribute(
'field_descriptor', '-singleValue', 'mac', '-fieldValue', 'mac',
'-valueType', 'singleValue')
- ixnet_gen.ixnet.commit.assert_called_once()
+ self.ixnet_gen.ixnet.commit.assert_called_once()
def test_update_frame(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value='cfg_element'), \
- mock.patch.object(ixnet_gen, '_update_frame_mac') as \
+ mock.patch.object(self.ixnet_gen, '_update_frame_mac') as \
mock_update_frame, \
- mock.patch.object(ixnet_gen, '_get_stack_item') as \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item') as \
mock_get_stack_item:
mock_get_stack_item.side_effect = [['item1'], ['item2'],
['item3'], ['item4']]
- ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 50)
- self.assertEqual(6, len(ixnet_gen.ixnet.setMultiAttribute.mock_calls))
+ self.assertEqual(6, len(self.ixnet_gen.ixnet.setMultiAttribute.mock_calls))
self.assertEqual(4, len(mock_update_frame.mock_calls))
def test_update_frame_flow_not_present(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
with mock.patch.object(
- ixnet_gen, '_get_config_element_by_flow_group_name',
+ self.ixnet_gen, '_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_frame(TRAFFIC_PARAMETERS, 40)
def test_get_statistics(self):
- ixnet_gen = ixnet_api.IxNextgen()
port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
- with mock.patch.object(ixnet_gen, '_build_stats_map') as \
+ with mock.patch.object(self.ixnet_gen, '_build_stats_map') as \
mock_build_stats:
- ixnet_gen.get_statistics()
+ self.ixnet_gen.get_statistics()
mock_build_stats.assert_has_calls([
- mock.call(port_statistics, ixnet_gen.PORT_STATS_NAME_MAP),
- mock.call(flow_statistics, ixnet_gen.LATENCY_NAME_MAP)])
+ mock.call(port_statistics, self.ixnet_gen.PORT_STATS_NAME_MAP),
+ mock.call(flow_statistics, self.ixnet_gen.LATENCY_NAME_MAP)])
def test__update_ipv4_address(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_get_field_in_stack_item',
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
return_value='field_desc'):
- ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
- 100, '255.255.255.0', 25)
- ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ self.ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
+ 100, 26, 25)
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
'field_desc', '-seed', 100, '-fixedBits', '192.168.1.1',
- '-randomMask', '255.255.255.0', '-valueType', 'random',
+ '-randomMask', '0.0.0.63', '-valueType', 'random',
'-countValue', 25)
+ def test__update_udp_port(self):
+ with mock.patch.object(self.ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ self.ixnet_gen._update_udp_port(mock.ANY, mock.ANY, 1234,
+ 2, 0, 2)
+
+ self.ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc',
+ '-auto', 'false',
+ '-seed', 1,
+ '-fixedBits', 1234,
+ '-randomMask', 0,
+ '-valueType', 'random',
+ '-countValue', 1)
+
def test_update_ip_packet(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen, '_update_ipv4_address') as \
+ with mock.patch.object(self.ixnet_gen, '_update_ipv4_address') as \
mock_update_add, \
- mock.patch.object(ixnet_gen, '_get_stack_item'), \
- mock.patch.object(ixnet_gen,
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
'_get_config_element_by_flow_group_name', return_value='celm'):
- ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
self.assertEqual(4, len(mock_update_add.mock_calls))
def test_update_ip_packet_exception_no_config_element(self):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- with mock.patch.object(ixnet_gen,
+ with mock.patch.object(self.ixnet_gen,
'_get_config_element_by_flow_group_name',
return_value=None):
with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
- ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+ self.ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+
+ def test_update_l4(self):
+ with mock.patch.object(self.ixnet_gen, '_update_udp_port') as \
+ mock_update_udp, \
+ mock.patch.object(self.ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name', return_value='celm'):
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ self.assertEqual(4, len(mock_update_udp.mock_calls))
+
+ def test_update_l4_exception_no_config_element(self):
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ self.ixnet_gen.update_l4(TRAFFIC_PARAMETERS)
+
+ def test_update_l4_exception_no_supported_proto(self):
+ traffic_parameters = {
+ UPLINK: {
+ 'id': 1,
+ 'outer_l3': {
+ 'proto': 'unsupported',
+ },
+ },
+ }
+ with mock.patch.object(self.ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value='celm'):
+ with self.assertRaises(exceptions.IXIAUnsupportedProtocol):
+ self.ixnet_gen.update_l4(traffic_parameters)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic(self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'stopped', 'started', 'started', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_traffic_running(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'started', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(4, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_wait_for_traffic_to_stop(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'started', 'started', 'started', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(4, self.ixnet_gen._ixnet.execute.call_count)
@mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
def test_start_traffic_wait_for_traffic_start(
self, mock_ixnextgen_get_traffic_state):
- ixnet_gen = ixnet_api.IxNextgen()
- ixnet_gen._ixnet = self.ixnet
- ixnet_gen._ixnet.getList.return_value = [0]
+ self.ixnet_gen._ixnet.getList.return_value = [0]
mock_ixnextgen_get_traffic_state.side_effect = [
'stopped', 'stopped', 'stopped', 'started']
- result = ixnet_gen.start_traffic()
+ result = self.ixnet_gen.start_traffic()
self.assertIsNone(result)
self.ixnet.getList.assert_called_once()
- self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+ self.assertEqual(3, self.ixnet_gen._ixnet.execute.call_count)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
index 55276af58..0dc3e0579 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_base.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
@@ -80,9 +80,33 @@ class TrafficProfileConfigTestCase(unittest.TestCase):
tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
tp_config_obj = base.TrafficProfileConfig(tp_config)
self.assertEqual({'64B': 100}, tp_config_obj.packet_sizes)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
+ tp_config_obj.duration)
+
+ def test__init_set_duration(self):
+ tp_config = {'traffic_profile': {'duration': 15}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
self.assertEqual(base.TrafficProfileConfig.DEFAULT_SCHEMA,
tp_config_obj.schema)
- self.assertEqual(base.TrafficProfileConfig.DEFAULT_FRAME_RATE,
+ self.assertEqual(float(base.TrafficProfileConfig.DEFAULT_FRAME_RATE),
tp_config_obj.frame_rate)
- self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
- tp_config_obj.duration)
+ self.assertEqual(15, tp_config_obj.duration)
+
+ def test__parse_rate(self):
+ tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
+ self.assertEqual((100.0, 'fps'), tp_config_obj._parse_rate('100 '))
+ self.assertEqual((200.5, 'fps'), tp_config_obj._parse_rate('200.5'))
+ self.assertEqual((300.8, 'fps'), tp_config_obj._parse_rate('300.8fps'))
+ self.assertEqual((400.2, 'fps'),
+ tp_config_obj._parse_rate('400.2 fps'))
+ self.assertEqual((500.3, '%'), tp_config_obj._parse_rate('500.3%'))
+ self.assertEqual((600.1, '%'), tp_config_obj._parse_rate('600.1 %'))
+
+ def test__parse_rate_exception(self):
+ tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
+ with self.assertRaises(exceptions.TrafficProfileRate):
+ tp_config_obj._parse_rate('100Fps')
+ with self.assertRaises(exceptions.TrafficProfileRate):
+ tp_config_obj._parse_rate('100 kbps')
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 3bb8b9192..6f76eb77c 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -446,6 +446,38 @@ class TestIXIARFC2544Profile(unittest.TestCase):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(t_profile_data)
self.assertEqual(12345678, r_f_c2544_profile.rate)
+ def test__get_ip_and_mask_range(self):
+ ip_range = '1.2.0.2-1.2.255.254'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ ip, mask = r_f_c2544_profile._get_ip_and_mask(ip_range)
+ self.assertEqual('1.2.0.2', ip)
+ self.assertEqual(16, mask)
+
+ def test__get_ip_and_mask_single(self):
+ ip_range = '192.168.1.10'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ ip, mask = r_f_c2544_profile._get_ip_and_mask(ip_range)
+ self.assertEqual('192.168.1.10', ip)
+ self.assertIsNone(mask)
+
+ def test__get_fixed_and_mask_range(self):
+ fixed_mask = '8-48'
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(8, fixed)
+ self.assertEqual(48, mask)
+
+ def test__get_fixed_and_mask_single(self):
+ fixed_mask = 1234
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ fixed, mask = r_f_c2544_profile._get_fixed_and_mask(fixed_mask)
+ self.assertEqual(1234, fixed)
+ self.assertEqual(0, mask)
+
def test__get_ixia_traffic_profile_default_args(self):
r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
self.TRAFFIC_PROFILE)
@@ -554,7 +586,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertTrue(completed)
self.assertEqual(23.0, samples['TxThroughput'])
self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
def test_get_drop_percentage_over_drop_percentage(self):
samples = {'iface_name_1':
@@ -571,7 +603,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertFalse(completed)
self.assertEqual(23.0, samples['TxThroughput'])
self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
def test_get_drop_percentage_under_drop_percentage(self):
@@ -589,7 +621,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertFalse(completed)
self.assertEqual(23.0, samples['TxThroughput'])
self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(rfc2544_profile.rate, rfc2544_profile.min_rate)
@mock.patch.object(ixia_rfc2544.LOG, 'info')
@@ -625,5 +657,5 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertTrue(completed)
self.assertEqual(23.0, samples['TxThroughput'])
self.assertEqual(21.0, samples['RxThroughput'])
- self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(0.099651, samples['DropPercentage'])
self.assertEqual(33.45, rfc2544_profile.rate)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py b/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py
new file mode 100644
index 000000000..08542b4f1
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_pktgen.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from yardstick.common import utils
+from yardstick.network_services.traffic_profile import pktgen
+from yardstick.tests.unit import base as ut_base
+
+
+class TestPktgenTrafficProfile(ut_base.BaseUnitTestCase):
+
+ def setUp(self):
+ self._tp_config = {'traffic_profile': {}}
+ self._host = 'localhost'
+ self._port = '12345'
+ self.tp = pktgen.PktgenTrafficProfile(self._tp_config)
+ self.tp.init(self._host, self._port)
+ self._mock_send_socket_command = mock.patch.object(
+ utils, 'send_socket_command', return_value=0)
+ self.mock_send_socket_command = self._mock_send_socket_command.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_send_socket_command.stop()
+
+ def test_start(self):
+ self.tp.start()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'pktgen.start("0")')
+
+ def test_stop(self):
+ self.tp.stop()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'pktgen.stop("0")')
+
+ def test_rate(self):
+ rate = 75
+ self.tp.rate(rate)
+ command = 'pktgen.set("0", "rate", 75)'
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, command)
+
+ def test_clear_all_stats(self):
+ self.tp.clear_all_stats()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'clr')
+
+ def test_help(self):
+ self.tp.help()
+ self.mock_send_socket_command.assert_called_once_with(
+ self._host, self._port, 'help')
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
index 0cf93f9ae..a4fdc8d04 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -266,6 +266,7 @@ class PortPgIDMapTestCase(base.BaseUnitTestCase):
port_pg_id_map.increase_pg_id()
self.assertEqual([1, 2], port_pg_id_map.get_pg_ids(10))
self.assertEqual([3], port_pg_id_map.get_pg_ids(20))
+ self.assertEqual([], port_pg_id_map.get_pg_ids(30))
def test_increase_pg_id_no_port(self):
port_pg_id_map = rfc2544.PortPgIDMap()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index 01fc19aa0..69a5fb484 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -450,8 +450,8 @@ class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase):
        # duplicate config and add invalid action
acl_config = copy.deepcopy(self.ACL_CONFIG)
acl_config['access-list-entries'][0]["actions"].append({"xnat": {}})
- self.assertRaises(exceptions.AclUknownActionTemplate,
- setup_helper.get_flows_config, acl_config)
+ self.assertRaises(exceptions.AclUnknownActionTemplate,
+ setup_helper.get_flows_config, acl_config)
@mock.patch.object(AclApproxSetupEnvSetupEnvHelper, 'get_default_flows')
def test_get_flows_config_invalid_action_param(self, get_default_flows):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py
new file mode 100644
index 000000000..d341b970b
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_pktgen.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import uuid
+
+import mock
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+from yardstick.network_services.vnf_generic.vnf import tg_pktgen
+from yardstick.tests.unit import base as ut_base
+
+
+class PktgenTrafficGenTestCase(ut_base.BaseUnitTestCase):
+
+ SERVICE_PORTS = [{'port': constants.LUA_PORT,
+ 'node_port': '34501'}]
+ VNFD = {'mgmt-interface': {'ip': '1.2.3.4',
+ 'service_ports': SERVICE_PORTS},
+ 'vdu': [{'external-interface': 'interface'}],
+ 'benchmark': {'kpi': 'fake_kpi'}
+ }
+
+ def setUp(self):
+ self._id = uuid.uuid1().int
+ self._mock_vnf_consumer = mock.patch.object(vnf_base,
+ 'GenericVNFConsumer')
+ self.mock_vnf_consumer = self._mock_vnf_consumer.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_vnf_consumer.stop()
+
+ def test__init(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ self.assertTrue(isinstance(tg, (vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint)))
+
+ def test_run_traffic(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ mock_tp = mock.Mock()
+ with mock.patch.object(tg, '_is_running', return_value=True):
+ tg.run_traffic(mock_tp)
+
+ mock_tp.init.assert_called_once_with(tg._node_ip, tg._lua_node_port)
+
+ def test__get_lua_node_port(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ service_ports = [{'port': constants.LUA_PORT,
+ 'node_port': '12345'}]
+ self.assertEqual(12345, tg._get_lua_node_port(service_ports))
+
+ def test__get_lua_node_port_no_lua_port(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ service_ports = [{'port': '333'}]
+ self.assertIsNone(tg._get_lua_node_port(service_ports))
+
+ def test__is_running(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ with mock.patch.object(tg, '_traffic_profile'):
+ self.assertTrue(tg._is_running())
+
+ def test__is_running_exception(self):
+ tg = tg_pktgen.PktgenTrafficGen('name1', self.VNFD, self._id)
+ with mock.patch.object(tg, '_traffic_profile') as mock_tp:
+ mock_tp.help.side_effect = exceptions.PktgenActionError()
+ self.assertFalse(tg._is_running())
diff --git a/yardstick/tests/unit/orchestrator/test_kubernetes.py b/yardstick/tests/unit/orchestrator/test_kubernetes.py
index a73a4a132..2d5c4a26f 100644
--- a/yardstick/tests/unit/orchestrator/test_kubernetes.py
+++ b/yardstick/tests/unit/orchestrator/test_kubernetes.py
@@ -529,9 +529,10 @@ class NetworkObjectTestCase(base.BaseUnitTestCase):
net_obj._version = 'version'
net_obj._plural = 'plural'
net_obj._template = 'template'
+ net_obj._name = 'fake_name'
net_obj.create()
mock_create_network.assert_called_once_with(
- 'scope', 'group', 'version', 'plural', 'template')
+ 'scope', 'group', 'version', 'plural', 'template', 'fake_name')
@mock.patch.object(kubernetes_utils, 'delete_network')
def test_delete(self, mock_delete_network):
@@ -543,7 +544,7 @@ class NetworkObjectTestCase(base.BaseUnitTestCase):
net_obj._name = 'name'
net_obj.delete()
mock_delete_network.assert_called_once_with(
- 'scope', 'group', 'version', 'plural', 'name')
+ 'scope', 'group', 'version', 'plural', 'name', skip_codes=[404])
class ServiceNodePortObjectTestCase(base.BaseUnitTestCase):
@@ -611,7 +612,8 @@ class ServiceNodePortObjectTestCase(base.BaseUnitTestCase):
def test_delete(self, mock_delete_service):
nodeport_object = kubernetes.ServiceNodePortObject('fake_name')
nodeport_object.delete()
- mock_delete_service.assert_called_once_with('fake_name-service')
+ mock_delete_service.assert_called_once_with('fake_name-service',
+ skip_codes=[404])
class KubernetesTemplate(base.BaseUnitTestCase):
diff --git a/yardstick/tests/unit/service/test_environment.py b/yardstick/tests/unit/service/test_environment.py
index be4882e30..779e6eaa0 100644
--- a/yardstick/tests/unit/service/test_environment.py
+++ b/yardstick/tests/unit/service/test_environment.py
@@ -9,9 +9,8 @@
import mock
-from yardstick.common.exceptions import UnsupportedPodFormatError
-from yardstick.service.environment import Environment
-from yardstick.service.environment import AnsibleCommon
+from yardstick.common import exceptions
+from yardstick.service import environment
from yardstick.tests.unit import base as ut_base
@@ -31,15 +30,17 @@ class EnvironmentTestCase(ut_base.BaseUnitTestCase):
]
}
- with mock.patch.object(AnsibleCommon, 'gen_inventory_ini_dict'), \
- mock.patch.object(AnsibleCommon, 'get_sut_info',
- return_value={'node1': {}}):
- env = Environment(pod=pod_info)
+ with mock.patch.object(environment.AnsibleCommon,
+ 'gen_inventory_ini_dict'), \
+ mock.patch.object(environment.AnsibleCommon, 'get_sut_info',
+ return_value={'node1': {}}), \
+ mock.patch.object(environment.Environment, '_format_sut_info'):
+ env = environment.Environment(pod=pod_info)
env.get_sut_info()
def test_get_sut_info_pod_str(self):
pod_info = 'nodes'
- env = Environment(pod=pod_info)
- with self.assertRaises(UnsupportedPodFormatError):
+ env = environment.Environment(pod=pod_info)
+ with self.assertRaises(exceptions.UnsupportedPodFormatError):
env.get_sut_info()
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index b727e821d..71929f1a2 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -617,3 +617,26 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.put_file('a', 'b')
mock_put_sftp.assert_called_once()
+
+ def test_execute(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(return_value=0)
+ exit_code, _, _ = auto_connect_ssh.execute('')
+ self.assertEqual(exit_code, 0)
+
+ def _mock_run(self, *args, **kwargs):
+ if args[0] == 'ls':
+ if kwargs.get('raise_on_error'):
+ raise exceptions.SSHError(error_msg='Command error')
+ return 1
+ return 0
+
+ def test_execute_command_error(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ auto_connect_ssh._client = mock.Mock()
+ auto_connect_ssh.run = mock.Mock(side_effect=self._mock_run)
+ self.assertRaises(exceptions.SSHError, auto_connect_ssh.execute, 'ls',
+ raise_on_error=True)
+ exit_code, _, _ = auto_connect_ssh.execute('ls')
+ self.assertNotEqual(exit_code, 0)