-rw-r--r--  .coveragerc  2
-rw-r--r--  ansible/install.yaml  2
-rw-r--r--  ansible/roles/configure_uwsgi/templates/yardstick.ini.j2  2
-rw-r--r--  ansible/roles/download_dpdk/tasks/main.yml  14
-rw-r--r--  ansible/roles/infra_create_vms/tasks/main.yml  11
-rw-r--r--  ansible/roles/infra_create_vms/tasks/validate_vms.yml  54
-rw-r--r--  ansible/roles/infra_prepare_vms/tasks/main.yml  3
-rw-r--r--  ansible/roles/install_dpdk/tasks/main.yml  9
-rw-r--r--  ansible/roles/install_samplevnf/vars/main.yml  2
-rw-r--r--  ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml  2
-rw-r--r--  ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml  2
-rw-r--r--  docker/Dockerfile  2
-rw-r--r--  docker/Dockerfile.aarch64.patch  7
-rw-r--r--  samples/ping_bottlenecks.yaml  5
-rw-r--r--  samples/vnf_samples/nsut/acl/acl_rules.yaml.sample (renamed from samples/vnf_samples/nsut/acl/acl_rules.yaml)  10
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_iterationipc.yaml  96
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc087.yaml  6
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml  6
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml  6
-rw-r--r--  yardstick/benchmark/contexts/__init__.py  20
-rw-r--r--  yardstick/benchmark/contexts/dummy.py  7
-rw-r--r--  yardstick/benchmark/contexts/heat.py  3
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py  112
-rw-r--r--  yardstick/benchmark/contexts/node.py  4
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py  9
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py  9
-rw-r--r--  yardstick/benchmark/core/task.py  36
-rwxr-xr-x  yardstick/benchmark/runners/base.py  44
-rw-r--r--  yardstick/benchmark/runners/iteration_ipc.py  205
-rw-r--r--  yardstick/benchmark/scenarios/base.py  4
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py  35
-rw-r--r--  yardstick/common/constants.py  12
-rw-r--r--  yardstick/common/exceptions.py  68
-rw-r--r--  yardstick/common/httpClient.py  4
-rw-r--r--  yardstick/common/kubernetes_utils.py  97
-rw-r--r--  yardstick/common/messaging/__init__.py  27
-rw-r--r--  yardstick/common/messaging/consumer.py  11
-rw-r--r--  yardstick/common/messaging/payloads.py  20
-rw-r--r--  yardstick/common/messaging/producer.py  13
-rw-r--r--  yardstick/common/utils.py  22
-rw-r--r--  yardstick/network_services/collector/subscriber.py  11
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py  9
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py  7
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py  92
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py  8
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py  2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py  7
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/router_vnf.py  6
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py  39
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_ixload.py  16
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_ping.py  10
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_prox.py  6
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py  10
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py  8
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_trex.py  9
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/udp_replay.py  9
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vfw_vnf.py  7
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py  11
-rw-r--r--  yardstick/orchestrator/kubernetes.py  351
-rw-r--r--  yardstick/tests/functional/common/messaging/test_messaging.py  22
-rw-r--r--  yardstick/tests/functional/common/test_utils.py  38
-rw-r--r--  yardstick/tests/functional/network_services/__init__.py  0
-rw-r--r--  yardstick/tests/functional/network_services/vnf_generic/__init__.py  0
-rw-r--r--  yardstick/tests/functional/network_services/vnf_generic/vnf/__init__.py  0
-rw-r--r--  yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py  103
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py  3
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py  3
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_kubernetes.py  179
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py  8
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_base.py  56
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py  136
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py  6
-rw-r--r--  yardstick/tests/unit/common/messaging/test_payloads.py  36
-rw-r--r--  yardstick/tests/unit/common/messaging/test_producer.py  7
-rw-r--r--  yardstick/tests/unit/common/test_kubernetes_utils.py  252
-rw-r--r--  yardstick/tests/unit/common/test_utils.py  27
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_subscriber.py  43
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py  10
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py  33
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py  126
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py  16
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py  21
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py  21
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py  96
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py  159
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py  30
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py  28
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py  27
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py  17
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py  32
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py  37
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py  29
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py  22
-rw-r--r--  yardstick/tests/unit/orchestrator/test_kubernetes.py  438
94 files changed, 3028 insertions, 654 deletions
diff --git a/.coveragerc b/.coveragerc
index f67853192..3ad7f68a6 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -14,4 +14,4 @@ source = yardstick
[report]
ignore_errors = True
precision = 3
-omit = yardstick/vTC/*
+omit = yardstick/vTC/*,yardstick/tests/functional/*
diff --git a/ansible/install.yaml b/ansible/install.yaml
index e93232d06..ae9f8587f 100644
--- a/ansible/install.yaml
+++ b/ansible/install.yaml
@@ -88,7 +88,7 @@
- role: install_samplevnf
vnf_name: FW
- role: install_samplevnf
- vnf_name: CGNATP
+ vnf_name: CGNAPT
# build shared DPDK for collectd only, required DPDK downloaded already
- install_dpdk_shared
- install_rabbitmq
diff --git a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
index 044f42acb..495febb19 100644
--- a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
+++ b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
@@ -12,7 +12,7 @@ chmod-socket = 666
callable = app_wrapper
enable-threads = true
close-on-exec = 1
-daemonize = {{ log_dir }}uwsgi.log
+logto = {{ log_dir }}/uwsgi.log
socket = {{ socket_file }}
{# If virtual environment, we need to add:
virtualenv = <virtual_env> #}
diff --git a/ansible/roles/download_dpdk/tasks/main.yml b/ansible/roles/download_dpdk/tasks/main.yml
index bea3febed..55b466cb7 100644
--- a/ansible/roles/download_dpdk/tasks/main.yml
+++ b/ansible/roles/download_dpdk/tasks/main.yml
@@ -37,8 +37,20 @@
path: "{{ dpdk_dest }}/{{ dpdk_file }}"
state: absent
+- name: find unzipped DPDK folder
+ find:
+ paths: "{{ dpdk_dest }}"
+ patterns: "^dpdk-.*{{ dpdk_version }}$"
+ file_type: directory
+ use_regex: yes
+ register: dpdk_folder_match
+
+- fail:
+ msg: "Cannot find unzipped DPDK folder or more than one found"
+ when: dpdk_folder_match.matched != 1
+
- set_fact:
- dpdk_path: "{{ dpdk_dest }}/{{ dpdk_unarchive }}"
+ dpdk_path: "{{ dpdk_folder_match.files[0].path }}"
- set_fact:
RTE_SDK: "{{ dpdk_path }}"
diff --git a/ansible/roles/infra_create_vms/tasks/main.yml b/ansible/roles/infra_create_vms/tasks/main.yml
index 4d47f44ff..b422a9205 100644
--- a/ansible/roles/infra_create_vms/tasks/main.yml
+++ b/ansible/roles/infra_create_vms/tasks/main.yml
@@ -32,3 +32,14 @@
loop_control:
loop_var: node_item
with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Create list of dictionaries with vm name, ip address
+ set_fact:
+ vm_name_ip: "{{ vm_name_ip|default([]) + [{item.hostname: item.interfaces[1].ip}] }}"
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Make sure VM is reachable
+ include_tasks: validate_vms.yml
+ loop_control:
+ loop_var: name_ip
+ with_items: "{{ vm_name_ip }}"
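
Note (not part of the patch): the fact built above is a list of single-entry dicts mapping hostname to IP; each entry is passed to validate_vms.yml as "name_ip" and iterated there with "with_dict". An illustrative sketch of the resulting shape, with hypothetical hostnames and addresses:

    # vm_name_ip after the set_fact task above (values are illustrative)
    vm_name_ip = [
        {'yardstickvm': '192.168.1.10'},
        {'controller': '192.168.1.11'},
    ]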
diff --git a/ansible/roles/infra_create_vms/tasks/validate_vms.yml b/ansible/roles/infra_create_vms/tasks/validate_vms.yml
new file mode 100644
index 000000000..ce5eff211
--- /dev/null
+++ b/ansible/roles/infra_create_vms/tasks/validate_vms.yml
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Validate if VM is responding
+ wait_for:
+ host: "{{ item.value }}"
+ port: 22
+ timeout: 10
+ with_dict: "{{ name_ip }}"
+ register: result
+ ignore_errors: yes
+
+- name: Stop VM if it is not responding
+ virt:
+ name: "{{ item.key }}"
+ command: destroy
+ when: result is failed
+ with_dict: "{{ name_ip }}"
+
+- name: Wait for VM to stop
+ pause:
+ seconds: 10
+ when: result is failed
+
+- name: Start VM
+ virt:
+ name: "{{ item.key }}"
+ command: start
+ when: result is failed
+ with_dict: "{{ name_ip }}"
+
+- name: Validate if VM is responding
+ wait_for:
+ host: "{{ item.value }}"
+ port: 22
+ timeout: 10
+ when: result is failed
+ with_dict: "{{ name_ip }}"
+ register: result
+
+- fail:
+ msg: "FAILED on {{ name_ip }}"
+ when: result is failed
diff --git a/ansible/roles/infra_prepare_vms/tasks/main.yml b/ansible/roles/infra_prepare_vms/tasks/main.yml
index d7ed08511..4e0d9c373 100644
--- a/ansible/roles/infra_prepare_vms/tasks/main.yml
+++ b/ansible/roles/infra_prepare_vms/tasks/main.yml
@@ -100,6 +100,3 @@
secondary_ip: "{{ item.interfaces[1].ip }}"
when: item.hostname == 'yardstickvm'
with_items: "{{ infra_deploy_vars.nodes }}"
-
-- name: Workaround, not all VMs are ready by that time
- pause: seconds=20
diff --git a/ansible/roles/install_dpdk/tasks/main.yml b/ansible/roles/install_dpdk/tasks/main.yml
index 5bcfb50b1..f89a43cae 100644
--- a/ansible/roles/install_dpdk/tasks/main.yml
+++ b/ansible/roles/install_dpdk/tasks/main.yml
@@ -114,9 +114,16 @@
path: "{{ INSTALL_BIN_PATH }}"
state: directory
+- set_fact:
+ major: "{{ dpdk_version.split('.')[0] }}"
+ minor: "{{ dpdk_version.split('.')[1] }}"
+
+- set_fact:
+ major_minor_version: "{{ major }}.{{ minor }}"
+
- name: copy dpdk-devbind.py to correct location
copy:
- src: "{{ dpdk_devbind_usertools if dpdk_version|float >= 17.02 else dpdk_devbind_tools }}"
+ src: "{{ dpdk_devbind_usertools if major_minor_version|float >= 17.02 else dpdk_devbind_tools }}"
dest: "{{ INSTALL_BIN_PATH }}/dpdk-devbind.py"
remote_src: yes
force: yes
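
Note (not part of the patch): the new set_fact tasks exist because a full three-part DPDK version string such as "17.05.2" does not convert cleanly with the Jinja2 "|float" filter, so only the "major.minor" part is compared. A minimal Python sketch of the equivalent check, assuming illustrative version strings:

    def uses_usertools(dpdk_version):
        # dpdk-devbind.py is taken from usertools/ for DPDK >= 17.02,
        # mirroring the comparison in the task above
        major, minor = dpdk_version.split('.')[:2]
        return float('{}.{}'.format(major, minor)) >= 17.02

    assert uses_usertools('17.05.2')
    assert not uses_usertools('16.07.1')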
diff --git a/ansible/roles/install_samplevnf/vars/main.yml b/ansible/roles/install_samplevnf/vars/main.yml
index c92a9b09f..e2a37377a 100644
--- a/ansible/roles/install_samplevnf/vars/main.yml
+++ b/ansible/roles/install_samplevnf/vars/main.yml
@@ -48,13 +48,11 @@ vnf_build_dirs:
ACL: vACL
FW: vFW
CGNAPT: vCGNAPT
- PE: vPE
UDP_Replay: UDP_Replay
PROX: DPPD-PROX
vnf_app_names:
ACL: vACL
FW: vFW
CGNAPT: vCGNAPT
- PE: vPE
UDP_Replay: UDP_Replay
PROX: prox
diff --git a/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml b/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml
index d858257b1..3a29a8a90 100644
--- a/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml
+++ b/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml
@@ -45,8 +45,6 @@
vnf_name: FW
- role: install_samplevnf
vnf_name: CGNAPT
- - role: install_samplevnf
- vnf_name: PE
# build shared DPDK for collectd only, required DPDK downloaded already
- install_dpdk_shared
- install_rabbitmq
diff --git a/ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml b/ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml
index aab5a741c..b27933bd1 100644
--- a/ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml
+++ b/ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml
@@ -55,8 +55,6 @@
vnf_name: FW
- role: install_samplevnf
vnf_name: CGNAPT
- - role: install_samplevnf
- vnf_name: PE
# build shared DPDK for collectd only, required DPDK downloaded already
- install_dpdk_shared
- install_rabbitmq
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 097bc3c3f..71ce6b584 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -51,6 +51,8 @@ EXPOSE 5000 5672
ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR}
ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR}
+# For developers: when `docker build ...` is running from YARDSTICK_REPO_DIR, please change
+# path `./exec_tests.sh` -> `./docker/exec_tests.sh`.
COPY ./exec_tests.sh /usr/local/bin/
ENV NSB_DIR="/opt/nsb_bin"
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index ef41cba03..472310f96 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -8,7 +8,7 @@ Signed-off-by: ting wu <ting.wu@enea.com>
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 62ea0d0..f2f41771 100644
+index 71ce6b58..952d0f78 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,9 +7,9 @@
@@ -42,6 +42,5 @@ index 62ea0d0..f2f41771 100644
+ADD http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img ${IMAGE_DIR}
+ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img ${IMAGE_DIR}
- COPY ./exec_tests.sh /usr/local/bin/
-
-
+ # For developers: when `docker build ...` is running from YARDSTICK_REPO_DIR, please change
+ # path `./exec_tests.sh` -> `./docker/exec_tests.sh`.
diff --git a/samples/ping_bottlenecks.yaml b/samples/ping_bottlenecks.yaml
index 625d4501a..096d70e67 100644
--- a/samples/ping_bottlenecks.yaml
+++ b/samples/ping_bottlenecks.yaml
@@ -19,6 +19,7 @@ run_in_parallel: true
{% set cpu_num = cpu_num or 1 %}
{% set ram_num = ram_num or 512 %}
{% set disk_num = disk_num or 7 %}
+{% set dpdk_enabled = dpdk_enabled or False %}
scenarios:
{% for num in range(stack_num) %}
@@ -43,6 +44,10 @@ contexts:
vcpus: {{cpu_num}}
ram: {{ram_num}}
disk: {{disk_num}}
+ {% if dpdk_enabled %}
+ extra_specs:
+ hw:mem_page_size: "large"
+ {% endif %}
user: ubuntu
placement_groups:
diff --git a/samples/vnf_samples/nsut/acl/acl_rules.yaml b/samples/vnf_samples/nsut/acl/acl_rules.yaml.sample
index 49066e924..4c425d44f 100644
--- a/samples/vnf_samples/nsut/acl/acl_rules.yaml
+++ b/samples/vnf_samples/nsut/acl/acl_rules.yaml.sample
@@ -14,7 +14,12 @@
---
access-list-entries:
-
- actions: [drop,count]
+ actions:
+ - count
+ - nat:
+ port: 1
+ - fwd:
+ port: 0
matches:
destination-ipv4-network: 152.16.40.20/24
destination-port-range:
@@ -24,6 +29,9 @@ access-list-entries:
source-port-range:
lower-port: 0
upper-port: 65535
+ protocol-mask: 255
+ protocol: 127
+ priority: 1
rule-name: rule1588
-
actions: [drop,count]
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_iterationipc.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_iterationipc.yaml
new file mode 100644
index 000000000..184ed6881
--- /dev/null
+++ b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_iterationipc.yaml
@@ -0,0 +1,96 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+{% set provider = provider or none %}
+{% set physical_networks = physical_networks or ['physnet1', 'physnet2'] %}
+{% set segmentation_id = segmentation_id or none %}
+
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
+ topology: vfw-tg-topology.yaml
+ nodes:
+ tg__0: trafficgen_1.yardstick
+ vnf__0: vnf.yardstick
+ options:
+ hugepages_gb: 8
+ framesize:
+ uplink: {64B: 100}
+ downlink: {64B: 100}
+ flow:
+ src_ip: [{'tg__0': 'xe0'}]
+ dst_ip: [{'tg__0': 'xe1'}]
+ count: 1
+ traffic_type: 4
+ rfc2544:
+ allowed_drop_rate: 0.0001 - 0.0001
+ vnf__0:
+ rules: acl_1rule.yaml
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+ runner:
+ type: IterationIPC
+ iterations: 10
+ timeout: 60
+context:
+ # put node context first, so we don't HEAT deploy if node has errors
+ name: yardstick
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 12288
+ disk: 6
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
+ user: ubuntu
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+ trafficgen_1:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ xe0:
+ cidr: '10.0.2.0/24'
+ gateway_ip: 'null'
+ {% if provider %}
+ provider: {{ provider }}
+ physical_network: {{ physical_networks[0] }}
+ {% if segmentation_id %}
+ segmentation_id: {{ segmentation_id }}
+ {% endif %}
+ {% endif %}
+ port_security_enabled: False
+ enable_dhcp: 'false'
+ xe1:
+ cidr: '10.0.3.0/24'
+ gateway_ip: 'null'
+ {% if provider %}
+ provider: {{ provider }}
+ physical_network: {{ physical_networks[1] }}
+ {% if segmentation_id %}
+ segmentation_id: {{ segmentation_id }}
+ {% endif %}
+ {% endif %}
+ port_security_enabled: False
+ enable_dhcp: 'false'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc087.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc087.yaml
index d7441836d..13125ade8 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc087.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc087.yaml
@@ -74,7 +74,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
-
@@ -172,7 +172,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
-
type: "GeneralHA"
@@ -239,7 +239,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
contexts:
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
index 85ec510df..f2996bcc6 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
@@ -73,7 +73,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
-
type: "GeneralHA"
@@ -170,7 +170,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
-
type: "GeneralHA"
@@ -237,7 +237,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
contexts:
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
index a034471aa..27e78a451 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
@@ -75,7 +75,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
-
@@ -208,7 +208,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
-
type: "GeneralHA"
@@ -274,7 +274,7 @@ scenarios:
type: Duration
duration: 1
sla:
- action: monitor
+ action: assert
contexts:
diff --git a/yardstick/benchmark/contexts/__init__.py b/yardstick/benchmark/contexts/__init__.py
index e69de29bb..d50f08cc3 100644
--- a/yardstick/benchmark/contexts/__init__.py
+++ b/yardstick/benchmark/contexts/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CONTEXT_DUMMY = "Dummy"
+CONTEXT_HEAT = "Heat"
+CONTEXT_KUBERNETES = "Kubernetes"
+CONTEXT_NODE = "Node"
+CONTEXT_STANDALONEOVSDPDK = "StandaloneOvsDpdk"
+CONTEXT_STANDALONESRIOV = "StandaloneSriov"
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index 36e8854e8..9faca4c63 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -7,17 +7,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
-class DummyContext(Context):
+class DummyContext(base.Context):
"""Class that handle dummy info.
This class is also used to test the abstract class Context because it
provides a minimal concrete implementation of a subclass.
"""
- __context_type__ = "Dummy"
+ __context_type__ = contexts.CONTEXT_DUMMY
def deploy(self):
"""Don't need to deploy"""
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index ac85b6ffe..f118ffc32 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -16,6 +16,7 @@ from collections import OrderedDict
import ipaddress
import pkg_resources
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
@@ -47,7 +48,7 @@ def h_join(*args):
class HeatContext(Context):
"""Class that represents a context in the logical model"""
- __context_type__ = "Heat"
+ __context_type__ = contexts.CONTEXT_HEAT
def __init__(self):
self.stack = None
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 916f4b12f..7534c4ea5 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -7,26 +7,31 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
+import collections
import logging
-import time
import pkg_resources
+import time
import paramiko
-from yardstick.benchmark.contexts.base import Context
-from yardstick.orchestrator import kubernetes
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base as ctx_base
+from yardstick.benchmark.contexts import model
+from yardstick.common import constants
+from yardstick.common import exceptions
from yardstick.common import kubernetes_utils as k8s_utils
from yardstick.common import utils
+from yardstick.orchestrator import kubernetes
+
LOG = logging.getLogger(__name__)
BITS_LENGTH = 2048
-class KubernetesContext(Context):
+class KubernetesContext(ctx_base.Context):
"""Class that handle nodes info"""
- __context_type__ = "Kubernetes"
+ __context_type__ = contexts.CONTEXT_KUBERNETES
def __init__(self):
self.ssh_key = ''
@@ -38,15 +43,21 @@ class KubernetesContext(Context):
def init(self, attrs):
super(KubernetesContext, self).init(attrs)
+ networks = attrs.get('networks', {})
self.template = kubernetes.KubernetesTemplate(self.name, attrs)
self.ssh_key = '{}-key'.format(self.name)
self.key_path = self._get_key_path()
self.public_key_path = '{}.pub'.format(self.key_path)
+ self._networks = collections.OrderedDict(
+ (net_name, model.Network(net_name, self, network))
+ for net_name, network in networks.items())
def deploy(self):
LOG.info('Creating ssh key')
self._set_ssh_key()
+ self._create_crd()
+ self._create_networks()
LOG.info('Launch containers')
self._create_rcs()
self._create_services()
@@ -60,6 +71,8 @@ class KubernetesContext(Context):
self._delete_rcs()
self._delete_pods()
self._delete_services()
+ self._delete_networks()
+ self._delete_crd()
super(KubernetesContext, self).undeploy()
@@ -86,7 +99,7 @@ class KubernetesContext(Context):
obj.delete()
def _create_rcs(self):
- for obj in self.template.k8s_objs:
+ for obj in self.template.rc_objs:
self._create_rc(obj.get_template())
def _create_rc(self, template):
@@ -104,7 +117,27 @@ class KubernetesContext(Context):
self._delete_pod(pod)
def _delete_pod(self, pod):
- k8s_utils.delete_pod(pod)
+ k8s_utils.delete_pod(pod, skip_codes=[404])
+
+ def _create_crd(self):
+ LOG.info('Create Custom Resource Definition elements')
+ for crd in self.template.crd:
+ crd.create()
+
+ def _delete_crd(self):
+ LOG.info('Delete Custom Resource Definition elements')
+ for crd in self.template.crd:
+ crd.delete()
+
+ def _create_networks(self): # pragma: no cover
+ LOG.info('Create Network elements')
+ for net in self.template.network_objs:
+ net.create()
+
+ def _delete_networks(self): # pragma: no cover
+ LOG.info('Delete Network elements')
+ for net in self.template.network_objs:
+ net.delete()
def _get_key_path(self):
task_id = self.name.split('-')[-1]
@@ -131,28 +164,71 @@ class KubernetesContext(Context):
utils.remove_file(self.public_key_path)
def _get_server(self, name):
- service_name = '{}-service'.format(name)
- service = k8s_utils.get_service_by_name(service_name).ports[0]
-
- host = {
- 'name': service.name,
+ node_ports = self._get_service_ports(name)
+ for sn_port in (sn_port for sn_port in node_ports
+ if sn_port['port'] == constants.SSH_PORT):
+ node_port = sn_port['node_port']
+ break
+ else:
+ raise exceptions.KubernetesSSHPortNotDefined()
+
+ return {
+ 'name': name,
'ip': self._get_node_ip(),
'private_ip': k8s_utils.get_pod_by_name(name).status.pod_ip,
- 'ssh_port': service.node_port,
+ 'ssh_port': node_port,
'user': 'root',
'key_filename': self.key_path,
+ 'interfaces': self._get_interfaces(name),
+ 'service_ports': node_ports
}
- return host
+ def _get_network(self, net_name):
+ """Retrieves the network object, searching by name
+
+ :param net_name: (str) network name
+ :return: (dict) network information (name)
+ """
+ network = self._networks.get(net_name)
+ if not network:
+ return
+ return {'name': net_name}
+
+ def _get_interfaces(self, rc_name):
+ """Retrieves the network list of a replication controller
+
+ :param rc_name: (str) replication controller name
+ :return: (dict) names and information of the networks used in this
+ replication controller; those networks must be defined in the
+ Kubernetes cluster
+ """
+ rc = self.template.get_rc_by_name(rc_name)
+ if not rc:
+ return {}
+ return {name: {'network_name': name,
+ 'local_mac': None,
+ 'local_ip': None}
+ for name in rc.networks}
def _get_node_ip(self):
return k8s_utils.get_node_list().items[0].status.addresses[0].address
- def _get_network(self, attr_name):
- return None
-
def _get_physical_nodes(self):
return None
def _get_physical_node_for_server(self, server_name):
return None
+
+ def _get_service_ports(self, name):
+ service_name = '{}-service'.format(name)
+ service = k8s_utils.get_service_by_name(service_name)
+ if not service:
+ raise exceptions.KubernetesServiceObjectNotDefined()
+ ports = []
+ for port in service.ports:
+ ports.append({'name': port.name,
+ 'node_port': port.node_port,
+ 'port': port.port,
+ 'protocol': port.protocol,
+ 'target_port': port.target_port})
+ return ports
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index d3af98920..d233e02ae 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -7,7 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
import subprocess
import os
import collections
@@ -18,6 +17,7 @@ import six
import pkg_resources
from yardstick import ssh
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
from yardstick.common.ansible_common import AnsibleCommon
@@ -31,7 +31,7 @@ DEFAULT_DISPATCH = 'script'
class NodeContext(Context):
"""Class that handle nodes info"""
- __context_type__ = "Node"
+ __context_type__ = contexts.CONTEXT_NODE
def __init__(self):
self.file_path = None
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index e6a6f99e4..88ad598c3 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -20,11 +20,12 @@ import re
import time
from yardstick import ssh
-from yardstick.network_services.utils import get_nsb_option
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
from yardstick.network_services import utils
+from yardstick.network_services.utils import get_nsb_option
LOG = logging.getLogger(__name__)
@@ -32,12 +33,12 @@ LOG = logging.getLogger(__name__)
MAIN_BRIDGE = 'br0'
-class OvsDpdkContext(Context):
+class OvsDpdkContext(base.Context):
""" This class handles OVS standalone nodes - VM running on Non-Managed NFVi
Configuration: ovs_dpdk
"""
- __context_type__ = "StandaloneOvsDpdk"
+ __context_type__ = contexts.CONTEXT_STANDALONEOVSDPDK
SUPPORTED_OVS_TO_DPDK_MAP = {
'2.6.0': '16.07.1',
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 05fac0218..3da12a9a8 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -18,20 +18,21 @@ import logging
import collections
from yardstick import ssh
-from yardstick.network_services.utils import get_nsb_option
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
+from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.utils import PciAddress
LOG = logging.getLogger(__name__)
-class SriovContext(Context):
+class SriovContext(base.Context):
""" This class handles SRIOV standalone nodes - VM running on Non-Managed NFVi
Configuration: sr-iov
"""
- __context_type__ = "StandaloneSriov"
+ __context_type__ = contexts.CONTEXT_STANDALONESRIOV
def __init__(self):
self.file_path = None
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index f050e8d0f..1dfd6c31e 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -22,7 +22,8 @@ import collections
from six.moves import filter
from jinja2 import Environment
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base as base_context
from yardstick.benchmark.runners import base as base_runner
from yardstick.common.constants import CONF_FILE
from yardstick.common.yaml_loader import yaml_load
@@ -359,7 +360,7 @@ class Task(object): # pragma: no cover
if is_ip_addr(target):
context_cfg['target'] = {"ipaddr": target}
else:
- context_cfg['target'] = Context.get_server(target)
+ context_cfg['target'] = base_context.Context.get_server(target)
if self._is_same_context(cfg["host"], target):
context_cfg['target']["ipaddr"] = context_cfg['target']["private_ip"]
else:
@@ -367,7 +368,7 @@ class Task(object): # pragma: no cover
host_name = server_name.get('host', scenario_cfg.get('host'))
if host_name:
- context_cfg['host'] = Context.get_server(host_name)
+ context_cfg['host'] = base_context.Context.get_server(host_name)
for item in [server_name, scenario_cfg]:
try:
@@ -384,7 +385,8 @@ class Task(object): # pragma: no cover
ip_list.append(target)
context_cfg['target'] = {}
else:
- context_cfg['target'] = Context.get_server(target)
+ context_cfg['target'] = (
+ base_context.Context.get_server(target))
if self._is_same_context(scenario_cfg["host"],
target):
ip_list.append(context_cfg["target"]["private_ip"])
@@ -412,7 +414,8 @@ class Task(object): # pragma: no cover
with attribute name mapping when using external heat templates
"""
for context in self.contexts:
- if context.__context_type__ not in {"Heat", "Kubernetes"}:
+ if context.__context_type__ not in {contexts.CONTEXT_HEAT,
+ contexts.CONTEXT_KUBERNETES}:
continue
host = context._get_server(host_attr)
@@ -553,19 +556,19 @@ class TaskParser(object): # pragma: no cover
elif "contexts" in cfg:
context_cfgs = cfg["contexts"]
else:
- context_cfgs = [{"type": "Dummy"}]
+ context_cfgs = [{"type": contexts.CONTEXT_DUMMY}]
- contexts = []
+ _contexts = []
for cfg_attrs in context_cfgs:
cfg_attrs['task_id'] = task_id
# default to Heat context because we are testing OpenStack
- context_type = cfg_attrs.get("type", "Heat")
- context = Context.get(context_type)
+ context_type = cfg_attrs.get("type", contexts.CONTEXT_HEAT)
+ context = base_context.Context.get(context_type)
context.init(cfg_attrs)
# Update the name in case the context has used the name_suffix
cfg_attrs['name'] = context.name
- contexts.append(context)
+ _contexts.append(context)
run_in_parallel = cfg.get("run_in_parallel", False)
@@ -578,17 +581,17 @@ class TaskParser(object): # pragma: no cover
# relative to task path
scenario["task_path"] = os.path.dirname(self.path)
- self._change_node_names(scenario, contexts)
+ self._change_node_names(scenario, _contexts)
# TODO we need something better here, a class that represent the file
return {'scenarios': cfg['scenarios'],
'run_in_parallel': run_in_parallel,
'meet_precondition': meet_precondition,
- 'contexts': contexts,
+ 'contexts': _contexts,
'rendered': rendered}
@staticmethod
- def _change_node_names(scenario, contexts):
+ def _change_node_names(scenario, _contexts):
"""Change the node names in a scenario, depending on the context config
The nodes (VMs or physical servers) are referred in the context section
@@ -627,7 +630,7 @@ class TaskParser(object): # pragma: no cover
target: target-k8s
"""
def qualified_name(name):
- for context in contexts:
+ for context in _contexts:
host_name, ctx_name = context.split_host_name(name)
if context.assigned_name == ctx_name:
return '{}{}{}'.format(host_name,
@@ -718,7 +721,8 @@ def _is_background_scenario(scenario):
def parse_nodes_with_context(scenario_cfg):
"""parse the 'nodes' fields in scenario """
# ensure consistency in node instantiation order
- return OrderedDict((nodename, Context.get_server(scenario_cfg["nodes"][nodename]))
+ return OrderedDict((nodename, base_context.Context.get_server(
+ scenario_cfg["nodes"][nodename]))
for nodename in sorted(scenario_cfg["nodes"]))
@@ -734,7 +738,7 @@ def get_networks_from_nodes(nodes):
network_name = interface.get('network_name')
if not network_name:
continue
- network = Context.get_network(network_name)
+ network = base_context.Context.get_network(network_name)
if network:
networks[network['name']] = network
return networks
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index fbdf6c281..af2557441 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -12,27 +12,26 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
+# This is a modified copy of ``rally/rally/benchmark/runners/base.py``
-# yardstick comment: this is a modified copy of
-# rally/rally/benchmark/runners/base.py
-
-from __future__ import absolute_import
-
+import importlib
import logging
import multiprocessing
import subprocess
import time
import traceback
-from subprocess import CalledProcessError
-
-import importlib
-from six.moves.queue import Empty
+from six import moves
-import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
+from yardstick.common import utils
from yardstick.dispatcher.base import Base as DispatcherBase
+
log = logging.getLogger(__name__)
@@ -41,7 +40,7 @@ def _execute_shell_command(command):
exitcode = 0
try:
output = subprocess.check_output(command, shell=True)
- except CalledProcessError:
+ except subprocess.CalledProcessError:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n ", command)
@@ -245,7 +244,7 @@ class Runner(object):
log.debug("output_queue size %s", self.output_queue.qsize())
try:
result.update(self.output_queue.get(True, 1))
- except Empty:
+ except moves.queue.Empty:
pass
return result
@@ -259,7 +258,7 @@ class Runner(object):
log.debug("result_queue size %s", self.result_queue.qsize())
try:
one_record = self.result_queue.get(True, 1)
- except Empty:
+ except moves.queue.Empty:
pass
else:
if output_in_influxdb:
@@ -272,3 +271,22 @@ class Runner(object):
dispatchers = DispatcherBase.get(self.config['output_config'])
dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
+
+
+class RunnerProducer(producer.MessagingProducer):
+ """Class implementing the message producer for runners"""
+
+ def __init__(self, _id):
+ super(RunnerProducer, self).__init__(messaging.TOPIC_RUNNER, _id=_id)
+
+ def start_iteration(self, version=1, data=None):
+ data = {} if not data else data
+ self.send_message(
+ messaging.RUNNER_METHOD_START_ITERATION,
+ payloads.RunnerPayload(version=version, data=data))
+
+ def stop_iteration(self, version=1, data=None):
+ data = {} if not data else data
+ self.send_message(
+ messaging.RUNNER_METHOD_STOP_ITERATION,
+ payloads.RunnerPayload(version=version, data=data))
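
Note (not part of the patch): a minimal sketch of how the RunnerProducer added above is meant to be driven; the IterationIPC worker in the next file does this. The task ID value shown is hypothetical:

    from yardstick.benchmark.runners import base as base_runner

    producer = base_runner.RunnerProducer('task-1234')  # hypothetical task_id
    producer.start_iteration()  # notify VNF consumers that an iteration starts
    # ... wait here for the traffic generators' KPI messages ...
    producer.stop_iteration()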
diff --git a/yardstick/benchmark/runners/iteration_ipc.py b/yardstick/benchmark/runners/iteration_ipc.py
new file mode 100644
index 000000000..a0335fdc7
--- /dev/null
+++ b/yardstick/benchmark/runners/iteration_ipc.py
@@ -0,0 +1,205 @@
+# Copyright 2018: Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A runner that runs a configurable number of times before it returns. Each
+ iteration has a configurable timeout. The loop control depends on the
+ feedback received from the running VNFs. The context PIDs from the VNFs
+ to listen the messages from are given in the scenario "setup" method.
+"""
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from yardstick.benchmark.runners import base as base_runner
+from yardstick.common import exceptions
+from yardstick.common import messaging
+from yardstick.common import utils
+from yardstick.common.messaging import consumer
+from yardstick.common.messaging import payloads
+
+
+LOG = logging.getLogger(__name__)
+
+QUEUE_PUT_TIMEOUT = 10
+ITERATION_TIMEOUT = 180
+
+
+class RunnerIterationIPCEndpoint(consumer.NotificationHandler):
+ """Endpoint class for ``RunnerIterationIPCConsumer``"""
+
+ def tg_method_started(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_STARTED,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)},
+ QUEUE_PUT_TIMEOUT)
+
+ def tg_method_finished(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_FINISHED,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)})
+
+ def tg_method_iteration(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'id': ctxt['id'],
+ 'action': messaging.TG_METHOD_ITERATION,
+ 'payload': payloads.TrafficGeneratorPayload.dict_to_obj(
+ kwargs)})
+
+
+class RunnerIterationIPCConsumer(consumer.MessagingConsumer):
+ """MQ consumer for "IterationIPC" runner"""
+
+ def __init__(self, _id, ctx_ids):
+ self._id = _id
+ self._queue = multiprocessing.Queue()
+ endpoints = [RunnerIterationIPCEndpoint(_id, ctx_ids, self._queue)]
+ super(RunnerIterationIPCConsumer, self).__init__(
+ messaging.TOPIC_TG, ctx_ids, endpoints)
+ self._kpi_per_id = {ctx: [] for ctx in ctx_ids}
+ self.iteration_index = None
+
+ def is_all_kpis_received_in_iteration(self):
+ """Check if all producers registered have sent the ITERATION msg
+
+ During the present iteration, all producers (traffic generators) must
+ start and finish the traffic injection, and at the end of the traffic
+ injection a TG_METHOD_ITERATION must be sent. This function will check
+ all KPIs in the present iteration are received. E.g.:
+ self.iteration_index = 2
+
+ self._kpi_per_id = {
+ 'ctx1': [kpi0, kpi1, kpi2],
+ 'ctx2': [kpi0, kpi1]} --> return False
+
+ self._kpi_per_id = {
+ 'ctx1': [kpi0, kpi1, kpi2],
+ 'ctx2': [kpi0, kpi1, kpi2]} --> return True
+ """
+ while not self._queue.empty():
+ msg = self._queue.get(True, 1)
+ if msg['action'] == messaging.TG_METHOD_ITERATION:
+ id_iter_list = self._kpi_per_id[msg['id']]
+ id_iter_list.append(msg['payload'].kpi)
+
+ return all(len(id_iter_list) == self.iteration_index
+ for id_iter_list in self._kpi_per_id.values())
+
+
+def _worker_process(queue, cls, method_name, scenario_cfg,
+ context_cfg, aborted, output_queue): # pragma: no cover
+ runner_cfg = scenario_cfg['runner']
+
+ timeout = runner_cfg.get('timeout', ITERATION_TIMEOUT)
+ iterations = runner_cfg.get('iterations', 1)
+ run_step = runner_cfg.get('run_step', 'setup,run,teardown')
+ LOG.info('Worker START. Iterations %d times, class %s', iterations, cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ benchmark = cls(scenario_cfg, context_cfg)
+ method = getattr(benchmark, method_name)
+
+ if 'setup' not in run_step:
+ raise exceptions.RunnerIterationIPCSetupActionNeeded()
+ benchmark.setup()
+ producer_ctxs = benchmark.get_mq_ids()
+ if not producer_ctxs:
+ raise exceptions.RunnerIterationIPCNoCtxs()
+
+ mq_consumer = RunnerIterationIPCConsumer(os.getpid(), producer_ctxs)
+ mq_consumer.start_rpc_server()
+ mq_producer = base_runner.RunnerProducer(scenario_cfg['task_id'])
+
+ iteration_index = 1
+ while 'run' in run_step:
+ LOG.debug('runner=%(runner)s seq=%(sequence)s START',
+ {'runner': runner_cfg['runner_id'],
+ 'sequence': iteration_index})
+ data = {}
+ result = None
+ errors = ''
+ mq_consumer.iteration_index = iteration_index
+ mq_producer.start_iteration()
+
+ try:
+ utils.wait_until_true(
+ mq_consumer.is_all_kpis_received_in_iteration,
+ timeout=timeout, sleep=2)
+ result = method(data)
+ except Exception: # pylint: disable=broad-except
+ errors = traceback.format_exc()
+ LOG.exception(errors)
+
+ mq_producer.stop_iteration()
+
+ if result:
+ output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+ benchmark_output = {'timestamp': time.time(),
+ 'sequence': iteration_index,
+ 'data': data,
+ 'errors': errors}
+ queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
+
+ LOG.debug('runner=%(runner)s seq=%(sequence)s END',
+ {'runner': runner_cfg['runner_id'],
+ 'sequence': iteration_index})
+
+ iteration_index += 1
+ if iteration_index > iterations or aborted.is_set():
+ LOG.info('"IterationIPC" worker END')
+ break
+
+ if 'teardown' in run_step:
+ try:
+ benchmark.teardown()
+ except Exception:
+ LOG.exception('Exception during teardown process')
+ mq_consumer.stop_rpc_server()
+ raise SystemExit(1)
+
+ LOG.debug('Data queue size = %s', queue.qsize())
+ LOG.debug('Output queue size = %s', output_queue.qsize())
+ mq_consumer.stop_rpc_server()
+
+
+class IterationIPCRunner(base_runner.Runner):
+ """Run a scenario for a configurable number of times.
+
+ Each iteration has a configurable timeout. The loop control depends on the
+ feedback received from the running VNFs. The context PIDs of the VNFs to
+ listen to for messages are given in the scenario "setup" method.
+ """
+ __execution_type__ = 'IterationIPC'
+
+ def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = '{}-{}-{}'.format(
+ self.__execution_type__, scenario_cfg.get('type'), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, scenario_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
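
Note (not part of the patch): the worker above relies on the scenario exposing MQ producer IDs after setup() via get_mq_ids(), as NetworkServiceTestCase does in the diff below. A hypothetical minimal scenario honoring that contract, for illustration only:

    # Illustrative sketch of the interface _worker_process expects
    class MinimalScenario(object):
        def setup(self):
            # store the MQ producer IDs (e.g. traffic generator PIDs) here
            self._mq_ids = [1001, 1002]

        def get_mq_ids(self):
            return self._mq_ids

        def run(self, data):
            data['iteration_done'] = True
            return {'kpi': 1}

        def teardown(self):
            pass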
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 30ac1bea9..90a87ac29 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -119,3 +119,7 @@ class Scenario(object):
except TypeError:
dic[k] = v
return dic
+
+ def get_mq_ids(self): # pragma: no cover
+ """Return stored MQ producer IDs, if defined"""
+ pass
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index eb62d6222..7a11d3e76 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -50,7 +50,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
__scenario_type__ = "NSPerf"
- def __init__(self, scenario_cfg, context_cfg): # Yardstick API
+ def __init__(self, scenario_cfg, context_cfg): # pragma: no cover
super(NetworkServiceTestCase, self).__init__()
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
@@ -61,6 +61,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self.traffic_profile = None
self.node_netdevs = {}
self.bin_path = get_nsb_option('bin_path', '')
+ self._mq_ids = []
def _get_ip_flow_range(self, ip_start_range):
@@ -168,18 +169,18 @@ class NetworkServiceTestCase(scenario_base.Scenario):
topology_yaml = vnfdgen.generate_vnfd(topology, topolgy_data)
self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
- def _find_vnf_name_from_id(self, vnf_id):
+ def _find_vnf_name_from_id(self, vnf_id): # pragma: no cover
return next((vnfd["vnfd-id-ref"]
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
- def _find_vnfd_from_vnf_idx(self, vnf_id):
+ def _find_vnfd_from_vnf_idx(self, vnf_id): # pragma: no cover
return next((vnfd
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
@staticmethod
- def find_node_if(nodes, name, if_name, vld_id):
+ def find_node_if(nodes, name, if_name, vld_id): # pragma: no cover
try:
# check for xe0, xe1
intf = nodes[name]["interfaces"][if_name]
@@ -272,14 +273,14 @@ class NetworkServiceTestCase(scenario_base.Scenario):
node0_if["peer_intf"] = node1_copy
node1_if["peer_intf"] = node0_copy
- def _update_context_with_topology(self):
+ def _update_context_with_topology(self): # pragma: no cover
for vnfd in self.topology["constituent-vnfd"]:
vnf_idx = vnfd["member-vnf-index"]
vnf_name = self._find_vnf_name_from_id(vnf_idx)
vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
self.context_cfg["nodes"][vnf_name].update(vnfd)
- def _generate_pod_yaml(self):
+ def _generate_pod_yaml(self): # pragma: no cover
context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
# convert OrderedDict to a list
# pod.yaml nodes is a list
@@ -293,7 +294,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
explicit_start=True)
@staticmethod
- def _serialize_node(node):
+ def _serialize_node(node): # pragma: no cover
new_node = copy.deepcopy(node)
# name field is required
# remove context suffix
@@ -315,7 +316,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self._update_context_with_topology()
@classmethod
- def get_vnf_impl(cls, vnf_model_id):
+ def get_vnf_impl(cls, vnf_model_id): # pragma: no cover
""" Find the implementing class from vnf_model["vnf"]["name"] field
:param vnf_model_id: parsed vnfd model ID field
@@ -343,7 +344,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
raise exceptions.IncorrectConfig(error_msg=message)
@staticmethod
- def create_interfaces_from_node(vnfd, node):
+ def create_interfaces_from_node(vnfd, node): # pragma: no cover
ext_intfs = vnfd["vdu"][0]["external-interface"] = []
# have to sort so xe0 goes first
for intf_name, intf in sorted(node['interfaces'].items()):
@@ -405,17 +406,14 @@ class NetworkServiceTestCase(scenario_base.Scenario):
pass
self.create_interfaces_from_node(vnfd, node)
vnf_impl = self.get_vnf_impl(vnfd['id'])
- vnf_instance = vnf_impl(node_name, vnfd)
+ vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
vnfs.append(vnf_instance)
self.vnfs = vnfs
return vnfs
def setup(self):
- """ Setup infrastructure, provission VNFs & start traffic
-
- :return:
- """
+ """Setup infrastructure, provission VNFs & start traffic"""
# 1. Verify if infrastructure mapping can meet topology
self.map_topology_to_infrastructure()
# 1a. Load VNF models
@@ -457,6 +455,11 @@ class NetworkServiceTestCase(scenario_base.Scenario):
for traffic_gen in traffic_runners:
LOG.info("Starting traffic on %s", traffic_gen.name)
traffic_gen.run_traffic(self.traffic_profile)
+ self._mq_ids.append(traffic_gen.get_mq_producer_id())
+
+ def get_mq_ids(self): # pragma: no cover
+ """Return stored MQ producer IDs"""
+ return self._mq_ids
def run(self, result): # yardstick API
""" Yardstick calls run() at intervals defined in the yaml and
@@ -495,10 +498,10 @@ class NetworkServiceTestCase(scenario_base.Scenario):
LOG.exception("")
raise RuntimeError("Error in teardown")
- def pre_run_wait_time(self, time_seconds):
+ def pre_run_wait_time(self, time_seconds): # pragma: no cover
"""Time waited before executing the run method"""
time.sleep(time_seconds)
- def post_run_wait_time(self, time_seconds):
+ def post_run_wait_time(self, time_seconds): # pragma: no cover
"""Time waited after executing the run method"""
pass
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 1ebd32509..4ed40f8af 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -6,7 +6,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
import errno
import os
@@ -14,11 +13,9 @@ from functools import reduce
import pkg_resources
-# this module must only import other modules that do
-# not require loggers to be created, so this cannot
-# include yardstick.common.utils
from yardstick.common.yaml_loader import yaml_load
+
dirname = os.path.dirname
abspath = os.path.abspath
join = os.path.join
@@ -171,3 +168,10 @@ TESTSUITE_PRE = 'opnfv_'
# OpenStack cloud default config parameters
OS_CLOUD_DEFAULT_CONFIG = {'verify': False}
+
+# Kubernetes
+SCOPE_NAMESPACED = 'Namespaced'
+SCOPE_CLUSTER = 'Cluster'
+
+# VNF definition
+SSH_PORT = 22
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 935c77866..cbb294989 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -14,6 +14,8 @@
from oslo_utils import excutils
+from yardstick.common import constants
+
class ProcessExecutionError(RuntimeError):
def __init__(self, message, returncode):
@@ -191,6 +193,15 @@ class TaskRenderError(YardstickException):
message = 'Failed to render template:\n%(input_task)s'
+class RunnerIterationIPCSetupActionNeeded(YardstickException):
+ message = ('IterationIPC needs the "setup" action to retrieve the PIDs '
+ 'of the VNF handling processes that send the messages')
+
+
+class RunnerIterationIPCNoCtxs(YardstickException):
+ message = 'Benchmark "setup" action did not return any VNF process PID'
+
+
class TimerTimeout(YardstickException):
message = 'Timer timeout expired, %(timeout)s seconds'
@@ -199,10 +210,67 @@ class WaitTimeout(YardstickException):
message = 'Wait timeout while waiting for condition'
+class KubernetesApiException(YardstickException):
+ message = ('Kubernetes API errors. Action: %(action)s, '
+ 'resource: %(resource)s')
+
+
+class KubernetesConfigFileNotFound(YardstickException):
+ message = 'Config file (%s) not found' % constants.K8S_CONF_FILE
+
+
class KubernetesTemplateInvalidVolumeType(YardstickException):
message = 'No valid "volume" types present in %(volume)s'
+class KubernetesSSHPortNotDefined(YardstickException):
+ message = 'Port 22 needs to be defined'
+
+
+class KubernetesServiceObjectNotDefined(YardstickException):
+ message = 'ServiceObject is not defined'
+
+
+class KubernetesServiceObjectDefinitionError(YardstickException):
+ message = ('Kubernetes Service object definition error, missing '
+ 'parameters: %(missing_parameters)s')
+
+
+class KubernetesServiceObjectNameError(YardstickException):
+ message = ('Kubernetes Service object name "%(name)s" does not comply '
+ 'with the naming convention')
+
+
+class KubernetesCRDObjectDefinitionError(YardstickException):
+ message = ('Kubernetes Custom Resource Definition Object error, missing '
+ 'parameters: %(missing_parameters)s')
+
+
+class KubernetesNetworkObjectDefinitionError(YardstickException):
+ message = ('Kubernetes Network object definition error, missing '
+ 'parameters: %(missing_parameters)s')
+
+
+class KubernetesNetworkObjectKindMissing(YardstickException):
+ message = 'Kubernetes kind "Network" is not defined'
+
+
+class KubernetesWrongRestartPolicy(YardstickException):
+ message = 'Restart policy "%(rpolicy)s" is not valid'
+
+
+class KubernetesContainerPortNotDefined(YardstickException):
+ message = 'Container port not defined in "%(port)s"'
+
+
+class KubernetesContainerWrongImagePullPolicy(YardstickException):
+ message = 'Image pull policy must be "Always", "IfNotPresent" or "Never"'
+
+
+class KubernetesContainerCommandType(YardstickException):
+ message = '"args" and "command" must be string or list of strings'
+
+
class ScenarioCreateNetworkError(YardstickException):
message = 'Create Neutron Network Scenario failed'
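
Note (not part of the patch): the new exceptions above follow the existing YardstickException pattern of %-style messages filled from keyword arguments, as the kubernetes_utils changes below use them. A brief illustrative sketch; the parameter values are hypothetical:

    from yardstick.common import exceptions

    # renders as: "Kubernetes API errors. Action: create, resource: Pod"
    raise exceptions.KubernetesApiException(action='create', resource='Pod')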
diff --git a/yardstick/common/httpClient.py b/yardstick/common/httpClient.py
index 54f7be670..5b7831144 100644
--- a/yardstick/common/httpClient.py
+++ b/yardstick/common/httpClient.py
@@ -26,10 +26,11 @@ class HttpClient(object):
while True:
try:
response = requests.post(url, data=data, headers=headers)
+ response.raise_for_status()
result = response.json()
logger.debug('The result is: %s', result)
return result
- except Exception:
+ except Exception: # pylint: disable=broad-except
if time.time() > t_end:
logger.exception('')
raise
@@ -37,4 +38,5 @@ class HttpClient(object):
def get(self, url):
response = requests.get(url)
+ response.raise_for_status()
return response.json()
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index ee8e8edcd..35e590f2b 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -13,6 +13,8 @@ from kubernetes import config
from kubernetes.client.rest import ApiException
from yardstick.common import constants as consts
+from yardstick.common import exceptions
+
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
@@ -22,12 +24,26 @@ def get_core_api(): # pragma: no cover
try:
config.load_kube_config(config_file=consts.K8S_CONF_FILE)
except IOError:
- LOG.exception('config file not found')
- raise
-
+ raise exceptions.KubernetesConfigFileNotFound()
return client.CoreV1Api()
+def get_extensions_v1beta_api():
+ try:
+ config.load_kube_config(config_file=consts.K8S_CONF_FILE)
+ except IOError:
+ raise exceptions.KubernetesConfigFileNotFound()
+ return client.ApiextensionsV1beta1Api()
+
+
+def get_custom_objects_api():
+ try:
+ config.load_kube_config(config_file=consts.K8S_CONF_FILE)
+ except IOError:
+ raise exceptions.KubernetesConfigFileNotFound()
+ return client.CustomObjectsApi()
+
+
def get_node_list(**kwargs): # pragma: no cover
core_v1_api = get_core_api()
try:
@@ -120,8 +136,10 @@ def delete_replication_controller(name,
def delete_pod(name,
namespace='default',
wait=False,
+ skip_codes=None,
**kwargs): # pragma: no cover
# pylint: disable=unused-argument
+ skip_codes = [] if not skip_codes else skip_codes
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -130,9 +148,12 @@ def delete_pod(name,
namespace,
body,
**kwargs)
- except ApiException:
- LOG.exception('Delete pod failed')
- raise
+ except ApiException as e:
+ if e.status in skip_codes:
+ LOG.info(e.reason)
+ else:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Pod')
def read_pod(name,
@@ -187,6 +208,70 @@ def delete_config_map(name,
raise
+def create_custom_resource_definition(body):
+ api = get_extensions_v1beta_api()
+ body_obj = client.V1beta1CustomResourceDefinition(
+ spec=body['spec'], metadata=body['metadata'])
+ try:
+ api.create_custom_resource_definition(body_obj)
+ except ValueError:
+ # NOTE(ralonsoh): bug in kubernetes-client/python 6.0.0
+ # https://github.com/kubernetes-client/python/issues/491
+ pass
+ except ApiException:
+ raise exceptions.KubernetesApiException(
+ action='create', resource='CustomResourceDefinition')
+
+
+def delete_custom_resource_definition(name):
+ api = get_extensions_v1beta_api()
+ body_obj = client.V1DeleteOptions()
+ try:
+ api.delete_custom_resource_definition(name, body_obj)
+ except ApiException:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='CustomResourceDefinition')
+
+
+def get_custom_resource_definition(kind):
+    api = get_extensions_v1beta_api()
+    try:
+        crd_list = api.list_custom_resource_definition()
+        for crd_obj in (crd_obj for crd_obj in crd_list.items
+                        if crd_obj.spec.names.kind == kind):
+            return crd_obj
+        return None
+    except ApiException:
+        raise exceptions.KubernetesApiException(
+            action='list', resource='CustomResourceDefinition')
+
+
+def create_network(scope, group, version, plural, body, namespace='default'):
+ api = get_custom_objects_api()
+ try:
+ if scope == consts.SCOPE_CLUSTER:
+ api.create_cluster_custom_object(group, version, plural, body)
+ else:
+ api.create_namespaced_custom_object(
+ group, version, namespace, plural, body)
+ except ApiException:
+ raise exceptions.KubernetesApiException(
+ action='create', resource='Custom Object: Network')
+
+
+def delete_network(scope, group, version, plural, name, namespace='default'):
+ api = get_custom_objects_api()
+ try:
+ if scope == consts.SCOPE_CLUSTER:
+ api.delete_cluster_custom_object(group, version, plural, name, {})
+ else:
+ api.delete_namespaced_custom_object(
+ group, version, namespace, plural, name, {})
+ except ApiException:
+ raise exceptions.KubernetesApiException(
+ action='delete', resource='Custom Object: Network')
+
+
def get_pod_list(namespace='default'): # pragma: no cover
core_v1_api = get_core_api()
try:
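
A hedged sketch of how these helpers are expected to be called, for example from the Kubernetes context (pod name, group, plural and plugin values are illustrative):

from yardstick.common import constants as consts
from yardstick.common import kubernetes_utils as k8s_utils

# Deleting a pod that may already be gone: a 404 is logged and skipped
# instead of raising KubernetesApiException.
k8s_utils.delete_pod('host-k8s', skip_codes=[404])

# "Network" custom objects may be cluster- or namespace-scoped.
k8s_utils.create_network(
    consts.SCOPE_CLUSTER, 'k8s.com', 'v1', 'networks',
    {'apiVersion': 'k8s.com/v1', 'kind': 'Network',
     'metadata': {'name': 'flannel'},
     'plugin': 'flannel', 'args': 'flannel_network_args'})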
diff --git a/yardstick/common/messaging/__init__.py b/yardstick/common/messaging/__init__.py
index f0f012ec3..bd700d9b1 100644
--- a/yardstick/common/messaging/__init__.py
+++ b/yardstick/common/messaging/__init__.py
@@ -1,14 +1,3 @@
-# Copyright (c) 2018 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -28,9 +17,17 @@ TRANSPORT_URL = (MQ_SERVICE + '://' + MQ_USER + ':' + MQ_PASS + '@' + SERVER +
RPC_SERVER_EXECUTOR = 'threading'
# Topics.
-RUNNER = 'runner'
+TOPIC_TG = 'topic_traffic_generator'
+TOPIC_RUNNER = 'topic_runner'
# Methods.
-# RUNNER methods:
-RUNNER_INFO = 'runner_info'
-RUNNER_LOOP = 'runner_loop'
+# Traffic generator consumers methods. Names must match the methods implemented
+# in the consumer endpoint class.
+TG_METHOD_STARTED = 'tg_method_started'
+TG_METHOD_FINISHED = 'tg_method_finished'
+TG_METHOD_ITERATION = 'tg_method_iteration'
+
+# Runner consumers methods. Names must match the methods implemented in the
+# consumer endpoint class.
+RUNNER_METHOD_START_ITERATION = "runner_method_start_iteration"
+RUNNER_METHOD_STOP_ITERATION = "runner_method_stop_iteration"
diff --git a/yardstick/common/messaging/consumer.py b/yardstick/common/messaging/consumer.py
index 24ec6f184..7ce9bdaf7 100644
--- a/yardstick/common/messaging/consumer.py
+++ b/yardstick/common/messaging/consumer.py
@@ -29,9 +29,10 @@ LOG = logging.getLogger(__name__)
class NotificationHandler(object):
"""Abstract class to define a endpoint object for a MessagingConsumer"""
- def __init__(self, _id, ctx_pids, queue):
+ def __init__(self, _id, ctx_ids, queue):
+ super(NotificationHandler, self).__init__()
self._id = _id
- self._ctx_pids = ctx_pids
+ self._ctx_ids = ctx_ids
self._queue = queue
@@ -43,11 +44,11 @@ class MessagingConsumer(object):
the messages published by a `MessagingNotifier`.
"""
- def __init__(self, topic, pids, endpoints, fanout=True):
+ def __init__(self, topic, ids, endpoints, fanout=True):
"""Init function.
:param topic: (string) MQ exchange topic
- :param pids: (list of int) list of PIDs of the processes implementing
+ :param ids: (list of int) list of IDs of the processes implementing
the MQ Notifier which will be in the message context
:param endpoints: (list of class) list of classes implementing the
methods (see `MessagingNotifier.send_message) used by
@@ -58,7 +59,7 @@ class MessagingConsumer(object):
:returns: `MessagingConsumer` class object
"""
- self._pids = pids
+ self._ids = ids
self._endpoints = endpoints
self._transport = oslo_messaging.get_rpc_transport(
cfg.CONF, url=messaging.TRANSPORT_URL)
diff --git a/yardstick/common/messaging/payloads.py b/yardstick/common/messaging/payloads.py
index d29d79808..8ede1e58e 100644
--- a/yardstick/common/messaging/payloads.py
+++ b/yardstick/common/messaging/payloads.py
@@ -51,3 +51,23 @@ class Payload(object):
def dict_to_obj(cls, _dict):
"""Returns a Payload object built from the dictionary elements"""
return cls(**_dict)
+
+
+class TrafficGeneratorPayload(Payload):
+ """Base traffic generator payload class"""
+ REQUIRED_FIELDS = {
+ 'version', # (str) version of the payload transmitted.
+ 'iteration', # (int) iteration index during the traffic injection,
+ # starting from 1.
+ 'kpi' # (dict) collection of KPIs collected from the traffic
+ # injection. The content will depend on the generator and the
+ # traffic type.
+ }
+
+
+class RunnerPayload(Payload):
+ """Base runner payload class"""
+ REQUIRED_FIELDS = {
+ 'version', # (str) version of the payload transmitted.
+ 'data' # (dict) generic container of data to be used if needed.
+ }
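
A minimal sketch of building and serializing these payloads, assuming the ``Payload`` base class validates ``REQUIRED_FIELDS`` and provides ``obj_to_dict``/``dict_to_obj`` as used in the tests below (KPI values are illustrative):

from yardstick.common.messaging import payloads

tg_payload = payloads.TrafficGeneratorPayload(
    version=1, iteration=5, kpi={'rx_throughput_fps': 12345.0})
data = tg_payload.obj_to_dict()       # plain dict, safe to cast over the MQ
restored = payloads.TrafficGeneratorPayload.dict_to_obj(data)

runner_payload = payloads.RunnerPayload(version=1, data={'action': 'start'})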
diff --git a/yardstick/common/messaging/producer.py b/yardstick/common/messaging/producer.py
index b6adc0c17..aadab649d 100644
--- a/yardstick/common/messaging/producer.py
+++ b/yardstick/common/messaging/producer.py
@@ -34,18 +34,18 @@ class MessagingProducer(object):
messages in a message queue.
"""
- def __init__(self, topic, pid=os.getpid(), fanout=True):
+ def __init__(self, topic, _id=os.getpid(), fanout=True):
"""Init function.
:param topic: (string) MQ exchange topic
- :param pid: (int) PID of the process implementing this MQ Notifier
+        :param _id: (int) ID of the process implementing this MQ Notifier
:param fanout: (bool) MQ clients may request that a copy of the message
be delivered to all servers listening on a topic by
setting fanout to ``True``, rather than just one of them
:returns: `MessagingNotifier` class object
"""
self._topic = topic
- self._pid = pid
+ self._id = _id
self._fanout = fanout
self._transport = oslo_messaging.get_rpc_transport(
cfg.CONF, url=messaging.TRANSPORT_URL)
@@ -65,6 +65,11 @@ class MessagingProducer(object):
consumer endpoints
:param payload: (subclass `Payload`) payload content
"""
- self._notifier.cast({'pid': self._pid},
+ self._notifier.cast({'id': self._id},
method,
**payload.obj_to_dict())
+
+ @property
+ def id(self):
+ """Return MQ producer ID"""
+ return self._id
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index f9fe0e336..85cecc714 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -527,3 +527,25 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
if exception and issubclass(exception, Exception):
raise exception # pylint: disable=raising-bad-type
raise exceptions.WaitTimeout
+
+
+def send_socket_command(host, port, command):
+ """Send a string command to a specific port in a host
+
+ :param host: (str) ip or hostname of the host
+ :param port: (int) port number
+ :param command: (str) command to send
+    :return: 0 on success, non-zero error code otherwise
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ ret = 0
+ try:
+ err_number = sock.connect_ex((host, int(port)))
+ if err_number != 0:
+ return err_number
+ sock.sendall(six.b(command))
+ except Exception: # pylint: disable=broad-except
+ ret = 1
+ finally:
+ sock.close()
+ return ret
diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py
index 937c266a6..0c6d97771 100644
--- a/yardstick/network_services/collector/subscriber.py
+++ b/yardstick/network_services/collector/subscriber.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module implements stub for publishing results in yardstick format."""
+
import logging
from yardstick.network_services.nfvi.resource import ResourceProfile
@@ -31,12 +31,13 @@ class Collector(object):
self.bin_path = get_nsb_option('bin_path', '')
self.resource_profiles = {}
- for ctx_name, nodes in contexts_nodes.items():
- for node in (node for node in nodes if node.get('collectd')):
+ for ctx_name, nodes in ((ctx_name, nodes) for (ctx_name, nodes)
+ in contexts_nodes.items() if nodes):
+ for node in (node for node in nodes
+ if node and node.get('collectd')):
name = ".".join([node['name'], ctx_name])
self.resource_profiles.update(
- {name: ResourceProfile.make_from_node(node, timeout)}
- )
+ {name: ResourceProfile.make_from_node(node, timeout)})
def start(self):
for resource in self.resource_profiles.values():
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
index 393f60f7c..74deeecb5 100644
--- a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -166,9 +166,10 @@ class IxNextgen(object): # pragma: no cover
:return: list of paired frame sizes and weights
"""
weighted_range_pairs = []
- for size, weight in framesize.items():
- weighted_range_pairs.append(int(size.upper().replace('B', '')))
- weighted_range_pairs.append(int(weight))
+ for size, weight in ((s, w) for (s, w) in framesize.items()
+ if int(w) != 0):
+ size = int(size.upper().replace('B', ''))
+ weighted_range_pairs.append([size, size, int(weight)])
return weighted_range_pairs
def iter_over_get_lists(self, x1, x2, y2, offset=0):
@@ -339,7 +340,7 @@ class IxNextgen(object): # pragma: no cover
"percentLineRate" no used)
- Frame size: custom IMIX [1] definition; a list of packet size in
bytes and the weight. E.g.:
- [64, 10, 128, 15, 512, 5]
+ [[64, 64, 10], [128, 128, 15], [512, 512, 5]]
[1] https://en.wikipedia.org/wiki/Internet_Mix
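
For reference, the reworked helper now returns IxNetwork-style [min, max, weight] triplets and skips zero-weight frame sizes; a small sketch of the mapping (the frame-size dict is illustrative):

framesize = {'64B': '10', '128B': '15', '512B': '5', '1518B': '0'}

# Equivalent of get_weighted_range_pairs(framesize):
weighted_range_pairs = []
for size, weight in ((s, w) for (s, w) in framesize.items() if int(w) != 0):
    size = int(size.upper().replace('B', ''))
    weighted_range_pairs.append([size, size, int(weight)])

# -> [[64, 64, 10], [128, 128, 15], [512, 512, 5]]  (order follows the dict)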
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index 1357f6b26..8e9bc87e1 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -246,8 +246,9 @@ class AclApproxVnf(SampleVNF):
'packets_dropped': 2,
}
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = AclApproxSetupEnvSetupEnvHelper
-
- super(AclApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
+ super(AclApproxVnf, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index 9ceac3167..0fb310075 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -11,13 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class implementation for generic vnf implementation """
import abc
import logging
import six
+from yardstick.common import messaging
+from yardstick.common.messaging import consumer
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
from yardstick.network_services.helpers.samplevnf_helper import PortPairs
@@ -138,6 +141,70 @@ class VnfdHelper(dict):
yield port_name, port_num
+class TrafficGeneratorProducer(producer.MessagingProducer):
+ """Class implementing the message producer for traffic generators
+
+ This message producer must be instantiated in the process created
+    This message producer must be instantiated in the process created by
+    "run_traffic".
+ def __init__(self, _id):
+ super(TrafficGeneratorProducer, self).__init__(messaging.TOPIC_TG,
+ _id=_id)
+
+ def tg_method_started(self, version=1):
+ """Send a message to inform the traffic generation has started"""
+ self.send_message(
+ messaging.TG_METHOD_STARTED,
+ payloads.TrafficGeneratorPayload(version=version, iteration=0,
+ kpi={}))
+
+ def tg_method_finished(self, version=1):
+ """Send a message to inform the traffic generation has finished"""
+ self.send_message(
+ messaging.TG_METHOD_FINISHED,
+ payloads.TrafficGeneratorPayload(version=version, iteration=0,
+ kpi={}))
+
+ def tg_method_iteration(self, iteration, version=1, kpi=None):
+ """Send a message, with KPI, once an iteration has finished"""
+ kpi = {} if kpi is None else kpi
+ self.send_message(
+ messaging.TG_METHOD_ITERATION,
+ payloads.TrafficGeneratorPayload(version=version,
+ iteration=iteration, kpi=kpi))
+
+
+@six.add_metaclass(abc.ABCMeta)
+class GenericVNFEndpoint(consumer.NotificationHandler):
+ """Endpoint class for ``GenericVNFConsumer``"""
+
+ @abc.abstractmethod
+ def runner_method_start_iteration(self, ctxt, **kwargs):
+ """Endpoint when RUNNER_METHOD_START_ITERATION is received
+
+ :param ctxt: (dict) {'id': <Producer ID>}
+ :param kwargs: (dict) ``payloads.RunnerPayload`` context
+ """
+
+ @abc.abstractmethod
+ def runner_method_stop_iteration(self, ctxt, **kwargs):
+ """Endpoint when RUNNER_METHOD_STOP_ITERATION is received
+
+ :param ctxt: (dict) {'id': <Producer ID>}
+ :param kwargs: (dict) ``payloads.RunnerPayload`` context
+ """
+
+
+class GenericVNFConsumer(consumer.MessagingConsumer):
+ """MQ consumer for ``GenericVNF`` derived classes"""
+
+ def __init__(self, ctx_ids, endpoints):
+ if not isinstance(endpoints, list):
+ endpoints = [endpoints]
+ super(GenericVNFConsumer, self).__init__(messaging.TOPIC_RUNNER,
+ ctx_ids, endpoints)
+
+
@six.add_metaclass(abc.ABCMeta)
class GenericVNF(object):
"""Class providing file-like API for generic VNF implementation
@@ -150,8 +217,9 @@ class GenericVNF(object):
UPLINK = PortPairs.UPLINK
DOWNLINK = PortPairs.DOWNLINK
- def __init__(self, name, vnfd):
+ def __init__(self, name, vnfd, task_id):
self.name = name
+ self._task_id = task_id
self.vnfd_helper = VnfdHelper(vnfd)
# List of statistics we can obtain from this VNF
# - ETSI MANO 6.3.1.1 monitoring_parameter
@@ -210,12 +278,13 @@ class GenericVNF(object):
@six.add_metaclass(abc.ABCMeta)
class GenericTrafficGen(GenericVNF):
- """ Class providing file-like API for generic traffic generator """
+ """Class providing file-like API for generic traffic generator"""
- def __init__(self, name, vnfd):
- super(GenericTrafficGen, self).__init__(name, vnfd)
+ def __init__(self, name, vnfd, task_id):
+ super(GenericTrafficGen, self).__init__(name, vnfd, task_id)
self.runs_traffic = True
self.traffic_finished = False
+ self._mq_producer = None
@abc.abstractmethod
def run_traffic(self, traffic_profile):
@@ -286,3 +355,16 @@ class GenericTrafficGen(GenericVNF):
:return: True/False
"""
pass
+
+    @staticmethod
+    def _setup_mq_producer(producer_id):
+        """Set up the TG MQ producer to send messages between processes
+
+        :return: (derived class from ``MessagingProducer``) MQ producer object
+        """
+        return TrafficGeneratorProducer(producer_id)
+
+ def get_mq_producer_id(self):
+ """Return the MQ producer ID if initialized"""
+ if self._mq_producer:
+ return self._mq_producer.id
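
A rough sketch of how the pieces above fit together: a traffic generator implements ``GenericVNFEndpoint`` to receive runner messages on TOPIC_RUNNER, while its traffic process reports iterations back through ``TrafficGeneratorProducer`` (class and variable names are illustrative, and a reachable MQ transport is assumed):

from yardstick.network_services.vnf_generic.vnf import base as vnf_base


class _Endpoint(vnf_base.GenericVNFEndpoint):
    def runner_method_start_iteration(self, ctxt, **kwargs):
        # Only accept messages from the runner IDs this endpoint listens to.
        if ctxt['id'] in self._ctx_ids:
            self._queue.put(('start', kwargs))

    def runner_method_stop_iteration(self, ctxt, **kwargs):
        if ctxt['id'] in self._ctx_ids:
            self._queue.put(('stop', kwargs))


# task_id identifies the runner whose messages this endpoint accepts:
# endpoint = _Endpoint(uuid.uuid1().int, [task_id], some_queue)
# consumer = vnf_base.GenericVNFConsumer([task_id], endpoint)
# consumer.start_rpc_server()
#
# Inside the process spawned by "run_traffic", KPIs go the other way:
# producer = vnf_base.TrafficGeneratorProducer(uuid.uuid1().int)
# producer.tg_method_iteration(1, kpi={'rx_throughput_fps': 1000.0})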
diff --git a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
index bfe628f09..14f1e2e97 100644
--- a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
@@ -85,12 +85,12 @@ class CgnaptApproxVnf(SampleVNF):
"packets_dropped": 4,
}
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = CgnaptApproxSetupEnvHelper
-
- super(CgnaptApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(CgnaptApproxVnf, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
def _vnf_up_post(self):
super(CgnaptApproxVnf, self)._vnf_up_post()
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index 6d28f4750..3241719e8 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -969,7 +969,7 @@ class ProxResourceHelper(ClientResourceHelper):
self._test_type = self.setup_helper.find_in_section('global', 'name', None)
return self._test_type
- def run_traffic(self, traffic_profile):
+ def run_traffic(self, traffic_profile, *args):
self._queue.cancel_join_thread()
self.lower = 0.0
self.upper = 100.0
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index bc810ecb3..839f30967 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -34,7 +34,8 @@ class ProxApproxVnf(SampleVNF):
VNF_PROMPT = "PROX started"
LUA_PARAMETER_NAME = "sut"
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = ProxDpdkVnfSetupEnvHelper
@@ -45,8 +46,8 @@ class ProxApproxVnf(SampleVNF):
self.prev_packets_sent = 0
self.prev_tsc = 0
self.tsc_hz = 0
- super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(ProxApproxVnf, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
def _vnf_up_post(self):
self.resource_helper.up_post()
diff --git a/yardstick/network_services/vnf_generic/vnf/router_vnf.py b/yardstick/network_services/vnf_generic/vnf/router_vnf.py
index 90b7b215e..e99de9cb3 100644
--- a/yardstick/network_services/vnf_generic/vnf/router_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/router_vnf.py
@@ -34,7 +34,8 @@ class RouterVNF(SampleVNF):
WAIT_TIME = 1
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = DpdkVnfSetupEnvHelper
@@ -42,7 +43,8 @@ class RouterVNF(SampleVNF):
vnfd['mgmt-interface'].pop("pkey", "")
vnfd['mgmt-interface']['password'] = 'password'
- super(RouterVNF, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
+ super(RouterVNF, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
def instantiate(self, scenario_cfg, context_cfg):
self.scenario_helper.scenario_cfg = scenario_cfg
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 1ee71aa25..3ef7c33c5 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -14,14 +14,15 @@
import logging
from multiprocessing import Queue, Value, Process
-
import os
import posixpath
import re
-import six
+import uuid
import subprocess
import time
+import six
+
from trex_stl_lib.trex_stl_client import LoggerApi
from trex_stl_lib.trex_stl_client import STLClient
from trex_stl_lib.trex_stl_exceptions import STLError
@@ -408,12 +409,13 @@ class ClientResourceHelper(ResourceHelper):
time.sleep(self.QUEUE_WAIT_TIME)
self._queue.put(samples)
- def run_traffic(self, traffic_profile):
+ def run_traffic(self, traffic_profile, mq_producer):
# if we don't do this we can hang waiting for the queue to drain
# have to do this in the subprocess
self._queue.cancel_join_thread()
# fixme: fix passing correct trex config file,
# instead of searching the default path
+ mq_producer.tg_method_started()
try:
self._build_ports()
self.client = self._connect()
@@ -421,8 +423,11 @@ class ClientResourceHelper(ResourceHelper):
self.client.remove_all_streams(self.all_ports) # remove all streams
traffic_profile.register_generator(self)
+ iteration_index = 0
while self._terminated.value == 0:
+ iteration_index += 1
self._run_traffic_once(traffic_profile)
+ mq_producer.tg_method_iteration(iteration_index)
self.client.stop(self.all_ports)
self.client.disconnect()
@@ -433,6 +438,8 @@ class ClientResourceHelper(ResourceHelper):
return # return if trex/tg server is stopped.
raise
+ mq_producer.tg_method_finished()
+
def terminate(self):
self._terminated.value = 1 # stop client
@@ -612,6 +619,7 @@ class ScenarioHelper(object):
test_timeout = self.options.get('timeout', constants.DEFAULT_VNF_TIMEOUT)
return test_duration if test_duration > test_timeout else test_timeout
+
class SampleVNF(GenericVNF):
""" Class providing file-like API for generic VNF implementation """
@@ -621,8 +629,9 @@ class SampleVNF(GenericVNF):
APP_NAME = "SampleVNF"
# we run the VNF interactively, so the ssh command will timeout after this long
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
- super(SampleVNF, self).__init__(name, vnfd)
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ super(SampleVNF, self).__init__(name, vnfd, task_id)
self.bin_path = get_nsb_option('bin_path', '')
self.scenario_helper = ScenarioHelper(self.name)
@@ -853,8 +862,9 @@ class SampleVNFTrafficGen(GenericTrafficGen):
APP_NAME = 'Sample'
RUN_WAIT = 1
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
- super(SampleVNFTrafficGen, self).__init__(name, vnfd)
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
+ super(SampleVNFTrafficGen, self).__init__(name, vnfd, task_id)
self.bin_path = get_nsb_option('bin_path', '')
self.scenario_helper = ScenarioHelper(self.name)
@@ -911,12 +921,13 @@ class SampleVNFTrafficGen(GenericTrafficGen):
LOG.info("%s TG Server is up and running.", self.APP_NAME)
return self._tg_process.exitcode
- def _traffic_runner(self, traffic_profile):
+ def _traffic_runner(self, traffic_profile, mq_id):
# always drop connections first thing in new processes
# so we don't get paramiko errors
self.ssh_helper.drop_connection()
LOG.info("Starting %s client...", self.APP_NAME)
- self.resource_helper.run_traffic(traffic_profile)
+ self._mq_producer = self._setup_mq_producer(mq_id)
+ self.resource_helper.run_traffic(traffic_profile, self._mq_producer)
def run_traffic(self, traffic_profile):
""" Generate traffic on the wire according to the given params.
@@ -926,10 +937,12 @@ class SampleVNFTrafficGen(GenericTrafficGen):
:param traffic_profile:
:return: True/False
"""
- name = "{}-{}-{}-{}".format(self.name, self.APP_NAME, traffic_profile.__class__.__name__,
+ name = '{}-{}-{}-{}'.format(self.name, self.APP_NAME,
+ traffic_profile.__class__.__name__,
os.getpid())
- self._traffic_process = Process(name=name, target=self._traffic_runner,
- args=(traffic_profile,))
+ self._traffic_process = Process(
+ name=name, target=self._traffic_runner,
+ args=(traffic_profile, uuid.uuid1().int))
self._traffic_process.start()
# Wait for traffic process to start
while self.resource_helper.client_started.value == 0:
@@ -938,8 +951,6 @@ class SampleVNFTrafficGen(GenericTrafficGen):
if not self._traffic_process.is_alive():
break
- return self._traffic_process.is_alive()
-
def collect_kpi(self):
# check if the tg processes have exited
physical_node = Context.get_physical_node_from_server(
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
index 102c66f78..e0fc47dbf 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py
@@ -12,14 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
+import collections
import csv
import glob
import logging
import os
import shutil
-
-from collections import OrderedDict
import subprocess
from yardstick.common import utils
@@ -65,7 +63,7 @@ class IxLoadResourceHelper(ClientResourceHelper):
RESULTS_MOUNT = "/mnt/Results"
- KPI_LIST = OrderedDict((
+ KPI_LIST = collections.OrderedDict((
('http_throughput', 'HTTP Total Throughput (Kbps)'),
('simulated_users', 'HTTP Simulated Users'),
('concurrent_connections', 'HTTP Concurrent Connections'),
@@ -75,7 +73,8 @@ class IxLoadResourceHelper(ClientResourceHelper):
def __init__(self, setup_helper):
super(IxLoadResourceHelper, self).__init__(setup_helper)
- self.result = OrderedDict((key, ResourceDataHelper()) for key in self.KPI_LIST)
+ self.result = collections.OrderedDict((key, ResourceDataHelper())
+ for key in self.KPI_LIST)
self.resource_file_name = ''
self.data = None
@@ -124,12 +123,13 @@ class IxLoadResourceHelper(ClientResourceHelper):
class IxLoadTrafficGen(SampleVNFTrafficGen):
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if resource_helper_type is None:
resource_helper_type = IxLoadResourceHelper
- super(IxLoadTrafficGen, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(IxLoadTrafficGen, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
self._result = {}
def run_traffic(self, traffic_profile):
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ping.py b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
index a989543f5..a3b5afa39 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_ping.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
@@ -71,7 +71,7 @@ class PingResourceHelper(ClientResourceHelper):
self._queue = Queue()
self._parser = PingParser(self._queue)
- def run_traffic(self, traffic_profile):
+ def run_traffic(self, traffic_profile, *args):
# drop the connection in order to force a new one
self.ssh_helper.drop_connection()
@@ -103,14 +103,14 @@ class PingTrafficGen(SampleVNFTrafficGen):
APP_NAME = 'Ping'
RUN_WAIT = 4
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = PingSetupEnvHelper
if resource_helper_type is None:
resource_helper_type = PingResourceHelper
-
- super(PingTrafficGen, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(PingTrafficGen, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
self._result = {}
def _check_status(self):
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
index 282dd92c5..854319a21 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py
@@ -30,9 +30,11 @@ class ProxTrafficGen(SampleVNFTrafficGen):
LUA_PARAMETER_NAME = "gen"
WAIT_TIME = 1
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
# don't call superclass, use custom wrapper of ProxApproxVnf
- self._vnf_wrapper = ProxApproxVnf(name, vnfd, setup_env_helper_type, resource_helper_type)
+ self._vnf_wrapper = ProxApproxVnf(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
self.bin_path = get_nsb_option('bin_path', '')
self.name = self._vnf_wrapper.name
self.ssh_helper = self._vnf_wrapper.ssh_helper
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index a1f9fbeb4..4d3bc2ce5 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -102,7 +102,7 @@ class IxiaResourceHelper(ClientResourceHelper):
self.client.assign_ports()
self.client.create_traffic_model()
- def run_traffic(self, traffic_profile):
+ def run_traffic(self, traffic_profile, *args):
if self._terminated.value:
return
@@ -157,12 +157,12 @@ class IxiaTrafficGen(SampleVNFTrafficGen):
APP_NAME = 'Ixia'
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if resource_helper_type is None:
resource_helper_type = IxiaResourceHelper
-
- super(IxiaTrafficGen, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(IxiaTrafficGen, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
self._ixia_traffic_gen = None
self.ixia_file_name = ''
self.vnf_port_pairs = []
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index 07cec6745..cdbb41485 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -64,9 +64,9 @@ class TrexTrafficGenRFC(tg_trex.TrexTrafficGen):
traffic for rfc2544 testcase.
"""
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if resource_helper_type is None:
resource_helper_type = TrexRfcResourceHelper
-
- super(TrexTrafficGenRFC, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(TrexTrafficGenRFC, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
index 80b42e22d..58b73488b 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -198,15 +198,14 @@ class TrexTrafficGen(SampleVNFTrafficGen):
APP_NAME = 'TRex'
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if resource_helper_type is None:
resource_helper_type = TrexResourceHelper
-
if setup_env_helper_type is None:
setup_env_helper_type = TrexDpdkVnfSetupEnvHelper
-
- super(TrexTrafficGen, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(TrexTrafficGen, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
def _check_status(self):
return self.resource_helper.check_status()
diff --git a/yardstick/network_services/vnf_generic/vnf/udp_replay.py b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
index fa92744d8..e3fde1a79 100644
--- a/yardstick/network_services/vnf_generic/vnf/udp_replay.py
+++ b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
@@ -60,15 +60,14 @@ class UdpReplayApproxVnf(SampleVNF):
PIPELINE_COMMAND = REPLAY_PIPELINE_COMMAND
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if resource_helper_type is None:
resource_helper_type = UdpReplayResourceHelper
-
if setup_env_helper_type is None:
setup_env_helper_type = UdpReplaySetupEnvHelper
-
- super(UdpReplayApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
- resource_helper_type)
+ super(UdpReplayApproxVnf, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
def _build_pipeline_kwargs(self):
ports = self.vnfd_helper.port_pairs.all_ports
diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
index 432f30a0c..a1523dee3 100644
--- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
@@ -52,8 +52,9 @@ class FWApproxVnf(SampleVNF):
'packets_dropped': 3,
}
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = FWApproxSetupEnvHelper
-
- super(FWApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
+ super(FWApproxVnf, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index 57ea2eee3..b7cf8b35e 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -239,7 +239,7 @@ class ConfigCreate(object):
class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
- APP_NAME = 'vPE'
+ APP_NAME = 'vPE_vnf'
CFG_CONFIG = "/tmp/vpe_config"
CFG_SCRIPT = "/tmp/vpe_script"
TM_CONFIG = "/tmp/full_tm_profile_10G.cfg"
@@ -286,16 +286,17 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper):
class VpeApproxVnf(SampleVNF):
""" This class handles vPE VNF model-driver definitions """
- APP_NAME = 'vPE'
+ APP_NAME = 'vPE_vnf'
APP_WORD = 'vpe'
COLLECT_KPI = VPE_COLLECT_KPI
WAIT_TIME = 20
- def __init__(self, name, vnfd, setup_env_helper_type=None, resource_helper_type=None):
+ def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
+ resource_helper_type=None):
if setup_env_helper_type is None:
setup_env_helper_type = VpeApproxSetupEnvHelper
-
- super(VpeApproxVnf, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type)
+ super(VpeApproxVnf, self).__init__(
+ name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
def get_stats(self, *args, **kwargs):
raise NotImplementedError
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 8ccb98853..8d9fc41c9 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -8,25 +8,49 @@
##############################################################################
import copy
+import re
+from oslo_serialization import jsonutils
+import six
+
+from yardstick.common import constants
from yardstick.common import exceptions
-from yardstick.common import utils
from yardstick.common import kubernetes_utils as k8s_utils
+from yardstick.common import utils
class ContainerObject(object):
SSH_MOUNT_PATH = '/tmp/.ssh/'
IMAGE_DEFAULT = 'openretriever/yardstick'
- COMMAND_DEFAULT = '/bin/bash'
+ COMMAND_DEFAULT = ['/bin/bash', '-c']
+ RESOURCES = ('requests', 'limits')
+ PORT_OPTIONS = ('containerPort', 'hostIP', 'hostPort', 'name', 'protocol')
+ IMAGE_PULL_POLICY = ('Always', 'IfNotPresent', 'Never')
def __init__(self, name, ssh_key, **kwargs):
self._name = name
self._ssh_key = ssh_key
self._image = kwargs.get('image', self.IMAGE_DEFAULT)
- self._command = [kwargs.get('command', self.COMMAND_DEFAULT)]
- self._args = kwargs.get('args', [])
+ self._command = self._parse_commands(
+ kwargs.get('command', self.COMMAND_DEFAULT))
+ self._args = self._parse_commands(kwargs.get('args', []))
self._volume_mounts = kwargs.get('volumeMounts', [])
+ self._security_context = kwargs.get('securityContext')
+ self._env = kwargs.get('env', [])
+ self._resources = kwargs.get('resources', {})
+ self._ports = kwargs.get('ports', [])
+ self._image_pull_policy = kwargs.get('imagePullPolicy')
+ self._tty = kwargs.get('tty')
+ self._stdin = kwargs.get('stdin')
+
+ @staticmethod
+ def _parse_commands(command):
+ if isinstance(command, six.string_types):
+ return [command]
+ elif isinstance(command, list):
+ return command
+ raise exceptions.KubernetesContainerCommandType()
def _create_volume_mounts(self):
"""Return all "volumeMounts" items per container"""
@@ -47,24 +71,63 @@ class ContainerObject(object):
def get_container_item(self):
"""Create a "container" item"""
container_name = '{}-container'.format(self._name)
- return {'args': self._args,
- 'command': self._command,
- 'image': self._image,
- 'name': container_name,
- 'volumeMounts': self._create_volume_mounts()}
-
-
-class KubernetesObject(object):
+ container = {'args': self._args,
+ 'command': self._command,
+ 'image': self._image,
+ 'name': container_name,
+ 'volumeMounts': self._create_volume_mounts()}
+ if self._security_context:
+ container['securityContext'] = self._security_context
+ if self._env:
+ container['env'] = []
+ for env in self._env:
+ container['env'].append({'name': env['name'],
+ 'value': env['value']})
+ if self._ports:
+ container['ports'] = []
+ for port in self._ports:
+ if 'containerPort' not in port.keys():
+ raise exceptions.KubernetesContainerPortNotDefined(
+ port=port)
+ _port = {port_option: value for port_option, value
+ in port.items() if port_option in self.PORT_OPTIONS}
+ container['ports'].append(_port)
+ if self._resources:
+ container['resources'] = {}
+ for res in (res for res in self._resources if
+ res in self.RESOURCES):
+ container['resources'][res] = self._resources[res]
+ if self._image_pull_policy:
+ if self._image_pull_policy not in self.IMAGE_PULL_POLICY:
+ raise exceptions.KubernetesContainerWrongImagePullPolicy()
+ container['imagePullPolicy'] = self._image_pull_policy
+ if self._stdin is not None:
+ container['stdin'] = self._stdin
+ if self._tty is not None:
+ container['tty'] = self._tty
+ return container
+
+
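
A hedged sketch of the per-container configuration this class now accepts and how it maps onto a "container" item (image, command and values below are illustrative):

from yardstick.orchestrator import kubernetes as orchestrator_kubernetes

container_cfg = {
    'image': 'openretriever/yardstick',
    'command': '/bin/bash -c "sleep 3600"',  # a plain string is wrapped in a list
    'env': [{'name': 'FOO', 'value': 'bar'}],
    'ports': [{'containerPort': 22, 'protocol': 'TCP'}],
    'resources': {'requests': {'cpu': '2'}, 'limits': {'cpu': '2'}},
    'imagePullPolicy': 'IfNotPresent',
    'tty': True,
    'stdin': True,
}
container = orchestrator_kubernetes.ContainerObject(
    'host', 'yardstick_key', **container_cfg)
item = container.get_container_item()   # dict ready for the RC template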
+class ReplicationControllerObject(object):
SSHKEY_DEFAULT = 'yardstick_key'
+ RESTART_POLICY = ('Always', 'OnFailure', 'Never')
+ TOLERATIONS_KEYS = ('key', 'value', 'effect', 'operator')
def __init__(self, name, **kwargs):
- super(KubernetesObject, self).__init__()
+ super(ReplicationControllerObject, self).__init__()
parameters = copy.deepcopy(kwargs)
self.name = name
self.node_selector = parameters.pop('nodeSelector', {})
self.ssh_key = parameters.pop('ssh_key', self.SSHKEY_DEFAULT)
self._volumes = parameters.pop('volumes', [])
+ self._security_context = parameters.pop('securityContext', None)
+ self._networks = parameters.pop('networks', [])
+ self._tolerations = parameters.pop('tolerations', [])
+ self._restart_policy = parameters.pop('restartPolicy', 'Always')
+ if self._restart_policy not in self.RESTART_POLICY:
+ raise exceptions.KubernetesWrongRestartPolicy(
+ rpolicy=self._restart_policy)
containers = parameters.pop('containers', None)
if containers:
@@ -85,14 +148,14 @@ class KubernetesObject(object):
"replicas": 1,
"template": {
"metadata": {
- "labels": {
- "app": name
- }
+ "labels": {"app": name}
},
"spec": {
"containers": [],
"volumes": [],
- "nodeSelector": {}
+ "nodeSelector": {},
+ "restartPolicy": self._restart_policy,
+ "tolerations": []
}
}
}
@@ -102,6 +165,13 @@ class KubernetesObject(object):
self._add_containers()
self._add_node_selector()
self._add_volumes()
+ self._add_security_context()
+ self._add_networks()
+ self._add_tolerations()
+
+ @property
+ def networks(self):
+ return self._networks
def get_template(self):
return self.template
@@ -153,34 +223,225 @@ class KubernetesObject(object):
return {'name': name,
type_name: type_data}
+ def _add_security_context(self):
+ if self._security_context:
+ utils.set_dict_value(self.template,
+ 'spec.template.spec.securityContext',
+ self._security_context)
+
+ def _add_networks(self):
+ networks = []
+ for net in self._networks:
+ networks.append({'name': net})
+
+ if not networks:
+ return
+
+ annotations = {'networks': jsonutils.dumps(networks)}
+ utils.set_dict_value(self.template,
+ 'spec.template.metadata.annotations',
+ annotations)
+
+ def _add_tolerations(self):
+ tolerations = []
+ for tol in self._tolerations:
+ tolerations.append({k: tol[k] for k in tol
+ if k in self.TOLERATIONS_KEYS})
+
+ tolerations = ([{'operator': 'Exists'}] if not tolerations
+ else tolerations)
+ utils.set_dict_value(self.template,
+ 'spec.template.spec.tolerations',
+ tolerations)
-class ServiceObject(object):
- def __init__(self, name):
- self.name = '{}-service'.format(name)
+class ServiceNodePortObject(object):
+
+ MANDATORY_PARAMETERS = {'port', 'name'}
+ NAME_REGEX = re.compile(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')
+
+ def __init__(self, name, **kwargs):
+ """Service kind "NodePort" object
+
+ :param name: (string) name of the Service
+ :param kwargs: (dict) node_ports -> (list) port, name, targetPort,
+ nodePort
+ """
+ self._name = '{}-service'.format(name)
self.template = {
- 'metadata': {
- 'name': '{}-service'.format(name)
- },
+ 'metadata': {'name': '{}-service'.format(name)},
'spec': {
'type': 'NodePort',
- 'ports': [
- {
- 'port': 22,
- 'protocol': 'TCP'
- }
- ],
- 'selector': {
- 'app': name
- }
+ 'ports': [],
+ 'selector': {'app': name}
}
}
+ self._add_port(22, 'ssh', protocol='TCP')
+ node_ports = copy.deepcopy(kwargs.get('node_ports', []))
+ for port in node_ports:
+ if not self.MANDATORY_PARAMETERS.issubset(port.keys()):
+ missing_parameters = ', '.join(
+ str(param) for param in
+ (self.MANDATORY_PARAMETERS - set(port.keys())))
+ raise exceptions.KubernetesServiceObjectDefinitionError(
+ missing_parameters=missing_parameters)
+ port_number = port.pop('port')
+ name = port.pop('name')
+ if not self.NAME_REGEX.match(name):
+ raise exceptions.KubernetesServiceObjectNameError(name=name)
+ self._add_port(port_number, name, **port)
+
+ def _add_port(self, port, name, protocol=None, targetPort=None,
+ nodePort=None):
+ _port = {'port': port,
+ 'name': name}
+ if protocol:
+ _port['protocol'] = protocol
+ if targetPort:
+ _port['targetPort'] = targetPort
+ if nodePort:
+ _port['nodePort'] = nodePort
+ self.template['spec']['ports'].append(_port)
+
def create(self):
k8s_utils.create_service(self.template)
def delete(self):
- k8s_utils.delete_service(self.name)
+ k8s_utils.delete_service(self._name)
+
+
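
A hedged sketch of the ``node_ports`` input this object consumes: port 22 is always added, and every extra entry must at least define ``port`` and ``name`` (values are illustrative):

from yardstick.orchestrator import kubernetes as orchestrator_kubernetes

server_cfg = {
    'node_ports': [
        {'port': 50000, 'name': 'trexclient'},
        {'port': 50001, 'name': 'trexserver',
         'protocol': 'TCP', 'targetPort': 50001, 'nodePort': 30001},
    ]
}

service = orchestrator_kubernetes.ServiceNodePortObject('host', **server_cfg)
# service.template['spec']['ports'] now holds the implicit SSH port plus the
# two NodePort entries above; create() pushes the Service to the cluster.
# service.create()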
+class CustomResourceDefinitionObject(object):
+
+ MANDATORY_PARAMETERS = {'name'}
+
+ def __init__(self, ctx_name, **kwargs):
+ if not self.MANDATORY_PARAMETERS.issubset(kwargs):
+ missing_parameters = ', '.join(
+ str(param) for param in
+ (self.MANDATORY_PARAMETERS - set(kwargs)))
+ raise exceptions.KubernetesCRDObjectDefinitionError(
+ missing_parameters=missing_parameters)
+
+ singular = kwargs['name']
+ plural = singular + 's'
+ kind = singular.title()
+ version = kwargs.get('version', 'v1')
+ scope = kwargs.get('scope', constants.SCOPE_NAMESPACED)
+ group = ctx_name + '.com'
+ self._name = metadata_name = plural + '.' + group
+
+ self._template = {
+ 'metadata': {
+ 'name': metadata_name
+ },
+ 'spec': {
+ 'group': group,
+ 'version': version,
+ 'scope': scope,
+ 'names': {'plural': plural,
+ 'singular': singular,
+ 'kind': kind}
+ }
+ }
+
+ def create(self):
+ k8s_utils.create_custom_resource_definition(self._template)
+
+ def delete(self):
+ k8s_utils.delete_custom_resource_definition(self._name)
+
+
+class NetworkObject(object):
+
+ MANDATORY_PARAMETERS = {'plugin', 'args'}
+ KIND = 'Network'
+
+ def __init__(self, name, **kwargs):
+ if not self.MANDATORY_PARAMETERS.issubset(kwargs):
+ missing_parameters = ', '.join(
+ str(param) for param in
+ (self.MANDATORY_PARAMETERS - set(kwargs)))
+ raise exceptions.KubernetesNetworkObjectDefinitionError(
+ missing_parameters=missing_parameters)
+
+ self._name = name
+ self._plugin = kwargs['plugin']
+ self._args = kwargs['args']
+ self._crd = None
+ self._template = None
+ self._group = None
+ self._version = None
+ self._plural = None
+ self._scope = None
+
+ @property
+ def crd(self):
+ if self._crd:
+ return self._crd
+ crd = k8s_utils.get_custom_resource_definition(self.KIND)
+ if not crd:
+ raise exceptions.KubernetesNetworkObjectKindMissing()
+ self._crd = crd
+ return self._crd
+
+ @property
+ def group(self):
+ if self._group:
+ return self._group
+ self._group = self.crd.spec.group
+ return self._group
+
+ @property
+ def version(self):
+ if self._version:
+ return self._version
+ self._version = self.crd.spec.version
+ return self._version
+
+ @property
+ def plural(self):
+ if self._plural:
+ return self._plural
+ self._plural = self.crd.spec.names.plural
+ return self._plural
+
+ @property
+ def scope(self):
+ if self._scope:
+ return self._scope
+ self._scope = self.crd.spec.scope
+ return self._scope
+
+ @property
+ def template(self):
+ """"Network" object template
+
+        This template can only be rendered once the CRD "Network" has been
+        created in Kubernetes, so rendering must be delayed until that CRD
+        exists.
+ """
+ if self._template:
+ return self._template
+
+ self._template = {
+ 'apiVersion': '{}/{}'.format(self.group, self.version),
+ 'kind': self.KIND,
+ 'metadata': {
+ 'name': self._name
+ },
+ 'plugin': self._plugin,
+ 'args': self._args
+ }
+ return self._template
+
+ def create(self):
+ k8s_utils.create_network(self.scope, self.group, self.version,
+ self.plural, self.template)
+
+ def delete(self):
+ k8s_utils.delete_network(self.scope, self.group, self.version,
+ self.plural, self._name)
class KubernetesTemplate(object):
@@ -193,16 +454,21 @@ class KubernetesTemplate(object):
"""
context_cfg = copy.deepcopy(context_cfg)
servers_cfg = context_cfg.pop('servers', {})
+ crd_cfg = context_cfg.pop('custom_resources', [])
+ networks_cfg = context_cfg.pop('networks', {})
self.name = name
self.ssh_key = '{}-key'.format(name)
- self.rcs = [self._get_rc_name(rc) for rc in servers_cfg]
- self.k8s_objs = [KubernetesObject(self._get_rc_name(rc),
- ssh_key=self.ssh_key,
- **cfg)
- for rc, cfg in servers_cfg.items()]
- self.service_objs = [ServiceObject(s) for s in self.rcs]
-
+ self.rcs = {self._get_rc_name(rc): cfg
+ for rc, cfg in servers_cfg.items()}
+ self.rc_objs = [ReplicationControllerObject(
+ rc, ssh_key=self.ssh_key, **cfg) for rc, cfg in self.rcs.items()]
+ self.service_objs = [ServiceNodePortObject(rc, **cfg)
+ for rc, cfg in self.rcs.items()]
+ self.crd = [CustomResourceDefinitionObject(self.name, **crd)
+ for crd in crd_cfg]
+ self.network_objs = [NetworkObject(net_name, **net_data)
+ for net_name, net_data in networks_cfg.items()]
self.pods = []
def _get_rc_name(self, rc_name):
@@ -214,3 +480,8 @@ class KubernetesTemplate(object):
if p.metadata.name.startswith(s)]
return self.pods
+
+ def get_rc_by_name(self, rc_name):
+ """Returns a ``ReplicationControllerObject``, searching by name"""
+ for rc in (rc for rc in self.rc_objs if rc.name == rc_name):
+ return rc
diff --git a/yardstick/tests/functional/common/messaging/test_messaging.py b/yardstick/tests/functional/common/messaging/test_messaging.py
index 99874343b..f3e31e718 100644
--- a/yardstick/tests/functional/common/messaging/test_messaging.py
+++ b/yardstick/tests/functional/common/messaging/test_messaging.py
@@ -32,25 +32,25 @@ class DummyPayload(payloads.Payload):
class DummyEndpoint(consumer.NotificationHandler):
def info(self, ctxt, **kwargs):
- if ctxt['pid'] in self._ctx_pids:
- self._queue.put('ID {}, data: {}, pid: {}'.format(
- self._id, kwargs['data'], ctxt['pid']))
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put('Nr {}, data: {}, id: {}'.format(
+ self._id, kwargs['data'], ctxt['id']))
class DummyConsumer(consumer.MessagingConsumer):
- def __init__(self, _id, ctx_pids, queue):
+ def __init__(self, _id, ctx_ids, queue):
self._id = _id
- endpoints = [DummyEndpoint(_id, ctx_pids, queue)]
- super(DummyConsumer, self).__init__(TOPIC, ctx_pids, endpoints)
+ endpoints = [DummyEndpoint(_id, ctx_ids, queue)]
+ super(DummyConsumer, self).__init__(TOPIC, ctx_ids, endpoints)
class DummyProducer(producer.MessagingProducer):
pass
-def _run_consumer(_id, ctx_pids, queue):
- _consumer = DummyConsumer(_id, ctx_pids, queue)
+def _run_consumer(_id, ctx_ids, queue):
+ _consumer = DummyConsumer(_id, ctx_ids, queue)
_consumer.start_rpc_server()
_consumer.wait()
@@ -67,8 +67,8 @@ class MessagingTestCase(base.BaseFunctionalTestCase):
num_consumers = 10
ctx_1 = 100001
ctx_2 = 100002
- producers = [DummyProducer(TOPIC, pid=ctx_1),
- DummyProducer(TOPIC, pid=ctx_2)]
+ producers = [DummyProducer(TOPIC, _id=ctx_1),
+ DummyProducer(TOPIC, _id=ctx_2)]
processes = []
for i in range(num_consumers):
@@ -91,7 +91,7 @@ class MessagingTestCase(base.BaseFunctionalTestCase):
output.append(output_queue.get(True, 1))
self.assertEqual(num_consumers * 4, len(output))
- msg_template = 'ID {}, data: {}, pid: {}'
+ msg_template = 'Nr {}, data: {}, id: {}'
for i in range(num_consumers):
for ctx in [ctx_1, ctx_2]:
for message in ['message 0', 'message 1']:
diff --git a/yardstick/tests/functional/common/test_utils.py b/yardstick/tests/functional/common/test_utils.py
index b5333bbde..b9f1f773a 100644
--- a/yardstick/tests/functional/common/test_utils.py
+++ b/yardstick/tests/functional/common/test_utils.py
@@ -12,8 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import multiprocessing
import unittest
+import socket
import sys
+import time
from yardstick.common import utils
@@ -32,3 +35,38 @@ class ImportModulesFromPackageTestCase(unittest.TestCase):
library_obj = getattr(module_obj, library_name)
class_obj = getattr(library_obj, class_name)
self.assertEqual(class_name, class_obj().__class__.__name__)
+
+
+class SendSocketCommandTestCase(unittest.TestCase):
+
+ @staticmethod
+ def _run_socket_server(port):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('localhost', port))
+ sock.listen(1)
+ conn = None
+ while not conn:
+ conn, _ = sock.accept()
+ sock.close()
+
+ @staticmethod
+ def _terminate_server(socket_server):
+ # Wait until the socket server closes the open port.
+ time.sleep(1)
+ if socket_server and socket_server.is_alive():
+ socket_server.terminate()
+
+ def test_send_command(self):
+ port = 47001
+
+        socket_server = multiprocessing.Process(
+            name='run_socket_server',
+            target=SendSocketCommandTestCase._run_socket_server,
+            args=(port, ))
+        socket_server.start()
+
+ self.addCleanup(self._terminate_server, socket_server)
+
+ # Wait until the socket is open.
+ time.sleep(0.5)
+ self.assertEqual(
+ 0, utils.send_socket_command('localhost', port, 'test_command'))
diff --git a/yardstick/tests/functional/network_services/__init__.py b/yardstick/tests/functional/network_services/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/functional/network_services/__init__.py
diff --git a/yardstick/tests/functional/network_services/vnf_generic/__init__.py b/yardstick/tests/functional/network_services/vnf_generic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/functional/network_services/vnf_generic/__init__.py
diff --git a/yardstick/tests/functional/network_services/vnf_generic/vnf/__init__.py b/yardstick/tests/functional/network_services/vnf_generic/vnf/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/functional/network_services/vnf_generic/vnf/__init__.py
diff --git a/yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py b/yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py
new file mode 100644
index 000000000..e57f8f51c
--- /dev/null
+++ b/yardstick/tests/functional/network_services/vnf_generic/vnf/test_base.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import multiprocessing
+import time
+import uuid
+
+import mock
+
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
+from yardstick.network_services.vnf_generic.vnf import base as vnf_base
+from yardstick.tests.functional import base as ft_base
+
+
+class _TrafficGenMQConsumer(vnf_base.GenericTrafficGen,
+ vnf_base.GenericVNFEndpoint):
+
+ def __init__(self, name, vnfd, task_id):
+ vnf_base.GenericTrafficGen.__init__(self, name, vnfd, task_id)
+ self.queue = multiprocessing.Queue()
+ self._id = uuid.uuid1().int
+ vnf_base.GenericVNFEndpoint.__init__(self, self._id, [task_id],
+ self.queue)
+ self._consumer = vnf_base.GenericVNFConsumer([task_id], self)
+ self._consumer.start_rpc_server()
+
+ def run_traffic(self, *args):
+ pass
+
+ def terminate(self):
+ pass
+
+ def collect_kpi(self):
+ pass
+
+ def instantiate(self, *args):
+ pass
+
+ def scale(self, flavor=''):
+ pass
+
+ def runner_method_start_iteration(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'action': messaging.RUNNER_METHOD_START_ITERATION,
+ 'payload': payloads.RunnerPayload.dict_to_obj(kwargs)})
+
+ def runner_method_stop_iteration(self, ctxt, **kwargs):
+ if ctxt['id'] in self._ctx_ids:
+ self._queue.put(
+ {'action': messaging.RUNNER_METHOD_STOP_ITERATION,
+ 'payload': payloads.RunnerPayload.dict_to_obj(kwargs)})
+
+
+class _DummyProducer(producer.MessagingProducer):
+ pass
+
+
+class GenericVNFMQConsumerTestCase(ft_base.BaseFunctionalTestCase):
+
+ def test_fistro(self):
+ vnfd = {'benchmark': {'kpi': mock.ANY},
+ 'vdu': [{'external-interface': 'ext_int'}]
+ }
+ task_id = uuid.uuid1().int
+ tg_obj = _TrafficGenMQConsumer('name_tg', vnfd, task_id)
+ producer = _DummyProducer(messaging.TOPIC_RUNNER, task_id)
+
+ num_messages = 10
+ for i in range(num_messages):
+ pload = payloads.RunnerPayload(version=10, data=i)
+ for method in (messaging.RUNNER_METHOD_START_ITERATION,
+ messaging.RUNNER_METHOD_STOP_ITERATION):
+ producer.send_message(method, pload)
+
+ time.sleep(0.5) # Let consumers attend the calls
+ output = []
+ while not tg_obj.queue.empty():
+ data = tg_obj.queue.get(True, 1)
+ data_dict = {'action': data['action'],
+ 'payload': data['payload'].obj_to_dict()}
+ output.append(data_dict)
+
+ self.assertEqual(num_messages * 2, len(output))
+ for i in range(num_messages):
+ pload = payloads.RunnerPayload(version=10, data=i).obj_to_dict()
+ for method in (messaging.RUNNER_METHOD_START_ITERATION,
+ messaging.RUNNER_METHOD_STOP_ITERATION):
+ reg = {'action': method, 'payload': pload}
+ self.assertIn(reg, output)
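
A minimal sketch of the payload round-trip the functional test above relies on; RunnerPayload, obj_to_dict() and dict_to_obj() are the calls exercised in the test, while the payload contents below are illustrative only:

    from yardstick.common.messaging import payloads

    # Build a payload, serialize it to a plain dict for the message bus,
    # then rebuild it on the consumer side; the round-trip is lossless,
    # which is what the assertIn() checks above depend on.
    original = payloads.RunnerPayload(version=10, data={'step': 1})
    wire_form = original.obj_to_dict()
    rebuilt = payloads.RunnerPayload.dict_to_obj(wire_form)
    assert rebuilt.obj_to_dict() == wire_form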
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index a4a8359d5..69779d3e0 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -19,6 +19,7 @@ import mock
import six
import unittest
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.benchmark.contexts.standalone import ovs_dpdk
@@ -82,7 +83,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test_init(self):
ATTRS = {
- 'name': 'StandaloneOvsDpdk',
+ 'name': contexts.CONTEXT_STANDALONEOVSDPDK,
'task_id': '1234567890',
'file': 'pod',
'flavor': {},
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index 169084607..74c31569c 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -18,6 +18,7 @@ import mock
import unittest
from yardstick import ssh
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.benchmark.contexts.standalone import sriov
@@ -30,7 +31,7 @@ class SriovContextTestCase(unittest.TestCase):
NODES_DUPLICATE_SAMPLE = "nodes_duplicate_sample.yaml"
ATTRS = {
- 'name': 'StandaloneSriov',
+ 'name': contexts.CONTEXT_STANDALONESRIOV,
'task_id': '1234567890',
'file': 'pod',
'flavor': {},
diff --git a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
index 821b84a1f..b526e7cc7 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -7,16 +7,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import collections
+import time
+
import mock
import unittest
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import kubernetes
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.common import kubernetes_utils as k8s_utils
from yardstick.orchestrator import kubernetes as orchestrator_kubernetes
CONTEXT_CFG = {
- 'type': 'Kubernetes',
+ 'type': contexts.CONTEXT_KUBERNETES,
'name': 'k8s',
'task_id': '1234567890',
'servers': {
@@ -32,10 +39,42 @@ CONTEXT_CFG = {
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; '
'service ssh restart;while true ; do sleep 10000; done']
}
+ },
+ 'networks': {
+ 'flannel': {
+ 'args': 'flannel_args',
+ 'plugin': 'flannel'
+ },
+ 'sriov01': {
+ 'args': 'sriov_args',
+ 'plugin': 'sriov'
+ },
}
}
-prefix = 'yardstick.benchmark.contexts.kubernetes'
+
+class NodePort(object):
+ def __init__(self):
+ self.node_port = 30000
+ self.port = constants.SSH_PORT
+ self.name = 'port_name'
+ self.protocol = 'TCP'
+ self.target_port = constants.SSH_PORT
+
+
+class Service(object):
+ def __init__(self):
+ self.ports = [NodePort()]
+
+
+class Status(object):
+ def __init__(self):
+ self.pod_ip = '172.16.10.131'
+
+
+class Pod(object):
+ def __init__(self):
+ self.status = Status()
class KubernetesTestCase(unittest.TestCase):
@@ -55,17 +94,19 @@ class KubernetesTestCase(unittest.TestCase):
@mock.patch.object(kubernetes.KubernetesContext, '_delete_ssh_key')
@mock.patch.object(kubernetes.KubernetesContext, '_delete_rcs')
@mock.patch.object(kubernetes.KubernetesContext, '_delete_pods')
- def test_undeploy(self,
- mock_delete_pods,
- mock_delete_rcs,
- mock_delete_ssh,
- mock_delete_services):
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_networks')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_crd')
+ def test_undeploy(self, mock_delete_pods, mock_delete_rcs,
+ mock_delete_ssh, mock_delete_services,
+ mock_delete_networks, mock_delete_crd):
self.k8s_context.undeploy()
mock_delete_ssh.assert_called_once()
mock_delete_rcs.assert_called_once()
mock_delete_pods.assert_called_once()
mock_delete_services.assert_called_once()
+ mock_delete_networks.assert_called_once()
+ mock_delete_crd.assert_called_once()
@mock.patch.object(kubernetes.KubernetesContext, '_create_services')
@mock.patch.object(kubernetes.KubernetesContext, '_wait_until_running')
@@ -73,27 +114,28 @@ class KubernetesTestCase(unittest.TestCase):
'get_rc_pods')
@mock.patch.object(kubernetes.KubernetesContext, '_create_rcs')
@mock.patch.object(kubernetes.KubernetesContext, '_set_ssh_key')
- def test_deploy(self,
- mock_set_ssh_key,
- mock_create_rcs,
- mock_get_rc_pods,
- mock_wait_until_running,
- mock_create_services):
-
- with mock.patch("yardstick.benchmark.contexts.kubernetes.time"):
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_networks')
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_crd')
+ def test_deploy(self, mock_set_ssh_key, mock_create_rcs, mock_get_rc_pods,
+ mock_wait_until_running, mock_create_services,
+ mock_create_networks, mock_create_crd):
+
+ with mock.patch.object(time, 'sleep'):
self.k8s_context.deploy()
mock_set_ssh_key.assert_called_once()
mock_create_rcs.assert_called_once()
mock_create_services.assert_called_once()
mock_get_rc_pods.assert_called_once()
mock_wait_until_running.assert_called_once()
+ mock_create_networks.assert_called_once()
+ mock_create_crd.assert_called_once()
@mock.patch.object(kubernetes, 'paramiko', **{"resource_filename.return_value": ""})
@mock.patch.object(kubernetes, 'pkg_resources', **{"resource_filename.return_value": ""})
@mock.patch.object(kubernetes, 'utils')
@mock.patch.object(kubernetes, 'open', create=True)
- @mock.patch.object(kubernetes.k8s_utils, 'delete_config_map')
- @mock.patch.object(kubernetes.k8s_utils, 'create_config_map')
+ @mock.patch.object(k8s_utils, 'delete_config_map')
+ @mock.patch.object(k8s_utils, 'create_config_map')
def test_ssh_key(self, mock_create, mock_delete, *args):
self.k8s_context._set_ssh_key()
self.k8s_context._delete_ssh_key()
@@ -101,49 +143,32 @@ class KubernetesTestCase(unittest.TestCase):
mock_create.assert_called_once()
mock_delete.assert_called_once()
- @mock.patch.object(kubernetes.k8s_utils, 'read_pod_status')
+ @mock.patch.object(k8s_utils, 'read_pod_status')
def test_wait_until_running(self, mock_read_pod_status):
self.k8s_context.template.pods = ['server']
mock_read_pod_status.return_value = 'Running'
self.k8s_context._wait_until_running()
- @mock.patch.object(kubernetes.k8s_utils, 'get_pod_by_name')
+ @mock.patch.object(k8s_utils, 'get_pod_by_name')
@mock.patch.object(kubernetes.KubernetesContext, '_get_node_ip')
- @mock.patch.object(kubernetes.k8s_utils, 'get_service_by_name')
- def test_get_server(self,
- mock_get_service_by_name,
- mock_get_node_ip,
- mock_get_pod_by_name):
- class Service(object):
- def __init__(self):
- self.name = 'yardstick'
- self.node_port = 30000
-
- class Services(object):
- def __init__(self):
- self.ports = [Service()]
-
- class Status(object):
- def __init__(self):
- self.pod_ip = '172.16.10.131'
-
- class Pod(object):
- def __init__(self):
- self.status = Status()
-
- mock_get_service_by_name.return_value = Services()
+ def test_get_server(self, mock_get_node_ip, mock_get_pod_by_name):
mock_get_pod_by_name.return_value = Pod()
mock_get_node_ip.return_value = '172.16.10.131'
-
- self.assertIsNotNone(self.k8s_context._get_server('server'))
+ with mock.patch.object(self.k8s_context, '_get_service_ports') as \
+ mock_get_sports:
+ mock_get_sports.return_value = [
+ {'port': constants.SSH_PORT, 'node_port': 30000}]
+ server = self.k8s_context._get_server('server_name')
+ self.assertEqual('server_name', server['name'])
+ self.assertEqual(30000, server['ssh_port'])
@mock.patch.object(kubernetes.KubernetesContext, '_create_rc')
def test_create_rcs(self, mock_create_rc):
self.k8s_context._create_rcs()
mock_create_rc.assert_called()
- @mock.patch.object(kubernetes.k8s_utils, 'create_replication_controller')
+ @mock.patch.object(k8s_utils, 'create_replication_controller')
def test_create_rc(self, mock_create_replication_controller):
self.k8s_context._create_rc({})
mock_create_replication_controller.assert_called_once()
@@ -153,22 +178,22 @@ class KubernetesTestCase(unittest.TestCase):
self.k8s_context._delete_rcs()
mock_delete_rc.assert_called()
- @mock.patch.object(kubernetes.k8s_utils, 'delete_replication_controller')
+ @mock.patch.object(k8s_utils, 'delete_replication_controller')
def test_delete_rc(self, mock_delete_replication_controller):
self.k8s_context._delete_rc({})
mock_delete_replication_controller.assert_called_once()
- @mock.patch.object(kubernetes.k8s_utils, 'get_node_list')
+ @mock.patch.object(k8s_utils, 'get_node_list')
def test_get_node_ip(self, mock_get_node_list):
self.k8s_context._get_node_ip()
mock_get_node_list.assert_called_once()
- @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.create')
+ @mock.patch.object(orchestrator_kubernetes.ServiceNodePortObject, 'create')
def test_create_services(self, mock_create):
self.k8s_context._create_services()
mock_create.assert_called()
- @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.delete')
+ @mock.patch.object(orchestrator_kubernetes.ServiceNodePortObject, 'delete')
def test_delete_services(self, mock_delete):
self.k8s_context._delete_services()
mock_delete.assert_called()
@@ -182,6 +207,9 @@ class KubernetesTestCase(unittest.TestCase):
mock_k8stemplate.assert_called_once_with(self.k8s_context.name,
CONTEXT_CFG)
self.assertEqual('fake_template', self.k8s_context.template)
+ self.assertEqual(2, len(self.k8s_context._networks))
+ self.assertIn('flannel', self.k8s_context._networks.keys())
+ self.assertIn('sriov01', self.k8s_context._networks.keys())
def test__get_physical_nodes(self):
result = self.k8s_context._get_physical_nodes()
@@ -190,3 +218,56 @@ class KubernetesTestCase(unittest.TestCase):
def test__get_physical_node_for_server(self):
result = self.k8s_context._get_physical_node_for_server("fake")
self.assertIsNone(result)
+
+ def test__get_network(self):
+ networks = collections.OrderedDict([('n1', 'data1'), ('n2', 'data2')])
+ self.k8s_context._networks = networks
+ self.assertEqual({'name': 'n1'}, self.k8s_context._get_network('n1'))
+ self.assertEqual({'name': 'n2'}, self.k8s_context._get_network('n2'))
+ self.assertIsNone(self.k8s_context._get_network('n3'))
+
+ @mock.patch.object(orchestrator_kubernetes.KubernetesTemplate,
+ 'get_rc_by_name')
+ def test__get_interfaces(self, mock_get_rc):
+ rc = orchestrator_kubernetes.ReplicationControllerObject('rc_name')
+ rc._networks = ['net1', 'net2']
+ mock_get_rc.return_value = rc
+ expected = {'net1': {'network_name': 'net1',
+ 'local_mac': None,
+ 'local_ip': None},
+ 'net2': {'network_name': 'net2',
+ 'local_mac': None,
+ 'local_ip': None}}
+ self.assertEqual(expected, self.k8s_context._get_interfaces('rc_name'))
+
+ @mock.patch.object(orchestrator_kubernetes.KubernetesTemplate,
+ 'get_rc_by_name')
+ def test__get_interfaces_no_networks(self, mock_get_rc):
+ rc = orchestrator_kubernetes.ReplicationControllerObject('rc_name')
+ mock_get_rc.return_value = rc
+ self.assertEqual({}, self.k8s_context._get_interfaces('rc_name'))
+
+ @mock.patch.object(orchestrator_kubernetes.KubernetesTemplate,
+ 'get_rc_by_name', return_value=None)
+ def test__get_interfaces_no_rc(self, *args):
+ self.assertEqual({}, self.k8s_context._get_interfaces('rc_name'))
+
+ @mock.patch.object(k8s_utils, 'get_service_by_name',
+ return_value=Service())
+ def test__get_service_ports(self, mock_get_service_by_name):
+ name = 'rc_name'
+ service_ports = self.k8s_context._get_service_ports(name)
+ mock_get_service_by_name.assert_called_once_with(name + '-service')
+ expected = {'node_port': 30000,
+ 'port': constants.SSH_PORT,
+ 'name': 'port_name',
+ 'protocol': 'TCP',
+ 'target_port': constants.SSH_PORT}
+ self.assertEqual(expected, service_ports[0])
+
+ @mock.patch.object(k8s_utils, 'get_service_by_name',
+ return_value=None)
+ def test__get_service_ports_exception(self, *args):
+ name = 'rc_name'
+ with self.assertRaises(exceptions.KubernetesServiceObjectNotDefined):
+ self.k8s_context._get_service_ports(name)
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 0424c77a3..35236637d 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -28,7 +28,7 @@ from yardstick.common import utils
class TaskTestCase(unittest.TestCase):
- @mock.patch.object(task, 'Context')
+ @mock.patch.object(base, 'Context')
def test_parse_nodes_with_context_same_context(self, mock_context):
scenario_cfg = {
"nodes": {
@@ -69,7 +69,7 @@ class TaskTestCase(unittest.TestCase):
dispatcher2])
self.assertIsNone(t._do_output(output_config, {}))
- @mock.patch.object(task, 'Context')
+ @mock.patch.object(base, 'Context')
def test_parse_networks_from_nodes(self, mock_context):
nodes = {
'node1': {
@@ -133,7 +133,7 @@ class TaskTestCase(unittest.TestCase):
self.assertEqual(mock_context.get_network.call_count, expected_get_network_calls)
self.assertDictEqual(networks, expected)
- @mock.patch.object(task, 'Context')
+ @mock.patch.object(base, 'Context')
@mock.patch.object(task, 'base_runner')
def test_run(self, mock_base_runner, *args):
scenario = {
@@ -156,7 +156,7 @@ class TaskTestCase(unittest.TestCase):
t._run([scenario], False, "yardstick.out")
runner.run.assert_called_once()
- @mock.patch.object(task, 'Context')
+ @mock.patch.object(base, 'Context')
@mock.patch.object(task, 'base_runner')
def test_run_ProxDuration(self, mock_base_runner, *args):
scenario = {
diff --git a/yardstick/tests/unit/benchmark/runner/test_base.py b/yardstick/tests/unit/benchmark/runner/test_base.py
index 559c991f3..49ba1efe4 100644
--- a/yardstick/tests/unit/benchmark/runner/test_base.py
+++ b/yardstick/tests/unit/benchmark/runner/test_base.py
@@ -8,12 +8,17 @@
##############################################################################
import time
+import uuid
import mock
+from oslo_config import cfg
+import oslo_messaging
import subprocess
from yardstick.benchmark.runners import base as runner_base
from yardstick.benchmark.runners import iteration
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
from yardstick.tests.unit import base as ut_base
@@ -94,3 +99,54 @@ class RunnerTestCase(ut_base.BaseUnitTestCase):
with self.assertRaises(NotImplementedError):
runner._run_benchmark(mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock())
+
+
+class RunnerProducerTestCase(ut_base.BaseUnitTestCase):
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(cfg, 'CONF')
+ def test__init(self, mock_config, mock_transport, mock_rpcclient,
+ mock_target):
+ _id = uuid.uuid1().int
+ runner_producer = runner_base.RunnerProducer(_id)
+ mock_transport.assert_called_once_with(
+ mock_config, url='rabbit://yardstick:yardstick@localhost:5672/')
+ mock_target.assert_called_once_with(topic=messaging.TOPIC_RUNNER,
+ fanout=True,
+ server=messaging.SERVER)
+ mock_rpcclient.assert_called_once_with('rpc_transport', 'rpc_target')
+ self.assertEqual(_id, runner_producer._id)
+ self.assertEqual(messaging.TOPIC_RUNNER, runner_producer._topic)
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(payloads, 'RunnerPayload', return_value='runner_pload')
+ def test_start_iteration(self, mock_runner_payload, *args):
+ runner_producer = runner_base.RunnerProducer(uuid.uuid1().int)
+ with mock.patch.object(runner_producer,
+ 'send_message') as mock_message:
+ runner_producer.start_iteration(version=10)
+
+ mock_message.assert_called_once_with(
+ messaging.RUNNER_METHOD_START_ITERATION, 'runner_pload')
+ mock_runner_payload.assert_called_once_with(version=10, data={})
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(payloads, 'RunnerPayload', return_value='runner_pload')
+ def test_stop_iteration(self, mock_runner_payload, *args):
+ runner_producer = runner_base.RunnerProducer(uuid.uuid1().int)
+ with mock.patch.object(runner_producer,
+ 'send_message') as mock_message:
+ runner_producer.stop_iteration(version=15)
+
+ mock_message.assert_called_once_with(
+ messaging.RUNNER_METHOD_STOP_ITERATION, 'runner_pload')
+ mock_runner_payload.assert_called_once_with(version=15, data={})
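
The two RunnerProducer tests above reduce to the following usage pattern; the task id and version number are illustrative, and a reachable RabbitMQ broker (the rabbit:// URL asserted in test__init) is assumed:

    import uuid

    from yardstick.benchmark.runners import base as runner_base

    # The runner fans out start/stop notifications for the current iteration;
    # each call wraps its keyword arguments in a RunnerPayload before sending.
    producer = runner_base.RunnerProducer(uuid.uuid1().int)
    producer.start_iteration(version=10)
    # ... run the iteration ...
    producer.stop_iteration(version=10)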
diff --git a/yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py b/yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py
new file mode 100644
index 000000000..10d14a8a0
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/runner/test_iteration_ipc.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import multiprocessing
+import time
+import os
+import uuid
+
+import mock
+
+from yardstick.benchmark.runners import iteration_ipc
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
+from yardstick.tests.unit import base as ut_base
+
+
+class RunnerIterationIPCEndpointTestCase(ut_base.BaseUnitTestCase):
+
+ def setUp(self):
+ self._id = uuid.uuid1().int
+ self._ctx_ids = [uuid.uuid1().int, uuid.uuid1().int]
+ self._queue = multiprocessing.Queue()
+ self.runner = iteration_ipc.RunnerIterationIPCEndpoint(
+ self._id, self._ctx_ids, self._queue)
+ self._kwargs = {'version': 1, 'iteration': 10, 'kpi': {}}
+ self._pload_dict = payloads.TrafficGeneratorPayload.dict_to_obj(
+ self._kwargs).obj_to_dict()
+
+ def test_tg_method_started(self):
+ self._queue.empty()
+ ctxt = {'id': self._ctx_ids[0]}
+ self.runner.tg_method_started(ctxt, **self._kwargs)
+ time.sleep(0.2)
+
+ output = []
+ while not self._queue.empty():
+ output.append(self._queue.get(True, 1))
+
+ self.assertEqual(1, len(output))
+ self.assertEqual(self._ctx_ids[0], output[0]['id'])
+ self.assertEqual(messaging.TG_METHOD_STARTED, output[0]['action'])
+ self.assertEqual(self._pload_dict, output[0]['payload'].obj_to_dict())
+
+ def test_tg_method_finished(self):
+ self._queue.empty()
+ ctxt = {'id': self._ctx_ids[0]}
+ self.runner.tg_method_finished(ctxt, **self._kwargs)
+ time.sleep(0.2)
+
+ output = []
+ while not self._queue.empty():
+ output.append(self._queue.get(True, 1))
+
+ self.assertEqual(1, len(output))
+ self.assertEqual(self._ctx_ids[0], output[0]['id'])
+ self.assertEqual(messaging.TG_METHOD_FINISHED, output[0]['action'])
+ self.assertEqual(self._pload_dict, output[0]['payload'].obj_to_dict())
+
+ def test_tg_method_iteration(self):
+ self._queue.empty()
+ ctxt = {'id': self._ctx_ids[0]}
+ self.runner.tg_method_iteration(ctxt, **self._kwargs)
+ time.sleep(0.2)
+
+ output = []
+ while not self._queue.empty():
+ output.append(self._queue.get(True, 1))
+
+ self.assertEqual(1, len(output))
+ self.assertEqual(self._ctx_ids[0], output[0]['id'])
+ self.assertEqual(messaging.TG_METHOD_ITERATION, output[0]['action'])
+ self.assertEqual(self._pload_dict, output[0]['payload'].obj_to_dict())
+
+
+class RunnerIterationIPCConsumerTestCase(ut_base.BaseUnitTestCase):
+
+ def setUp(self):
+ self._id = uuid.uuid1().int
+ self._ctx_ids = [uuid.uuid1().int, uuid.uuid1().int]
+ self.consumer = iteration_ipc.RunnerIterationIPCConsumer(
+ self._id, self._ctx_ids)
+ self.consumer._queue = mock.Mock()
+
+ def test__init(self):
+ self.assertEqual({self._ctx_ids[0]: [], self._ctx_ids[1]: []},
+ self.consumer._kpi_per_id)
+
+ def test_is_all_kpis_received_in_iteration(self):
+ payload = payloads.TrafficGeneratorPayload(
+ version=1, iteration=1, kpi={})
+ msg1 = {'action': messaging.TG_METHOD_ITERATION,
+ 'id': self._ctx_ids[0], 'payload': payload}
+ msg2 = {'action': messaging.TG_METHOD_ITERATION,
+ 'id': self._ctx_ids[1], 'payload': payload}
+ self.consumer.iteration_index = 1
+
+ self.consumer._queue.empty.side_effect = [False, True]
+ self.consumer._queue.get.return_value = msg1
+ self.assertFalse(self.consumer.is_all_kpis_received_in_iteration())
+
+ self.consumer._queue.empty.side_effect = [False, True]
+ self.consumer._queue.get.return_value = msg2
+ self.assertTrue(self.consumer.is_all_kpis_received_in_iteration())
+
+
+class IterationIPCRunnerTestCase(ut_base.BaseUnitTestCase):
+
+ @mock.patch.object(iteration_ipc, '_worker_process')
+ @mock.patch.object(os, 'getpid', return_value=12345678)
+ @mock.patch.object(multiprocessing, 'Process', return_value=mock.Mock())
+ def test__run_benchmark(self, mock_process, mock_getpid, mock_worker):
+ method = 'method'
+ scenario_cfg = {'type': 'scenario_type'}
+ context_cfg = 'context_cfg'
+ name = '%s-%s-%s' % ('IterationIPC', 'scenario_type', 12345678)
+ runner = iteration_ipc.IterationIPCRunner(mock.ANY)
+ mock_getpid.reset_mock()
+
+ runner._run_benchmark('class', method, scenario_cfg, context_cfg)
+ mock_process.assert_called_once_with(
+ name=name,
+ target=mock_worker,
+ args=(runner.result_queue, 'class', method, scenario_cfg,
+ context_cfg, runner.aborted, runner.output_queue))
+ mock_getpid.assert_called_once()
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index bb1a7aaca..cdb91f66d 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -436,6 +436,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self._get_file_abspath("tg_trex_tpl.yaml")
self.context_cfg["nodes"]['vnf__1']['VNF model'] = \
self._get_file_abspath("tg_trex_tpl.yaml")
+ self.context_cfg['task_id'] = 'fake_task_id'
vnf = mock.Mock(autospec=GenericVNF)
self.s.get_vnf_impl = mock.Mock(return_value=vnf)
@@ -553,6 +554,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
tgen.verify_traffic = lambda x: verified_dict
tgen.terminate = mock.Mock(return_value=True)
tgen.name = "tgen__1"
+ tgen.run_traffic.return_value = 'tg_id'
vnf = mock.Mock(autospec=GenericVNF)
vnf.runs_traffic = False
vnf.terminate = mock.Mock(return_value=True)
@@ -565,7 +567,6 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.load_vnf_models = mock.Mock(return_value=self.s.vnfs)
self.s._fill_traffic_profile = \
mock.Mock(return_value=TRAFFIC_PROFILE)
- self.assertIsNone(self.s.setup())
def test_setup_exception(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -656,6 +657,9 @@ class TestNetworkServiceTestCase(unittest.TestCase):
)
self.assertEqual(self.s.topology, 'fake_nsd')
+ def test_get_mq_ids(self):
+ self.assertEqual(self.s._mq_ids, self.s.get_mq_ids())
+
def test_teardown(self):
vnf = mock.Mock(autospec=GenericVNF)
vnf.terminate = mock.Mock(return_value=True)
diff --git a/yardstick/tests/unit/common/messaging/test_payloads.py b/yardstick/tests/unit/common/messaging/test_payloads.py
index 00ec220c9..37b1f1926 100644
--- a/yardstick/tests/unit/common/messaging/test_payloads.py
+++ b/yardstick/tests/unit/common/messaging/test_payloads.py
@@ -44,3 +44,39 @@ class PayloadTestCase(ut_base.BaseUnitTestCase):
_dict = {'version': 2, 'key1': 'value100', 'key2': 'value200'}
payload = _DummyPayload.dict_to_obj(_dict)
self.assertEqual(set(_dict.keys()), payload._fields)
+
+
+class TrafficGeneratorPayloadTestCase(ut_base.BaseUnitTestCase):
+
+ def test_init(self):
+ tg_payload = payloads.TrafficGeneratorPayload(
+ version=1, iteration=10, kpi={'key1': 'value1'})
+ self.assertEqual(1, tg_payload.version)
+ self.assertEqual(10, tg_payload.iteration)
+ self.assertEqual({'key1': 'value1'}, tg_payload.kpi)
+ self.assertEqual(3, len(tg_payload._fields))
+
+ def test__init_missing_required_fields(self):
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ payloads.TrafficGeneratorPayload(version=1, iteration=10)
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ payloads.TrafficGeneratorPayload(iteration=10, kpi={})
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ payloads.TrafficGeneratorPayload(iteration=10)
+
+
+class RunnerPayloadTestCase(ut_base.BaseUnitTestCase):
+
+ def test_init(self):
+ runner_payload = payloads.RunnerPayload(version=5,
+ data={'key1': 'value1'})
+ self.assertEqual(5, runner_payload.version)
+ self.assertEqual({'key1': 'value1'}, runner_payload.data)
+
+ def test__init_missing_required_fields(self):
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ payloads.RunnerPayload(version=1)
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ payloads.RunnerPayload(data=None)
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ payloads.RunnerPayload()
diff --git a/yardstick/tests/unit/common/messaging/test_producer.py b/yardstick/tests/unit/common/messaging/test_producer.py
index 0289689dc..22286e5c3 100644
--- a/yardstick/tests/unit/common/messaging/test_producer.py
+++ b/yardstick/tests/unit/common/messaging/test_producer.py
@@ -44,3 +44,10 @@ class MessagingProducerTestCase(ut_base.BaseUnitTestCase):
topic='test_topic', fanout=True, server=messaging.SERVER)
mock_RPCClient.assert_called_once_with('test_rpc_transport',
'test_Target')
+
+ def test_id(self):
+ with mock.patch.object(oslo_messaging, 'RPCClient'), \
+ mock.patch.object(oslo_messaging, 'get_rpc_transport'), \
+ mock.patch.object(oslo_messaging, 'Target'):
+ msg_producer = _MessagingProducer('topic', 'id_to_check')
+ self.assertEqual('id_to_check', msg_producer.id)
diff --git a/yardstick/tests/unit/common/test_kubernetes_utils.py b/yardstick/tests/unit/common/test_kubernetes_utils.py
new file mode 100644
index 000000000..bdc2c12d5
--- /dev/null
+++ b/yardstick/tests/unit/common/test_kubernetes_utils.py
@@ -0,0 +1,252 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from kubernetes import client
+from kubernetes.client import rest
+from kubernetes import config
+
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.common import kubernetes_utils
+from yardstick.tests.unit import base
+
+
+class GetExtensionsV1betaApiTestCase(base.BaseUnitTestCase):
+
+ @mock.patch.object(client, 'ApiextensionsV1beta1Api', return_value='api')
+ @mock.patch.object(config, 'load_kube_config')
+ def test_execute_correct(self, mock_load_kube_config, mock_api):
+ self.assertEqual('api', kubernetes_utils.get_extensions_v1beta_api())
+ mock_load_kube_config.assert_called_once_with(
+ config_file=constants.K8S_CONF_FILE)
+ mock_api.assert_called_once()
+
+ @mock.patch.object(config, 'load_kube_config')
+ def test_execute_exception(self, mock_load_kube_config):
+ mock_load_kube_config.side_effect = IOError
+ with self.assertRaises(exceptions.KubernetesConfigFileNotFound):
+ kubernetes_utils.get_extensions_v1beta_api()
+
+
+class GetCustomObjectsApiTestCase(base.BaseUnitTestCase):
+
+ @mock.patch.object(client, 'CustomObjectsApi', return_value='api')
+ @mock.patch.object(config, 'load_kube_config')
+ def test_execute_correct(self, mock_load_kube_config, mock_api):
+ self.assertEqual('api', kubernetes_utils.get_custom_objects_api())
+ mock_load_kube_config.assert_called_once_with(
+ config_file=constants.K8S_CONF_FILE)
+ mock_api.assert_called_once()
+
+ @mock.patch.object(config, 'load_kube_config')
+ def test_execute_exception(self, mock_load_kube_config):
+ mock_load_kube_config.side_effect = IOError
+ with self.assertRaises(exceptions.KubernetesConfigFileNotFound):
+ kubernetes_utils.get_custom_objects_api()
+
+
+class CreateCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
+
+ @mock.patch.object(client, 'V1beta1CustomResourceDefinition',
+ return_value='crd_obj')
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_correct(self, mock_get_api, mock_crd):
+ mock_create_crd = mock.Mock()
+ mock_get_api.return_value = mock_create_crd
+ body = {'spec': 'fake_spec', 'metadata': 'fake_metadata'}
+
+ kubernetes_utils.create_custom_resource_definition(body)
+ mock_get_api.assert_called_once()
+ mock_crd.assert_called_once_with(spec='fake_spec',
+ metadata='fake_metadata')
+ mock_create_crd.create_custom_resource_definition.\
+ assert_called_once_with('crd_obj')
+
+ @mock.patch.object(client, 'V1beta1CustomResourceDefinition',
+ return_value='crd_obj')
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_exception(self, mock_get_api, mock_crd):
+ mock_create_crd = mock.Mock()
+ mock_create_crd.create_custom_resource_definition.\
+ side_effect = rest.ApiException
+ mock_get_api.return_value = mock_create_crd
+ body = {'spec': 'fake_spec', 'metadata': 'fake_metadata'}
+
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.create_custom_resource_definition(body)
+ mock_get_api.assert_called_once()
+ mock_crd.assert_called_once_with(spec='fake_spec',
+ metadata='fake_metadata')
+ mock_create_crd.create_custom_resource_definition.\
+ assert_called_once_with('crd_obj')
+
+
+class DeleteCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
+
+ @mock.patch.object(client, 'V1DeleteOptions', return_value='del_obj')
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_correct(self, mock_get_api, mock_delobj):
+ mock_delete_crd = mock.Mock()
+ mock_get_api.return_value = mock_delete_crd
+
+ kubernetes_utils.delete_custom_resource_definition('name')
+ mock_get_api.assert_called_once()
+ mock_delobj.assert_called_once()
+ mock_delete_crd.delete_custom_resource_definition.\
+ assert_called_once_with('name', 'del_obj')
+
+ @mock.patch.object(client, 'V1DeleteOptions', return_value='del_obj')
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_exception(self, mock_get_api, mock_delobj):
+ mock_delete_crd = mock.Mock()
+ mock_delete_crd.delete_custom_resource_definition.\
+ side_effect = rest.ApiException
+ mock_get_api.return_value = mock_delete_crd
+
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_custom_resource_definition('name')
+ mock_delobj.assert_called_once()
+ mock_delete_crd.delete_custom_resource_definition.\
+ assert_called_once_with('name', 'del_obj')
+
+
+class GetCustomResourceDefinitionTestCase(base.BaseUnitTestCase):
+
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_value(self, mock_get_api):
+ crd_obj = mock.Mock()
+ crd_obj.spec.names.kind = 'some_kind'
+ crd_list = mock.Mock()
+ crd_list.items = [crd_obj]
+ mock_api = mock.Mock()
+ mock_api.list_custom_resource_definition.return_value = crd_list
+ mock_get_api.return_value = mock_api
+ self.assertEqual(
+ crd_obj,
+ kubernetes_utils.get_custom_resource_definition('some_kind'))
+
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_none(self, mock_get_api):
+ crd_obj = mock.Mock()
+ crd_obj.spec.names.kind = 'some_kind'
+ crd_list = mock.Mock()
+ crd_list.items = [crd_obj]
+ mock_api = mock.Mock()
+ mock_api.list_custom_resource_definition.return_value = crd_list
+ mock_get_api.return_value = mock_api
+ self.assertIsNone(
+ kubernetes_utils.get_custom_resource_definition('other_kind'))
+
+ @mock.patch.object(kubernetes_utils, 'get_extensions_v1beta_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.list_custom_resource_definition.\
+ side_effect = rest.ApiException
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.get_custom_resource_definition('kind')
+
+
+class CreateNetworkTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ body = mock.Mock()
+
+ kubernetes_utils.create_network(
+ constants.SCOPE_CLUSTER, group, version, plural, body)
+ mock_api.create_cluster_custom_object.assert_called_once_with(
+ group, version, plural, body)
+
+ mock_api.reset_mock()
+ kubernetes_utils.create_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, body)
+ mock_api.create_namespaced_custom_object.assert_called_once_with(
+ group, version, 'default', plural, body)
+
+
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.create_cluster_custom_object.side_effect = rest.ApiException
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.create_network(
+ constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
+ mock.ANY)
+
+
+class DeleteNetworkTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+ group = 'group.com'
+ version = mock.Mock()
+ plural = 'networks'
+ name = 'network'
+
+ kubernetes_utils.delete_network(
+ constants.SCOPE_CLUSTER, group, version, plural, name)
+ mock_api.delete_cluster_custom_object.assert_called_once_with(
+ group, version, plural, name, {})
+
+ mock_api.reset_mock()
+ kubernetes_utils.delete_network(
+ constants.SCOPE_NAMESPACED, group, version, plural, name)
+ mock_api.delete_namespaced_custom_object.assert_called_once_with(
+ group, version, 'default', plural, name, {})
+
+ @mock.patch.object(kubernetes_utils, 'get_custom_objects_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_cluster_custom_object.side_effect = rest.ApiException
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_network(
+ constants.SCOPE_CLUSTER, mock.ANY, mock.ANY, mock.ANY,
+ mock.ANY)
+
+
+class DeletePodTestCase(base.BaseUnitTestCase):
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_correct(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_get_api.return_value = mock_api
+
+ kubernetes_utils.delete_pod("name", body=None)
+ mock_api.delete_namespaced_pod.assert_called_once_with(
+ "name", 'default', None)
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_pod.side_effect = rest.ApiException(status=200)
+
+ mock_get_api.return_value = mock_api
+ with self.assertRaises(exceptions.KubernetesApiException):
+ kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
+
+ @mock.patch.object(kubernetes_utils, 'get_core_api')
+ def test_execute_skip_exception(self, mock_get_api):
+ mock_api = mock.Mock()
+ mock_api.delete_namespaced_pod.side_effect = rest.ApiException(status=404)
+
+ mock_get_api.return_value = mock_api
+ kubernetes_utils.delete_pod(mock.ANY, skip_codes=[404])
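
Outside the mocks, the behaviour covered by DeletePodTestCase looks roughly like this; the pod name is illustrative and the namespace defaults to 'default', as asserted above:

    from yardstick.common import exceptions
    from yardstick.common import kubernetes_utils

    try:
        # A 404 from the API server is tolerated (the pod is already gone);
        # other rest.ApiException statuses surface as KubernetesApiException.
        kubernetes_utils.delete_pod('yardstick-server', skip_codes=[404])
    except exceptions.KubernetesApiException:
        pass  # the pod existed but could not be deleted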
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 6247afd18..446afdd38 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -16,6 +16,7 @@ import mock
import os
import six
from six.moves import configparser
+import socket
import time
import unittest
@@ -1282,3 +1283,29 @@ class WaitUntilTrueTestCase(ut_base.BaseUnitTestCase):
self.assertIsNone(
utils.wait_until_true(lambda: False, timeout=1, sleep=1,
exception=MyTimeoutException))
+
+
+class SendSocketCommandTestCase(unittest.TestCase):
+
+ @mock.patch.object(socket, 'socket')
+ def test_execute_correct(self, mock_socket):
+ mock_socket_obj = mock.Mock()
+ mock_socket_obj.connect_ex.return_value = 0
+ mock_socket.return_value = mock_socket_obj
+ self.assertEqual(0, utils.send_socket_command('host', 22, 'command'))
+ mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+ mock_socket_obj.connect_ex.assert_called_once_with(('host', 22))
+ mock_socket_obj.sendall.assert_called_once_with(six.b('command'))
+ mock_socket_obj.close.assert_called_once()
+
+ @mock.patch.object(socket, 'socket')
+ def test_execute_exception(self, mock_socket):
+ mock_socket_obj = mock.Mock()
+ mock_socket_obj.connect_ex.return_value = 0
+ mock_socket.return_value = mock_socket_obj
+ mock_socket_obj.sendall.side_effect = socket.error
+ self.assertEqual(1, utils.send_socket_command('host', 22, 'command'))
+ mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+ mock_socket_obj.connect_ex.assert_called_once_with(('host', 22))
+ mock_socket_obj.sendall.assert_called_once_with(six.b('command'))
+ mock_socket_obj.close.assert_called_once()
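
As the two tests above pin down, utils.send_socket_command() opens a TCP connection, sends the command and returns 0 on success and 1 when the send fails; the host, port and command here are illustrative:

    from yardstick.common import utils

    # Returns 0 when the command was delivered, 1 otherwise.
    if utils.send_socket_command('192.168.1.10', 22, 'quit') != 0:
        print('could not deliver command to the remote socket')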
diff --git a/yardstick/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py
index 4271f852c..cffa4d492 100644
--- a/yardstick/tests/unit/network_services/collector/test_subscriber.py
+++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py
@@ -11,10 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-import unittest
+import copy
import mock
+import unittest
from yardstick.network_services.collector import subscriber
from yardstick import ssh
@@ -38,14 +38,15 @@ class MockVnfAprrox(object):
class CollectorTestCase(unittest.TestCase):
- NODES = {'context1': [{'name': 'node1',
- 'ip': '1.2.3.4',
- 'collectd': {
- 'plugins': {'abc': 12, 'def': 34},
- 'interval': 987
- },
- }]
- }
+ NODES = {
+ 'context1': [{'name': 'node1',
+ 'ip': '1.2.3.4',
+ 'collectd': {
+ 'plugins': {'abc': 12, 'def': 34},
+ 'interval': 987}
+ }
+ ]
+ }
def setUp(self):
vnf = MockVnfAprrox()
@@ -61,13 +62,29 @@ class CollectorTestCase(unittest.TestCase):
def tearDown(self):
self.ssh_patch.stop()
- def test___init__(self, *_):
+ def test___init__(self, *args):
vnf = MockVnfAprrox()
collector = subscriber.Collector([vnf], self.NODES)
self.assertEqual(len(collector.vnfs), 1)
self.assertEqual(len(collector.nodes), 1)
- def test_start(self, *_):
+ def test___init__no_node_information(self, *args):
+ vnf = MockVnfAprrox()
+ nodes = copy.deepcopy(self.NODES)
+ nodes['context1'].append(None)
+ collector = subscriber.Collector([vnf], nodes)
+ self.assertEqual(len(collector.vnfs), 1)
+ self.assertEqual(len(collector.nodes), 1)
+
+ def test___init__no_node_information_in_context(self, *args):
+ vnf = MockVnfAprrox()
+ nodes = copy.deepcopy(self.NODES)
+ nodes['context1'] = None
+ collector = subscriber.Collector([vnf], nodes)
+ self.assertEqual(len(collector.vnfs), 1)
+ self.assertEqual(len(collector.nodes), 1)
+
+ def test_start(self, *args):
resource_profile = mock.MagicMock()
self.collector.resource_profiles = {'key': resource_profile}
self.collector.bin_path = 'path'
@@ -92,7 +109,7 @@ class CollectorTestCase(unittest.TestCase):
for resource in self.collector.resource_profiles.values():
resource.stop.assert_called_once()
- def test_get_kpi(self, *_):
+ def test_get_kpi(self, *args):
result = self.collector.get_kpi()
self.assertEqual(2, len(result))
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
index 34afa3d5b..541855aa8 100644
--- a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -203,13 +203,9 @@ class TestIxNextgen(unittest.TestCase):
ixnet_gen._ixnet = self.ixnet
framesize = {'64B': '75', '512b': '25'}
output = ixnet_gen._parse_framesize(framesize)
- for idx in range(len(framesize)):
- if output[idx * 2] == 64:
- self.assertEqual(75, output[idx * 2 + 1])
- elif output[idx * 2] == 512:
- self.assertEqual(25, output[idx * 2 + 1])
- else:
- raise self.failureException('Framesize (64, 512) not present')
+ self.assertEqual(2, len(output))
+ self.assertIn([64, 64, 75], output)
+ self.assertIn([512, 512, 25], output)
@mock.patch.object(IxNetwork, 'IxNet')
def test_connect(self, mock_ixnet):
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index f04d2c617..01fc19aa0 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import unittest
import mock
@@ -19,22 +18,14 @@ import os
import re
import copy
-from yardstick.tests import STL_MOCKS
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
from yardstick.common import utils
from yardstick.common import exceptions
from yardstick.benchmark.contexts import base as ctx_base
-
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf import acl_vnf
- from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
- from yardstick.network_services.nfvi.resource import ResourceProfile
- from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf import acl_vnf
+from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
+from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
TEST_FILE_YAML = 'nsb_test_case.yaml'
@@ -246,7 +237,7 @@ class TestAclApproxVnf(unittest.TestCase):
def test___init__(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
self.assertIsNone(acl_approx_vnf._vnf_process)
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
@@ -256,7 +247,7 @@ class TestAclApproxVnf(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
acl_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {acl_approx_vnf.name: "mock"}
}
@@ -279,7 +270,7 @@ class TestAclApproxVnf(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
acl_approx_vnf.q_in = mock.MagicMock()
acl_approx_vnf.q_out = mock.MagicMock()
acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -291,7 +282,7 @@ class TestAclApproxVnf(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
acl_approx_vnf.q_in = mock.MagicMock()
acl_approx_vnf.q_out = mock.MagicMock()
acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -312,7 +303,7 @@ class TestAclApproxVnf(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
acl_approx_vnf._build_config = mock.MagicMock()
acl_approx_vnf.queue_wrapper = mock.MagicMock()
acl_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
@@ -332,7 +323,7 @@ class TestAclApproxVnf(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
acl_approx_vnf.deploy_helper = mock.MagicMock()
acl_approx_vnf.resource_helper = mock.MagicMock()
acl_approx_vnf._build_config = mock.MagicMock()
@@ -350,7 +341,7 @@ class TestAclApproxVnf(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd)
+ acl_approx_vnf = acl_vnf.AclApproxVnf(name, vnfd, 'task_id')
acl_approx_vnf._vnf_process = mock.MagicMock()
acl_approx_vnf._vnf_process.terminate = mock.Mock()
acl_approx_vnf.used_drivers = {"01:01.0": "i40e",
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py
index ebedcb451..2ea13a5e0 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py
@@ -11,16 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import multiprocessing
import os
+import uuid
import mock
+from oslo_config import cfg
+import oslo_messaging
import unittest
+from yardstick.common import messaging
+from yardstick.common.messaging import payloads
from yardstick.network_services.vnf_generic.vnf import base
from yardstick.ssh import SSH
+from yardstick.tests.unit import base as ut_base
IP_PIPELINE_CFG_FILE_TPL = ("arp_route_tbl = ({port0_local_ip_hex},"
@@ -140,6 +145,24 @@ VNFD = {
}
+class _DummyGenericTrafficGen(base.GenericTrafficGen): # pragma: no cover
+
+ def run_traffic(self, *args):
+ pass
+
+ def terminate(self):
+ pass
+
+ def collect_kpi(self):
+ pass
+
+ def instantiate(self, *args):
+ pass
+
+ def scale(self, flavor=''):
+ pass
+
+
class FileAbsPath(object):
def __init__(self, module_file):
super(FileAbsPath, self).__init__()
@@ -206,22 +229,23 @@ class TestQueueFileWrapper(unittest.TestCase):
self.assertIsNotNone(queue_file_wrapper.q_out.empty())
-class TestGenericVNF(unittest.TestCase):
+class TestGenericVNF(ut_base.BaseUnitTestCase):
def test_definition(self):
"""Make sure that the abstract class cannot be instantiated"""
with self.assertRaises(TypeError) as exc:
# pylint: disable=abstract-class-instantiated
- base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0])
+ base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0],
+ 'task_id')
- msg = ("Can't instantiate abstract class GenericVNF with abstract methods "
- "collect_kpi, instantiate, scale, start_collect, "
+ msg = ("Can't instantiate abstract class GenericVNF with abstract "
+ "methods collect_kpi, instantiate, scale, start_collect, "
"stop_collect, terminate, wait_for_instantiate")
self.assertEqual(msg, str(exc.exception))
-class TestGenericTrafficGen(unittest.TestCase):
+class GenericTrafficGenTestCase(ut_base.BaseUnitTestCase):
def test_definition(self):
"""Make sure that the abstract class cannot be instantiated"""
@@ -229,8 +253,96 @@ class TestGenericTrafficGen(unittest.TestCase):
name = 'vnf1'
with self.assertRaises(TypeError) as exc:
# pylint: disable=abstract-class-instantiated
- base.GenericTrafficGen(name, vnfd)
+ base.GenericTrafficGen(name, vnfd, 'task_id')
msg = ("Can't instantiate abstract class GenericTrafficGen with "
"abstract methods collect_kpi, instantiate, run_traffic, "
"scale, terminate")
self.assertEqual(msg, str(exc.exception))
+
+ def test_get_mq_producer_id(self):
+ vnfd = {'benchmark': {'kpi': mock.ANY},
+ 'vdu': [{'external-interface': 'ext_int'}]
+ }
+ tg = _DummyGenericTrafficGen('name', vnfd, 'task_id')
+ tg._mq_producer = mock.Mock()
+ tg._mq_producer.id = 'fake_id'
+ self.assertEqual('fake_id', tg.get_mq_producer_id())
+
+
+class TrafficGeneratorProducerTestCase(ut_base.BaseUnitTestCase):
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(cfg, 'CONF')
+ def test__init(self, mock_config, mock_transport, mock_rpcclient,
+ mock_target):
+ _id = uuid.uuid1().int
+ tg_producer = base.TrafficGeneratorProducer(_id)
+ mock_transport.assert_called_once_with(
+ mock_config, url='rabbit://yardstick:yardstick@localhost:5672/')
+ mock_target.assert_called_once_with(topic=messaging.TOPIC_TG,
+ fanout=True,
+ server=messaging.SERVER)
+ mock_rpcclient.assert_called_once_with('rpc_transport', 'rpc_target')
+ self.assertEqual(_id, tg_producer._id)
+ self.assertEqual(messaging.TOPIC_TG, tg_producer._topic)
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(payloads, 'TrafficGeneratorPayload',
+ return_value='tg_pload')
+ def test_tg_method_started(self, mock_tg_payload, *args):
+ tg_producer = base.TrafficGeneratorProducer(uuid.uuid1().int)
+ with mock.patch.object(tg_producer, 'send_message') as mock_message:
+ tg_producer.tg_method_started(version=10)
+
+ mock_message.assert_called_once_with(messaging.TG_METHOD_STARTED,
+ 'tg_pload')
+ mock_tg_payload.assert_called_once_with(version=10, iteration=0,
+ kpi={})
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(payloads, 'TrafficGeneratorPayload',
+ return_value='tg_pload')
+ def test_tg_method_finished(self, mock_tg_payload, *args):
+ tg_producer = base.TrafficGeneratorProducer(uuid.uuid1().int)
+ with mock.patch.object(tg_producer, 'send_message') as mock_message:
+ tg_producer.tg_method_finished(version=20)
+
+ mock_message.assert_called_once_with(messaging.TG_METHOD_FINISHED,
+ 'tg_pload')
+ mock_tg_payload.assert_called_once_with(version=20, iteration=0,
+ kpi={})
+
+ @mock.patch.object(oslo_messaging, 'Target', return_value='rpc_target')
+ @mock.patch.object(oslo_messaging, 'RPCClient')
+ @mock.patch.object(oslo_messaging, 'get_rpc_transport',
+ return_value='rpc_transport')
+ @mock.patch.object(payloads, 'TrafficGeneratorPayload',
+ return_value='tg_pload')
+ def test_tg_method_iteration(self, mock_tg_payload, *args):
+ tg_producer = base.TrafficGeneratorProducer(uuid.uuid1().int)
+ with mock.patch.object(tg_producer, 'send_message') as mock_message:
+ tg_producer.tg_method_iteration(100, version=30, kpi={'k': 'v'})
+
+ mock_message.assert_called_once_with(messaging.TG_METHOD_ITERATION,
+ 'tg_pload')
+ mock_tg_payload.assert_called_once_with(version=30, iteration=100,
+ kpi={'k': 'v'})
+
+
+class GenericVNFConsumerTestCase(ut_base.BaseUnitTestCase):
+
+ def test__init(self):
+ endpoints = 'endpoint_1'
+ _ids = [uuid.uuid1().int]
+ gvnf_consumer = base.GenericVNFConsumer(_ids, endpoints)
+ self.assertEqual(_ids, gvnf_consumer._ids)
+ self.assertEqual([endpoints], gvnf_consumer._endpoints)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
index 635ca41a2..32f5b758d 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
@@ -318,14 +318,14 @@ class TestCgnaptApproxVnf(unittest.TestCase):
def test___init__(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
self.assertIsNone(cgnapt_approx_vnf._vnf_process)
@mock.patch.object(process, 'check_if_process_failed')
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
def test_collect_kpi(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
cgnapt_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {cgnapt_approx_vnf.name: "mock"}
}
@@ -349,7 +349,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
@mock.patch.object(time, 'sleep')
def test_vnf_execute_command(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
cgnapt_approx_vnf.q_in = mock.Mock()
cgnapt_approx_vnf.q_out = mock.Mock()
cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -357,7 +357,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
def test_get_stats(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
with mock.patch.object(cgnapt_approx_vnf, 'vnf_execute') as mock_exec:
mock_exec.return_value = 'output'
self.assertEqual('output', cgnapt_approx_vnf.get_stats())
@@ -366,7 +366,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
def test_run_vcgnapt(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
cgnapt_approx_vnf.ssh_helper = mock.Mock()
cgnapt_approx_vnf.setup_helper = mock.Mock()
with mock.patch.object(cgnapt_approx_vnf, '_build_config'), \
@@ -379,7 +379,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
@mock.patch.object(ctx_base.Context, 'get_context_from_server')
def test_instantiate(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
cgnapt_approx_vnf.deploy_helper = mock.MagicMock()
cgnapt_approx_vnf.resource_helper = mock.MagicMock()
cgnapt_approx_vnf._build_config = mock.MagicMock()
@@ -396,7 +396,7 @@ class TestCgnaptApproxVnf(unittest.TestCase):
def test__vnf_up_post(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
self.scenario_cfg['options'][name]['napt'] = 'static'
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
cgnapt_approx_vnf.vnf_execute = mock.Mock()
cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
with mock.patch.object(cgnapt_approx_vnf, 'setup_helper') as \
@@ -407,6 +407,6 @@ class TestCgnaptApproxVnf(unittest.TestCase):
def test__vnf_up_post_short(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf = cgnapt_vnf.CgnaptApproxVnf(name, vnfd, 'task_id')
cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
cgnapt_approx_vnf._vnf_up_post()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
index 678e58056..f144e8c42 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -317,7 +317,7 @@ class TestProxApproxVnf(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test___init__(self, ssh, *args):
mock_ssh(ssh)
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
self.assertIsNone(prox_approx_vnf._vnf_process)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
@@ -325,7 +325,7 @@ class TestProxApproxVnf(unittest.TestCase):
def test_collect_kpi_no_client(self, ssh, *args):
mock_ssh(ssh)
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {prox_approx_vnf.name: "mock"}
}
@@ -350,7 +350,7 @@ class TestProxApproxVnf(unittest.TestCase):
[2, 1, 2, 3, 4, 5], [3, 1, 2, 3, 4, 5]]
resource_helper.collect_collectd_kpi.return_value = {'core': {'result': 234}}
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {prox_approx_vnf.name: "mock"}
}
@@ -376,7 +376,8 @@ class TestProxApproxVnf(unittest.TestCase):
mock_ssh(ssh)
resource_helper = mock.MagicMock()
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, deepcopy(self.VNFD0))
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, deepcopy(self.VNFD0),
+ 'task_id')
prox_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {prox_approx_vnf.name: "mock"}
}
@@ -399,7 +400,7 @@ class TestProxApproxVnf(unittest.TestCase):
def test_run_prox(self, ssh, *_):
mock_ssh(ssh)
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
prox_approx_vnf.ssh_helper.join_bin_path.return_value = '/tool_path12/tool_file34'
prox_approx_vnf.setup_helper.remote_path = 'configs/file56.cfg'
@@ -413,7 +414,7 @@ class TestProxApproxVnf(unittest.TestCase):
@mock.patch(SSH_HELPER)
def bad_test_instantiate(self, *args):
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf.scenario_helper = mock.MagicMock()
prox_approx_vnf.setup_helper = mock.MagicMock()
# we can't mock super
@@ -423,7 +424,7 @@ class TestProxApproxVnf(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test_wait_for_instantiate_panic(self, ssh, *args):
mock_ssh(ssh, exec_result=(1, "", ""))
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf._vnf_process = mock.MagicMock(**{"is_alive.return_value": True})
prox_approx_vnf._run_prox = mock.Mock(return_value=0)
prox_approx_vnf.WAIT_TIME = 0
@@ -435,7 +436,7 @@ class TestProxApproxVnf(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test_terminate(self, ssh, *args):
mock_ssh(ssh)
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf._vnf_process = mock.MagicMock()
prox_approx_vnf._vnf_process.terminate = mock.Mock()
prox_approx_vnf.ssh_helper = mock.MagicMock()
@@ -447,7 +448,7 @@ class TestProxApproxVnf(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test__vnf_up_post(self, ssh, *args):
mock_ssh(ssh)
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf.resource_helper = resource_helper = mock.Mock()
prox_approx_vnf._vnf_up_post()
@@ -456,7 +457,7 @@ class TestProxApproxVnf(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test_vnf_execute_oserror(self, ssh, *args):
mock_ssh(ssh)
- prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf = prox_vnf.ProxApproxVnf(NAME, self.VNFD0, 'task_id')
prox_approx_vnf.resource_helper = resource_helper = mock.Mock()
resource_helper.execute.side_effect = OSError(errno.EPIPE, "")
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py
index edd0ff796..ad74145b4 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py
@@ -11,22 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import unittest
import mock
-from yardstick.tests import STL_MOCKS
from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
from yardstick.benchmark.contexts import base as ctx_base
-
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.router_vnf import RouterVNF
+from yardstick.network_services.vnf_generic.vnf.router_vnf import RouterVNF
TEST_FILE_YAML = 'nsb_test_case.yaml'
@@ -208,7 +199,7 @@ class TestRouterVNF(unittest.TestCase):
def test___init__(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- router_vnf = RouterVNF(name, vnfd)
+ router_vnf = RouterVNF(name, vnfd, 'task_id')
self.assertIsNone(router_vnf._vnf_process)
def test_get_stats(self):
@@ -222,7 +213,7 @@ class TestRouterVNF(unittest.TestCase):
m = mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- router_vnf = RouterVNF(name, vnfd)
+ router_vnf = RouterVNF(name, vnfd, 'task_id')
router_vnf.scenario_helper.scenario_cfg = {
'nodes': {router_vnf.name: "mock"}
}
@@ -241,7 +232,7 @@ class TestRouterVNF(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- router_vnf = RouterVNF(name, vnfd)
+ router_vnf = RouterVNF(name, vnfd, 'task_id')
router_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
router_vnf._run()
router_vnf.ssh_helper.drop_connection.assert_called_once()
@@ -252,7 +243,7 @@ class TestRouterVNF(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- router_vnf = RouterVNF(name, vnfd)
+ router_vnf = RouterVNF(name, vnfd, 'task_id')
router_vnf.WAIT_TIME = 0
router_vnf.INTERFACE_WAIT = 0
self.scenario_cfg.update({"nodes": {"vnf__1": ""}})
@@ -265,7 +256,7 @@ class TestRouterVNF(unittest.TestCase):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- router_vnf = RouterVNF(name, vnfd)
+ router_vnf = RouterVNF(name, vnfd, 'task_id')
router_vnf._vnf_process = mock.MagicMock()
router_vnf._vnf_process.terminate = mock.Mock()
self.assertIsNone(router_vnf.terminate())
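
The block deleted above (STLClient, stl_patch and the guarded imports) follows a pattern removed from several of these test modules: fake modules are injected into sys.modules so that imports of the vendor traffic-generator libraries succeed without the real packages being installed. A small stand-alone sketch of that mechanism, using an invented module name and the same third-party "mock" package the tests import:

import mock

FAKE_MODULES = {'fake_vendor_lib': mock.MagicMock()}

stl_patch = mock.patch.dict('sys.modules', FAKE_MODULES)
stl_patch.start()
try:
    # Python resolves imports from sys.modules first, so this "module" is
    # really the MagicMock injected above and any attribute access works.
    import fake_vendor_lib
    client = fake_vendor_lib.STLClient()
finally:
    # patch.dict() restores sys.modules to its previous contents on stop().
    stl_patch.stop()

The rewritten modules drop this guard and import the production code directly, which assumes the stubbing is no longer needed at import time.
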
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index 331e80d00..c35d2db35 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1090,6 +1090,57 @@ class TestClientResourceHelper(unittest.TestCase):
self.assertIs(client_resource_helper._connect(client), client)
+ @mock.patch.object(ClientResourceHelper, '_build_ports')
+ @mock.patch.object(ClientResourceHelper, '_run_traffic_once')
+ def test_run_traffic(self, mock_run_traffic_once, mock_build_ports):
+ client_resource_helper = ClientResourceHelper(mock.Mock())
+ client = mock.Mock()
+ traffic_profile = mock.Mock()
+ mq_producer = mock.Mock()
+ with mock.patch.object(client_resource_helper, '_connect') \
+ as mock_connect, \
+ mock.patch.object(client_resource_helper, '_terminated') \
+ as mock_terminated:
+ mock_connect.return_value = client
+ type(mock_terminated).value = mock.PropertyMock(
+ side_effect=[0, 1, lambda x: x])
+ client_resource_helper.run_traffic(traffic_profile, mq_producer)
+
+ mock_build_ports.assert_called_once()
+ traffic_profile.register_generator.assert_called_once()
+ mq_producer.tg_method_started.assert_called_once()
+ mq_producer.tg_method_finished.assert_called_once()
+ mq_producer.tg_method_iteration.assert_called_once_with(1)
+ mock_run_traffic_once.assert_called_once_with(traffic_profile)
+
+ @mock.patch.object(ClientResourceHelper, '_build_ports')
+ @mock.patch.object(ClientResourceHelper, '_run_traffic_once',
+ side_effect=Exception)
+ def test_run_traffic_exception(self, mock_run_traffic_once,
+ mock_build_ports):
+ client_resource_helper = ClientResourceHelper(mock.Mock())
+ client = mock.Mock()
+ traffic_profile = mock.Mock()
+ mq_producer = mock.Mock()
+ with mock.patch.object(client_resource_helper, '_connect') \
+ as mock_connect, \
+ mock.patch.object(client_resource_helper, '_terminated') \
+ as mock_terminated:
+ mock_connect.return_value = client
+ type(mock_terminated).value = mock.PropertyMock(return_value=0)
+ mq_producer.reset_mock()
+ # NOTE(ralonsoh): "trex_stl_exceptions.STLError" is mocked
+ with self.assertRaises(Exception):
+ client_resource_helper.run_traffic(traffic_profile,
+ mq_producer)
+
+ mock_build_ports.assert_called_once()
+ traffic_profile.register_generator.assert_called_once()
+ mock_run_traffic_once.assert_called_once_with(traffic_profile)
+ mq_producer.tg_method_started.assert_called_once()
+ mq_producer.tg_method_finished.assert_not_called()
+ mq_producer.tg_method_iteration.assert_not_called()
+
class TestRfc2544ResourceHelper(unittest.TestCase):
@@ -1492,7 +1543,7 @@ class TestSampleVnf(unittest.TestCase):
}
def test___init__(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
+ sample_vnf = SampleVNF('vnf1', self.VNFD_0, 'task_id')
self.assertEqual(sample_vnf.name, 'vnf1')
self.assertDictEqual(sample_vnf.vnfd_helper, self.VNFD_0)
@@ -1510,7 +1561,8 @@ class TestSampleVnf(unittest.TestCase):
class MyResourceHelper(ResourceHelper):
pass
- sample_vnf = SampleVNF('vnf1', self.VNFD_0, MySetupEnvHelper, MyResourceHelper)
+ sample_vnf = SampleVNF('vnf1', self.VNFD_0, 'task_id',
+ MySetupEnvHelper, MyResourceHelper)
self.assertEqual(sample_vnf.name, 'vnf1')
self.assertDictEqual(sample_vnf.vnfd_helper, self.VNFD_0)
@@ -1524,7 +1576,7 @@ class TestSampleVnf(unittest.TestCase):
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process')
def test__start_vnf(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf._run = mock.Mock()
self.assertIsNone(sample_vnf.queue_wrapper)
@@ -1543,7 +1595,7 @@ class TestSampleVnf(unittest.TestCase):
}
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.scenario_helper.scenario_cfg = {
'nodes': {sample_vnf.name: 'mock'}
}
@@ -1587,7 +1639,7 @@ class TestSampleVnf(unittest.TestCase):
'plugin1': {'param': 1}}}
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf__0', vnfd)
+ sample_vnf = SampleVNF('vnf__0', vnfd, 'task_id')
sample_vnf._update_collectd_options(scenario_cfg, context_cfg)
self.assertEqual(sample_vnf.setup_helper.collectd_options, expected)
@@ -1614,7 +1666,7 @@ class TestSampleVnf(unittest.TestCase):
'plugin1': {'param': 1}}}
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf._update_options(options2, options1)
self.assertEqual(options2, expected)
@@ -1636,7 +1688,7 @@ class TestSampleVnf(unittest.TestCase):
]
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.APP_NAME = 'sample1'
sample_vnf.WAIT_TIME_FOR_SCRIPT = 0
sample_vnf._start_server = mock.Mock(return_value=0)
@@ -1667,7 +1719,7 @@ class TestSampleVnf(unittest.TestCase):
]
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.APP_NAME = 'sample1'
sample_vnf.q_out = mock.Mock()
sample_vnf.q_out.qsize.side_effect = iter(queue_size_list)
@@ -1677,7 +1729,7 @@ class TestSampleVnf(unittest.TestCase):
def test_terminate_without_vnf_process(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.APP_NAME = 'sample1'
sample_vnf.vnf_execute = mock.Mock()
sample_vnf.ssh_helper = mock.Mock()
@@ -1688,7 +1740,7 @@ class TestSampleVnf(unittest.TestCase):
def test_get_stats(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.APP_NAME = 'sample1'
sample_vnf.APP_WORD = 'sample1'
sample_vnf.vnf_execute = mock.Mock(return_value='the stats')
@@ -1698,7 +1750,7 @@ class TestSampleVnf(unittest.TestCase):
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
def test_collect_kpi(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.scenario_helper.scenario_cfg = {
'nodes': {sample_vnf.name: "mock"}
}
@@ -1726,7 +1778,7 @@ class TestSampleVnf(unittest.TestCase):
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
def test_collect_kpi_default(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.scenario_helper.scenario_cfg = {
'nodes': {sample_vnf.name: "mock"}
}
@@ -1745,7 +1797,7 @@ class TestSampleVnf(unittest.TestCase):
def test_scale(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
self.assertRaises(y_exceptions.FunctionNotImplemented,
sample_vnf.scale)
@@ -1753,7 +1805,7 @@ class TestSampleVnf(unittest.TestCase):
test_cmd = 'test cmd'
run_kwargs = {'arg1': 'val1', 'arg2': 'val2'}
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf = SampleVNF('vnf1', vnfd, 'task_id')
sample_vnf.ssh_helper = mock.Mock()
sample_vnf.setup_helper = mock.Mock()
with mock.patch.object(sample_vnf, '_build_config',
@@ -1889,30 +1941,30 @@ class TestSampleVNFTrafficGen(unittest.TestCase):
}
def test__check_status(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
with self.assertRaises(NotImplementedError):
sample_vnf_tg._check_status()
def test_listen_traffic(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
sample_vnf_tg.listen_traffic(mock.Mock())
def test_verify_traffic(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
sample_vnf_tg.verify_traffic(mock.Mock())
def test_terminate(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
sample_vnf_tg._traffic_process = mock.Mock()
sample_vnf_tg._tg_process = mock.Mock()
sample_vnf_tg.terminate()
def test__wait_for_process(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
with mock.patch.object(sample_vnf_tg, '_check_status',
return_value=0) as mock_status, \
mock.patch.object(sample_vnf_tg, '_tg_process') as mock_proc:
@@ -1923,14 +1975,14 @@ class TestSampleVNFTrafficGen(unittest.TestCase):
mock_status.assert_called_once()
def test__wait_for_process_not_alive(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
with mock.patch.object(sample_vnf_tg, '_tg_process') as mock_proc:
mock_proc.is_alive.return_value = False
self.assertRaises(RuntimeError, sample_vnf_tg._wait_for_process)
mock_proc.is_alive.assert_called_once()
def test__wait_for_process_delayed(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
with mock.patch.object(sample_vnf_tg, '_check_status',
side_effect=[1, 0]) as mock_status, \
mock.patch.object(sample_vnf_tg,
@@ -1942,6 +1994,6 @@ class TestSampleVNFTrafficGen(unittest.TestCase):
mock_status.assert_has_calls([mock.call(), mock.call()])
def test_scale(self):
- sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0, 'task_id')
self.assertRaises(y_exceptions.FunctionNotImplemented,
sample_vnf_tg.scale)
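
The new test_run_traffic above drives the traffic loop by mocking reads of the shared _terminated flag: a PropertyMock with a side_effect sequence returns a different value on each access. An isolated sketch of that idiom, with placeholder names:

import mock


def run_until_terminated(terminated, do_iteration):
    # Keep iterating while the (mocked) flag reads falsy.
    count = 0
    while not terminated.value:
        do_iteration(count)
        count += 1
    return count


terminated = mock.Mock()
# First read of .value returns 0 (keep going), the second returns 1 (stop).
type(terminated).value = mock.PropertyMock(side_effect=[0, 1])
iteration = mock.Mock()

assert run_until_terminated(terminated, iteration) == 1
iteration.assert_called_once_with(0)
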
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
index 66f9e93ae..53474b96e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
@@ -16,19 +16,19 @@ import subprocess
import mock
import six
-import unittest
from yardstick import ssh
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.common import utils
from yardstick.network_services.vnf_generic.vnf import tg_ixload
-from yardstick.network_services.traffic_profile.base import TrafficProfile
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.tests.unit import base as ut_base
NAME = "tg__1"
-class TestIxLoadTrafficGen(unittest.TestCase):
+class TestIxLoadTrafficGen(ut_base.BaseUnitTestCase):
VNFD = {'vnfd:vnfd-catalog':
{'vnfd':
[{'short-name': 'VpeVnf',
@@ -107,28 +107,33 @@ class TestIxLoadTrafficGen(unittest.TestCase):
"frame_size": 64}}
def setUp(self):
- self._mock_call = mock.patch.object(subprocess, "call")
+ self._mock_call = mock.patch.object(subprocess, 'call')
self.mock_call = self._mock_call.start()
self._mock_open = mock.patch.object(tg_ixload, 'open')
self.mock_open = self._mock_open.start()
+ self._mock_ssh = mock.patch.object(ssh, 'SSH')
+ self.mock_ssh = self._mock_ssh.start()
+ ssh_obj_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_obj_mock.execute = mock.Mock(return_value=(0, '', ''))
+ ssh_obj_mock.run = mock.Mock(return_value=(0, '', ''))
+ self.mock_ssh.from_node.return_value = ssh_obj_mock
self.addCleanup(self._stop_mock)
def _stop_mock(self):
self._mock_call.stop()
self._mock_open.stop()
+ self._mock_ssh.stop()
- @mock.patch.object(ssh, 'SSH')
- def test___init__(self, *args):
+ def test___init__(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
self.assertIsNone(ixload_traffic_gen.resource_helper.data)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server',
return_value='mock_node')
- @mock.patch.object(ssh, 'SSH')
def test_collect_kpi(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
ixload_traffic_gen.scenario_helper.scenario_cfg = {
'nodes': {ixload_traffic_gen.name: "mock"}
}
@@ -140,97 +145,88 @@ class TestIxLoadTrafficGen(unittest.TestCase):
'collect_stats': {}}
self.assertEqual(expected, result)
- @mock.patch.object(ssh, 'SSH')
- def test_listen_traffic(self, *args):
+ def test_listen_traffic(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
self.assertIsNone(ixload_traffic_gen.listen_traffic({}))
@mock.patch.object(utils, 'find_relative_file')
@mock.patch.object(utils, 'makedirs')
@mock.patch.object(ctx_base.Context, 'get_context_from_server')
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
- @mock.patch.object(ssh, 'SSH')
+ @mock.patch.object(tg_ixload, 'shutil')
def test_instantiate(self, mock_shutil, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
scenario_cfg = {'tc': "nsb_test_case",
'ixia_profile': "ixload.cfg",
'task_path': "/path/to/task"}
ixload_traffic_gen.RESULTS_MOUNT = "/tmp/result"
mock_shutil.copy = mock.Mock()
- scenario_cfg.update({'options': {'packetsize': 64, 'traffic_type': 4,
- 'rfc2544': {'allowed_drop_rate': '0.8 - 1'},
- 'vnf__1': {'rules': 'acl_1rule.yaml',
- 'vnf_config': {'lb_config': 'SW',
- 'lb_count': 1,
- 'worker_config':
- '1C/1T',
- 'worker_threads': 1}}
- }})
- scenario_cfg.update({
- 'nodes': {ixload_traffic_gen.name: "mock"}
- })
+ scenario_cfg.update(
+ {'options':
+ {'packetsize': 64, 'traffic_type': 4,
+ 'rfc2544': {'allowed_drop_rate': '0.8 - 1'},
+ 'vnf__1': {'rules': 'acl_1rule.yaml',
+ 'vnf_config': {'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config':
+ '1C/1T',
+ 'worker_threads': 1}}
+ }
+ }
+ )
+ scenario_cfg.update({'nodes': {ixload_traffic_gen.name: "mock"}})
with mock.patch.object(six.moves.builtins, 'open',
create=True) as mock_open:
mock_open.return_value = mock.MagicMock()
ixload_traffic_gen.instantiate(scenario_cfg, {})
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
- @mock.patch.object(ssh, 'SSH')
+ @mock.patch.object(tg_ixload, 'open')
+ @mock.patch.object(tg_ixload, 'min')
+ @mock.patch.object(tg_ixload, 'max')
+ @mock.patch.object(tg_ixload, 'len')
+ @mock.patch.object(tg_ixload, 'shutil')
def test_run_traffic(self, *args):
- mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
- mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = '64'
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vnfd["mgmt-interface"].update({"tg-config": {}})
- vnfd["mgmt-interface"]["tg-config"].update({"ixchassis":
- "1.1.1.1"})
- vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path":
- "/root"})
- sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ vnfd['mgmt-interface'].update({'tg-config': {}})
+ vnfd['mgmt-interface']['tg-config'].update({'ixchassis': '1.1.1.1'})
+ vnfd['mgmt-interface']['tg-config'].update({'py_bin_path': '/root'})
+ sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
sut.connection = mock.Mock()
- sut.connection.run = mock.Mock()
sut._traffic_runner = mock.Mock(return_value=0)
result = sut.run_traffic(mock_traffic_profile)
self.assertIsNone(result)
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
- @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
- @mock.patch.object(ssh, 'SSH')
+ @mock.patch.object(tg_ixload, 'open')
+ @mock.patch.object(tg_ixload, 'min')
+ @mock.patch.object(tg_ixload, 'max')
+ @mock.patch.object(tg_ixload, 'len')
+ @mock.patch.object(tg_ixload, 'shutil')
def test_run_traffic_csv(self, *args):
- mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
- mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = '64'
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vnfd["mgmt-interface"].update({"tg-config": {}})
- vnfd["mgmt-interface"]["tg-config"].update({"ixchassis":
- "1.1.1.1"})
- vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path":
- "/root"})
- sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ vnfd['mgmt-interface'].update({'tg-config': {}})
+ vnfd['mgmt-interface']['tg-config'].update({'ixchassis': '1.1.1.1'})
+ vnfd['mgmt-interface']['tg-config'].update({'py_bin_path': '/root'})
+ sut = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
sut.connection = mock.Mock()
- sut.connection.run = mock.Mock()
sut._traffic_runner = mock.Mock(return_value=0)
- sut.rel_bin_path = mock.Mock(return_value="/tmp/*.csv")
+ subprocess.call(['touch', '/tmp/1.csv'])
+ sut.rel_bin_path = mock.Mock(return_value='/tmp/*.csv')
result = sut.run_traffic(mock_traffic_profile)
self.assertIsNone(result)
- @mock.patch.object(ssh, 'SSH')
- def test_terminate(self, *args):
+ def test_terminate(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
self.assertIsNone(ixload_traffic_gen.terminate())
- @mock.patch.object(ssh, 'SSH')
- def test_parse_csv_read(self, mock_ssh):
+ def test_parse_csv_read(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
kpi_data = {
'HTTP Total Throughput (Kbps)': 1,
@@ -240,21 +236,13 @@ class TestIxLoadTrafficGen(unittest.TestCase):
'HTTP Transaction Rate': True,
}
http_reader = [kpi_data]
-
- mock_ssh_type = mock.Mock(autospec=mock_ssh.SSH)
- mock_ssh_type.execute.return_value = 0, "", ""
- mock_ssh.from_node.return_value = mock_ssh_type
-
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
result = ixload_traffic_gen.resource_helper.result
-
ixload_traffic_gen.resource_helper.parse_csv_read(http_reader)
- for key_left, key_right in (
- tg_ixload.IxLoadResourceHelper.KPI_LIST.items()):
- self.assertEqual(result[key_left][-1], int(kpi_data[key_right]))
+ for k_left, k_right in tg_ixload.IxLoadResourceHelper.KPI_LIST.items():
+ self.assertEqual(result[k_left][-1], int(kpi_data[k_right]))
- @mock.patch.object(ssh, 'SSH')
- def test_parse_csv_read_value_error(self, mock_ssh, *args):
+ def test_parse_csv_read_value_error(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
http_reader = [{
'HTTP Total Throughput (Kbps)': 1,
@@ -263,19 +251,13 @@ class TestIxLoadTrafficGen(unittest.TestCase):
'HTTP Connection Rate': 4,
'HTTP Transaction Rate': 5,
}]
-
- mock_ssh_type = mock.Mock(autospec=mock_ssh.SSH)
- mock_ssh_type.execute.return_value = 0, "", ""
- mock_ssh.from_node.return_value = mock_ssh_type
-
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
init_value = ixload_traffic_gen.resource_helper.result
-
ixload_traffic_gen.resource_helper.parse_csv_read(http_reader)
- self.assertDictEqual(ixload_traffic_gen.resource_helper.result, init_value)
+ self.assertDictEqual(ixload_traffic_gen.resource_helper.result,
+ init_value)
- @mock.patch.object(ssh, 'SSH')
- def test_parse_csv_read_error(self, mock_ssh, *args):
+    def test_parse_csv_read_error(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
http_reader = [{
'HTTP Total Throughput (Kbps)': 1,
@@ -283,12 +265,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
'HTTP Concurrent Connections': 3,
'HTTP Transaction Rate': 5,
}]
-
- mock_ssh_type = mock.Mock(autospec=mock_ssh.SSH)
- mock_ssh_type.execute.return_value = 0, "", ""
- mock_ssh.from_node.return_value = mock_ssh_type
-
- ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen = tg_ixload.IxLoadTrafficGen(NAME, vnfd, 'task_id')
with self.assertRaises(KeyError):
ixload_traffic_gen.resource_helper.parse_csv_read(http_reader)
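
The rewritten setUp()/_stop_mock() pair above starts the subprocess, open and SSH patches once per test and unwinds them through addCleanup(), so the individual tests no longer need @mock.patch.object(ssh, 'SSH') decorators. A self-contained sketch of that lifecycle, using a placeholder helper class rather than the real yardstick.ssh.SSH:

import unittest

import mock


class FakeSsh(object):
    # Placeholder for an SSH helper with a from_node() factory.
    @staticmethod
    def from_node(node):
        raise RuntimeError('would open a real connection')


class ExampleTestCase(unittest.TestCase):
    def setUp(self):
        self._mock_ssh = mock.patch.object(FakeSsh, 'from_node')
        self.mock_ssh = self._mock_ssh.start()
        self.mock_ssh.return_value.execute.return_value = (0, '', '')
        # stop() runs after every test, whether it passes or fails.
        self.addCleanup(self._mock_ssh.stop)

    def test_execute(self):
        conn = FakeSsh.from_node({'ip': '10.0.0.1'})
        self.assertEqual((0, '', ''), conn.execute('ls'))


if __name__ == '__main__':
    unittest.main()
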
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
index d774bb9f7..434a7b770 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
@@ -20,21 +20,15 @@ import mock
import unittest
from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
-from yardstick.tests import STL_MOCKS
from yardstick.benchmark.contexts import base as ctx_base
+from yardstick.network_services.vnf_generic.vnf.tg_ping import PingParser
+from yardstick.network_services.vnf_generic.vnf.tg_ping import PingTrafficGen
+from yardstick.network_services.vnf_generic.vnf.tg_ping import PingResourceHelper
+from yardstick.network_services.vnf_generic.vnf.tg_ping import PingSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
-SSH_HELPER = "yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper"
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.tg_ping import PingParser
- from yardstick.network_services.vnf_generic.vnf.tg_ping import PingTrafficGen
- from yardstick.network_services.vnf_generic.vnf.tg_ping import PingResourceHelper
- from yardstick.network_services.vnf_generic.vnf.tg_ping import PingSetupEnvHelper
- from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+SSH_HELPER = "yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper"
class TestPingResourceHelper(unittest.TestCase):
@@ -232,7 +226,7 @@ class TestPingTrafficGen(unittest.TestCase):
@mock.patch("yardstick.ssh.SSH")
def test___init__(self, ssh):
ssh.from_node.return_value.execute.return_value = 0, "success", ""
- ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id')
self.assertIsInstance(ping_traffic_gen.setup_helper, PingSetupEnvHelper)
self.assertIsInstance(ping_traffic_gen.resource_helper, PingResourceHelper)
@@ -249,7 +243,7 @@ class TestPingTrafficGen(unittest.TestCase):
(0, 'if_name_2', ''),
]
ssh.from_node.return_value.execute.side_effect = iter(execute_result_data)
- ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id')
ext_ifs = ping_traffic_gen.vnfd_helper.interfaces
self.assertNotEqual(ext_ifs[0]['virtual-interface']['local_iface_name'], 'if_name_1')
self.assertNotEqual(ext_ifs[1]['virtual-interface']['local_iface_name'], 'if_name_2')
@@ -259,7 +253,7 @@ class TestPingTrafficGen(unittest.TestCase):
def test_collect_kpi(self, ssh, *args):
mock_ssh(ssh, exec_result=(0, "success", ""))
- ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id')
ping_traffic_gen.scenario_helper.scenario_cfg = {
'nodes': {ping_traffic_gen.name: "mock"}
}
@@ -277,7 +271,7 @@ class TestPingTrafficGen(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test_instantiate(self, ssh):
mock_ssh(ssh, spec=VnfSshHelper, exec_result=(0, "success", ""))
- ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id')
ping_traffic_gen.setup_helper.ssh_helper = mock.MagicMock(
**{"execute.return_value": (0, "xe0_fake", "")})
self.assertIsInstance(ping_traffic_gen.ssh_helper, mock.Mock)
@@ -292,7 +286,7 @@ class TestPingTrafficGen(unittest.TestCase):
self.assertIsNotNone(ping_traffic_gen._result)
def test_listen_traffic(self):
- ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id')
self.assertIsNone(ping_traffic_gen.listen_traffic({}))
@mock.patch("yardstick.ssh.SSH")
@@ -300,5 +294,5 @@ class TestPingTrafficGen(unittest.TestCase):
ssh.from_node.return_value.execute.return_value = 0, "success", ""
ssh.from_node.return_value.run.return_value = 0, "success", ""
- ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0, 'task_id')
self.assertIsNone(ping_traffic_gen.terminate())
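
SSH_HELPER, kept above as a plain module constant, is just the dotted path handed to mock.patch: the helper is replaced where the code under test looks it up (the sample_vnf module), not where it is defined. A tiny, generic illustration of string-target patching, using os.path.exists only as a stand-in target:

import os.path

import mock


@mock.patch('os.path.exists', return_value=True)
def check(mock_exists):
    # The patch is active for the duration of the call and the mock is
    # injected as the first argument.
    assert os.path.exists('/no/such/file') is True
    mock_exists.assert_called_once_with('/no/such/file')


check()
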
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
index 050aa4aa0..5ad182f22 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
@@ -17,21 +17,14 @@ import unittest
import mock
from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
-from yardstick.tests import STL_MOCKS
from yardstick.benchmark.contexts import base as ctx_base
+from yardstick.network_services.vnf_generic.vnf.tg_prox import ProxTrafficGen
+from yardstick.network_services.traffic_profile.base import TrafficProfile
SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
NAME = 'vnf__1'
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.tg_prox import ProxTrafficGen
- from yardstick.network_services.traffic_profile.base import TrafficProfile
-
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
class TestProxTrafficGen(unittest.TestCase):
@@ -321,7 +314,7 @@ class TestProxTrafficGen(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test___init__(self, ssh, *args):
mock_ssh(ssh)
- prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0)
+ prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id')
self.assertIsNone(prox_traffic_gen._tg_process)
self.assertIsNone(prox_traffic_gen._traffic_process)
@@ -329,7 +322,7 @@ class TestProxTrafficGen(unittest.TestCase):
@mock.patch(SSH_HELPER)
def test_collect_kpi(self, ssh, *args):
mock_ssh(ssh)
- prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0)
+ prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id')
prox_traffic_gen.scenario_helper.scenario_cfg = {
'nodes': {prox_traffic_gen.name: "mock"}
}
@@ -357,7 +350,7 @@ class TestProxTrafficGen(unittest.TestCase):
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- prox_traffic_gen = ProxTrafficGen(NAME, vnfd)
+ prox_traffic_gen = ProxTrafficGen(NAME, vnfd, 'task_id')
ssh_helper = mock.MagicMock(
**{"execute.return_value": (0, "", ""), "bin_path": ""})
prox_traffic_gen.ssh_helper = ssh_helper
@@ -399,21 +392,22 @@ class TestProxTrafficGen(unittest.TestCase):
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sut = ProxTrafficGen(NAME, vnfd)
+ sut = ProxTrafficGen(NAME, vnfd, 'task_id')
sut._get_socket = mock.MagicMock()
sut.ssh_helper = mock.Mock()
sut.ssh_helper.run = mock.Mock()
sut.setup_helper.prox_config_dict = {}
- sut._connect_client = mock.Mock(autospec=STLClient)
+ sut._connect_client = mock.Mock(autospec=mock.Mock())
sut._connect_client.get_stats = mock.Mock(return_value="0")
- sut._traffic_runner(mock_traffic_profile)
+ sut._setup_mq_producer = mock.Mock(return_value='mq_producer')
+ sut._traffic_runner(mock_traffic_profile, mock.ANY)
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
@mock.patch(SSH_HELPER)
def test_listen_traffic(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- prox_traffic_gen = ProxTrafficGen(NAME, vnfd)
+ prox_traffic_gen = ProxTrafficGen(NAME, vnfd, 'task_id')
self.assertIsNone(prox_traffic_gen.listen_traffic(mock.Mock()))
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
@@ -421,7 +415,7 @@ class TestProxTrafficGen(unittest.TestCase):
def test_terminate(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- prox_traffic_gen = ProxTrafficGen(NAME, vnfd)
+ prox_traffic_gen = ProxTrafficGen(NAME, vnfd, 'task_id')
prox_traffic_gen._terminated = mock.MagicMock()
prox_traffic_gen._traffic_process = mock.MagicMock()
prox_traffic_gen._traffic_process.terminate = mock.Mock()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index 42ac40b50..ddb63242e 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -18,10 +18,11 @@ import mock
import six
import unittest
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
from yardstick.network_services.traffic_profile import base as tp_base
from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_ixia
-from yardstick.benchmark.contexts import base as ctx_base
TEST_FILE_YAML = 'nsb_test_case.yaml'
@@ -162,7 +163,8 @@ class TestIXIATrafficGen(unittest.TestCase):
'nodes': {'tg__1': 'trafficgen_1.yardstick',
'vnf__1': 'vnf.yardstick'},
'topology': 'vpe_vnf_topology.yaml'}],
- 'context': {'nfvi_type': 'baremetal', 'type': 'Node',
+ 'context': {'nfvi_type': 'baremetal',
+ 'type': contexts.CONTEXT_NODE,
'name': 'yardstick',
'file': '/etc/yardstick/nodes/pod.yaml'},
'schema': 'yardstick:task:0.1'}
@@ -175,7 +177,7 @@ class TestIXIATrafficGen(unittest.TestCase):
ssh.from_node.return_value = ssh_mock
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
# NOTE(ralonsoh): check the object returned.
- tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd, 'task_id')
def test_listen_traffic(self, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -184,7 +186,8 @@ class TestIXIATrafficGen(unittest.TestCase):
mock.Mock(return_value=(0, "", ""))
ssh.from_node.return_value = ssh_mock
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd,
+ 'task_id')
self.assertIsNone(ixnet_traffic_gen.listen_traffic({}))
@mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context')
@@ -197,7 +200,8 @@ class TestIXIATrafficGen(unittest.TestCase):
mock.Mock(return_value=(0, "", ""))
ssh.from_node.return_value = ssh_mock
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd,
+ 'task_id')
scenario_cfg = {'tc': "nsb_test_case", "topology": "",
'ixia_profile': "ixload.cfg"}
scenario_cfg.update(
@@ -234,7 +238,8 @@ class TestIXIATrafficGen(unittest.TestCase):
ssh.from_node.return_value = ssh_mock
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd,
+ 'task_id')
ixnet_traffic_gen.scenario_helper.scenario_cfg = {
'nodes': {ixnet_traffic_gen.name: "mock"}
}
@@ -253,7 +258,8 @@ class TestIXIATrafficGen(unittest.TestCase):
ssh_mock.execute = \
mock.Mock(return_value=(0, "", ""))
ssh.from_node.return_value = ssh_mock
- ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd,
+ 'task_id')
ixnet_traffic_gen._terminated = mock.MagicMock()
ixnet_traffic_gen._terminated.value = 0
ixnet_traffic_gen._ixia_traffic_gen = mock.MagicMock()
@@ -269,7 +275,7 @@ class TestIXIATrafficGen(unittest.TestCase):
def test__check_status(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd)
+ sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd, 'task_id')
sut._check_status()
@mock.patch("yardstick.ssh.SSH")
@@ -335,7 +341,7 @@ class TestIXIATrafficGen(unittest.TestCase):
mock_traffic_profile.get_drop_percentage.return_value = [
'Completed', samples]
- sut = tg_rfc2544_ixia.IxiaTrafficGen(name, vnfd)
+ sut = tg_rfc2544_ixia.IxiaTrafficGen(name, vnfd, 'task_id')
sut.vnf_port_pairs = [[[0], [1]]]
sut.tc_file_name = self._get_file_abspath(TEST_FILE_YAML)
sut.topology = ""
@@ -379,7 +385,8 @@ class TestIXIATrafficGen(unittest.TestCase):
mock.mock_open(), create=True)
@mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.LOG.exception')
def _traffic_runner(*args):
- result = sut._traffic_runner(mock_traffic_profile)
+ sut._setup_mq_producer = mock.Mock(return_value='mq_producer')
+ result = sut._traffic_runner(mock_traffic_profile, mock.ANY)
self.assertIsNone(result)
_traffic_runner()
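
The updated _traffic_runner tests above pass mock.ANY as the second argument (the MQ producer handle) because its value is irrelevant to what the test asserts. mock.ANY is a sentinel that compares equal to any object, so it serves both as a wildcard in call assertions and as a "don't care" argument:

import mock

assert mock.ANY == object()
assert 'some-uuid' == mock.ANY

runner = mock.Mock()
runner('profile', mock.ANY)    # placeholder for an argument we ignore
runner.assert_called_once_with('profile', mock.ANY)
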
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 4d3e4ff0b..6aba41006 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -15,10 +15,11 @@
import mock
import unittest
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.traffic_profile import base as tp_base
from yardstick.network_services.vnf_generic.vnf import sample_vnf
from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_trex
-from yardstick.benchmark.contexts import base as ctx_base
class TestTrexRfcResouceHelper(unittest.TestCase):
@@ -206,7 +207,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
],
'context': {
'nfvi_type': 'baremetal',
- 'type': 'Node',
+ 'type': contexts.CONTEXT_NODE,
'name': 'yardstick',
'file': '/etc/yardstick/nodes/pod.yaml',
},
@@ -222,12 +223,14 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
self._mock_ssh_helper.stop()
def test___init__(self):
- trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC(
+ 'vnf1', self.VNFD_0, 'task_id')
self.assertIsNotNone(trex_traffic_gen.resource_helper._terminated.value)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
def test_collect_kpi(self, *args):
- trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC(
+ 'vnf1', self.VNFD_0, 'task_id')
trex_traffic_gen.scenario_helper.scenario_cfg = {
'nodes': {trex_traffic_gen.name: "mock"}
}
@@ -243,7 +246,8 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
- trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC(
+ 'vnf1', self.VNFD_0, 'task_id')
trex_traffic_gen._start_server = mock.Mock(return_value=0)
trex_traffic_gen.resource_helper = mock.MagicMock()
trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
@@ -278,7 +282,8 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
- trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC(
+ 'vnf1', self.VNFD_0, 'task_id')
trex_traffic_gen.resource_helper = mock.MagicMock()
trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
scenario_cfg = {
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
index 350ba8448..9ed2abbb9 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
@@ -300,14 +300,14 @@ class TestTrexTrafficGen(unittest.TestCase):
def test___init__(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
self.assertIsInstance(trex_traffic_gen.resource_helper,
tg_trex.TrexResourceHelper)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
def test_collect_kpi(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen.scenario_helper.scenario_cfg = {
'nodes': {trex_traffic_gen.name: "mock"}
}
@@ -321,13 +321,13 @@ class TestTrexTrafficGen(unittest.TestCase):
def test_listen_traffic(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
self.assertIsNone(trex_traffic_gen.listen_traffic({}))
@mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context')
def test_instantiate(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen._start_server = mock.Mock(return_value=0)
trex_traffic_gen._tg_process = mock.MagicMock()
trex_traffic_gen._tg_process.start = mock.Mock()
@@ -342,7 +342,7 @@ class TestTrexTrafficGen(unittest.TestCase):
@mock.patch.object(ctx_base.Context, 'get_context_from_server', return_value='fake_context')
def test_instantiate_error(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen._start_server = mock.Mock(return_value=0)
trex_traffic_gen._tg_process = mock.MagicMock()
trex_traffic_gen._tg_process.start = mock.Mock()
@@ -355,7 +355,7 @@ class TestTrexTrafficGen(unittest.TestCase):
def test__start_server(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.scenario_helper.scenario_cfg = {}
@@ -363,7 +363,7 @@ class TestTrexTrafficGen(unittest.TestCase):
def test__start_server_multiple_queues(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.scenario_helper.scenario_cfg = {
@@ -377,7 +377,7 @@ class TestTrexTrafficGen(unittest.TestCase):
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- self.sut = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.sut = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
self.sut.ssh_helper = mock.Mock()
self.sut.ssh_helper.run = mock.Mock()
self.sut._connect_client = mock.Mock()
@@ -387,12 +387,13 @@ class TestTrexTrafficGen(unittest.TestCase):
# must generate cfg before we can run traffic so Trex port mapping is
# created
self.sut.resource_helper.generate_cfg()
+ self.sut._setup_mq_producer = mock.Mock()
with mock.patch.object(self.sut.resource_helper, 'run_traffic'):
- self.sut._traffic_runner(mock_traffic_profile)
+ self.sut._traffic_runner(mock_traffic_profile, mock.ANY)
def test__generate_trex_cfg(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
self.assertIsNone(trex_traffic_gen.resource_helper.generate_cfg())
@@ -431,7 +432,7 @@ class TestTrexTrafficGen(unittest.TestCase):
'local_mac': '00:00:00:00:00:01'},
'vnfd-connection-point-ref': 'xe1',
'name': 'xe1'}]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.generate_cfg()
trex_traffic_gen.resource_helper._build_ports()
@@ -448,25 +449,24 @@ class TestTrexTrafficGen(unittest.TestCase):
mock_traffic_profile.params = self.TRAFFIC_PROFILE
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- self.sut = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.sut = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
self.sut.ssh_helper = mock.Mock()
self.sut.ssh_helper.run = mock.Mock()
self.sut._traffic_runner = mock.Mock(return_value=0)
self.sut.resource_helper.client_started.value = 1
- result = self.sut.run_traffic(mock_traffic_profile)
+ self.sut.run_traffic(mock_traffic_profile)
self.sut._traffic_process.terminate()
- self.assertIsNotNone(result)
def test_terminate(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
trex_traffic_gen.ssh_helper = mock.MagicMock()
trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
self.assertIsNone(trex_traffic_gen.terminate())
def test__connect_client(self):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd, 'task_id')
client = mock.Mock()
client.connect = mock.Mock(return_value=0)
self.assertIsNotNone(trex_traffic_gen.resource_helper._connect(client))
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
index 1c4ced303..56c971da6 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
@@ -11,31 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import unittest
import mock
import os
-from yardstick.tests import STL_MOCKS
from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
from yardstick.benchmark.contexts import base as ctx_base
+from yardstick.network_services.vnf_generic.vnf.udp_replay import UdpReplayApproxVnf
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.udp_replay import UdpReplayApproxVnf
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
-
-
TEST_FILE_YAML = 'nsb_test_case.yaml'
-
NAME = "vnf__1"
@@ -327,7 +317,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
}
def test___init__(self, *args):
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
self.assertIsNone(udp_replay_approx_vnf._vnf_process)
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
@@ -342,7 +333,7 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
"Port\t\tRx Packet\t\tTx Packet\t\tRx Pkt Drop\t\tTx Pkt Drop \r\n"\
"0\t\t7374156\t\t7374136\t\t\t0\t\t\t0\r\n" \
"1\t\t7374316\t\t7374315\t\t\t0\t\t\t0\r\n\r\nReplay>\r\r\nReplay>"
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, vnfd)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, vnfd, 'task_id')
udp_replay_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {udp_replay_approx_vnf.name: "mock"}
}
@@ -364,7 +355,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
def test_get_stats(self, ssh, *args):
mock_ssh(ssh)
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
udp_replay_approx_vnf.q_in = mock.MagicMock()
udp_replay_approx_vnf.q_out = mock.MagicMock()
udp_replay_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -390,7 +382,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
nfvi_context.attrs = {'nfvi_type': 'baremetal'}
mock_get_ctx.return_value = nfvi_context
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
udp_replay_approx_vnf.queue_wrapper = mock.MagicMock()
udp_replay_approx_vnf.nfvi_context = mock_get_ctx
udp_replay_approx_vnf.nfvi_context.attrs = {'nfvi_type': 'baremetal'}
@@ -415,7 +408,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
nfvi_context.attrs = {'nfvi_type': "baremetal"}
mock_get_ctx.return_value = nfvi_context
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
udp_replay_approx_vnf.setup_helper.bound_pci = ['0000:00:0.1', '0000:00:0.3']
udp_replay_approx_vnf.all_ports = ["xe0", "xe1"]
udp_replay_approx_vnf.ssh_helper.provision_tool = mock.MagicMock(return_value="tool_path")
@@ -437,7 +431,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
def test_run_udp_replay(self, ssh, *args):
mock_ssh(ssh)
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
udp_replay_approx_vnf._build_config = mock.MagicMock()
udp_replay_approx_vnf.queue_wrapper = mock.MagicMock()
udp_replay_approx_vnf.scenario_helper = mock.MagicMock()
@@ -451,7 +446,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
def test_instantiate(self, ssh, *args):
mock_ssh(ssh)
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
udp_replay_approx_vnf.q_out.put("Replay>")
udp_replay_approx_vnf.WAIT_TIME = 0
udp_replay_approx_vnf.setup_helper.setup_vnf_environment = mock.Mock()
@@ -469,7 +465,8 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
@mock.patch('yardstick.ssh.SSH')
@mock.patch(SSH_HELPER)
def test_instantiate_panic(self, *args):
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0,
+ 'task_id')
udp_replay_approx_vnf.WAIT_TIME = 0
udp_replay_approx_vnf.q_out.put("some text PANIC some text")
udp_replay_approx_vnf.setup_helper.setup_vnf_environment = mock.Mock()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
index b67a3cdee..efbb7a856 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
@@ -11,26 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
import unittest
import mock
import os
-from yardstick.tests import STL_MOCKS
-from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
-
from yardstick.common import utils
from yardstick.benchmark.contexts import base as ctx_base
-
-STLClient = mock.MagicMock()
-stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
-stl_patch.start()
-
-if stl_patch:
- from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxVnf
- from yardstick.network_services.nfvi.resource import ResourceProfile
- from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxVnf
+from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxSetupEnvHelper
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
TEST_FILE_YAML = 'nsb_test_case.yaml'
@@ -241,7 +232,7 @@ class TestFWApproxVnf(unittest.TestCase):
def test___init__(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id')
self.assertIsNone(vfw_approx_vnf._vnf_process)
STATS = """\
@@ -264,7 +255,7 @@ pipeline>
def test_collect_kpi(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id')
vfw_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {vfw_approx_vnf.name: "mock"}
}
@@ -290,7 +281,7 @@ pipeline>
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id')
vfw_approx_vnf.q_in = mock.MagicMock()
vfw_approx_vnf.q_out = mock.MagicMock()
vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -302,7 +293,7 @@ pipeline>
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id')
vfw_approx_vnf.q_in = mock.MagicMock()
vfw_approx_vnf.q_out = mock.MagicMock()
vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -322,7 +313,7 @@ pipeline>
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id')
vfw_approx_vnf._build_config = mock.MagicMock()
vfw_approx_vnf.queue_wrapper = mock.MagicMock()
vfw_approx_vnf.ssh_helper = mock.MagicMock()
@@ -344,7 +335,7 @@ pipeline>
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf = FWApproxVnf(name, vnfd, 'task_id')
vfw_approx_vnf.ssh_helper = ssh
vfw_approx_vnf.deploy_helper = mock.MagicMock()
vfw_approx_vnf.resource_helper = mock.MagicMock()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index c1664f2f0..7b937dfb5 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -549,7 +549,7 @@ class TestVpeApproxVnf(unittest.TestCase):
self._mock_time_sleep.stop()
def test___init__(self):
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
self.assertIsNone(vpe_approx_vnf._vnf_process)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server',
@@ -563,7 +563,7 @@ class TestVpeApproxVnf(unittest.TestCase):
resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234}
resource.check_if_system_agent_running.return_value = (1, None)
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {vpe_approx_vnf.name: "mock"}
}
@@ -592,7 +592,7 @@ class TestVpeApproxVnf(unittest.TestCase):
resource.check_if_system_agent_running.return_value = 0, '1234'
resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234}
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf.scenario_helper.scenario_cfg = {
'nodes': {vpe_approx_vnf.name: "mock"}
}
@@ -614,7 +614,7 @@ class TestVpeApproxVnf(unittest.TestCase):
@mock.patch.object(sample_vnf, 'VnfSshHelper')
def test_vnf_execute(self, ssh):
test_base.mock_ssh(ssh)
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf.q_in = mock.MagicMock()
vpe_approx_vnf.q_out = mock.MagicMock()
vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
@@ -624,7 +624,7 @@ class TestVpeApproxVnf(unittest.TestCase):
def test_run_vpe(self, ssh):
test_base.mock_ssh(ssh)
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML)
vpe_approx_vnf.vnf_cfg = {
'lb_config': 'SW',
@@ -707,7 +707,7 @@ class TestVpeApproxVnf(unittest.TestCase):
mock_resource = mock.MagicMock()
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf._vnf_process = mock_process
vpe_approx_vnf.q_out = mock_q_out
vpe_approx_vnf.queue_wrapper = mock.Mock(
@@ -732,7 +732,7 @@ class TestVpeApproxVnf(unittest.TestCase):
mock_resource = mock.MagicMock()
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf._vnf_process = mock_process
vpe_approx_vnf.q_out = mock_q_out
vpe_approx_vnf.queue_wrapper = mock.Mock(
@@ -751,7 +751,7 @@ class TestVpeApproxVnf(unittest.TestCase):
mock_resource = mock.MagicMock()
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf._vnf_process = mock_process
vpe_approx_vnf.resource_helper.resource = mock_resource
@@ -770,7 +770,7 @@ class TestVpeApproxVnf(unittest.TestCase):
mock_resource = mock.MagicMock()
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf._vnf_process = mock_process
vpe_approx_vnf.resource_helper.resource = mock_resource
@@ -795,7 +795,7 @@ class TestVpeApproxVnf(unittest.TestCase):
mock_resource = mock.MagicMock()
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf._vnf_process = mock_process
vpe_approx_vnf.q_out = mock_q_out
vpe_approx_vnf.resource_helper.resource = mock_resource
@@ -809,7 +809,7 @@ class TestVpeApproxVnf(unittest.TestCase):
def test_terminate(self, ssh):
test_base.mock_ssh(ssh)
- vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf = vpe_vnf.VpeApproxVnf(NAME, self.VNFD_0, 'task_id')
vpe_approx_vnf._vnf_process = mock.MagicMock()
vpe_approx_vnf._resource_collect_stop = mock.Mock()
vpe_approx_vnf.resource_helper = mock.MagicMock()
diff --git a/yardstick/tests/unit/orchestrator/test_kubernetes.py b/yardstick/tests/unit/orchestrator/test_kubernetes.py
index 4323c026a..a73a4a132 100644
--- a/yardstick/tests/unit/orchestrator/test_kubernetes.py
+++ b/yardstick/tests/unit/orchestrator/test_kubernetes.py
@@ -66,7 +66,11 @@ service ssh restart;while true ; do sleep 10000; done"
],
"nodeSelector": {
"kubernetes.io/hostname": "node-01"
- }
+ },
+ "restartPolicy": "Always",
+ "tolerations": [
+ {"operator": "Exists"}
+ ]
}
}
}
@@ -77,12 +81,21 @@ service ssh restart;while true ; do sleep 10000; done"
service ssh restart;while true ; do sleep 10000; done'],
'ssh_key': 'k8s-86096c30-key',
'nodeSelector': {'kubernetes.io/hostname': 'node-01'},
- 'volumes': []
+ 'volumes': [],
+ 'restartPolicy': 'Always'
}
name = 'host-k8s-86096c30'
- output_r = kubernetes.KubernetesObject(name, **input_s).get_template()
+ output_r = kubernetes.ReplicationControllerObject(
+ name, **input_s).get_template()
self.assertEqual(output_r, output_t)
+ def test_get_template_invalid_restart_policy(self):
+ input_s = {'restartPolicy': 'invalid_option'}
+ name = 'host-k8s-86096c30'
+ with self.assertRaises(exceptions.KubernetesWrongRestartPolicy):
+ kubernetes.ReplicationControllerObject(
+ name, **input_s).get_template()
+
class GetRcPodsTestCase(base.BaseUnitTestCase):
@@ -108,14 +121,14 @@ service ssh restart;while true ; do sleep 10000; done']
self.assertEqual(pods, [])
-class KubernetesObjectTestCase(base.BaseUnitTestCase):
+class ReplicationControllerObjectTestCase(base.BaseUnitTestCase):
def test__init_one_container(self):
pod_name = 'pod_name'
_kwargs = {'args': ['arg1', 'arg2'],
'image': 'fake_image',
'command': 'fake_command'}
- k8s_obj = kubernetes.KubernetesObject(pod_name, **_kwargs)
+ k8s_obj = kubernetes.ReplicationControllerObject(pod_name, **_kwargs)
self.assertEqual(1, len(k8s_obj._containers))
container = k8s_obj._containers[0]
self.assertEqual(['arg1', 'arg2'], container._args)
@@ -131,7 +144,7 @@ class KubernetesObjectTestCase(base.BaseUnitTestCase):
'image': 'fake_image_%s' % i,
'command': 'fake_command_%s' % i})
_kwargs = {'containers': containers}
- k8s_obj = kubernetes.KubernetesObject(pod_name, **_kwargs)
+ k8s_obj = kubernetes.ReplicationControllerObject(pod_name, **_kwargs)
self.assertEqual(5, len(k8s_obj._containers))
for i in range(5):
container = k8s_obj._containers[i]
@@ -145,8 +158,8 @@ class KubernetesObjectTestCase(base.BaseUnitTestCase):
'configMap': {'name': 'fake_sshkey'}}
volume2 = {'name': 'volume2',
'configMap': 'data'}
- k8s_obj = kubernetes.KubernetesObject('name', ssh_key='fake_sshkey',
- volumes=[volume2])
+ k8s_obj = kubernetes.ReplicationControllerObject(
+ 'name', ssh_key='fake_sshkey', volumes=[volume2])
k8s_obj._add_volumes()
volumes = k8s_obj.template['spec']['template']['spec']['volumes']
self.assertEqual(sorted([volume1, volume2], key=lambda k: k['name']),
@@ -155,7 +168,8 @@ class KubernetesObjectTestCase(base.BaseUnitTestCase):
def test__add_volumes_no_volumes(self):
volume1 = {'name': 'fake_sshkey',
'configMap': {'name': 'fake_sshkey'}}
- k8s_obj = kubernetes.KubernetesObject('name', ssh_key='fake_sshkey')
+ k8s_obj = kubernetes.ReplicationControllerObject(
+ 'name', ssh_key='fake_sshkey')
k8s_obj._add_volumes()
volumes = k8s_obj.template['spec']['template']['spec']['volumes']
self.assertEqual([volume1], volumes)
@@ -163,7 +177,8 @@ class KubernetesObjectTestCase(base.BaseUnitTestCase):
def test__create_ssh_key_volume(self):
expected = {'name': 'fake_sshkey',
'configMap': {'name': 'fake_sshkey'}}
- k8s_obj = kubernetes.KubernetesObject('name', ssh_key='fake_sshkey')
+ k8s_obj = kubernetes.ReplicationControllerObject(
+ 'name', ssh_key='fake_sshkey')
self.assertEqual(expected, k8s_obj._create_ssh_key_volume())
def test__create_volume_item(self):
@@ -172,13 +187,76 @@ class KubernetesObjectTestCase(base.BaseUnitTestCase):
vol_type: 'data'}
self.assertEqual(
volume,
- kubernetes.KubernetesObject._create_volume_item(volume))
+ kubernetes.ReplicationControllerObject.
+ _create_volume_item(volume))
def test__create_volume_item_invalid_type(self):
volume = {'name': 'vol_name',
'invalid_type': 'data'}
with self.assertRaises(exceptions.KubernetesTemplateInvalidVolumeType):
- kubernetes.KubernetesObject._create_volume_item(volume)
+ kubernetes.ReplicationControllerObject._create_volume_item(volume)
+
+ def test__add_security_context(self):
+ k8s_obj = kubernetes.ReplicationControllerObject('pod_name')
+ self.assertNotIn('securityContext',
+ k8s_obj.template['spec']['template']['spec'])
+
+ k8s_obj._security_context = {'key_pod': 'value_pod'}
+ k8s_obj._add_security_context()
+ self.assertEqual(
+ {'key_pod': 'value_pod'},
+ k8s_obj.template['spec']['template']['spec']['securityContext'])
+
+ def test__add_security_context_by_init(self):
+ containers = []
+ for i in range(5):
+ containers.append(
+ {'securityContext': {'key%s' % i: 'value%s' % i}})
+ _kwargs = {'containers': containers,
+ 'securityContext': {'key_pod': 'value_pod'}}
+ k8s_obj = kubernetes.ReplicationControllerObject('pod_name', **_kwargs)
+ self.assertEqual(
+ {'key_pod': 'value_pod'},
+ k8s_obj.template['spec']['template']['spec']['securityContext'])
+ for i in range(5):
+ container = (
+ k8s_obj.template['spec']['template']['spec']['containers'][i])
+ self.assertEqual({'key%s' % i: 'value%s' % i},
+ container['securityContext'])
+
+ def test__add_networks(self):
+ k8s_obj = kubernetes.ReplicationControllerObject(
+ 'name', networks=['network1', 'network2', 'network3'])
+ k8s_obj._add_networks()
+ networks = k8s_obj.\
+ template['spec']['template']['metadata']['annotations']['networks']
+ expected = ('[{"name": "network1"}, {"name": "network2"}, '
+ '{"name": "network3"}]')
+ self.assertEqual(expected, networks)
+
+ def test__add_tolerations(self):
+ _kwargs = {'tolerations': [{'key': 'key1',
+ 'value': 'value2',
+ 'effect': 'effect3',
+ 'operator': 'operator4',
+ 'wrong_key': 'error_key'}]
+ }
+ k8s_obj = kubernetes.ReplicationControllerObject('pod_name', **_kwargs)
+ k8s_obj._add_tolerations()
+ _tol = k8s_obj.template['spec']['template']['spec']['tolerations']
+ self.assertEqual(1, len(_tol))
+ self.assertEqual({'key': 'key1',
+ 'value': 'value2',
+ 'effect': 'effect3',
+ 'operator': 'operator4'},
+ _tol[0])
+
+ def test__add_tolerations_default(self):
+ k8s_obj = kubernetes.ReplicationControllerObject('pod_name')
+ k8s_obj._add_tolerations()
+ _tol = k8s_obj.template['spec']['template']['spec']['tolerations']
+ self.assertEqual(1, len(_tol))
+ self.assertEqual({'operator': 'Exists'}, _tol[0])
class ContainerObjectTestCase(base.BaseUnitTestCase):
@@ -222,8 +300,342 @@ class ContainerObjectTestCase(base.BaseUnitTestCase):
'cname', ssh_key='fake_sshkey', volumeMount=[volume_mount],
args=args)
expected = {'args': args,
- 'command': [kubernetes.ContainerObject.COMMAND_DEFAULT],
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
'name': 'cname-container',
'volumeMounts': container_obj._create_volume_mounts()}
self.assertEqual(expected, container_obj.get_container_item())
+
+ def test_get_container_item_with_security_context(self):
+ volume_mount = {'name': 'fake_name',
+ 'mountPath': 'fake_path'}
+ args = ['arg1', 'arg2']
+ container_obj = kubernetes.ContainerObject(
+ 'cname', ssh_key='fake_sshkey', volumeMount=[volume_mount],
+ args=args, securityContext={'key': 'value'})
+ expected = {'args': args,
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
+ 'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
+ 'name': 'cname-container',
+ 'volumeMounts': container_obj._create_volume_mounts(),
+ 'securityContext': {'key': 'value'}}
+ self.assertEqual(expected, container_obj.get_container_item())
+
+ def test_get_container_item_with_env(self):
+ volume_mount = {'name': 'fake_name',
+ 'mountPath': 'fake_path'}
+ args = ['arg1', 'arg2']
+ container_obj = kubernetes.ContainerObject(
+ 'cname', ssh_key='fake_sshkey', volumeMount=[volume_mount],
+ args=args, env=[{'name': 'fake_var_name',
+ 'value': 'fake_var_value'}])
+ expected = {'args': args,
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
+ 'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
+ 'name': 'cname-container',
+ 'volumeMounts': container_obj._create_volume_mounts(),
+ 'env': [{'name': 'fake_var_name',
+ 'value': 'fake_var_value'}]}
+ self.assertEqual(expected, container_obj.get_container_item())
+
+ def test_get_container_item_with_ports_multi_parameter(self):
+ volume_mount = {'name': 'fake_name',
+ 'mountPath': 'fake_path'}
+ args = ['arg1', 'arg2']
+ container_obj = kubernetes.ContainerObject(
+ 'cname', ssh_key='fake_sshkey', volumeMount=[volume_mount],
+ args=args, ports=[{'containerPort': 'fake_port_name',
+ 'hostPort': 'fake_host_port',
+ 'name': 'fake_name',
+ 'protocol': 'fake_protocol',
+                                      'invalid_variable': 'fake_invalid_variable',
+ 'hostIP': 'fake_port_number'}])
+ expected = {'args': args,
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
+ 'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
+ 'name': 'cname-container',
+ 'volumeMounts': container_obj._create_volume_mounts(),
+ 'ports': [{'containerPort': 'fake_port_name',
+ 'hostPort': 'fake_host_port',
+ 'name': 'fake_name',
+ 'protocol': 'fake_protocol',
+ 'hostIP': 'fake_port_number'}]}
+ self.assertEqual(expected, container_obj.get_container_item())
+
+ def test_get_container_item_with_ports_no_container_port(self):
+ with self.assertRaises(exceptions.KubernetesContainerPortNotDefined):
+ volume_mount = {'name': 'fake_name',
+ 'mountPath': 'fake_path'}
+ args = ['arg1', 'arg2']
+ container_obj = kubernetes.ContainerObject(
+ 'cname', ssh_key='fake_sshkey', volumeMount=[volume_mount],
+ args=args, ports=[{'hostPort': 'fake_host_port',
+ 'name': 'fake_name',
+ 'protocol': 'fake_protocol',
+ 'hostIP': 'fake_port_number'}])
+ container_obj.get_container_item()
+
+ def test_get_container_item_with_resources(self):
+ volume_mount = {'name': 'fake_name',
+ 'mountPath': 'fake_path'}
+ args = ['arg1', 'arg2']
+ resources = {'requests': {'key1': 'val1'},
+ 'limits': {'key2': 'val2'},
+ 'other_key': {'key3': 'val3'}}
+ container_obj = kubernetes.ContainerObject(
+ 'cname', ssh_key='fake_sshkey', volumeMount=[volume_mount],
+ args=args, resources=resources)
+ expected = {'args': args,
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
+ 'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
+ 'name': 'cname-container',
+ 'volumeMounts': container_obj._create_volume_mounts(),
+ 'resources': {'requests': {'key1': 'val1'},
+ 'limits': {'key2': 'val2'}}}
+ self.assertEqual(expected, container_obj.get_container_item())
+
+ def test_get_container_item_image_pull_policy(self):
+ container_obj = kubernetes.ContainerObject(
+ 'cname', ssh_key='fake_sshkey', imagePullPolicy='Always')
+ expected = {'args': [],
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
+ 'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
+ 'name': 'cname-container',
+ 'volumeMounts': container_obj._create_volume_mounts(),
+                    'imagePullPolicy': 'Always'}
+ self.assertEqual(expected, container_obj.get_container_item())
+
+ def test_get_container_item_with_tty_stdin(self):
+ args = ['arg1', 'arg2']
+ container_obj = kubernetes.ContainerObject(
+ 'cname', 'fake_sshkey', args=args, tty=False, stdin=True)
+ expected = {'args': args,
+ 'command': kubernetes.ContainerObject.COMMAND_DEFAULT,
+ 'image': kubernetes.ContainerObject.IMAGE_DEFAULT,
+ 'name': 'cname-container',
+ 'volumeMounts': container_obj._create_volume_mounts(),
+ 'tty': False,
+ 'stdin': True}
+ self.assertEqual(expected, container_obj.get_container_item())
+
+ def test__parse_commands_string(self):
+ container_obj = kubernetes.ContainerObject('cname', 'fake_sshkey')
+ self.assertEqual(['fake command'],
+ container_obj._parse_commands('fake command'))
+
+ def test__parse_commands_list(self):
+ container_obj = kubernetes.ContainerObject('cname', 'fake_sshkey')
+ self.assertEqual(['cmd1', 'cmd2'],
+ container_obj._parse_commands(['cmd1', 'cmd2']))
+
+ def test__parse_commands_exception(self):
+ container_obj = kubernetes.ContainerObject('cname', 'fake_sshkey')
+ with self.assertRaises(exceptions.KubernetesContainerCommandType):
+ container_obj._parse_commands({})
+
+
+class CustomResourceDefinitionObjectTestCase(base.BaseUnitTestCase):
+
+ def test__init(self):
+ template = {
+ 'metadata': {
+ 'name': 'newcrds.ctx_name.com'
+ },
+ 'spec': {
+ 'group': 'ctx_name.com',
+ 'version': 'v2',
+ 'scope': 'scope',
+ 'names': {'plural': 'newcrds',
+ 'singular': 'newcrd',
+ 'kind': 'Newcrd'}
+ }
+ }
+ crd_obj = kubernetes.CustomResourceDefinitionObject(
+ 'ctx_name', name='newcrd', version='v2', scope='scope')
+ self.assertEqual('newcrds.ctx_name.com', crd_obj._name)
+ self.assertEqual(template, crd_obj._template)
+
+ def test__init_missing_parameter(self):
+ with self.assertRaises(exceptions.KubernetesCRDObjectDefinitionError):
+ kubernetes.CustomResourceDefinitionObject('ctx_name',
+ noname='name')
+
+
+class NetworkObjectTestCase(base.BaseUnitTestCase):
+
+ def setUp(self):
+ self.net_obj = kubernetes.NetworkObject(name='fake_name',
+ plugin='fake_plugin',
+ args='fake_args')
+
+ def test__init_missing_parameter(self):
+ with self.assertRaises(
+ exceptions.KubernetesNetworkObjectDefinitionError):
+ kubernetes.NetworkObject('network_name', plugin='plugin')
+ with self.assertRaises(
+ exceptions.KubernetesNetworkObjectDefinitionError):
+ kubernetes.NetworkObject('network_name', args='args')
+
+ @mock.patch.object(kubernetes_utils, 'get_custom_resource_definition')
+ def test_crd(self, mock_get_crd):
+ mock_crd = mock.Mock()
+ mock_get_crd.return_value = mock_crd
+ net_obj = copy.deepcopy(self.net_obj)
+ self.assertEqual(mock_crd, net_obj.crd)
+
+ def test_template(self):
+ net_obj = copy.deepcopy(self.net_obj)
+ expected = {'apiVersion': 'group.com/v2',
+ 'kind': kubernetes.NetworkObject.KIND,
+ 'metadata': {
+ 'name': 'fake_name'},
+ 'plugin': 'fake_plugin',
+ 'args': 'fake_args'}
+ crd = mock.Mock()
+ crd.spec.group = 'group.com'
+ crd.spec.version = 'v2'
+ net_obj._crd = crd
+ self.assertEqual(expected, net_obj.template)
+
+ def test_group(self):
+ net_obj = copy.deepcopy(self.net_obj)
+ net_obj._crd = mock.Mock()
+ net_obj._crd.spec.group = 'fake_group'
+ self.assertEqual('fake_group', net_obj.group)
+
+ def test_version(self):
+ net_obj = copy.deepcopy(self.net_obj)
+ net_obj._crd = mock.Mock()
+ net_obj._crd.spec.version = 'version_4'
+ self.assertEqual('version_4', net_obj.version)
+
+ def test_plural(self):
+ net_obj = copy.deepcopy(self.net_obj)
+ net_obj._crd = mock.Mock()
+ net_obj._crd.spec.names.plural = 'name_ending_in_s'
+ self.assertEqual('name_ending_in_s', net_obj.plural)
+
+ def test_scope(self):
+ net_obj = copy.deepcopy(self.net_obj)
+ net_obj._crd = mock.Mock()
+ net_obj._crd.spec.scope = 'Cluster'
+ self.assertEqual('Cluster', net_obj.scope)
+
+ @mock.patch.object(kubernetes_utils, 'create_network')
+ def test_create(self, mock_create_network):
+ net_obj = copy.deepcopy(self.net_obj)
+ net_obj._scope = 'scope'
+ net_obj._group = 'group'
+ net_obj._version = 'version'
+ net_obj._plural = 'plural'
+ net_obj._template = 'template'
+ net_obj.create()
+ mock_create_network.assert_called_once_with(
+ 'scope', 'group', 'version', 'plural', 'template')
+
+ @mock.patch.object(kubernetes_utils, 'delete_network')
+ def test_delete(self, mock_delete_network):
+ net_obj = copy.deepcopy(self.net_obj)
+ net_obj._scope = 'scope'
+ net_obj._group = 'group'
+ net_obj._version = 'version'
+ net_obj._plural = 'plural'
+ net_obj._name = 'name'
+ net_obj.delete()
+ mock_delete_network.assert_called_once_with(
+ 'scope', 'group', 'version', 'plural', 'name')
+
+
+class ServiceNodePortObjectTestCase(base.BaseUnitTestCase):
+
+ def test__init(self):
+ with mock.patch.object(kubernetes.ServiceNodePortObject, '_add_port') \
+ as mock_add_port:
+ kubernetes.ServiceNodePortObject(
+ 'fake_name', node_ports=[{'port': 80, 'name': 'web'}])
+
+ mock_add_port.assert_has_calls([mock.call(22, 'ssh', protocol='TCP'),
+ mock.call(80, 'web')])
+
+ @mock.patch.object(kubernetes.ServiceNodePortObject, '_add_port')
+ def test__init_missing_mandatory_parameters(self, *args):
+ with self.assertRaises(
+ exceptions.KubernetesServiceObjectDefinitionError):
+ kubernetes.ServiceNodePortObject(
+ 'fake_name', node_ports=[{'port': 80}])
+ with self.assertRaises(
+ exceptions.KubernetesServiceObjectDefinitionError):
+ kubernetes.ServiceNodePortObject(
+ 'fake_name', node_ports=[{'name': 'web'}])
+
+ @mock.patch.object(kubernetes.ServiceNodePortObject, '_add_port')
+ def test__init_missing_bad_name(self, *args):
+ with self.assertRaises(
+ exceptions.KubernetesServiceObjectNameError):
+ kubernetes.ServiceNodePortObject(
+ 'fake_name', node_ports=[{'port': 80, 'name': '-web'}])
+ with self.assertRaises(
+ exceptions.KubernetesServiceObjectNameError):
+ kubernetes.ServiceNodePortObject(
+ 'fake_name', node_ports=[{'port': 80, 'name': 'Web'}])
+ with self.assertRaises(
+ exceptions.KubernetesServiceObjectNameError):
+ kubernetes.ServiceNodePortObject(
+ 'fake_name', node_ports=[{'port': 80, 'name': 'web-'}])
+
+ def test__add_port(self):
+ nodeport_object = kubernetes.ServiceNodePortObject('fake_name')
+ port_ssh = {'name': 'ssh',
+ 'port': 22,
+ 'protocol': 'TCP'}
+ port_definition = {'port': 80,
+ 'protocol': 'TCP',
+ 'name': 'web',
+ 'targetPort': 10080,
+ 'nodePort': 30080}
+ port = copy.deepcopy(port_definition)
+ _port = port.pop('port')
+ name = port.pop('name')
+ nodeport_object._add_port(_port, name, **port)
+ self.assertEqual([port_ssh, port_definition],
+ nodeport_object.template['spec']['ports'])
+
+ @mock.patch.object(kubernetes_utils, 'create_service')
+ def test_create(self, mock_create_service):
+ nodeport_object = kubernetes.ServiceNodePortObject('fake_name')
+ nodeport_object.template = 'fake_template'
+ nodeport_object.create()
+ mock_create_service.assert_called_once_with('fake_template')
+
+ @mock.patch.object(kubernetes_utils, 'delete_service')
+ def test_delete(self, mock_delete_service):
+ nodeport_object = kubernetes.ServiceNodePortObject('fake_name')
+ nodeport_object.delete()
+ mock_delete_service.assert_called_once_with('fake_name-service')
+
+
+class KubernetesTemplate(base.BaseUnitTestCase):
+
+ def test_get_rc_by_name(self):
+ ctx_cfg = {
+ 'servers': {
+ 'host1': {'args': 'some data'}
+ }
+ }
+ k_template = kubernetes.KubernetesTemplate('k8s_name', ctx_cfg)
+ rc = k_template.get_rc_by_name('host1-k8s_name')
+        self.assertIsInstance(rc, kubernetes.ReplicationControllerObject)
+
+ def test_get_rc_by_name_wrong_name(self):
+ ctx_cfg = {
+ 'servers': {
+ 'host1': {'args': 'some data'}
+ }
+ }
+ k_template = kubernetes.KubernetesTemplate('k8s_name', ctx_cfg)
+ self.assertIsNone(k_template.get_rc_by_name('wrong_host_name'))
+
+ def test_get_rc_by_name_no_rcs(self):
+ ctx_cfg = {'servers': {}}
+ k_template = kubernetes.KubernetesTemplate('k8s_name', ctx_cfg)
+ self.assertIsNone(k_template.get_rc_by_name('any_host_name'))