-rw-r--r--  ansible/build_yardstick_image.yml  15
-rw-r--r--  ansible/install_dependencies.yml (renamed from samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets.yaml)  12
-rw-r--r--  ansible/library/find_kernel.py  93
-rw-r--r--  ansible/post_build_yardstick_image.yml  2
-rw-r--r--  ansible/roles/download_dpdk/tasks/main.yml  15
-rw-r--r--  ansible/roles/download_prox/defaults/main.yml  12
-rw-r--r--  ansible/roles/download_prox/tasks/main.yml  36
-rw-r--r--  ansible/roles/download_samplevnfs/defaults/main.yml  8
-rw-r--r--  ansible/roles/download_samplevnfs/tasks/main.yml  17
-rw-r--r--  ansible/roles/download_trex/defaults/main.yml  6
-rw-r--r--  ansible/roles/download_trex/tasks/main.yml  12
-rw-r--r--  ansible/roles/enable_hugepages_on_boot/defaults/main.yml  3
-rwxr-xr-x  ansible/roles/enable_hugepages_on_boot/tasks/main.yml  4
-rwxr-xr-x  ansible/roles/install_dependencies/tasks/Debian.yml  1
-rw-r--r--  ansible/roles/install_dependencies/tasks/RedHat.yml  1
-rw-r--r--  ansible/roles/install_dpdk/defaults/main.yml  2
-rwxr-xr-x  ansible/roles/install_dpdk/tasks/Debian.yml  3
-rw-r--r--  ansible/roles/install_dpdk/tasks/RedHat.yml  2
-rw-r--r--  ansible/roles/install_dpdk/tasks/main.yml  21
-rw-r--r--  ansible/roles/install_dpdk/vars/main.yml  2
-rwxr-xr-x  ansible/roles/install_prox/tasks/Debian.yml  24
-rw-r--r--  ansible/roles/install_prox/tasks/RedHat.yml  22
-rw-r--r--  ansible/roles/install_samplevnf/tasks/main.yml  55
-rw-r--r--  ansible/roles/install_samplevnf/vars/main.yml (renamed from ansible/roles/install_prox/tasks/main.yml)  43
-rw-r--r--  ansible/roles/install_trex/defaults/main.yml  3
-rw-r--r--  ansible/roles/install_trex/tasks/main.yml  17
-rw-r--r--  ansible/roles/install_vnf_vACL/tasks/main.yml  41
-rw-r--r--  ansible/roles/install_vnf_vACL/vars/main.yml  2
-rw-r--r--  ansible/roles/install_vnf_vCGNAPT/tasks/main.yml  41
-rw-r--r--  ansible/roles/install_vnf_vCGNAPT/vars/main.yml  2
-rw-r--r--  ansible/roles/install_vnf_vFW/tasks/main.yml  50
-rw-r--r--  ansible/roles/install_vnf_vFW/vars/main.yml  2
-rw-r--r--  ansible/roles/install_vnf_vPE/tasks/main.yml  37
-rw-r--r--  ansible/roles/install_vnf_vPE/vars/main.yml  2
-rw-r--r--  ansible/roles/reset_resolv_conf/tasks/main.yml  2
-rw-r--r--  ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml (renamed from ansible/ubuntu_server_cloudimg_modify_vpe.yml)  32
-rw-r--r--  ansible/ubuntu_server_cloudimg_modify.yml  2
-rw-r--r--  ansible/ubuntu_server_cloudimg_modify_cgnapt.yml  41
-rw-r--r--  ansible/ubuntu_server_cloudimg_modify_dpdk.yml  2
-rw-r--r--  ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml (renamed from ansible/ubuntu_server_cloudimg_modify_acl.yml)  33
-rw-r--r--  ansible/ubuntu_server_cloudimg_modify_vfw.yml  41
-rw-r--r--  api/database/v2/handlers.py  5
-rw-r--r--  api/database/v2/models.py  3
-rw-r--r--  api/resources/v2/environments.py  8
-rw-r--r--  api/resources/v2/images.py  341
-rw-r--r--  api/server.py  1
-rw-r--r--  api/urls.py  1
-rwxr-xr-x  docker/nginx.sh  1
-rw-r--r--  docs/testing/user/userguide/14-nsb_installation.rst  3
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc056.rst  149
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc057.rst  165
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc058.rst  148
-rw-r--r--  etc/yardstick/yardstick.conf.sample  1
-rw-r--r--  gui/app/scripts/controllers/image.controller.js  301
-rw-r--r--  gui/app/scripts/controllers/main.js  169
-rw-r--r--  gui/app/scripts/controllers/projectDetail.controller.js  2
-rw-r--r--  gui/app/scripts/factory/main.factory.js  39
-rw-r--r--  gui/app/views/modal/environmentDialog.html  15
-rw-r--r--  gui/app/views/modal/imageDialog.html  19
-rw-r--r--  gui/app/views/podupload.html  2
-rw-r--r--  gui/app/views/uploadImage.html  82
-rwxr-xr-x  nsb_setup.sh  6
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml  8
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml  22
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml  42
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_trex.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml  21
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml (renamed from samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml)  40
-rw-r--r--  samples/vnf_samples/nsut/acl/tc_heat_trex_external_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml  57
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml  4
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml  12
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml  12
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_scale_up.yaml  12
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_with_latency_ipv4_1flow_64B_trex.yaml  12
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml  80
-rw-r--r--  samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml  83
-rw-r--r--  samples/vnf_samples/nsut/udp_replay/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vfw/acl_1rule.yaml  4
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml  4
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml  14
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml  81
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml  84
-rw-r--r--  samples/vnf_samples/nsut/vpe/tc_baremetal_http_ipv4_ixload.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml  11
-rw-r--r--  samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml  12
-rw-r--r--  samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_IMIX.yaml  12
-rw-r--r--  samples/vnf_samples/traffic_profiles/imix_storage.yaml  41
-rw-r--r--  samples/vnf_samples/traffic_profiles/imix_video.yaml  43
-rw-r--r--  samples/vnf_samples/traffic_profiles/imix_voice.yaml  41
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml  20
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml  52
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml  52
-rw-r--r--  samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml  64
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml  33
-rw-r--r--  samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml  39
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml  5
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml  81
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml  179
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml  111
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml  39
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml  54
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml (renamed from samples/ping_k8s.yaml)  0
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc081.yaml (renamed from samples/container_ping_vm.yaml)  2
-rw-r--r--  tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml  18
-rw-r--r--  tests/unit/__init__.py  76
-rw-r--r--  tests/unit/benchmark/runner/test_search.py  63
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_scenario_general.py  1
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_create_network.py  39
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_create_port.py  36
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_create_router.py  39
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_create_sec_group.py  39
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_create_subnet.py  41
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py  36
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_delete_keypair.py  36
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_delete_volume.py  36
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_detach_volume.py  35
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_pktgen.py  10
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vnf_generic.py  94
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py  2
-rw-r--r--  tests/unit/network_services/helpers/test_samplevnf_helper.py  2
-rw-r--r--  tests/unit/network_services/traffic_profile/test_fixed.py  61
-rw-r--r--  tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py  60
-rw-r--r--  tests/unit/network_services/traffic_profile/test_prox_acl.py  62
-rw-r--r--  tests/unit/network_services/traffic_profile/test_prox_binsearch.py  62
-rw-r--r--  tests/unit/network_services/traffic_profile/test_prox_profile.py  62
-rw-r--r--  tests/unit/network_services/traffic_profile/test_prox_ramp.py  62
-rw-r--r--  tests/unit/network_services/traffic_profile/test_rfc2544.py  70
-rw-r--r--  tests/unit/network_services/traffic_profile/test_traffic_profile.py  86
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py  61
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py  62
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_iniparser.py  63
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py  61
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py  64
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py  62
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py  62
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py  62
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py  63
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py  63
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py  66
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py  62
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py  61
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py  62
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py  65
-rw-r--r--  yardstick/benchmark/contexts/heat.py  3
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker_conf.yaml  4
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash  14
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash  23
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash  22
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash  14
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash  15
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash  15
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation_conf.yaml  11
-rw-r--r--  yardstick/benchmark/scenarios/availability/result_checker_conf.yaml  4
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py  5
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py  5
-rw-r--r--  yardstick/benchmark/scenarios/availability/util.py  2
-rw-r--r--  yardstick/benchmark/scenarios/compute/computecapacity.bash  13
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate.py  17
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_keypair.py  2
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_network.py  64
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_port.py  66
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_router.py  66
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_sec_group.py  65
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_server.py  2
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_subnet.py  66
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_floating_ip.py  54
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_keypair.py  56
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_volume.py  55
-rw-r--r--  yardstick/benchmark/scenarios/lib/detach_volume.py  54
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py  13
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py  61
-rw-r--r--  yardstick/benchmark/scenarios/storage/storagecapacity.bash  4
-rw-r--r--  yardstick/common/constants.py  1
-rw-r--r--  yardstick/common/openstack_utils.py  193
-rw-r--r--  yardstick/network_services/helpers/cpu.py  8
-rw-r--r--  yardstick/network_services/helpers/samplevnf_helper.py  114
-rw-r--r--  yardstick/network_services/nfvi/resource.py  28
-rw-r--r--  yardstick/network_services/traffic_profile/fixed.py  8
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py  6
-rw-r--r--  yardstick/network_services/traffic_profile/traffic_profile.py  86
-rw-r--r--  yardstick/network_services/utils.py  5
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py  6
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py  2
-rw-r--r--  yardstick/orchestrator/heat.py  6
-rw-r--r--  yardstick/ssh.py  10
196 files changed, 4361 insertions, 2833 deletions
diff --git a/ansible/build_yardstick_image.yml b/ansible/build_yardstick_image.yml
index 9a65d3ae0..025573b4b 100644
--- a/ansible/build_yardstick_image.yml
+++ b/ansible/build_yardstick_image.yml
@@ -28,11 +28,9 @@
sha256sums_filename: "{{ sha256sums_path|basename }}"
sha256sums_url: "{{ lookup('env', 'SHA256SUMS_URL')|default('https://' ~ host ~ '/' ~ sha256sums_path, true) }}"
- mountdir: "{{ lookup('env', 'mountdir')|default('/mnt/yardstick', true) }}"
workspace: "{{ lookup('env', 'workspace')|default('/tmp/workspace/yardstick', true) }}"
imgfile: "{{ workspace }}/yardstick-image.img"
raw_imgfile_basename: "yardstick-{{ release }}-server.raw"
- raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
environment:
PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/bin
@@ -42,6 +40,12 @@
- package: name=parted state=present
+ - set_fact:
+ mountdir: "{{ lookup('env', 'mountdir')|default('/mnt/yardstick', true) }}"
+
+ - set_fact:
+ raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
+
# cleanup non-lxd
- name: unmount all old mount points
mount:
@@ -260,9 +264,16 @@
ansible_python_interpreter: /usr/bin/python3
# set this host variable here
nameserver_ip: "{{ ansible_dns.nameservers[0] }}"
+ image_type: vm
- name: include {{ img_modify_playbook }}
include: "{{ img_modify_playbook }}"
- name: run post build tasks
include: post_build_yardstick_image.yml
+
+- hosts: localhost
+
+ tasks:
+ - debug:
+ msg: "yardstick image = {{ raw_imgfile }}"
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets.yaml b/ansible/install_dependencies.yml
index e713ea858..001418497 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets.yaml
+++ b/ansible/install_dependencies.yml
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,8 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+---
+- name: install yardstick dependencies
+ hosts: all
+
+ roles:
+ - install_dependencies
-flow:
- srcip4_range: '152.16.100.20'
- dstip4_range: '152.40.40.20'
- count: 1
diff --git a/ansible/library/find_kernel.py b/ansible/library/find_kernel.py
new file mode 100644
index 000000000..4623bce89
--- /dev/null
+++ b/ansible/library/find_kernel.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+DOCUMENTATION = '''
+---
+module: find_kernel
+short_description: Look for the system kernel on the filesystem
+description:
+ - We need to find the kernel on non-booted systems, disk images, chroots, etc.
+ To do this we check /lib/modules and look for the kernel that matches the running
+ kernel, or failing that we look for the highest-numbered kernel
+options:
+ kernel: starting kernel to check
+ module_dir: Override kernel module dir, default /lib/modules
+'''
+
+LIB_MODULES = "/lib/modules"
+
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
+
+
+def convert_ints(fields, orig):
+ return tuple((try_int(f) for f in fields)), orig
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'kernel': {'required': True, 'type': 'str'},
+ 'module_dir': {'required': False, 'type': 'str', 'default': LIB_MODULES},
+ }
+ )
+ params = module.params
+ kernel = params['kernel']
+ module_dir = params['module_dir']
+
+ if os.path.isdir(os.path.join(module_dir, kernel)):
+ module.exit_json(changed=False, kernel=kernel)
+
+ kernel_dirs = os.listdir(module_dir)
+ kernels = sorted((convert_ints(re.split('[-.]', k), k) for k in kernel_dirs), reverse=True)
+ try:
+ newest_kernel = kernels[0][-1]
+ except IndexError:
+ module.fail_json(msg="Unable to find kernels in {}".format(module_dir))
+
+ if os.path.isdir(os.path.join(module_dir, newest_kernel)):
+ module.exit_json(changed=False, kernel=newest_kernel)
+ else:
+ return kernel
+
+ module.fail_json(msg="Unable to kernel other than {}".format(kernel))
+
+
+# <<INCLUDE_ANSIBLE_MODULE_COMMON>>
+from ansible.module_utils.basic import * # noqa
+
+if __name__ == '__main__':
+ main()
+
+"""
+
+get kernel from uname, ansible_kernel
+look for that kernel in /lib/modules
+if that kernel doesn't exist
+sort lib/modules
+use latest
+
+parse grub
+
+
+
+"""
diff --git a/ansible/post_build_yardstick_image.yml b/ansible/post_build_yardstick_image.yml
index b0c418721..d1f2a73a8 100644
--- a/ansible/post_build_yardstick_image.yml
+++ b/ansible/post_build_yardstick_image.yml
@@ -40,5 +40,3 @@
- name: kpartx -dv to delete all image partition device nodes
command: kpartx -dv "{{ raw_imgfile }}"
ignore_errors: true
-
- - command: losetup -d "{{ loop_device }}"
\ No newline at end of file
diff --git a/ansible/roles/download_dpdk/tasks/main.yml b/ansible/roles/download_dpdk/tasks/main.yml
index 322f3cd0c..bcb5dde1a 100644
--- a/ansible/roles/download_dpdk/tasks/main.yml
+++ b/ansible/roles/download_dpdk/tasks/main.yml
@@ -16,6 +16,10 @@
var: dpdk_version
verbosity: 2
+- file:
+ path: "{{ dpdk_dest }}"
+ state: directory
+
- name: fetch dpdk
get_url:
url: "{{ dpdk_url }}"
@@ -24,12 +28,17 @@
checksum: "{{ dpdk_sha256s[dpdk_version] }}"
- unarchive:
- src: "{{ clone_dest }}/{{ dpdk_file }}"
- dest: "{{ clone_dest }}/"
+ src: "{{ dpdk_dest }}/{{ dpdk_file }}"
+ dest: "{{ dpdk_dest }}/"
copy: no
+- name: cleanup tar file to save space
+ file:
+ path: "{{ dpdk_dest }}/{{ dpdk_file }}"
+ state: absent
+
- set_fact:
- dpdk_path: "{{ clone_dest }}/{{ dpdk_unarchive }}"
+ dpdk_path: "{{ dpdk_dest }}/{{ dpdk_unarchive }}"
- set_fact:
RTE_SDK: "{{ dpdk_path }}"
diff --git a/ansible/roles/download_prox/defaults/main.yml b/ansible/roles/download_prox/defaults/main.yml
deleted file mode 100644
index 797db3125..000000000
--- a/ansible/roles/download_prox/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-prox_version: v037
-prox_suffix:
- v035: "zip"
- v037: "tar.gz"
-prox_url: "https://01.org/sites/default/files/downloads/intelr-data-plane-performance-demonstrators/dppd-prox-{{ prox_version }}.{{ prox_suffix[prox_version] }}"
-prox_file: "{{ prox_url|basename }}"
-prox_unarchive: "{{ prox_file|regex_replace('[.]zip$', '')|regex_replace('-prox-', '-PROX-') }}"
-prox_dest: "{{ clone_dest }}/"
-prox_sha256s:
- v035: "sha256:f5d3f7c3855ca198d2babbc7045ed4373f0ddc13dc243fedbe23ed395ce65cc9"
- v037: "sha256:a12d021fbc0f5ae55ab55a2bbf8f3b260705ce3e61866288f023ccabca010bca"
diff --git a/ansible/roles/download_prox/tasks/main.yml b/ansible/roles/download_prox/tasks/main.yml
deleted file mode 100644
index 0614c74fa..000000000
--- a/ansible/roles/download_prox/tasks/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- debug:
- var: prox_version
- verbosity: 2
-
-- name: fetch prox
- get_url:
- url: "{{ prox_url }}"
- dest: "{{ prox_dest }}"
- validate_certs: False
- checksum: "{{ prox_sha256s[prox_version] }}"
-
-- unarchive:
- src: "{{ clone_dest }}/{{ prox_file }}"
- dest: "{{ clone_dest }}/"
- copy: no
-
-- debug:
- var: prox_unarchive
- verbosity: 2
-
-- set_fact:
- prox_path: "{{ clone_dest }}/{{ prox_unarchive }}"
\ No newline at end of file
diff --git a/ansible/roles/download_samplevnfs/defaults/main.yml b/ansible/roles/download_samplevnfs/defaults/main.yml
index 44449af6f..5f565a415 100644
--- a/ansible/roles/download_samplevnfs/defaults/main.yml
+++ b/ansible/roles/download_samplevnfs/defaults/main.yml
@@ -1,6 +1,4 @@
---
-samplevnf_version: ""
-samplevnf_file: "{{ samplevnf_url|basename }}"
-samplevnf_unarchive: "{{ samplevnf_file|regex_replace('[.]tar[.]gz$', '') }}"
-samplevnf_dest: "{{ clone_dest }}/"
-samplevnf_sha256: "sha256:36457cadfd23053c9ce1cf2e6f048cad6a5d04a7371d7a122e133dcbf007989e"
+samplevnf_url: "https://git.opnfv.org/samplevnf"
+samplevnf_dest: "{{ clone_dest }}/samplevnf"
+samplevnf_version: "master"
diff --git a/ansible/roles/download_samplevnfs/tasks/main.yml b/ansible/roles/download_samplevnfs/tasks/main.yml
index 005d57dda..e9d4142c9 100644
--- a/ansible/roles/download_samplevnfs/tasks/main.yml
+++ b/ansible/roles/download_samplevnfs/tasks/main.yml
@@ -17,16 +17,13 @@
# verbosity: 2
- name: fetch samplevnf
- get_url:
- url: "{{ samplevnf_url }}"
+ git:
+ repo: "{{ samplevnf_url }}"
dest: "{{ samplevnf_dest }}"
- validate_certs: False
- checksum: "{{ samplevnf_sha256 }}"
-
-- unarchive:
- src: "{{ clone_dest }}/{{ samplevnf_file }}"
- dest: "{{ clone_dest }}/"
- copy: no
+ version: "{{ samplevnf_version }}"
+ accept_hostkey: yes
+ recursive: no
+ force: yes
- set_fact:
- samplevnf_path: "{{ clone_dest }}/{{ samplevnf_unarchive }}"
+ samplevnf_path: "{{ samplevnf_dest }}"
diff --git a/ansible/roles/download_trex/defaults/main.yml b/ansible/roles/download_trex/defaults/main.yml
index dd2dd27eb..6e8fa7020 100644
--- a/ansible/roles/download_trex/defaults/main.yml
+++ b/ansible/roles/download_trex/defaults/main.yml
@@ -12,9 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-trex_version: v2.20
+trex_version: v2.28
trex_url: "https://trex-tgn.cisco.com/trex/release/{{ trex_version }}.tar.gz"
trex_file: "{{ trex_url|basename }}"
trex_unarchive: "{{ trex_file|regex_replace('[.]tar.gz$', '') }}"
trex_dest: "{{ clone_dest }}/"
-trex_sha256: "sha256:eb5a069f758a36133a185c7e27af10834ca03d11441165403529fbd7844658fb"
+trex_sha256s:
+ "v2.20": "sha256:eb5a069f758a36133a185c7e27af10834ca03d11441165403529fbd7844658fb"
+ "v2.28": "sha256:c3f08aabbd69dddb09843984d41acbe9ba1af6a6ef3380a7830f7c9e33134207"
diff --git a/ansible/roles/download_trex/tasks/main.yml b/ansible/roles/download_trex/tasks/main.yml
index 75a3169f0..baa964fd8 100644
--- a/ansible/roles/download_trex/tasks/main.yml
+++ b/ansible/roles/download_trex/tasks/main.yml
@@ -16,10 +16,16 @@
get_url:
url: "{{ trex_url }}"
dest: "{{ trex_dest }}"
- checksum: "{{ trex_sha256 }}"
+ validate_certs: False
+ checksum: "{{ trex_sha256s[trex_version] }}"
- name: unarchive Trex
unarchive:
- src: "{{ clone_dest }}/{{ trex_file }}"
- dest: "{{ clone_dest }}/"
+ src: "{{ trex_dest }}/{{ trex_file }}"
+ dest: "{{ trex_dest }}/"
copy: no
+
+- name: cleanup tar file to save space
+ file:
+ path: "{{ trex_dest }}/{{ trex_file }}"
+ state: absent
diff --git a/ansible/roles/enable_hugepages_on_boot/defaults/main.yml b/ansible/roles/enable_hugepages_on_boot/defaults/main.yml
new file mode 100644
index 000000000..015e01bab
--- /dev/null
+++ b/ansible/roles/enable_hugepages_on_boot/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+num_hugepages: auto
+huge_pagesize_mb: 1024
\ No newline at end of file
diff --git a/ansible/roles/enable_hugepages_on_boot/tasks/main.yml b/ansible/roles/enable_hugepages_on_boot/tasks/main.yml
index f258bb684..be4a328a2 100755
--- a/ansible/roles/enable_hugepages_on_boot/tasks/main.yml
+++ b/ansible/roles/enable_hugepages_on_boot/tasks/main.yml
@@ -38,15 +38,11 @@
line: '{{ hugepage_param }}'
state: present
-- name: Update grub
- command: "{{ update_grub[ansible_os_family] }}"
-
- name: create hugetables mount
file:
path: "{{ hugetable_mount }}"
state: directory
-
- name: mount hugetlbfs
mount:
name: "{{ hugetable_mount }}"
diff --git a/ansible/roles/install_dependencies/tasks/Debian.yml b/ansible/roles/install_dependencies/tasks/Debian.yml
index ac8332287..0047a5e3b 100755
--- a/ansible/roles/install_dependencies/tasks/Debian.yml
+++ b/ansible/roles/install_dependencies/tasks/Debian.yml
@@ -29,6 +29,7 @@
- qemu-kvm
- qemu-user-static
- qemu-utils
+ - kpartx
- libvirt0
- python-libvirt
- bridge-utils
diff --git a/ansible/roles/install_dependencies/tasks/RedHat.yml b/ansible/roles/install_dependencies/tasks/RedHat.yml
index 4bb7c318e..b725933d0 100644
--- a/ansible/roles/install_dependencies/tasks/RedHat.yml
+++ b/ansible/roles/install_dependencies/tasks/RedHat.yml
@@ -46,6 +46,7 @@
- python-setuptools
- libffi-devel
- python-devel
+ - kpartx
# don't install kernel-devel here it will trigger unwanted kernel upgrade
# Mandatory Packages:
# Don't use yum groups, they don't work, expand them manually
diff --git a/ansible/roles/install_dpdk/defaults/main.yml b/ansible/roles/install_dpdk/defaults/main.yml
new file mode 100644
index 000000000..fe2172401
--- /dev/null
+++ b/ansible/roles/install_dpdk/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+INSTALL_BIN_PATH: "/opt/nsb_bin"
\ No newline at end of file
diff --git a/ansible/roles/install_dpdk/tasks/Debian.yml b/ansible/roles/install_dpdk/tasks/Debian.yml
index 486d40e11..c77e4f96a 100755
--- a/ansible/roles/install_dpdk/tasks/Debian.yml
+++ b/ansible/roles/install_dpdk/tasks/Debian.yml
@@ -17,3 +17,6 @@
with_items:
- libpcap-dev
+- name: Install kernel headers
+ action: "{{ ansible_pkg_mgr }} name=linux-headers-{{ dpdk_kernel }} state=present"
+
diff --git a/ansible/roles/install_dpdk/tasks/RedHat.yml b/ansible/roles/install_dpdk/tasks/RedHat.yml
index af35c9b3e..2fb249eae 100644
--- a/ansible/roles/install_dpdk/tasks/RedHat.yml
+++ b/ansible/roles/install_dpdk/tasks/RedHat.yml
@@ -17,3 +17,5 @@
with_items:
- libpcap-devel
+- name: Install kernel headers
+ action: "{{ ansible_pkg_mgr }} name=kernel-headers-{{ dpdk_kernel }} state=present"
diff --git a/ansible/roles/install_dpdk/tasks/main.yml b/ansible/roles/install_dpdk/tasks/main.yml
index fca0e33af..cab093ad5 100644
--- a/ansible/roles/install_dpdk/tasks/main.yml
+++ b/ansible/roles/install_dpdk/tasks/main.yml
@@ -20,8 +20,22 @@
# with_fileglob:
# - "{{ local_nsb_path }}/patches/dpdk_custom_patch/0*.patch"
+- name: find kernel for image (including chroot)
+ find_kernel:
+ kernel: "{{ ansible_kernel }}"
+ register: found_kernel
+
+# Do this before installing kernel headers
+- name: Set dpdk_kernel to be the kernel we found
+ set_fact:
+ dpdk_kernel: "{{ found_kernel.kernel }}"
+
- include: "{{ ansible_os_family }}.yml"
+- name: set RTE_KERNELDIR to point to found kernel
+ set_fact:
+ RTE_KERNELDIR: "/lib/modules/{{ dpdk_kernel }}/build"
+
- my_make:
chdir: "{{ dpdk_path }}"
target: config
@@ -29,6 +43,8 @@
T: "{{ dpdk_make_arch }}"
O: "{{ dpdk_make_arch }}"
extra_args: "-j {{ ansible_processor_vcpus }}"
+ environment:
+ RTE_KERNELDIR: "{{ RTE_KERNELDIR }}"
- name: enable RTE_PORT_STATS_COLLECT
lineinfile:
@@ -57,6 +73,8 @@
- my_make:
chdir: "{{ dpdk_path }}/{{ dpdk_make_arch}}"
extra_args: "-j {{ ansible_processor_vcpus }}"
+ environment:
+ RTE_KERNELDIR: "{{ RTE_KERNELDIR }}"
- file:
path: "{{ dpdk_module_dir}}"
@@ -67,7 +85,8 @@
dest: "{{ dpdk_module_dir }}/igb_uio.ko"
remote_src: yes
-- command: depmod -a
+- name: run depmod for dpdk_kernel
+ command: depmod "{{ dpdk_kernel }}"
- file:
path: "{{ INSTALL_BIN_PATH }}"
diff --git a/ansible/roles/install_dpdk/vars/main.yml b/ansible/roles/install_dpdk/vars/main.yml
index 730215c90..1cc4f1583 100644
--- a/ansible/roles/install_dpdk/vars/main.yml
+++ b/ansible/roles/install_dpdk/vars/main.yml
@@ -1,6 +1,6 @@
---
dpdk_make_arch: x86_64-native-linuxapp-gcc
-dpdk_module_dir: "/lib/modules/{{ ansible_kernel }}/extra"
+dpdk_module_dir: "/lib/modules/{{ dpdk_kernel }}/extra"
hugetable_mount: /mnt/huge
dpdk_devbind:
"16.07": "{{ dpdk_path }}/tools/dpdk-devbind.py"
diff --git a/ansible/roles/install_prox/tasks/Debian.yml b/ansible/roles/install_prox/tasks/Debian.yml
deleted file mode 100755
index 00a31fc41..000000000
--- a/ansible/roles/install_prox/tasks/Debian.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: Install PROX build dependencies
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - pkg-config
- - liblua5.2-dev
- - libncurses5
- - libncurses5-dev
- - libncursesw5
- - libncursesw5-dev
- - libedit-dev
diff --git a/ansible/roles/install_prox/tasks/RedHat.yml b/ansible/roles/install_prox/tasks/RedHat.yml
deleted file mode 100644
index 69fa83b31..000000000
--- a/ansible/roles/install_prox/tasks/RedHat.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: Install PROX build dependencies
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - pkgconfig
- - lua-devel
- - ncurses-devel
- - libedit-devel
-
diff --git a/ansible/roles/install_samplevnf/tasks/main.yml b/ansible/roles/install_samplevnf/tasks/main.yml
new file mode 100644
index 000000000..d332c88bc
--- /dev/null
+++ b/ansible/roles/install_samplevnf/tasks/main.yml
@@ -0,0 +1,55 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- set_fact:
+ vnf_build_dir: "{{ samplevnf_path }}/VNFs/{{ vnf_build_dirs[vnf_name] }}"
+
+- set_fact:
+ vnf_app_name: "{{ vnf_app_names[vnf_name] }}"
+
+- name: Install extra build dependencies
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: "{{ vnf_build_dependencies.get(vnf_name, {}).get(ansible_os_family, []) }}"
+
+
+- name: set build env vars
+ set_fact:
+ build_env_vars:
+ RTE_SDK: "{{ RTE_SDK }}"
+ RTE_TARGET: "{{ RTE_TARGET }}"
+ VNF_CORE: "{{ samplevnf_path }}"
+
+- name: set soft CRC for PROX when building in VM
+ set_fact:
+ build_env_vars: "{{ build_env_vars|combine({'crc': 'soft'}) }}"
+ when: vnf_name == "PROX" and image_type is defined and image_type == "vm"
+
+- name: "make {{ vnf_name }} clean"
+ my_make: chdir="{{ vnf_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
+ environment: "{{ build_env_vars }}"
+
+- name: "make {{ vnf_name }}"
+ my_make: chdir="{{ vnf_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
+ environment: "{{ build_env_vars }}"
+
+#- command: cp "{{ vnf_build_dir }}/{{ vnf_name }}/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vACL_vnf"
+
+- name: "Install {{ vnf_name }} VNF"
+ copy:
+ src: "{{ vnf_build_dir }}/build/{{ vnf_app_name }}"
+ dest: "{{ INSTALL_BIN_PATH }}/{{ vnf_app_name }}"
+ remote_src: True
+ # make executable
+ mode: 0755
+
diff --git a/ansible/roles/install_prox/tasks/main.yml b/ansible/roles/install_samplevnf/vars/main.yml
index 93025fcb4..6f2c44a84 100644
--- a/ansible/roles/install_prox/tasks/main.yml
+++ b/ansible/roles/install_samplevnf/vars/main.yml
@@ -12,19 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-- include: "{{ ansible_os_family }}.yml"
-
-- name: workaround, make trailing.sh executable
- file:
- path: "{{ prox_path }}/helper-scripts/trailing.sh"
- state: touch
- mode: 0755
- when: prox_version == "v035"
-
-- make:
- chdir: "{{ prox_path }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
-
-
+vnf_build_dependencies:
+ PROX:
+ Debian:
+ - pkg-config
+ - liblua5.2-dev
+ - libncurses5
+ - libncurses5-dev
+ - libncursesw5
+ - libncursesw5-dev
+ - libedit-dev
+ RedHat:
+ - pkgconfig
+ - lua-devel
+ - ncurses-devel
+ - libedit-devel
+vnf_build_dirs:
+ ACL: vACL
+ FW: vFW
+ CGNATP: vCGNAPT
+ UDP_Replay: UDP_Replay
+ PROX: DPPD-PROX
+vnf_app_names:
+ ACL: vACL
+ FW: vFW
+ CGNATP: vCGNAPT
+ UDP_Replay: UDP_Replay
+ PROX: prox
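
For orientation, a small Python sketch (illustrative only; the keys mirror the vars above and the logic in ansible/roles/install_samplevnf/tasks/main.yml) of how the consolidated install_samplevnf role resolves per-VNF settings, replacing the separate install_vnf_* roles removed later in this diff:

# Keys mirror ansible/roles/install_samplevnf/vars/main.yml.
VNF_BUILD_DIRS = {'ACL': 'vACL', 'FW': 'vFW', 'CGNATP': 'vCGNAPT',
                  'UDP_Replay': 'UDP_Replay', 'PROX': 'DPPD-PROX'}
VNF_APP_NAMES = {'ACL': 'vACL', 'FW': 'vFW', 'CGNATP': 'vCGNAPT',
                 'UDP_Replay': 'UDP_Replay', 'PROX': 'prox'}


def resolve_vnf(vnf_name, samplevnf_path, rte_sdk, rte_target, image_type=None):
    """Return (build_dir, app_name, build_env) for one sample VNF."""
    build_dir = "{}/VNFs/{}".format(samplevnf_path, VNF_BUILD_DIRS[vnf_name])
    env = {'RTE_SDK': rte_sdk, 'RTE_TARGET': rte_target, 'VNF_CORE': samplevnf_path}
    # PROX built inside a VM image falls back to software CRC.
    if vnf_name == 'PROX' and image_type == 'vm':
        env['crc'] = 'soft'
    return build_dir, VNF_APP_NAMES[vnf_name], env


# Example:
#   resolve_vnf('PROX', '/tmp/yardstick-clone/samplevnf',
#               '/opt/dpdk', 'x86_64-native-linuxapp-gcc', image_type='vm')
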
diff --git a/ansible/roles/install_trex/defaults/main.yml b/ansible/roles/install_trex/defaults/main.yml
index 1b2876301..a5555e355 100644
--- a/ansible/roles/install_trex/defaults/main.yml
+++ b/ansible/roles/install_trex/defaults/main.yml
@@ -13,5 +13,6 @@
# limitations under the License.
---
#TREX_DOWNLOAD: "https://trex-tgn.cisco.com/trex/release/v2.05.tar.gz"
-TREX_VERSION: v2.20
+TREX_VERSION: v2.28
TREX_DOWNLOAD: "{{ nsb_mirror_url|ternary(nsb_mirror_url, 'https://trex-tgn.cisco.com/trex/release') }}/{{ TREX_VERSION }}.tar.gz"
+INSTALL_BIN_PATH: "/opt/nsb_bin"
diff --git a/ansible/roles/install_trex/tasks/main.yml b/ansible/roles/install_trex/tasks/main.yml
index 4818a8087..7ba1fc833 100644
--- a/ansible/roles/install_trex/tasks/main.yml
+++ b/ansible/roles/install_trex/tasks/main.yml
@@ -12,17 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-- set_fact:
- trex_file: "{{ trex_url|basename|regex_replace('[.]tar.gz', '') }}"
-
- file: path="{{ INSTALL_BIN_PATH }}/trex" state=absent
- file: path="{{ INSTALL_BIN_PATH }}/trex" state=directory
+- command: mv "{{ trex_dest }}/{{ trex_unarchive }}" "{{ INSTALL_BIN_PATH }}/trex/scripts"
+
+# Don't overwrite igb_uio.ko compiled from DPDK
-- command: mv "{{ clone_dest }}/{{ trex_unarchive }}" "{{ INSTALL_BIN_PATH }}/trex/scripts"
+- name: fix stl __init__.py for python module
+ file:
+ path: "{{ INSTALL_BIN_PATH }}/trex/scripts/automation/trex_control_plane/stl/__init__.py"
+ state: touch
-- file: path="{{ INSTALL_BIN_PATH }}/trex/scripts/automation/trex_control_plane/stl/__init__.py" state=touch
+- name: "symlink client to {{ INSTALL_BIN_PATH }}/trex_client"
+ file:
+ src: "{{ INSTALL_BIN_PATH }}/trex/scripts/automation/trex_control_plane"
+ dest: "{{ INSTALL_BIN_PATH }}/trex_client"
+ state: link
# Don't use trex/scripts/dpdk_nic_bind.py use DPDK usertools/dpdk-devbind.py
#- command: cp "{{ INSTALL_BIN_PATH }}/trex/scripts/dpdk_nic_bind.py" "{{ INSTALL_BIN_PATH }}"
diff --git a/ansible/roles/install_vnf_vACL/tasks/main.yml b/ansible/roles/install_vnf_vACL/tasks/main.yml
deleted file mode 100644
index ff2e769f0..000000000
--- a/ansible/roles/install_vnf_vACL/tasks/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vACL_vnf make clean
- my_make: chdir="{{ acl_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
- VNF_CORE: "{{ samplevnf_path }}"
-
-- name: make vACL VNF
- my_make: chdir="{{ acl_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
- VNF_CORE: "{{ samplevnf_path }}"
-
-#- command: cp "{{ acl_build_dir }}/vACL/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vACL_vnf"
-- name: Install vACL VNF
- copy:
- src: "{{ acl_build_dir }}/build/vACL"
- dest: "{{ INSTALL_BIN_PATH }}/vACL"
- remote_src: True
- # make executable
- mode: 0755
-
-#- command: cp "{{ acl_build_dir }}/vACL/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-# src: "{{ acl_build_dir }}/vACL/config/full_tm_profile_10G.cfg"
-# dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vACL/vars/main.yml b/ansible/roles/install_vnf_vACL/vars/main.yml
deleted file mode 100644
index ee61bf11c..000000000
--- a/ansible/roles/install_vnf_vACL/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-acl_build_dir: "{{ samplevnf_path }}/VNFs/vACL"
\ No newline at end of file
diff --git a/ansible/roles/install_vnf_vCGNAPT/tasks/main.yml b/ansible/roles/install_vnf_vCGNAPT/tasks/main.yml
deleted file mode 100644
index 9f8458f6f..000000000
--- a/ansible/roles/install_vnf_vCGNAPT/tasks/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vCGNAPT_vnf make clean
- my_make: chdir="{{ acl_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
- VNF_CORE: "{{ samplevnf_path }}"
-
-- name: make vCGNAPT VNF
- my_make: chdir="{{ acl_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
- VNF_CORE: "{{ samplevnf_path }}"
-
-#- command: cp "{{ acl_build_dir }}/vCGNAPT/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vCGNAPT_vnf"
-- name: Install vCGNAPT VNF
- copy:
- src: "{{ acl_build_dir }}/build/vCGNAPT"
- dest: "{{ INSTALL_BIN_PATH }}/vCGNAPT"
- remote_src: True
- # make executable
- mode: 0755
-
-#- command: cp "{{ acl_build_dir }}/vCGNAPT/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-# src: "{{ acl_build_dir }}/vCGNAPT/config/full_tm_profile_10G.cfg"
-# dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vCGNAPT/vars/main.yml b/ansible/roles/install_vnf_vCGNAPT/vars/main.yml
deleted file mode 100644
index cca1a89a3..000000000
--- a/ansible/roles/install_vnf_vCGNAPT/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-acl_build_dir: "{{ samplevnf_path }}/VNFs/vCGNAPT"
\ No newline at end of file
diff --git a/ansible/roles/install_vnf_vFW/tasks/main.yml b/ansible/roles/install_vnf_vFW/tasks/main.yml
deleted file mode 100644
index cb3df3ed4..000000000
--- a/ansible/roles/install_vnf_vFW/tasks/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vFW_vnf make clean
- my_make: chdir="{{ vfw_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
- VNF_CORE: "{{ samplevnf_path }}"
-
-#- name: make vFW VNF
-# my_make: chdir="{{ vfw_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
-# environment:
-# RTE_SDK: "{{ RTE_SDK }}"
-# RTE_TARGET: "{{ RTE_TARGET }}"
-# VNF_CORE: "{{ samplevnf_path }}"
-
-- name: make vFW VNF
- command: make chdir="{{ vfw_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}" all
- args:
- chdir: "{{ vfw_build_dir }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
- VNF_CORE: "{{ samplevnf_path }}"
-
-#- command: cp "{{ vfw_build_dir }}/vFW/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vFW_vnf"
-- name: Install vFW VNF
- copy:
- src: "{{ vfw_build_dir }}/build/vFW"
- dest: "{{ INSTALL_BIN_PATH }}/vFW"
- remote_src: True
- # make executable
- mode: 0755
-
-#- command: cp "{{ vfw_build_dir }}/vFW/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-# src: "{{ vfw_build_dir }}/vFW/config/full_tm_profile_10G.cfg"
-# dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vFW/vars/main.yml b/ansible/roles/install_vnf_vFW/vars/main.yml
deleted file mode 100644
index 8a8a39865..000000000
--- a/ansible/roles/install_vnf_vFW/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-vfw_build_dir: "{{ samplevnf_path }}/VNFs/vFW"
\ No newline at end of file
diff --git a/ansible/roles/install_vnf_vPE/tasks/main.yml b/ansible/roles/install_vnf_vPE/tasks/main.yml
deleted file mode 100644
index 91d449a41..000000000
--- a/ansible/roles/install_vnf_vPE/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vPE_vnf make clean
- my_make: chdir="{{ vpe_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
-
-- name: make vPE VNF
- my_make: chdir="{{ vpe_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
- environment:
- RTE_SDK: "{{ RTE_SDK }}"
- RTE_TARGET: "{{ RTE_TARGET }}"
-
-#- command: cp "{{ vpe_build_dir }}/vPE/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vPE_vnf"
-- name: Install vPE_vnf
- copy:
- src: "{{ vpe_build_dir }}/build/ip_pipeline"
- dest: "{{ INSTALL_BIN_PATH }}/vPE_vnf"
- remote_src: True
-
-#- command: cp "{{ vpe_build_dir }}/vPE/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-# src: "{{ vpe_build_dir }}/vPE/config/full_tm_profile_10G.cfg"
-# dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vPE/vars/main.yml b/ansible/roles/install_vnf_vPE/vars/main.yml
deleted file mode 100644
index fe0a9727f..000000000
--- a/ansible/roles/install_vnf_vPE/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-vpe_build_dir: "{{ dpdk_path }}/examples/ip_pipeline"
\ No newline at end of file
diff --git a/ansible/roles/reset_resolv_conf/tasks/main.yml b/ansible/roles/reset_resolv_conf/tasks/main.yml
index 50094f24f..4e6de695d 100644
--- a/ansible/roles/reset_resolv_conf/tasks/main.yml
+++ b/ansible/roles/reset_resolv_conf/tasks/main.yml
@@ -21,7 +21,7 @@
file:
path: "{{ resolv_conf_stat.stat.lnk_source|dirname }}"
state: directory
- mode: 755
+ mode: 0755
- name: Override resolv.conf link source with specific nameserver
template:
diff --git a/ansible/ubuntu_server_cloudimg_modify_vpe.yml b/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml
index f55a30fb9..3a1fbd08f 100644
--- a/ansible/ubuntu_server_cloudimg_modify_vpe.yml
+++ b/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml
@@ -12,30 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-- hosts: chroot_image
- connection: chroot
+- hosts: all
vars:
clone_dir: /tmp/yardstick-clone
- pre_tasks:
- - debug: msg="chrooted in {{ inventory_hostname }}"
roles:
- - reset_resolv_conf
- add_custom_repos
- role: set_package_installer_proxy
when: proxy_env is defined and proxy_env
- # can update grub in chroot/docker
-# - enable_hugepages_on_boot
- - modify_cloud_config
+# can't update grub in chroot/docker
+ - enable_hugepages_on_boot
- install_image_dependencies
- role: download_dpdk
- dpdk_version: "16.07"
+# dpdk_version: "17.02"
- install_dpdk
- # vPE is part of DPDK so we don't need to copy it
- - install_vnf_vPE
-# - copy_L4Replay
-# - install_L4Replay
-# - copy_trex
-# - install_trex
+ - download_trex
+ - install_trex
+ - download_samplevnfs
+ - role: install_samplevnf
+ vnf_name: PROX
+ - role: install_samplevnf
+ vnf_name: UDP_Replay
+ - role: install_samplevnf
+ vnf_name: ACL
+ - role: install_samplevnf
+ vnf_name: FW
+ - role: install_samplevnf
+ vnf_name: CGNATP
diff --git a/ansible/ubuntu_server_cloudimg_modify.yml b/ansible/ubuntu_server_cloudimg_modify.yml
index 950655ec8..099d5803f 100644
--- a/ansible/ubuntu_server_cloudimg_modify.yml
+++ b/ansible/ubuntu_server_cloudimg_modify.yml
@@ -25,6 +25,8 @@
- reset_resolv_conf
- add_custom_repos
- modify_cloud_config
+ - role: set_package_installer_proxy
+ when: proxy_env is defined and proxy_env
- install_image_dependencies
- download_unixbench
- install_unixbench
diff --git a/ansible/ubuntu_server_cloudimg_modify_cgnapt.yml b/ansible/ubuntu_server_cloudimg_modify_cgnapt.yml
deleted file mode 100644
index 3f2a179bb..000000000
--- a/ansible/ubuntu_server_cloudimg_modify_cgnapt.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- hosts: chroot_image
- connection: chroot
- vars:
- clone_dir: /tmp/yardstick-clone
-
- pre_tasks:
- - debug: msg="chrooted in {{ inventory_hostname }}"
-
- roles:
-# - reset_resolv_conf
-# - add_custom_repos
-# - role: set_package_installer_proxy
-# when: proxy_env is defined and proxy_env
- # can update grub in chroot/docker
-# - enable_hugepages_on_boot
-# - modify_cloud_config
-# - install_image_dependencies
-# - role: download_dpdk
-# dpdk_version: "16.07"
-# - install_dpdk
-# - download_samplevnfs
- - install_vnf_vCGNAPT
-# - copy_L4Replay
-# - install_L4Replay
-# - copy_trex
-# - install_trex
-
diff --git a/ansible/ubuntu_server_cloudimg_modify_dpdk.yml b/ansible/ubuntu_server_cloudimg_modify_dpdk.yml
index 2a087ce91..6bbb383d8 100644
--- a/ansible/ubuntu_server_cloudimg_modify_dpdk.yml
+++ b/ansible/ubuntu_server_cloudimg_modify_dpdk.yml
@@ -25,6 +25,8 @@
- add_custom_repos
- enable_hugepages_on_boot
- modify_cloud_config
+ - role: set_package_installer_proxy
+ when: proxy_env is defined and proxy_env
- install_image_dependencies
- download_unixbench
- install_unixbench
diff --git a/ansible/ubuntu_server_cloudimg_modify_acl.yml b/ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml
index 98542d7a3..2700b810f 100644
--- a/ansible/ubuntu_server_cloudimg_modify_acl.yml
+++ b/ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml
@@ -18,24 +18,35 @@
clone_dir: /tmp/yardstick-clone
pre_tasks:
- - debug: msg="chrooted in {{ inventory_hostname }}"
+ - debug:
+ msg: "chrooted in {{ inventory_hostname }}"
+ - debug:
+ var: proxy_env
+ verbosity: 2
roles:
- reset_resolv_conf
- add_custom_repos
- role: set_package_installer_proxy
when: proxy_env is defined and proxy_env
- # can update grub in chroot/docker
-# - enable_hugepages_on_boot
+# can't update grub in chroot/docker
+ - enable_hugepages_on_boot
- modify_cloud_config
- install_image_dependencies
-# - role: download_dpdk
-# dpdk_version: "16.07"
-# - install_dpdk
-# - download_samplevnfs
-# - install_vnf_vACL
-# - copy_L4Replay
-# - install_L4Replay
- - copy_trex
+ - role: download_dpdk
+# dpdk_version: "17.02"
+ - install_dpdk
+ - download_trex
- install_trex
+ - download_samplevnfs
+ - role: install_samplevnf
+ vnf_name: PROX
+ - role: install_samplevnf
+ vnf_name: UDP_Replay
+ - role: install_samplevnf
+ vnf_name: ACL
+ - role: install_samplevnf
+ vnf_name: FW
+ - role: install_samplevnf
+ vnf_name: CGNATP
diff --git a/ansible/ubuntu_server_cloudimg_modify_vfw.yml b/ansible/ubuntu_server_cloudimg_modify_vfw.yml
deleted file mode 100644
index f8cd3ecdc..000000000
--- a/ansible/ubuntu_server_cloudimg_modify_vfw.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- hosts: chroot_image
- connection: chroot
- vars:
- clone_dir: /tmp/yardstick-clone
-
- pre_tasks:
- - debug: msg="chrooted in {{ inventory_hostname }}"
-
- roles:
-# - reset_resolv_conf
-# - add_custom_repos
-# - role: set_package_installer_proxy
-# when: proxy_env is defined and proxy_env
- # can update grub in chroot/docker
-# - enable_hugepages_on_boot
-# - modify_cloud_config
-# - install_image_dependencies
-# - role: download_dpdk
-# dpdk_version: "16.07"
-# - install_dpdk
-# - download_samplevnfs
- - install_vnf_vFW
-# - copy_L4Replay
-# - install_L4Replay
-# - copy_trex
-# - install_trex
-
diff --git a/api/database/v2/handlers.py b/api/database/v2/handlers.py
index 1bc32bf0e..e4f1dd668 100644
--- a/api/database/v2/handlers.py
+++ b/api/database/v2/handlers.py
@@ -87,6 +87,11 @@ class V2ImageHandler(object):
raise ValueError
return image
+ def delete_by_uuid(self, uuid):
+ image = self.get_by_uuid(uuid)
+ db_session.delete(image)
+ db_session.commit()
+
class V2PodHandler(object):
diff --git a/api/database/v2/models.py b/api/database/v2/models.py
index 1e85559cb..59dab3ebc 100644
--- a/api/database/v2/models.py
+++ b/api/database/v2/models.py
@@ -48,9 +48,6 @@ class V2Image(Base):
name = Column(String(30))
description = Column(Text)
environment_id = Column(String(30))
- size = Column(String(30))
- status = Column(String(30))
- time = Column(DateTime)
class V2Container(Base):
diff --git a/api/resources/v2/environments.py b/api/resources/v2/environments.py
index f021a3c5a..158e98be7 100644
--- a/api/resources/v2/environments.py
+++ b/api/resources/v2/environments.py
@@ -35,6 +35,9 @@ class V2Environments(ApiResource):
container_info = e['container_id']
e['container_id'] = jsonutils.loads(container_info) if container_info else {}
+ image_id = e['image_id']
+ e['image_id'] = image_id.split(',') if image_id else []
+
data = {
'environments': environments
}
@@ -78,8 +81,13 @@ class V2Environment(ApiResource):
return result_handler(consts.API_ERROR, 'no such environment id')
environment = change_obj_to_dict(environment)
+
container_id = environment['container_id']
environment['container_id'] = jsonutils.loads(container_id) if container_id else {}
+
+ image_id = environment['image_id']
+ environment['image_id'] = image_id.split(',') if image_id else []
+
return result_handler(consts.API_SUCCESS, {'environment': environment})
def delete(self, environment_id):
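
As a side note, the environment record stores its image ids in a single comma-separated column; a tiny sketch (illustrative helpers, not code from this commit) of the round-trip performed by the handlers above and in images.py below:

def image_ids_to_list(image_id_field):
    """'id1,id2' -> ['id1', 'id2']; empty or None -> []."""
    return image_id_field.split(',') if image_id_field else []


def append_image_id(image_id_field, new_id):
    """Append a freshly created image uuid, returning the stored string form."""
    ids = image_ids_to_list(image_id_field)
    ids.append(new_id)
    return ','.join(ids)


# e.g. append_image_id('', 'uuid-1') == 'uuid-1'
#      append_image_id('uuid-1', 'uuid-2') == 'uuid-1,uuid-2'
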
diff --git a/api/resources/v2/images.py b/api/resources/v2/images.py
index 8359e105b..0c36a0a26 100644
--- a/api/resources/v2/images.py
+++ b/api/resources/v2/images.py
@@ -7,76 +7,361 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
-import subprocess
+import os
+import uuid
import threading
+import requests
+import datetime
from api import ApiResource
+from api.database.v2.handlers import V2ImageHandler
+from api.database.v2.handlers import V2EnvironmentHandler
from yardstick.common.utils import result_handler
from yardstick.common.utils import source_env
from yardstick.common.utils import change_obj_to_dict
from yardstick.common.openstack_utils import get_nova_client
+from yardstick.common.openstack_utils import get_glance_client
from yardstick.common import constants as consts
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
+IMAGE_MAP = {
+ 'yardstick-image': {
+ 'path': os.path.join(consts.IMAGE_DIR, 'yardstick-image.img'),
+ 'url': 'http://artifacts.opnfv.org/yardstick/images/yardstick-image.img'
+ },
+ 'Ubuntu-16.04': {
+ 'path': os.path.join(consts.IMAGE_DIR, 'xenial-server-cloudimg-amd64-disk1.img'),
+ 'url': 'cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img'
+ },
+ 'cirros-0.3.5': {
+ 'path': os.path.join(consts.IMAGE_DIR, 'cirros-0.3.5-x86_64-disk.img'),
+ 'url': 'http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img'
+ }
+}
+
class V2Images(ApiResource):
def get(self):
try:
source_env(consts.OPENRC)
- except:
+ except Exception:
return result_handler(consts.API_ERROR, 'source openrc error')
nova_client = get_nova_client()
try:
images_list = nova_client.images.list()
- except:
+ except Exception:
return result_handler(consts.API_ERROR, 'get images error')
else:
- images = [self.get_info(change_obj_to_dict(i)) for i in images_list]
- status = 1 if all(i['status'] == 'ACTIVE' for i in images) else 0
- if not images:
- status = 0
+ images = {i.name: self.get_info(change_obj_to_dict(i)) for i in images_list}
- return result_handler(consts.API_SUCCESS, {'status': status, 'images': images})
+ return result_handler(consts.API_SUCCESS, {'status': 1, 'images': images})
def post(self):
return self._dispatch_post()
def get_info(self, data):
+ try:
+ size = data['OS-EXT-IMG-SIZE:size']
+ except KeyError:
+ size = None
+ else:
+ size = float(size) / 1024 / 1024
+
result = {
'name': data.get('name', ''),
- 'size': data.get('OS-EXT-IMG-SIZE:size', ''),
- 'status': data.get('status', ''),
- 'time': data.get('updated', '')
+            'description': data.get('description', ''),
+ 'size': size,
+ 'status': data.get('status'),
+ 'time': data.get('updated')
}
return result
def load_image(self, args):
- thread = threading.Thread(target=self._load_images)
+ try:
+ image_name = args['name']
+ except KeyError:
+            return result_handler(consts.API_ERROR, 'image name must be provided')
+
+ if image_name not in IMAGE_MAP:
+ return result_handler(consts.API_ERROR, 'wrong image name')
+
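+        # download the image (if not cached locally) and register it with glance in a background thread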
+ thread = threading.Thread(target=self._do_load_image, args=(image_name,))
thread.start()
+ return result_handler(consts.API_SUCCESS, {'image': image_name})
+
+ def upload_image(self, args):
+ try:
+ image_file = args['file']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'file must be provided')
+
+ try:
+ environment_id = args['environment_id']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+ try:
+ uuid.UUID(environment_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'invalid environment id')
+
+ environment_handler = V2EnvironmentHandler()
+ try:
+ environment = environment_handler.get_by_uuid(environment_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'no such environment')
+
+ file_path = os.path.join(consts.IMAGE_DIR, image_file.filename)
+ LOG.info('saving file')
+ image_file.save(file_path)
+
+ LOG.info('loading image')
+ self._load_image(image_file.filename, file_path)
+
+ LOG.info('creating image in DB')
+ image_handler = V2ImageHandler()
+ image_id = str(uuid.uuid4())
+ image_init_data = {
+ 'uuid': image_id,
+ 'name': image_file.filename,
+ 'environment_id': environment_id
+ }
+ image_handler.insert(image_init_data)
+
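+        # environment.image_id holds a comma-separated list of image uuids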
+ LOG.info('update image in environment')
+ if environment.image_id:
+ image_list = environment.image_id.split(',')
+ image_list.append(image_id)
+ new_image_id = ','.join(image_list)
+ else:
+ new_image_id = image_id
+
+ environment_handler.update_attr(environment_id, {'image_id': new_image_id})
+
+ return result_handler(consts.API_SUCCESS, {'uuid': image_id})
+
+ def upload_image_by_url(self, args):
+ try:
+ url = args['url']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'url must be provided')
+
+ try:
+ environment_id = args['environment_id']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+ try:
+ uuid.UUID(environment_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'invalid environment id')
+
+ environment_handler = V2EnvironmentHandler()
+ try:
+ environment = environment_handler.get_by_uuid(environment_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'no such environment')
+
+ thread = threading.Thread(target=self._do_upload_image_by_url, args=(url,))
+ thread.start()
+
+ file_name = url.split('/')[-1]
+
+ LOG.info('creating image in DB')
+ image_handler = V2ImageHandler()
+ image_id = str(uuid.uuid4())
+ image_init_data = {
+ 'uuid': image_id,
+ 'name': file_name,
+ 'environment_id': environment_id
+ }
+ image_handler.insert(image_init_data)
+
+ LOG.info('update image in environment')
+ if environment.image_id:
+ image_list = environment.image_id.split(',')
+ image_list.append(image_id)
+ new_image_id = ','.join(image_list)
+ else:
+ new_image_id = image_id
+
+ environment_handler.update_attr(environment_id, {'image_id': new_image_id})
+
+ return result_handler(consts.API_SUCCESS, {'uuid': image_id})
+
+ def delete_image(self, args):
+ try:
+ image_name = args['name']
+ except KeyError:
+            return result_handler(consts.API_ERROR, 'image name must be provided')
+
+ if image_name not in IMAGE_MAP:
+ return result_handler(consts.API_ERROR, 'wrong image name')
+
+ glance_client = get_glance_client()
+ try:
+ image = next((i for i in glance_client.images.list() if i.name == image_name))
+ except StopIteration:
+ return result_handler(consts.API_ERROR, 'can not find image')
+
+ glance_client.images.delete(image.id)
+
return result_handler(consts.API_SUCCESS, {})
- def _load_images(self):
+ def _do_upload_image_by_url(self, url):
+ file_name = url.split('/')[-1]
+ path = os.path.join(consts.IMAGE_DIR, file_name)
+
+ LOG.info('download image')
+ self._download_image(url, path)
+
+ LOG.info('loading image')
+ self._load_image(file_name, path)
+
+ def _do_load_image(self, image_name):
+ if not os.path.exists(IMAGE_MAP[image_name]['path']):
+ self._download_image(IMAGE_MAP[image_name]['url'],
+ IMAGE_MAP[image_name]['path'])
+
+ self._load_image(image_name, IMAGE_MAP[image_name]['path'])
+
+ def _load_image(self, image_name, image_path):
LOG.info('source openrc')
source_env(consts.OPENRC)
- LOG.info('clean images')
- cmd = [consts.CLEAN_IMAGES_SCRIPT]
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- cwd=consts.REPOS_DIR)
- _, err = p.communicate()
- if p.returncode != 0:
- LOG.error('clean image failed: %s', err)
-
- LOG.info('load images')
- cmd = [consts.LOAD_IMAGES_SCRIPT]
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- cwd=consts.REPOS_DIR)
- _, err = p.communicate()
- if p.returncode != 0:
- LOG.error('load image failed: %s', err)
+ LOG.info('load image')
+ glance_client = get_glance_client()
+ image = glance_client.images.create(name=image_name,
+ visibility='public',
+ disk_format='qcow2',
+ container_format='bare')
+ with open(image_path, 'rb') as f:
+ glance_client.images.upload(image.id, f)
LOG.info('Done')
+
+ def _download_image(self, url, path):
+ start = datetime.datetime.now().replace(microsecond=0)
+
+ LOG.info('download image from: %s', url)
+ self._download_file(url, path)
+
+ end = datetime.datetime.now().replace(microsecond=0)
+ LOG.info('download image success, total: %s s', end - start)
+
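+    # fetch one byte range of the file and write it at the matching offset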
+ def _download_handler(self, start, end, url, filename):
+
+ headers = {'Range': 'bytes=%d-%d' % (start, end)}
+ r = requests.get(url, headers=headers, stream=True)
+
+ with open(filename, "r+b") as fp:
+ fp.seek(start)
+ fp.tell()
+ fp.write(r.content)
+
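+    # download the file with num_thread parallel ranged requests; if the server
+    # does not report content-length the download is silently skipped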
+ def _download_file(self, url, path, num_thread=5):
+
+ r = requests.head(url)
+ try:
+ file_size = int(r.headers['content-length'])
+ except Exception:
+ return
+
+ with open(path, 'wb') as f:
+ f.truncate(file_size)
+
+ thread_list = []
+ part = file_size // num_thread
+ for i in range(num_thread):
+ start = part * i
+ end = start + part if i != num_thread - 1 else file_size
+
+ kwargs = {'start': start, 'end': end, 'url': url, 'filename': path}
+ t = threading.Thread(target=self._download_handler, kwargs=kwargs)
+ t.setDaemon(True)
+ t.start()
+ thread_list.append(t)
+
+ for t in thread_list:
+ t.join()
+
+
+class V2Image(ApiResource):
+ def get(self, image_id):
+ try:
+ uuid.UUID(image_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'invalid image id')
+
+ image_handler = V2ImageHandler()
+ try:
+ image = image_handler.get_by_uuid(image_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'no such image id')
+
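+        # if the image is not (yet) known to nova, fall back to the DB record;
+        # get_info then reports size/status/time as None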
+ nova_client = get_nova_client()
+ images = nova_client.images.list()
+ try:
+ image = next((i for i in images if i.name == image.name))
+ except StopIteration:
+ pass
+
+ return_image = self.get_info(change_obj_to_dict(image))
+ return_image['id'] = image_id
+
+ return result_handler(consts.API_SUCCESS, {'image': return_image})
+
+ def delete(self, image_id):
+ try:
+ uuid.UUID(image_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'invalid image id')
+
+ image_handler = V2ImageHandler()
+ try:
+ image = image_handler.get_by_uuid(image_id)
+ except ValueError:
+ return result_handler(consts.API_ERROR, 'no such image id')
+
+ LOG.info('delete image in openstack')
+ glance_client = get_glance_client()
+ try:
+ image_o = next((i for i in glance_client.images.list() if i.name == image.name))
+ except StopIteration:
+ return result_handler(consts.API_ERROR, 'can not find image')
+
+ glance_client.images.delete(image_o.id)
+
+ LOG.info('delete image in environment')
+ environment_id = image.environment_id
+ environment_handler = V2EnvironmentHandler()
+ environment = environment_handler.get_by_uuid(environment_id)
+ image_list = environment.image_id.split(',')
+ image_list.remove(image_id)
+ environment_handler.update_attr(environment_id, {'image_id': ','.join(image_list)})
+
+ LOG.info('delete image in DB')
+ image_handler.delete_by_uuid(image_id)
+
+ return result_handler(consts.API_SUCCESS, {'image': image_id})
+
+ def get_info(self, data):
+ try:
+ size = data['OS-EXT-IMG-SIZE:size']
+ except KeyError:
+ size = None
+ else:
+ size = float(size) / 1024 / 1024
+
+ result = {
+ 'name': data.get('name', ''),
+ 'description': data.get('description', ''),
+ 'size': size,
+ 'status': data.get('status'),
+ 'time': data.get('updated')
+ }
+ return result
diff --git a/api/server.py b/api/server.py
index 158b8a508..37a1ab6a6 100644
--- a/api/server.py
+++ b/api/server.py
@@ -35,6 +35,7 @@ except ImportError:
LOG = logging.getLogger(__name__)
app = Flask(__name__)
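+# limit upload size to 2 GiB, large enough for the image files handled by the v2 images API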
+app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024 * 1024
Swagger(app)
diff --git a/api/urls.py b/api/urls.py
index 83cf4daf9..9b0040b6c 100644
--- a/api/urls.py
+++ b/api/urls.py
@@ -36,6 +36,7 @@ urlpatterns = [
Url('/api/v2/yardstick/images', 'v2_images'),
Url('/api/v2/yardstick/images/action', 'v2_images'),
+ Url('/api/v2/yardstick/images/<image_id>', 'v2_image'),
Url('/api/v2/yardstick/containers', 'v2_containers'),
Url('/api/v2/yardstick/containers/action', 'v2_containers'),
diff --git a/docker/nginx.sh b/docker/nginx.sh
index 74009f5bd..1ac1d3f42 100755
--- a/docker/nginx.sh
+++ b/docker/nginx.sh
@@ -20,6 +20,7 @@ server {
index index.htm index.html;
location / {
include uwsgi_params;
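+            # allow large image uploads through nginx (2000m, roughly the API's 2 GiB limit)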
+ client_max_body_size 2000m;
uwsgi_pass unix:///var/run/yardstick.sock;
}
diff --git a/docs/testing/user/userguide/14-nsb_installation.rst b/docs/testing/user/userguide/14-nsb_installation.rst
index 3eb17bbca..7c5327964 100644
--- a/docs/testing/user/userguide/14-nsb_installation.rst
+++ b/docs/testing/user/userguide/14-nsb_installation.rst
@@ -103,7 +103,7 @@ Config yardstick conf
cp ./etc/yardstick/yardstick.conf.sample /etc/yardstick/yardstick.conf
vi /etc/yardstick/yardstick.conf
-Add trex_path and bin_path in 'nsb' section.
+Add trex_path, trex_client_lib and bin_path to the 'nsb' section.
::
@@ -121,6 +121,7 @@ Add trex_path and bin_path in 'nsb' section.
[nsb]
trex_path=/opt/nsb_bin/trex/scripts
bin_path=/opt/nsb_bin
+ trex_client_lib=/opt/nsb_bin/trex_client/stl
Config pod.yaml describing Topology
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc056.rst b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst
new file mode 100644
index 000000000..01aa99ac2
--- /dev/null
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst
@@ -0,0 +1,149 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC056
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Messaging Queue Service High Availability |
++==============+==============================================================+
+|test case id | OPNFV_YARDSTICK_TC056:OpenStack Controller Messaging Queue |
+| | Service High Availability |
++--------------+--------------------------------------------------------------+
+|test purpose | This test case will verify the high availability of the |
+| | messaging queue service(RabbitMQ) that supports OpenStack on |
+| | controller node. When messaging queue service(which is |
+| | active) of a specified controller node is killed, the test |
+| | case will check whether messaging queue services(which are |
+| | standby) on other controller nodes will be switched active, |
+|              | and whether the cluster manager on the attacked controller   |
+| | node will restart the stopped messaging queue. |
++--------------+--------------------------------------------------------------+
+|test method | This test case kills the processes of messaging queue |
+| | service on a selected controller node, then checks whether |
+| | the request of the related Openstack command is OK and the |
+| | killed processes are recovered. |
++--------------+--------------------------------------------------------------+
+|attackers | In this test case, an attacker called "kill-process" is |
+| | needed. This attacker includes three parameters: |
+| | 1) fault_type: which is used for finding the attacker's |
+| | scripts. It should be always set to "kill-process" in this |
+| | test case. |
+| | 2) process_name: which is the process name of the specified |
+| | OpenStack service. If there are multiple processes use the |
+|              | OpenStack service. If there are multiple processes using the |
+| | attacker. |
+|              | In this case, this parameter should be set to "rabbitmq".    |
+| | 3) host: which is the name of a control node being attacked. |
+| | |
+| | e.g. |
+| | -fault_type: "kill-process" |
+| | -process_name: "rabbitmq-server" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|monitors | In this test case, two kinds of monitor are needed: |
+|              | 1. the "openstack-cmd" monitor constantly requests a specific|
+| | Openstack command, which needs two parameters: |
+| | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+| | "openstack-cmd" for this monitor. |
+| | 2) command_name: which is the command name used for request. |
+| | |
+|              | 2. the "process" monitor checks whether a process is running |
+| | on a specific node, which needs three parameters: |
+|              | 1) monitor_type: which is used for finding the monitor class |
+| | and related scripts. It should be always set to "process" |
+| | for this monitor. |
+|              | 2) process_name: which is the process name for the monitor   |
+|              | 3) host: which is the name of the node running the process   |
+| | In this case, the command_name of monitor1 should be |
+|              | services that use the messaging queue (currently nova,       |
+|              | neutron, cinder, heat and ceilometer are using RabbitMQ),    |
+|              | and the process_name of monitor2 should be "rabbitmq",       |
+| | for example: |
+| | |
+| | e.g. |
+| | monitor1-1: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "openstack image list" |
+| | monitor1-2: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "openstack network list" |
+| | monitor1-3: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "openstack volume list" |
+| | monitor2: |
+| | -monitor_type: "process" |
+| | -process_name: "rabbitmq" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|metrics | In this test case, there are two metrics: |
+| | 1)service_outage_time: which indicates the maximum outage |
+| | time (seconds) of the specified Openstack command request. |
+| | 2)process_recover_time: which indicates the maximum time |
+|              | (seconds) from the process being killed to being recovered   |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Developed by the project. Please see folder: |
+| | "yardstick/benchmark/scenarios/availability/ha_tools" |
+| | |
++--------------+--------------------------------------------------------------+
+|references | ETSI NFV REL001 |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files: |
+| | 1) test case file:opnfv_yardstick_tc056.yaml |
+| | -Attackers: see above "attackers" description |
+| | -waiting_time: which is the time (seconds) from the process |
+|              | being killed to stopping the monitors                        |
+| | -Monitors: see above "monitors" description |
+| | -SLA: see above "metrics" description |
+| | |
+| | 2)POD file: pod.yaml |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+| | the pod.yaml. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | start monitors: |
+|              | each monitor will run in an independent process              |
+| | |
+| | Result: The monitor info will be collected. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | do attacker: connect the host through SSH, and then execute |
+| | the kill process script with param value specified by |
+| | "process_name" |
+| | |
+| | Result: Process will be killed. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | stop monitors after a period of time specified by |
+| | "waiting_time" |
+| | |
+| | Result: The monitor info will be aggregated. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | verify the SLA |
+| | |
+| | Result: The test case is passed or not. |
+| | |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check     |
+| | the status of the specified process on the host, and restart |
+| | the process if it is not running for next test cases. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc057.rst b/docs/testing/user/userguide/opnfv_yardstick_tc057.rst
new file mode 100644
index 000000000..2a4ce40c0
--- /dev/null
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc057.rst
@@ -0,0 +1,165 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC057
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Cluster Management Service High Availability |
++==============+==============================================================+
+|test case id  | OPNFV_YARDSTICK_TC057:OpenStack Controller Cluster           |
+|              | Management Service High Availability                         |
++--------------+--------------------------------------------------------------+
+|test purpose | This test case will verify the quorum configuration of the |
+| | cluster manager(pacemaker) on controller nodes. When a |
+|              | controller node, which holds all active application          |
+| | resources, failed to communicate with other cluster nodes |
+| | (via corosync), the test case will check whether the standby |
+| | application resources will take place of those active |
+| | application resources which should be regarded to be down in |
+| | the cluster manager. |
++--------------+--------------------------------------------------------------+
+|test method | This test case kills the processes of cluster messaging |
+| | service(corosync) on a selected controller node(the node |
+| | holds the active application resources), then checks whether |
+| | active application resources are switched to other |
+| | controller nodes and whether the Openstack commands are OK. |
++--------------+--------------------------------------------------------------+
+|attackers | In this test case, an attacker called "kill-process" is |
+| | needed. This attacker includes three parameters: |
+| | 1) fault_type: which is used for finding the attacker's |
+| | scripts. It should be always set to "kill-process" in this |
+| | test case. |
+|              | 2) process_name: which is the process name of the cluster    |
+|              | messaging service. If there are multiple processes using the |
+| | same name on the host, all of them are killed by this |
+| | attacker. |
+| | 3) host: which is the name of a control node being attacked. |
+| | |
+|              | In this case, this process name should be set to "corosync", |
+| | for example |
+| | -fault_type: "kill-process" |
+| | -process_name: "corosync" |
+| | -host: node1 |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, one kind of monitor is needed:            |
+|              | 1. the "openstack-cmd" monitor constantly requests a specific|
+| | Openstack command, which needs two parameters: |
+| | 1) monitor_type: which is used for finding the monitor class |
+| | and related scripts. It should be always set to |
+| | "openstack-cmd" for this monitor. |
+| | 2) command_name: which is the command name used for request |
+| | |
+| | In this case, the command_name of monitor1 should be services|
+| | that are managed by the cluster manager. (Since rabbitmq and |
+| | haproxy are managed by pacemaker, most Openstack Services |
+| | can be used to check high availability in this case) |
+| | |
+| | (e.g.) |
+| | monitor1: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "nova image-list" |
+| | monitor2: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "neutron router-list" |
+| | monitor3: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "heat stack-list" |
+| | monitor4: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "cinder list" |
+| | |
++--------------+--------------------------------------------------------------+
+|checkers      | In this test case, a checker is needed; the checker will     |
+|              | check the status of application resources in pacemaker, and  |
+|              | the checker has the following parameters:                    |
+| | 1) checker_type: which is used for finding the result |
+| | checker class and related scripts. In this case the checker |
+| | type will be "pacemaker-check-resource" |
+| | 2) resource_name: the application resource name |
+| | 3) resource_status: the expected status of the resource |
+| | 4) expectedValue: the expected value for the output of the |
+|              | checker script; in this case the expected value will be the  |
+| | identifier in the cluster manager |
+|              | 5) condition: whether the expected value is in the output of |
+| | checker script or is totally same with the output. |
+|              | (note: pcs is required to be installed on the controller     |
+|              | node in order to run this checker)                           |
+| | |
+| | (e.g.) |
+| | checker1: |
+| | -checker_type: "pacemaker-check-resource" |
+| | -resource_name: "p_rabbitmq-server" |
+| | -resource_status: "Stopped" |
+| | -expectedValue: "node-1" |
+| | -condition: "in" |
+| | checker2: |
+| | -checker_type: "pacemaker-check-resource" |
+| | -resource_name: "p_rabbitmq-server" |
+| | -resource_status: "Master" |
+| | -expectedValue: "node-2" |
+| | -condition: "in" |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there is one metric:                      |
+| | 1)service_outage_time: which indicates the maximum outage |
+| | time (seconds) of the specified Openstack command request. |
++--------------+--------------------------------------------------------------+
+|test tool | None. Self-developed. |
++--------------+--------------------------------------------------------------+
+|references | ETSI NFV REL001 |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files: |
+| | 1) test case file: opnfv_yardstick_tc057.yaml |
+| | -Attackers: see above "attackers" description |
+| | -Monitors: see above "monitors" description |
+| | -Checkers: see above "checkers" description |
+| | -Steps: the test case execution step, see "test sequence" |
+| | description below |
+| | |
+| | 2)POD file: pod.yaml |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+| | the pod.yaml. |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | start monitors: |
+|              | each monitor will run in an independent process              |
+| | |
+| | Result: The monitor info will be collected. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | do attacker: connect the host through SSH, and then execute |
+| | the kill process script with param value specified by |
+| | "process_name" |
+| | |
+| | Result: Process will be killed. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | do checker: check whether the status of application |
+| | resources on different nodes are updated |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | stop monitors after a period of time specified by |
+| | "waiting_time" |
+| | |
+| | Result: The monitor info will be aggregated. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 5 | verify the SLA |
+| | |
+| | Result: The test case is passed or not. |
+| | |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check the |
+| | status of the cluster messaging process(corosync) on the |
+| | host, and restart the process if it is not running for next |
+|              | test cases.                                                  |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc058.rst b/docs/testing/user/userguide/opnfv_yardstick_tc058.rst
new file mode 100644
index 000000000..fb9a4c2d1
--- /dev/null
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc058.rst
@@ -0,0 +1,148 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC058
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Virtual Router Service High Availability |
++==============+==============================================================+
+|test case id | OPNFV_YARDSTICK_TC058:OpenStack Controller Virtual Router |
+| | Service High Availability |
++--------------+--------------------------------------------------------------+
+|test purpose | This test case will verify the high availability of virtual |
+| | routers(L3 agent) on controller node. When a virtual router |
+| | service on a specified controller node is shut down, this |
+| | test case will check whether the network of virtual machines |
+| | will be affected, and whether the attacked virtual router |
+| | service will be recovered. |
++--------------+--------------------------------------------------------------+
+|test method | This test case kills the processes of virtual router service |
+| | (l3-agent) on a selected controller node(the node holds the |
+| | active l3-agent), then checks whether the network routing |
+| | of virtual machines is OK and whether the killed service |
+| | will be recovered. |
++--------------+--------------------------------------------------------------+
+|attackers | In this test case, an attacker called "kill-process" is |
+| | needed. This attacker includes three parameters: |
+| | 1) fault_type: which is used for finding the attacker's |
+| | scripts. It should be always set to "kill-process" in this |
+| | test case. |
+|              | 2) process_name: which is the process name of the virtual    |
+|              | router service. If there are multiple processes using the    |
+| | same name on the host, all of them are killed by this |
+| | attacker. |
+| | 3) host: which is the name of a control node being attacked. |
+| | |
+|              | In this case, this process name should be set to "l3agent",  |
+| | for example |
+| | -fault_type: "kill-process" |
+| | -process_name: "l3agent" |
+| | -host: node1 |
++--------------+--------------------------------------------------------------+
+|monitors | In this test case, two kinds of monitor are needed: |
+| | 1. the "ip_status" monitor that pings a specific ip to check |
+| | the connectivity of this ip, which needs two parameters: |
+| | 1) monitor_type: which is used for finding the monitor class |
+| | and related scripts. It should be always set to "ip_status" |
+| | for this monitor. |
+| | 2) ip_address: The ip to be pinged. In this case, ip_address |
+| | will be either an ip address of external network or an ip |
+| | address of a virtual machine. |
+| | 3) host: The node on which ping will be executed, in this |
+| | case the host will be a virtual machine. |
+| | |
+|              | 2. the "process" monitor checks whether a process is running |
+| | on a specific node, which needs three parameters: |
+|              | 1) monitor_type: which is used for finding the monitor class |
+| | and related scripts. It should be always set to "process" |
+| | for this monitor. |
+| | 2) process_name: which is the process name for monitor. In |
+| | this case, the process-name of monitor2 should be "l3agent" |
+| | 3) host: which is the name of the node running the process |
+| | |
+| | e.g. |
+| | monitor1-1: |
+| | -monitor_type: "ip_status" |
+| | -host: 172.16.0.11 |
+| | -ip_address: 172.16.1.11 |
+| | monitor1-2: |
+| | -monitor_type: "ip_status" |
+| | -host: 172.16.0.11 |
+| | -ip_address: 8.8.8.8 |
+| | monitor2: |
+| | -monitor_type: "process" |
+| | -process_name: "l3agent" |
+| | -host: node1 |
++--------------+--------------------------------------------------------------+
+|metrics | In this test case, there are two metrics: |
+| | 1)service_outage_time: which indicates the maximum outage |
+|              | time (seconds) of the monitored IP connectivity (ping).      |
+| | 2)process_recover_time: which indicates the maximum time |
+|              | (seconds) from the process being killed to being recovered   |
++--------------+--------------------------------------------------------------+
+|test tool | None. Self-developed. |
++--------------+--------------------------------------------------------------+
+|references | ETSI NFV REL001 |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files: |
+| | 1) test case file: opnfv_yardstick_tc058.yaml |
+| | -Attackers: see above "attackers" description |
+| | -Monitors: see above "monitors" description |
+| | -Steps: the test case execution step, see "test sequence" |
+| | description below |
+| | |
+| | 2)POD file: pod.yaml |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+| | the pod.yaml. |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with cachestat included in the image. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1        | Two host VMs are booted; these two hosts are in two different|
+|              | networks; the networks are connected by a virtual router     |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+| | |
+| | Result: The monitor info will be collected. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3        | do attacker: connect the host through SSH, and then execute  |
+| | the kill process script with param value specified by |
+| | "process_name" |
+| | |
+| | Result: Process will be killed. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | stop monitors after a period of time specified by |
+| | "waiting_time" |
+| | |
+| | Result: The monitor info will be aggregated. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 5 | verify the SLA |
+| | |
+| | Result: The test case is passed or not. |
+| | |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check     |
+| | the status of the specified process on the host, and restart |
+| | the process if it is not running for next test cases. |
+| | Virtual machines and network created in the test case will |
+|              | be destroyed.                                                |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
++--------------+--------------------------------------------------------------+
diff --git a/etc/yardstick/yardstick.conf.sample b/etc/yardstick/yardstick.conf.sample
index 227aded2d..5675cc3bd 100644
--- a/etc/yardstick/yardstick.conf.sample
+++ b/etc/yardstick/yardstick.conf.sample
@@ -31,3 +31,4 @@ password = root
[nsb]
trex_path=/opt/nsb_bin/trex/scripts
bin_path=/opt/nsb_bin
+trex_client_lib=/opt/nsb_bin/trex_client/stl
diff --git a/gui/app/scripts/controllers/image.controller.js b/gui/app/scripts/controllers/image.controller.js
index f6c91592f..d7a7edfa9 100644
--- a/gui/app/scripts/controllers/image.controller.js
+++ b/gui/app/scripts/controllers/image.controller.js
@@ -1,150 +1,235 @@
'use strict';
angular.module('yardStickGui2App')
- .controller('ImageController', ['$scope', '$state', '$stateParams', 'mainFactory', 'Upload', 'toaster', '$location', '$interval',
- function($scope, $state, $stateParams, mainFactory, Upload, toaster, $location, $interval) {
+ .controller('ImageController', ['$scope', '$state', '$stateParams', 'mainFactory', 'Upload', 'toaster', '$location', '$interval', 'ngDialog',
+ function($scope, $state, $stateParams, mainFactory, Upload, toaster, $location, $interval, ngDialog) {
init();
- $scope.showloading = false;
- $scope.ifshowStatus = 0;
function init() {
+ $scope.showloading = false;
+ $scope.ifshowStatus = 0;
+
+ $scope.yardstickImage = [
+ {
+ 'name': 'yardstick-image',
+ 'description': '',
+ 'size': 'N/A',
+ 'status': 'N/A',
+ 'time': 'N/A'
+ },
+ {
+ 'name': 'Ubuntu-16.04',
+ 'description': '',
+ 'size': 'N/A',
+ 'status': 'N/A',
+ 'time': 'N/A'
+ },
+ {
+ 'name': 'cirros-0.3.5',
+ 'description': '',
+ 'size': 'N/A',
+ 'status': 'N/A',
+ 'time': 'N/A'
+ }
+ ];
+ $scope.customImage = [];
$scope.uuid = $stateParams.uuid;
- $scope.uploadImage = uploadImage;
- getItemIdDetail();
- getImageListSimple();
+ $scope.showloading = false;
+ $scope.url = null;
+ $scope.environmentInfo = null;
+
+ getYardstickImageList();
+ getCustomImageList(function(image, image_id){});
}
- function getItemIdDetail() {
+ function getYardstickImageList(){
+ mainFactory.ImageList().get({}).$promise.then(function(response){
+ if(response.status == 1){
+ angular.forEach($scope.yardstickImage, function(ele, index){
+ if(typeof(response.result.images[ele.name]) != 'undefined'){
+ $scope.yardstickImage[index] = response.result.images[ele.name];
+ }
+ });
+ }else{
+ mainFactory.errorHandler1(response);
+ }
+ }, function(response){
+ mainFactory.errorHandler2(response);
+ });
+ }
+
+ function getCustomImageList(func){
mainFactory.ItemDetail().get({
'envId': $stateParams.uuid
}).$promise.then(function(response) {
- if (response.status == 1) {
- $scope.baseElementInfo = response.result.environment;
-
-
- } else {
- toaster.pop({
- type: 'error',
- title: 'fail',
- body: response.error_msg,
- timeout: 3000
+ if(response.status == 1){
+ $scope.environmentInfo = response.result.environment;
+ $scope.customImage = [];
+ angular.forEach(response.result.environment.image_id, function(ele){
+ mainFactory.getImage().get({'imageId': ele}).$promise.then(function(responseData){
+ if(responseData.status == 1){
+ $scope.customImage.push(responseData.result.image);
+ func(responseData.result.image, ele);
+ }else{
+ mainFactory.errorHandler1(responseData);
+ }
+ }, function(errorData){
+ mainFactory.errorHandler2(errorData);
+ });
});
+ }else{
+ mainFactory.errorHandler1(response);
}
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'fail',
- body: 'unknow error',
- timeout: 3000
- });
- })
+ }, function(response){
+ mainFactory.errorHandler2(response);
+ });
}
- function getImageListSimple() {
-
- mainFactory.ImageList().get({}).$promise.then(function(response) {
- if (response.status == 1) {
- $scope.imageListData = response.result.images;
- // $scope.imageStatus = response.result.status;
-
- } else {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
- });
- }
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
+ $scope.loadYardstickImage = function(image_name){
+
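+                // poll the image list every 10 s until the loaded image shows up as ACTIVE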
+ var updateImageTask = $interval(updateYardstickImage, 10000);
+
+ function updateYardstickImage(){
+ mainFactory.ImageList().get({}).$promise.then(function(responseData){
+ if(responseData.status == 1){
+ if(typeof(responseData.result.images[image_name]) != 'undefined' && responseData.result.images[image_name].status == 'ACTIVE'){
+ angular.forEach($scope.yardstickImage, function(ele, index){
+ if(ele.name == image_name){
+ $scope.yardstickImage[index] = responseData.result.images[ele.name];
+ }
+ });
+ $interval.cancel(updateImageTask);
+ }
+ }else{
+ mainFactory.errorHandler1(responseData);
+ }
+ },function(errorData){
+ mainFactory.errorHandler2(errorData);
});
- })
- }
+ }
+ mainFactory.uploadImage().post({'action': 'load_image', 'args': {'name': image_name}}).$promise.then(function(response){
+ },function(response){
+ mainFactory.errorHandler2(response);
+ });
+ }
- function getImageList() {
- if ($scope.intervalImgae != undefined) {
- $interval.cancel($scope.intervalImgae);
- }
- mainFactory.ImageList().get({}).$promise.then(function(response) {
- if (response.status == 1) {
- $scope.imageListData = response.result.images;
- $scope.imageStatus = response.result.status;
-
- if ($scope.imageStatus == 0) {
- $scope.intervalImgae = $interval(function() {
- getImageList();
- }, 5000);
- } else if ($scope.intervalImgae != undefined) {
- $interval.cancel($scope.intervalImgae);
+ $scope.deleteYardstickImage = function(image_name){
+
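+                // poll every 10 s until the image disappears from the list, then reset its row to N/A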
+ var updateImageTask = $interval(updateYardstickImage, 10000);
+
+ function updateYardstickImage(){
+ mainFactory.ImageList().get({}).$promise.then(function(response){
+ if(response.status == 1){
+ if(typeof(response.result.images[image_name]) == 'undefined'){
+ angular.forEach($scope.yardstickImage, function(ele, index){
+ if(ele.name == image_name){
+ $scope.yardstickImage[index].size = 'N/A';
+ $scope.yardstickImage[index].status = 'N/A';
+ $scope.yardstickImage[index].time = 'N/A';
+ }
+ });
+ $interval.cancel(updateImageTask);
+ }
+ }else{
+ mainFactory.errorHandler1(response);
}
+ },function(response){
+ mainFactory.errorHandler2(response);
+ });
+ }
- } else {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
+ mainFactory.uploadImage().post({'action': 'delete_image', 'args': {'name': image_name}}).$promise.then(function(response){
+ },function(response){
+ mainFactory.errorHandler2(response);
+ });
+ }
+
+ $scope.uploadCustomImageByUrl = function(url){
+ mainFactory.uploadImageByUrl().post({
+ 'action': 'upload_image_by_url',
+ 'args': {
+ 'environment_id': $stateParams.uuid,
+ 'url': url
+ }
+ }).$promise.then(function(response){
+ if(response.status == 1){
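+                        // poll up to 10 times, every 30 s, until the new image becomes ACTIVE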
+ var updateImageTask = $interval(getCustomImageList, 30000, 10, true, function(image, image_id){
+ if(image_id == response.result.uuid && image.status == 'ACTIVE'){
+ $interval.cancel(updateImageTask);
+ }
});
+ ngDialog.close();
+ }else{
+ mainFactory.errorHandler1(response);
}
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
- });
- })
+ }, function(response){
+ mainFactory.errorHandler2(response);
+ });
}
- function uploadImage() {
- $scope.imageStatus = 0;
- $interval.cancel($scope.intervalImgae);
- $scope.ifshowStatus = 1;
+ $scope.uploadCustomImage = function($file, $invalidFiles) {
$scope.showloading = true;
- mainFactory.uploadImage().post({
- 'action': 'load_image',
- 'args': {
- 'environment_id': $scope.uuid
- }
- }).$promise.then(function(response) {
+ $scope.displayImageFile = $file;
+ Upload.upload({
+ url: Base_URL + '/api/v2/yardstick/images',
+ data: { file: $file, 'environment_id': $scope.uuid, 'action': 'upload_image' }
+ }).then(function(response) {
+
$scope.showloading = false;
- if (response.status == 1) {
+ if (response.data.status == 1) {
+
toaster.pop({
type: 'success',
- title: 'create success',
+ title: 'upload success',
body: 'you can go next step',
timeout: 3000
});
- setTimeout(function() {
- getImageList();
- }, 10000);
- } else {
- toaster.pop({
- type: 'error',
- title: 'failed',
- body: 'something wrong',
- timeout: 3000
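+                        // poll up to 10 times, every 10 s, until the uploaded image becomes ACTIVE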
+ var updateImageTask = $interval(getCustomImageList, 10000, 10, true, function(image, image_id){
+ if(image_id == response.data.result.uuid && image.status == 'ACTIVE'){
+ $interval.cancel(updateImageTask);
+ }
});
+ }else{
+ mainFactory.errorHandler1(response);
+ }
+ }, function(response) {
+ $scope.uploadfile = null;
+ mainFactory.errorHandler2(response);
+ })
+ }
+
+ $scope.deleteCustomImage = function(image_id){
+ mainFactory.deleteImage().delete({'imageId': image_id}).$promise.then(function(response){
+ if(response.status == 1){
+ $interval(getCustomImageList, 10000, 5, true, function(image, image_id){
+ });
+ }else{
+                        mainFactory.errorHandler1(response);
}
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'failed',
- body: 'something wrong',
- timeout: 3000
- });
+ }, function(response){
+ mainFactory.errorHandler2(response);
+ });
+ }
+
+ $scope.openImageDialog = function(){
+ $scope.url = null;
+ ngDialog.open({
+ preCloseCallback: function(value) {
+ },
+ template: 'views/modal/imageDialog.html',
+ scope: $scope,
+ className: 'ngdialog-theme-default',
+ width: 950,
+ showClose: true,
+ closeByDocument: false
})
}
@@ -158,9 +243,5 @@ angular.module('yardStickGui2App')
$state.go('app.podUpload', { uuid: $scope.uuid });
}
-
-
-
-
}
]);
diff --git a/gui/app/scripts/controllers/main.js b/gui/app/scripts/controllers/main.js
index ab76bf0f2..ceec83fa9 100644
--- a/gui/app/scripts/controllers/main.js
+++ b/gui/app/scripts/controllers/main.js
@@ -15,7 +15,7 @@ angular.module('yardStickGui2App')
$scope.showImage = null;
$scope.showContainer = null;
$scope.showNextOpenRc = null;
- $scope.showNextPod = null;
+ $scope.showNextPod = 1;
$scope.displayContainerInfo = [];
$scope.containerList = [{ value: 'create_influxdb', name: "InfluxDB" }, { value: 'create_grafana', name: "Grafana" }]
@@ -51,7 +51,6 @@ angular.module('yardStickGui2App')
$scope.chooseResult = chooseResult;
getEnvironmentList();
- // getImageList();
}
@@ -85,7 +84,7 @@ angular.module('yardStickGui2App')
}
$scope.goToImage = function goToImage() {
- getImageListSimple();
+ getImageList();
$scope.showImage = 1;
}
$scope.goToPod = function goToPod() {
@@ -290,7 +289,7 @@ angular.module('yardStickGui2App')
$scope.showImage = null;
$scope.showContainer = null;
$scope.showNextOpenRc = null;
- $scope.showNextPod = null;
+ $scope.showNextPod = 1;
$scope.displayContainerInfo = [];
$scope.displayPodFile = null;
@@ -308,7 +307,6 @@ angular.module('yardStickGui2App')
ngDialog.open({
preCloseCallback: function(value) {
getEnvironmentList();
- // getImageList();
},
template: 'views/modal/environmentDialog.html',
scope: $scope,
@@ -479,106 +477,97 @@ angular.module('yardStickGui2App')
})
}
- $scope.uploadImage = function uploadImage() {
- $scope.imageStatus = 0;
- $scope.showImageStatus = 1;
- $scope.showloading = true;
- mainFactory.uploadImage().post({
- 'action': 'load_image',
- 'args': {
- 'environment_id': $scope.uuid
+ $scope.yardstickImage = {
+ 'yardstick-image': {
+ 'name': 'yardstick-image',
+ 'description': '',
+ 'status': 'N/A'
+ },
+ 'Ubuntu-16.04': {
+ 'name': 'Ubuntu-16.04',
+ 'description': '',
+ 'status': 'N/A'
+ },
+ 'cirros-0.3.5': {
+ 'name': 'cirros-0.3.5',
+ 'description': '',
+ 'status': 'N/A'
+ }
+ };
- }
- }).$promise.then(function(response) {
- $scope.showloading = false;
- if (response.status == 1) {
- toaster.pop({
- type: 'success',
- title: 'create success',
- body: 'you can go next step',
- timeout: 3000
- });
- setTimeout(function() {
- getImageList();
- }, 10000);
- $scope.showNextPod = 1;
+ $scope.selectImageList = [];
- } else {
- toaster.pop({
- type: 'error',
- title: 'failed',
- body: 'something wrong',
- timeout: 3000
- });
+ $scope.selectImage = function(name){
+ $scope.selectImageList.push(name);
+ }
- }
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'failed',
- body: 'something wrong',
- timeout: 3000
- });
- })
+ $scope.unselectImage = function(name){
+ var index = $scope.selectImageList.indexOf(name);
+ $scope.selectImageList.splice(index, 1);
}
- function getImageList() {
- if ($scope.intervalImgae != undefined) {
- $interval.cancel($scope.intervalImgae);
- }
- mainFactory.ImageList().get({}).$promise.then(function(response) {
- if (response.status == 1) {
- $scope.imageListData = response.result.images;
- $scope.imageStatus = response.result.status;
+ $scope.uploadImage = function() {
+ $scope.imageStatus = 0;
+ $scope.showImageStatus = 1;
+ $scope.showloading = true;
- if ($scope.imageStatus == 0) {
- $scope.intervalImgae = $interval(function() {
- getImageList();
- }, 5000);
- } else if ($scope.intervalImgae != undefined) {
- $interval.cancel($scope.intervalImgae);
+ var updateImageTask = $interval(function(){
+ mainFactory.ImageList().get({}).$promise.then(function(response){
+ if(response.status == 1){
+ var isOk = true;
+ angular.forEach($scope.selectImageList, function(ele){
+ if(typeof(response.result.images[ele]) != 'undefined' && response.result.images[ele].status == 'ACTIVE'){
+ $scope.yardstickImage[ele] = response.result.images[ele];
+ }else{
+ isOk = false;
+ }
+ });
+ if(isOk){
+ $interval.cancel(updateImageTask);
+ $scope.imageStatus = 1;
+ }
+ }else{
+ mainFactory.errorHandler1(response);
}
-
- } else {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
- });
- }
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
+ }, function(response){
+ mainFactory.errorHandler2(response);
});
- })
+ }, 10000);
+
+ angular.forEach($scope.selectImageList, function(ele){
+ mainFactory.uploadImage().post({
+ 'action': 'load_image',
+ 'args': {
+ 'name': ele
+ }
+ }).$promise.then(function(response) {
+ if(response.status == 1){
+ $scope.showloading = false;
+ $scope.showNextPod = 1;
+ }else{
+ mainFactory.errorHandler1(response);
+ }
+ }, function(response) {
+ mainFactory.errorHandler2(response);
+ })
+ });
}
- function getImageListSimple() {
+ function getImageList() {
mainFactory.ImageList().get({}).$promise.then(function(response) {
if (response.status == 1) {
- $scope.imageListData = response.result.images;
- $scope.imageStatus = response.result.status;
-
- } else {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
+ angular.forEach($scope.yardstickImage, function(value, key){
+ if(typeof(response.result.images[key]) != 'undefined'){
+ $scope.yardstickImage[key] = response.result.images[key];
+ }
});
+ $scope.imageStatus = response.result.status;
+ }else{
+ mainFactory.errorHandler1(response);
}
- }, function(error) {
- toaster.pop({
- type: 'error',
- title: 'get data failed',
- body: 'please retry',
- timeout: 3000
- });
+ }, function(response) {
+ mainFactory.errorHandler2(response);
})
}
diff --git a/gui/app/scripts/controllers/projectDetail.controller.js b/gui/app/scripts/controllers/projectDetail.controller.js
index 843f66c57..e8468045d 100644
--- a/gui/app/scripts/controllers/projectDetail.controller.js
+++ b/gui/app/scripts/controllers/projectDetail.controller.js
@@ -672,7 +672,7 @@ angular.module('yardStickGui2App')
}
$scope.gotoLog = function gotoLog(task_id) {
- $state.go('app2.taskLog', { taskId: task_id });
+ $state.go('app.taskLog', { taskId: task_id });
}
}
]);
diff --git a/gui/app/scripts/factory/main.factory.js b/gui/app/scripts/factory/main.factory.js
index 44fbeb39f..7637a9ff3 100644
--- a/gui/app/scripts/factory/main.factory.js
+++ b/gui/app/scripts/factory/main.factory.js
@@ -9,7 +9,7 @@ var Base_URL;
var Grafana_URL;
angular.module('yardStickGui2App')
- .factory('mainFactory', ['$resource','$rootScope','$http', '$location',function($resource, $rootScope,$http,$location) {
+ .factory('mainFactory', ['$resource','$rootScope','$http', '$location', 'toaster',function($resource, $rootScope ,$http ,$location, toaster) {
Base_URL = 'http://' + $location.host() + ':' + $location.port();
Grafana_URL = 'http://' + $location.host();
@@ -86,6 +86,20 @@ angular.module('yardStickGui2App')
}
})
},
+ getImage: function(){
+ return $resource(Base_URL + '/api/v2/yardstick/images/:imageId', {imageId: "@imageId"}, {
+ 'get': {
+ method: 'GET'
+ }
+ })
+ },
+ deleteImage: function() {
+ return $resource(Base_URL + '/api/v2/yardstick/images/:imageId', { imageId: '@imageId' }, {
+ 'delete': {
+ method: 'DELETE'
+ }
+ })
+ },
uploadImage: function() {
return $resource(Base_URL + '/api/v2/yardstick/images', {}, {
'post': {
@@ -93,6 +107,13 @@ angular.module('yardStickGui2App')
}
})
},
+ uploadImageByUrl: function() {
+ return $resource(Base_URL + '/api/v2/yardstick/images', {}, {
+ 'post': {
+ method: 'POST'
+ }
+ })
+ },
getPodDetail: function() {
return $resource(Base_URL + '/api/v2/yardstick/pods/:podId', { podId: "@podId" }, {
'get': {
@@ -249,6 +270,22 @@ angular.module('yardStickGui2App')
method: 'DELETE'
}
})
+ },
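+            // errorHandler1: the API replied with an error payload; errorHandler2: the HTTP request itself failed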
+ errorHandler1: function(response){
+ toaster.pop({
+ 'type': 'error',
+ 'title': 'error',
+ 'body': response.result,
+ 'showCloseButton': true
+ });
+ },
+ errorHandler2: function(response){
+ toaster.pop({
+ 'type': 'error',
+ 'title': response.status,
+ 'body': response.statusText,
+ 'showCloseButton': true
+ });
}
};
diff --git a/gui/app/views/modal/environmentDialog.html b/gui/app/views/modal/environmentDialog.html
index 389de8340..4c539fc33 100644
--- a/gui/app/views/modal/environmentDialog.html
+++ b/gui/app/views/modal/environmentDialog.html
@@ -133,16 +133,17 @@
<table class="table table-striped">
<tr>
+ <th>choose</th>
<th>name</th>
- <th>size</th>
+ <th>description</th>
<th>status</th>
- <th>time</th>
</tr>
- <tr ng-repeat="image in imageListData">
- <td>{{image.name}}</td>
- <td>{{image.size/1024}} mb</td>
- <td>{{image.status}}</td>
- <td>{{image.time}}</td>
+ <tr ng-repeat="(name, value) in yardstickImage">
+ <td ng-if="selectImageList.indexOf(name) > -1"><img src="images/checkyes.png" style="height:12px;cursor:pointer" ng-click="unselectImage(name)" /></td>
+ <td ng-if="selectImageList.indexOf(name) == -1"><img src="images/checkno.png" style="height:12px;cursor:pointer" ng-click="selectImage(name)" /></td>
+ <td>{{name}}</td>
+ <td>{{value.description}}</td>
+ <td>{{value.status}}</td>
</tr>
diff --git a/gui/app/views/modal/imageDialog.html b/gui/app/views/modal/imageDialog.html
new file mode 100644
index 000000000..c568f2aba
--- /dev/null
+++ b/gui/app/views/modal/imageDialog.html
@@ -0,0 +1,19 @@
+<div>
+
+ <h4>Enter Remote Image Url</h4>
+ <input type="text" ng-model="url" />
+
+ <div style="text-align:center;margin-top:20px;">
+ <button class="btn btn-default" ng-disabled=" url==null || url==''" ng-click="uploadCustomImageByUrl(url)">Upload</button>
+ </div>
+
+</div>
+
+
+<style>
+ input {
+ border-radius: 10px;
+ border: 1px solid #eeeeee;
+ width: 100%;
+ }
+</style>
diff --git a/gui/app/views/podupload.html b/gui/app/views/podupload.html
index 99e83aca2..d6d7c0c6e 100644
--- a/gui/app/views/podupload.html
+++ b/gui/app/views/podupload.html
@@ -13,7 +13,7 @@
<hr/>
- <button class="btn btn-default" ngf-select="uploadFiles($file, $invalidFiles)" ngf-max-size="5MB">
+ <button class="btn btn-default" ngf-select="uploadFiles($file, $invalidFiles)" ngf-max-size="1024MB">
<div ng-show="!loadingOPENrc">Upload</div>
<img src="images/loading2.gif" width="25" height="25" ng-if="loadingOPENrc" />
</button>
diff --git a/gui/app/views/uploadImage.html b/gui/app/views/uploadImage.html
index 17ccfdb8b..0c337feeb 100644
--- a/gui/app/views/uploadImage.html
+++ b/gui/app/views/uploadImage.html
@@ -4,56 +4,86 @@
<div style="display:flex;flex-direction:row;">
<div style="width:750px;">
- <h3>{{baseElementInfo.name}} -- Image
+ <h3>{{environmentInfo.name}} -- Image
<button class="btn btn-default" style="float:right" ng-click="goNext()">Next</button>
</h3>
<!--<p>In this process, you can input your define openrc config or upload a openrc file</p>-->
- <hr/>
- <button class="btn btn-default" ng-click="uploadImage()">
- <div ng-if="!showloading">Load Image</div>
- <img src="images/loading2.gif" width="25" height="25" ng-if="showloading" />
- </button>
- <i class="fa fa-check" aria-hidden="true" style="margin-top:34px;margin-left:5px;color: #2ecc71;" ng-show="imageStatus==1&&ifshowStatus==1">done</i>
- <i class="fa fa-spinner" aria-hidden="true" style="margin-top:34px;margin-left:5px;color: #2ecc71;" ng-show="imageStatus==0&&ifshowStatus==1">loading</i>
- <i class="fa fa-exclamation-triangle" aria-hidden="true" style="margin-top:34px;margin-left:5px;color: red;" ng-show="imageStatus==2&&ifshowStatus==1">error</i>
-
<hr>
- <h4>Current Images</h4>
-
+ <h4>Alternative Images</h4>
<div>
<table class="table table-striped">
<tr>
<th>name</th>
+ <th>description</th>
<th>size</th>
<th>status</th>
<th>time</th>
+ <th>action</th>
</tr>
- <tr ng-repeat="image in imageListData">
+ <tr ng-repeat="image in yardstickImage">
<td>{{image.name}}</td>
- <td>{{image.size/1024}} MB</td>
+ <td>{{image.description}}</td>
+ <td>{{image.size | number:2}} MB</td>
<td>{{image.status}}</td>
<td>{{image.time}}</td>
-
+ <td>
+ <div class="btn-group" uib-dropdown>
+ <button id="single-button" type="button" class="btn btn-default btn-sm" uib-dropdown-toggle>
+ action<span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu" uib-dropdown-menu role="menu" aria-labelledby="single-button">
+ <li role="menuitem" ng-show="image.status == 'N/A'"><a ng-click="loadYardstickImage(image.name)">load</a></li>
+ <li role="menuitem" ng-show="image.status != 'N/A'"><a ng-click="deleteYardstickImage(image.name)">delete</a></li>
+ </ul>
+ </div>
+ </td>
</tr>
-
-
-
</table>
</div>
+ <hr>
+ <h4 style="display:inline">Custom Images</h4>
+ <div class="btn-group button-margin" style="float:right;margin-top:-10px;margin-bottom:5px">
+ <button class="btn btn-default" style="width:60px" ngf-select="uploadCustomImage($file, $invalidFiles)" ngf-max-size="2048MB">
+ <div ng-show="!showloading">Local</div>
+ <img src="images/loading2.gif" width="25" height="25" ng-if="showloading" />
+ </button>
+ <button class="btn btn-default" style="width:60px" ng-click="openImageDialog()">Url</button>
+ </div>
+ <div>
+ <table class="table table-striped">
-
-
-
-
-
-
+ <tr>
+ <th>name</th>
+ <th>description</th>
+ <th>size</th>
+ <th>status</th>
+ <th>time</th>
+ <th>action</th>
+ </tr>
+ <tr ng-repeat="image in customImage">
+ <td>{{image.name}}</td>
+ <td>{{image.description}}</td>
+ <td>{{image.size | number:2}} MB</td>
+ <td>{{image.status}}</td>
+ <td>{{image.time}}</td>
+ <td>
+ <div class="btn-group" uib-dropdown>
+ <button id="single-button" type="button" class="btn btn-default btn-sm" uib-dropdown-toggle>
+ action<span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu" uib-dropdown-menu role="menu" aria-labelledby="single-button">
+ <li role="menuitem" ><a ng-click="deleteCustomImage(image.id)">delete</a></li>
+ </ul>
+ </div>
+ </td>
+ </tr>
+ </table>
+ </div>
</div>
-
-
</div>
</div>
diff --git a/nsb_setup.sh b/nsb_setup.sh
index cc2542989..c11dc1038 100755
--- a/nsb_setup.sh
+++ b/nsb_setup.sh
@@ -83,7 +83,7 @@ install_trex()
{
TREX_DIR=$INSTALL_BIN_PATH/trex/scripts
if [ -d "$TREX_DIR" ]; then
- echo "Trex v2.20 already installed. Make sure it contains PYTHONPATH which is required to run TRex"
+ echo "Trex $TREX_VERSION already installed."
else
echo "Build TRex and installing Trex TG in $INSTALL_BIN_PATH/trex"
rm -rf ${TREX_DOWNLOAD##*/}
@@ -99,9 +99,7 @@ install_trex()
cd trex/scripts/ko/src/
make
make install
- # workaround trex module issue
- touch "$REPO_DIR/trex/scripts/automation/trex_control_plane/stl/__init__.py"
- cp "$REPO_DIR/trex/scripts/dpdk_nic_bind.py" "$INSTALL_BIN_PATH"
+ ln -s $TREX_DIR/automation/trex_control_plane $INSTALL_BIN_PATH/trex_client
popd
fi
echo "Done."
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
index cf2dbfc33..85ed2450f 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
vnf__1:
rules: acl_1rule.yaml
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
index 477bd8fbf..1b3377388 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
ixia_profile: ../../traffic_profiles/acl/acl_ipv4_profile_1flows.ixncfg
context:
type: Node
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
index 695768194..9a16466bf 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
@@ -21,20 +21,24 @@ scenarios:
nodes:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
- tc_options:
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
- vnf_options:
- acl:
+ vnf__1:
rules: acl_1rule.yaml
- cfg: acl_config
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
runner:
- type: Duration
- duration: 400
+ type: Iteration
+ iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
index 8d7fe3cb2..95fad73dc 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
index 6b21ba576..397f352f1 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
@@ -23,7 +23,13 @@ scenarios:
vnf__1: vnf.yardstick
tg__2: trafficgen_2.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -35,9 +41,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
index df7a909a1..1fa0b5b3d 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
@@ -23,7 +23,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -34,9 +40,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
{% endfor %}
context:
type: Node
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml
deleted file mode 100644
index 96b3f687c..000000000
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2016 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the License);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an AS IS BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-schema: yardstick:task:0.1
-scenarios:
-- type: NSPerf
- traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
- topology: acl-tg-topology.yaml
- nodes:
- tg__1: trafficgen_1.yardstick
- vnf__1: vnf.yardstick
- tc_options:
- rfc2544:
- allowed_drop_rate: 0.0001 - 0.0001
- vnf_options:
- acl:
- rules: acl_worstcaserules.yaml
- cfg: acl_config
- runner:
- type: Duration
- duration: 400
- interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
-context:
- type: Node
- name: yardstick
- nfvi_type: baremetal
- file: /etc/yardstick/nodes/pod.yaml
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_trex.yaml
index 8d7fe3cb2..95fad73dc 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_trex.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
index ab688a23f..3ba22ff1c 100644
--- a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
@@ -21,23 +21,24 @@ scenarios:
nodes:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
- tc_options:
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
- latency: true
- vnf_options:
- acl:
+ vnf__1:
rules: acl_1rule.yaml
- cfg: acl_config
- options:
- packetsize: 64
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
runner:
type: Iteration
iterations: 28
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml b/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
index 10eba4c3c..3c92e877f 100644
--- a/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,23 +39,18 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
# put node context first, so we don't HEAT deploy if node has errors
name: yardstick
- flavor: yardstick-dpdk-flavor
-# flavor:
-# name: yardstick-dpdk-flavor
-# vcpus: 10
-# ram: 20480
-# disk: 4
-# extra_specs:
-# hw:cpu_sockets: 1
-# hw:cpu_cores: 10
-# hw:cpu_threads: 1
-# # hw:mem_page_size: large
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 4
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
user: ubuntu
placement_groups:
pgrp1:
@@ -58,25 +59,24 @@ context:
vnf:
floating_ip: true
placement: "pgrp1"
- image: yardstick-vnfs
trafficgen_1:
floating_ip: true
placement: "pgrp1"
- image: yardstick-trex
networks:
mgmt:
cidr: '10.0.1.0/24'
- external_network: "yardstick-public"
xe0:
cidr: '10.0.2.0/24'
- vld_id: public
+ vld_id: public_1
+ gateway_ip: 'null'
# port_security_enabled: False
allowed_address_pairs:
- ip_address:
'0.0.0.0/0'
xe1:
cidr: '10.0.3.0/24'
- vld_id: private
+ vld_id: private_1
+ gateway_ip: 'null'
# port_security_enabled: False
allowed_address_pairs:
- ip_address:
diff --git a/samples/vnf_samples/nsut/acl/tc_heat_trex_external_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml b/samples/vnf_samples/nsut/acl/tc_heat_trex_external_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
index 3344a1de1..998a126dc 100644
--- a/samples/vnf_samples/nsut/acl/tc_heat_trex_external_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
+++ b/samples/vnf_samples/nsut/acl/tc_heat_trex_external_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
@@ -21,38 +21,39 @@ scenarios:
nodes:
tg__1: trafficgen_1.baremetal
vnf__1: vnf.yardstick
- tc_options:
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
- vnf_options:
- acl:
+ vnf__1:
rules: acl_1rule.yaml
- cfg: acl_config
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
runner:
- type: Duration
- duration: 400
+ type: Iteration
+ iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
contexts:
# put node context first, so we don't HEAT deploy if node has errors
- name: baremetal
type: Node
file: trex-baremetal.yml
- name: yardstick
- image: yardstick-acl
- flavor: yardstick-flavor
-# flavor:
-# # name: yardstick-dpdk-flavor
-# vcpus: 6
-# ram: 20480
-# disk: 4
-# extra_specs:
-# hw:cpu_sockets: 1
-# hw:cpu_cores: 6
-# hw:cpu_threads: 1
-# # hw:mem_page_size: large
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 4
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
user: ubuntu
placement_groups:
pgrp1:
@@ -64,11 +65,17 @@ contexts:
networks:
mgmt:
cidr: '10.0.1.0/24'
- external_network: "yardstick-public"
xe0:
cidr: '10.0.2.0/24'
- vld_id: public
+ vld_id: public_1
+ gateway_ip: 'null'
+ provider: true
+ physical_network: phystenant1
+ port_security_enabled: False
xe1:
cidr: '10.0.3.0/24'
- vld_id: private
-
+ vld_id: private_1
+ gateway_ip: 'null'
+ provider: true
+ physical_network: phystenant2
+ port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
index 7e9a589ad..fc1c3e9b8 100644
--- a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
+++ b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
@@ -22,7 +22,9 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
index 5203e8d0a..6160ca090 100644
--- a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
+++ b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
@@ -22,7 +22,14 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ publicip: ["152.16.40.10"]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
ixia_profile: ../../traffic_profiles/cgnapt/cgnat_ipv4_profile_1flows.ixncfg
context:
type: Node
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
index feeacf5b0..15365b01a 100644
--- a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
@@ -22,7 +22,14 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ publicip: ["152.16.40.10"]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
index b5548d5ba..c1f5f2162 100644
--- a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
+++ b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
@@ -23,7 +23,13 @@ scenarios:
vnf__1: vnf.yardstick
tg__2: trafficgen_2.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__2': 'xe0'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -35,9 +41,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_scale_up.yaml b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_scale_up.yaml
index 7d746f0de..1bf7df875 100644
--- a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_scale_up.yaml
+++ b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_scale_up.yaml
@@ -23,7 +23,14 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ publicip: ["152.16.40.10"]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +40,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
{% endfor %}
context:
type: Node
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_with_latency_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_with_latency_ipv4_1flow_64B_trex.yaml
index 16d0d08cd..e8cac4af9 100644
--- a/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_with_latency_ipv4_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_with_latency_ipv4_1flow_64B_trex.yaml
@@ -22,7 +22,14 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ publicip: ["152.16.40.10"]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +40,6 @@ scenarios:
type: Iteration
iterations: 28
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml
new file mode 100644
index 000000000..0ad7898a1
--- /dev/null
+++ b/samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml
@@ -0,0 +1,80 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/ipv4_throughput_cgnapt.yaml
+ topology: cgnapt-vnf-topology.yaml
+ nodes:
+ tg__1: trafficgen_1.baremetal
+ vnf__1: vnf.yardstick
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
+ rfc2544:
+ allowed_drop_rate: 0.0001 - 0.0001
+ vnf__1:
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 35
+contexts:
+ # put node context first, so we don't HEAT deploy if node has errors
+ - name: baremetal
+ type: Node
+ file: trex-baremetal.yml
+ - name: yardstick
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 4
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
+ user: ubuntu
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public_1
+ gateway_ip: 'null'
+ provider: true
+ physical_network: phystenant1
+ port_security_enabled: False
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private_1
+ gateway_ip: 'null'
+ provider: true
+ physical_network: phystenant2
+ port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml
new file mode 100644
index 000000000..516c727de
--- /dev/null
+++ b/samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml
@@ -0,0 +1,83 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/ipv4_throughput_cgnapt.yaml
+ topology: cgnapt-vnf-topology.yaml
+ nodes:
+ tg__1: trafficgen_1.yardstick
+ vnf__1: vnf.yardstick
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
+ rfc2544:
+ allowed_drop_rate: 0.0001 - 0.0001
+ vnf__1:
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 35
+context:
+ # put node context first, so we don't HEAT deploy if node has errors
+ name: yardstick
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 4
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
+ user: ubuntu
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+ trafficgen_1:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public_1
+ gateway_ip: 'null'
+# port_security_enabled: False
+ allowed_address_pairs:
+ - ip_address:
+ '0.0.0.0/0'
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private_1
+ gateway_ip: 'null'
+# port_security_enabled: False
+ allowed_address_pairs:
+ - ip_address:
+ '0.0.0.0/0'
+
diff --git a/samples/vnf_samples/nsut/udp_replay/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/udp_replay/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
index a2b73b6ec..e80e1fb2d 100644
--- a/samples/vnf_samples/nsut/udp_replay/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/udp_replay/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -30,9 +36,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/vfw/acl_1rule.yaml b/samples/vnf_samples/nsut/vfw/acl_1rule.yaml
index b184a29e2..6753645ba 100644
--- a/samples/vnf_samples/nsut/vfw/acl_1rule.yaml
+++ b/samples/vnf_samples/nsut/vfw/acl_1rule.yaml
@@ -20,7 +20,7 @@ access-list1:
match-counter: 0
actions: drop,count
matches:
- destination-ipv4-network: 152.16.40.20/24
+ destination-ipv4-network: 152.16.0.0/24
destination-port-range:
lower-port: 0
upper-port: 65535
@@ -38,7 +38,7 @@ access-list1:
destination-port-range:
lower-port: 0
upper-port: 65535
- source-ipv4-network: 152.16.100.20/24
+ source-ipv4-network: 152.16.0.0/24
source-port-range:
lower-port: 0
upper-port: 65535
diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
index d4a4bb706..e0bd82a30 100644
--- a/samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
@@ -22,7 +22,9 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
index 71a803d32..e07f5f9e9 100644
--- a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
ixia_profile: ../../traffic_profiles/vfw/vfw_ipv4_profile_1flows.ixncfg
context:
type: Node
diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
index 3a17abac4..53d4d3d85 100644
--- a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
index a92a91e56..562575b3c 100644
--- a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
@@ -21,8 +21,15 @@ scenarios:
nodes:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
+ tg__2: trafficgen_2.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__2': 'xe0'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -34,11 +41,8 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
nfvi_type: baremetal
- file: /etc/yardstick/nodes/pod.yaml
+ file: /etc/yardstick/nodes/pod_3node.yaml
diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
index ab2791c86..db8622122 100644
--- a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
@@ -23,7 +23,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -34,9 +40,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
{% endfor %}
context:
type: Node
diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
index 1e6330038..a1381454e 100644
--- a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -34,9 +40,6 @@ scenarios:
type: Iteration
iterations: 28
interval: 35
- traffic_options:
- flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
- imix: ../../traffic_profiles/imix_voice.yaml
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
new file mode 100644
index 000000000..3e323d9c9
--- /dev/null
+++ b/samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -0,0 +1,81 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
+ topology: vfw-tg-topology.yaml
+ nodes:
+ tg__1: trafficgen_1.baremetal
+ vnf__1: vnf.yardstick
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
+ rfc2544:
+ allowed_drop_rate: 0.0001 - 0.0001
+ vnf__1:
+ rules: acl_1rule.yaml
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 35
+contexts:
+ # put node context first, so we don't HEAT deploy if node has errors
+ - name: baremetal
+ type: Node
+ file: trex-baremetal.yml
+ - name: yardstick
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 4
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
+ user: ubuntu
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public_1
+ gateway_ip: 'null'
+ provider: true
+ physical_network: phystenant1
+ port_security_enabled: False
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private_1
+ gateway_ip: 'null'
+ provider: true
+ physical_network: phystenant2
+ port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
new file mode 100644
index 000000000..82e89a2a4
--- /dev/null
+++ b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -0,0 +1,84 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
+ topology: vfw-tg-topology.yaml
+ nodes:
+ tg__1: trafficgen_1.yardstick
+ vnf__1: vnf.yardstick
+ options:
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
+ traffic_type: 4
+ rfc2544:
+ allowed_drop_rate: 0.0001 - 0.0001
+ vnf__1:
+ rules: acl_1rule.yaml
+ vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 35
+context:
+ # put node context first, so we don't HEAT deploy if node has errors
+ name: yardstick
+ image: yardstick-samplevnfs
+ flavor:
+ vcpus: 10
+ ram: 20480
+ disk: 4
+ extra_specs:
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 10
+ hw:cpu_threads: 1
+ user: ubuntu
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+ trafficgen_1:
+ floating_ip: true
+ placement: "pgrp1"
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public_1
+ gateway_ip: 'null'
+# port_security_enabled: False
+ allowed_address_pairs:
+ - ip_address:
+ '0.0.0.0/0'
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private_1
+ gateway_ip: 'null'
+# port_security_enabled: False
+ allowed_address_pairs:
+ - ip_address:
+ '0.0.0.0/0'
+
diff --git a/samples/vnf_samples/nsut/vpe/tc_baremetal_http_ipv4_ixload.yaml b/samples/vnf_samples/nsut/vpe/tc_baremetal_http_ipv4_ixload.yaml
index 7b251395b..16996cb5c 100644
--- a/samples/vnf_samples/nsut/vpe/tc_baremetal_http_ipv4_ixload.yaml
+++ b/samples/vnf_samples/nsut/vpe/tc_baremetal_http_ipv4_ixload.yaml
@@ -22,16 +22,19 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
vnf__1:
cfg: vpe_config
runner:
type: Duration
duration: 4
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
ixia_profile: ../../traffic_profiles/vpe/HTTP-vPE_IPv4_2Ports.rxf # Need vlan update
context:
type: Node
diff --git a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml
index 4652a62d5..02346d238 100644
--- a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml
+++ b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {1518B: 100}
+ public: {1518B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +38,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
- imix: "../../traffic_profiles/imix_storage.yaml"
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml
index bd64a45f3..a50ba380f 100644
--- a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml
+++ b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +38,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
index 0257886fb..e0a749301 100644
--- a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
+++ b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
@@ -22,7 +22,13 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +38,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
ixia_profile: ../../traffic_profiles/vpe/vpe_ipv4_profile_1flows.ixncfg
context:
type: Node
diff --git a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
index 2c48d0ef9..57c512888 100644
--- a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
+++ b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
@@ -21,8 +21,15 @@ scenarios:
nodes:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
+ tg__2: trafficgen_2.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 100}
+ public: {64B: 100}
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__2': 'xe0'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +40,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
- imix: "../../traffic_profiles/imix_voice.yaml"
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_IMIX.yaml b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_IMIX.yaml
index 674fa95f4..6b78574eb 100644
--- a/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_IMIX.yaml
+++ b/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_IMIX.yaml
@@ -22,7 +22,14 @@ scenarios:
tg__1: trafficgen_1.yardstick
vnf__1: vnf.yardstick
options:
- packetsize: 64
+ framesize:
+ private: {64B: 5, 128B: 11, 256B: 16, 373B: 10, 570B: 35, 1400B: 10, 1500B: 13}
+ public: {64B: 5, 128B: 3, 256B: 4, 373B: 6, 570B: 8, 1400B: 36, 1500B: 38}
+
+ flow:
+ src_ip: [{'tg__1': 'xe0'}]
+ dst_ip: [{'tg__1': 'xe1'}]
+ count: 1
traffic_type: 4
rfc2544:
allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +39,6 @@ scenarios:
type: Iteration
iterations: 10
interval: 35
- traffic_options:
- flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
- imix: "../../traffic_profiles/imix_video.yaml"
context:
type: Node
name: yardstick
diff --git a/samples/vnf_samples/traffic_profiles/imix_storage.yaml b/samples/vnf_samples/traffic_profiles/imix_storage.yaml
deleted file mode 100644
index 8fd10ecbe..000000000
--- a/samples/vnf_samples/traffic_profiles/imix_storage.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#imix definition for storage traffic
-#
-# it is a typical case for testing the synthetic VNF performance.
-#
-#percentage of the packets can be less than 100%
-#the traffic in downstream and upstream direction could be different
-
-schema: "nsb:imix:0.1"
-
-imix:
- private:
- imix_small: 0 #ipv4 case - 72B should be 0 ipv6 case - 84B
- imix_128B: 0
- imix_256B: 0
- imix_373B: 0
- imix_570B: 0
- imix_1400B: 0
- imix_1500B: 100
-
- public:
- imix_small: 0 #ipv4 case - 72B ipv6 - 84B
- imix_128B: 0
- imix_256B: 0
- imix_373B: 0
- imix_570B: 0
- imix_1400B: 0
- imix_1500B: 100
diff --git a/samples/vnf_samples/traffic_profiles/imix_video.yaml b/samples/vnf_samples/traffic_profiles/imix_video.yaml
deleted file mode 100644
index 36324bfa4..000000000
--- a/samples/vnf_samples/traffic_profiles/imix_video.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#imix definition for video traffic
-#
-# this is a real traffic profile when video/data traffic only is present
-#
-#percentage of the packets can be less than 100%
-#the traffic in downstream and upstream direction could be different
-#
-#small means ipv4 case - 72B should be 0, ipv6 case - 84B
-
-schema: "nsb:imix:0.1"
-
-imix:
- private:
- imix_small: 5 #ipv4 case - 72B should be 0 ipv6 case - 84B
- imix_128B: 11
- imix_256B: 16
- imix_373B: 10
- imix_570B: 35
- imix_1400B: 10
- imix_1500B: 13
-
- public:
- imix_small: 5 #ipv4 case - 72B ipv6 - 84B
- imix_128B: 3
- imix_256B: 4
- imix_373B: 6
- imix_570B: 8
- imix_1400B: 36
- imix_1500B: 38
diff --git a/samples/vnf_samples/traffic_profiles/imix_voice.yaml b/samples/vnf_samples/traffic_profiles/imix_voice.yaml
deleted file mode 100644
index b8f8e5358..000000000
--- a/samples/vnf_samples/traffic_profiles/imix_voice.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#imix definition for voice traffic
-#
-# it is a typical case for testing the synthetic VNF performance.
-#
-#percentage of the packets can be less than 100%
-#the traffic in downstream and upstream direction could be different
-
-schema: "nsb:imix:0.1"
-
-imix:
- private:
- imix_small: 100 #ipv4 case - 72B should be 0 ipv6 case - 84B
- imix_128B: 0
- imix_256B: 0
- imix_373B: 0
- imix_570B: 0
- imix_1400B: 0
- imix_1500B: 0
-
- public:
- imix_small: 100 #ipv4 case - 72B ipv6 - 84B
- imix_128B: 0
- imix_256B: 0
- imix_373B: 0
- imix_570B: 0
- imix_1400B: 0
- imix_1500B: 0
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml b/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml
deleted file mode 100644
index 8bb913ebf..000000000
--- a/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-flow:
- srcip4_range_1: '152.16.0.20'
- dstip4_range_1: '152.40.0.20'
- srcip4_range_2: '172.16.0.20'
- dstip4_range_2: '172.40.0.20'
- count: 1
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
index 2854826e6..98624b108 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
@@ -49,41 +49,49 @@ private_1:
ipv4:
outer_l2:
framesize:
- 64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
- 128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
- 256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
- 373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
- 570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
- 1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
- 1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+ 64B: "{{ get(imix, 'imix.private.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.private.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.private.256B', '0') }}"
+ 373b: "{{ get(imix, 'imix.private.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.private.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.private.570B', '0') }}"
+ 1400B: "{{get(imix, 'imix.private.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.private.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.private.1518B', '0') }}"
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.1.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.90.255.255') }}"
+ srcip4: "{{get(flow, 'flow.src_ip0', '1.1.1.1-1.1.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip0', '90.90.1.1-90.90.255.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.src_port0', '1234-4321') }}"
+ dstport: "{{get(flow, 'flow.dst_port0', '2001-4001') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
public_1:
ipv4:
outer_l2:
framesize:
- 64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
- 128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
- 256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
- 373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
- 570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
- 1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
- 1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+ 64B: "{{ get(imix, 'imix.public.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.public.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.public.256B', '0') }}"
+ 373b: "{{ get(imix, 'imix.public.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.public.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.public.570B', '0') }}"
+ 1400B: "{{get(imix, 'imix.public.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.public.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.public.1518B', '0') }}"
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.90.255.255') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.1.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip0', '90.90.1.1-90.90.255.255') }}"
+ dstip4: "{{get(flow, 'flow.src_ip0', '1.1.1.1-1.1.255.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.dstport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.srcport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.dst_port0', '1234-4321') }}"
+ dstport: "{{get(flow, 'flow.src_port0', '2001-4001') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
index 2ea8d3c44..7283b6377 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
@@ -49,41 +49,49 @@ private_1:
ipv4:
outer_l2:
framesize:
- 64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
- 128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
- 256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
- 373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
- 570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
- 1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
- 1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+ 64B: "{{ get(imix, 'imix.private.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.private.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.private.256B', '0') }}"
+ 373b: "{{ get(imix, 'imix.private.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.private.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.private.570B', '0') }}"
+ 1400B: "{{get(imix, 'imix.private.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.private.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.private.1518B', '0') }}"
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.srcip4_range', '10.0.2.1-10.0.2.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range', '10.0.3.1-10.0.3.255') }}"
+ srcip4: "{{get(flow, 'flow.src_ip0', '10.0.2.1-10.0.2.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip0', '10.0.3.1-10.0.3.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.src_port0', '1234-4321') }}"
+ dstport: "{{get(flow, 'flow.dst_port0', '2001-4001') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
public_1:
ipv4:
outer_l2:
framesize:
- 64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
- 128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
- 256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
- 373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
- 570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
- 1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
- 1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+ 64B: "{{ get(imix, 'imix.public.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.public.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.public.256B', '0') }}"
+ 373b: "{{ get(imix, 'imix.public.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.public.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.public.570B', '0') }}"
+ 1400B: "{{get(imix, 'imix.public.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.public.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.public.1518B', '0') }}"
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.dstip4_range', '10.0.3.1-10.0.3.255') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range', '10.0.2.1-10.0.2.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip0', '10.0.3.1-10.0.3.255') }}"
+ dstip4: "{{get(flow, 'flow.public_ip0', '10.0.2.1-10.0.2.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.dstport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.srcport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.dst_port0', '1234-4321') }}"
+ dstport: "{{get(flow, 'flow.src_port0', '2001-4001') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml b/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
index e935bdbd0..233457eba 100644
--- a/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
@@ -49,13 +49,15 @@ private_1:
ipv4:
outer_l2:
framesize:
- 64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
- 128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
- 256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
- 373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
- 570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
- 1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
- 1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+ 64B: "{{ get(imix, 'imix.private.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.private.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.private.256B', '0') }}"
+ 373b: "{{ get(imix, 'imix.private.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.private.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.private.570B', '0') }}"
+ 1400B: "{{get(imix, 'imix.private.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.private.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.private.1518B', '0') }}"
QinQ:
S-VLAN:
@@ -69,14 +71,14 @@ private_1:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
+ srcip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.src_port0', '0') }}"
+ dstport: "{{get(flow, 'flow.dst_port0', '0') }}"
public_1:
ipv4:
outer_l2:
@@ -91,25 +93,27 @@ public_1:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
+ dstip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
- dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.dst_port0', '0') }}"
+ dstport: "{{get(flow, 'flow.src_port0', '0') }}"
private_2:
ipv4:
outer_l2:
framesize:
- 64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
- 128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
- 256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
- 373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
- 570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
- 1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
- 1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+ 64B: "{{ get(imix, 'imix.public.64B', '0') }}"
+ 128B: "{{ get(imix, 'imix.public.128B', '0') }}"
+ 256B: "{{ get(imix, 'imix.public.256B', '0') }}"
+ 373b: "{{ get(imix, 'imix.public.373B', '0') }}"
+ 512B: "{{ get(imix, 'imix.public.512B', '0') }}"
+ 570B: "{{get(imix, 'imix.public.570B', '0') }}"
+ 1400B: "{{get(imix, 'imix.public.1400B', '0') }}"
+ 1500B: "{{get(imix, 'imix.public.1500B', '0') }}"
+ 1518B: "{{get(imix, 'imix.public.1518B', '0') }}"
QinQ:
S-VLAN:
@@ -123,14 +127,14 @@ private_2:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
+ srcip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.src_port1', '0') }}"
+ dstport: "{{get(flow, 'flow.dst_port1', '0') }}"
public_2:
ipv4:
outer_l2:
@@ -145,11 +149,11 @@ public_2:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
+ dstip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
- dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.dst_port1', '0') }}"
+ dstport: "{{get(flow, 'flow.src_port1', '0') }}"
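The renamed lookups above all go through the same `get(flow, 'flow.<key>', default)` helper that these Jinja traffic-profile templates rely on. As a rough sketch only (not the project's actual helper), its behaviour amounts to a dotted-path lookup over a nested dict with a fallback default:

    # Hypothetical stand-in for the get() helper used in the templates above.
    def get(obj, path, default=None):
        # Walk the dotted path through nested dicts; fall back to the default
        # whenever a key is missing, just like the '0' / range defaults above.
        for key in path.split('.'):
            if not isinstance(obj, dict) or key not in obj:
                return default
            obj = obj[key]
        return obj

    flow = {'flow': {'src_ip0': '192.168.0.0-192.168.255.255', 'src_port0': '0'}}
    assert get(flow, 'flow.src_ip0', '10.0.0.1') == '192.168.0.0-192.168.255.255'
    assert get(flow, 'flow.dst_port1', '0') == '0'   # missing key -> default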
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
index 6b213a54a..d7531fcdb 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
@@ -45,16 +45,15 @@ private_1:
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.15.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.105.255.255') }}"
+ srcip4: "{{get(flow, 'flow.src_ip0', '1.1.1.1-1.15.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.src_port0', '1234') }}"
+ dstport: "{{get(flow, 'flow.dst_port0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
-
public_1:
ipv4:
outer_l2:
@@ -69,14 +68,14 @@ public_1:
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.dstip4_range', '1.1.1.1-1.15.255.255') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range', '90.90.1.1-90.105.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip0', '1.1.1.1-1.15.255.255') }}"
+ dstip4: "{{get(flow, 'flow.src_ip0', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.src_port0', '1234') }}"
+ dstport: "{{get(flow, 'flow.dst_port0', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
private_2:
ipv4:
@@ -92,14 +91,14 @@ private_2:
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.15.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.105.255.255') }}"
+ srcip4: "{{get(flow, 'flow.src_ip1', '1.1.1.1-1.15.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.src_port1', '1234') }}"
+ dstport: "{{get(flow, 'flow.dst_port1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
public_2:
ipv4:
@@ -115,12 +114,12 @@ public_2:
outer_l3v4:
proto: "udp"
- srcip4: "{{get(flow, 'flow.dstip4_range', '1.1.1.1-1.15.255.255') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range', '90.90.1.1-90.105.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip1', '1.1.1.1-1.15.255.255') }}"
+ dstip4: "{{get(flow, 'flow.src_ip1', '90.90.1.1-90.105.255.255') }}"
count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 0
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+ srcport: "{{get(flow, 'flow.dst_port1', '1234') }}"
+ dstport: "{{get(flow, 'flow.src_port1', '2001') }}"
count: "{{get(flow, 'flow.count', '1') }}"
diff --git a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
index 4a21a4274..7468dbdb1 100644
--- a/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
+++ b/samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
@@ -68,14 +68,16 @@ private_1:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
+ srcip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.src_port0', '0') }}"
+ dstport: "{{get(flow, 'flow.dst_port0', '0') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
public_1:
ipv4:
outer_l2:
@@ -90,14 +92,16 @@ public_1:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
+ dstip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
- dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.dst_port0', '0') }}"
+ dstport: "{{get(flow, 'flow.src_port0', '0') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
private_2:
ipv4:
outer_l2:
@@ -122,14 +126,15 @@ private_2:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
- dstip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
+ srcip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
+ dstip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
- dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.src_port1', '0') }}"
+ dstport: "{{get(flow, 'flow.dst_port1', '0') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
public_2:
ipv4:
outer_l2:
@@ -144,11 +149,13 @@ public_2:
outer_l3v4:
proto: "tcp"
- srcip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
- dstip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
+ srcip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
+ dstip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
ttl: 32
dscp: 32
outer_l4:
- srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
- dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+ srcport: "{{get(flow, 'flow.dst_port1', '0') }}"
+ dstport: "{{get(flow, 'flow.src_port1', '0') }}"
+ count: "{{get(flow, 'flow.count', '1') }}"
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
index 58f5b783a..7f8c22943 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
@@ -18,13 +18,14 @@ description: >
{% set provider = provider or none %}
{% set physical_network = physical_network or 'physnet1' %}
{% set segmentation_id = segmentation_id or none %}
+{% set packetsize = packetsize or 100 %}
scenarios:
{% for i in range(2) %}
-
type: Ping
options:
- packetsize: 100
+ packetsize: {{packetsize}}
host: athena.demo
target: ares.demo
@@ -64,4 +65,4 @@ context:
{% if segmentation_id %}
segmentation_id: {{segmentation_id}}
{% endif %}
- {% endif %} \ No newline at end of file
+ {% endif %}
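The new `{% set packetsize = packetsize or 100 %}` line keeps 100 as the default while allowing the value to be overridden through task arguments (typically the `--task-args` option of `yardstick task start`). A minimal sketch of the defaulting behaviour, using Jinja2 directly just to illustrate the mechanism rather than Yardstick's full task parser:

    from jinja2 import Template

    tpl = Template("{% set packetsize = packetsize or 100 %}packetsize: {{ packetsize }}")
    print(tpl.render())                  # packetsize: 100   (no override supplied)
    print(tpl.render(packetsize=1500))   # packetsize: 1500  (value passed as a task arg)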
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml
new file mode 100644
index 000000000..7f1dc1010
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml
@@ -0,0 +1,81 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Test case for TC056: OpenStack Controller Messaging Queue Service High
+ Availability.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or 'node1' %}
+{% set monitor_time = monitor_time or 10 %}
+{% set monitor_number = monitor_number or 3 %}
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "rabbitmq-server"
+ host: {{attack_host}}
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "openstack image list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ - monitor_type: "openstack-cmd"
+ command_name: "openstack network list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ - monitor_type: "openstack-cmd"
+ command_name: "openstack volume list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ - monitor_type: "openstack-cmd"
+ command_name: "openstack stack list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ - monitor_type: "process"
+ process_name: "rabbitmq-server"
+ host: {{attack_host}}
+ monitor_time: 20
+ sla:
+ max_recover_time: 20
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: {{file}}
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml
new file mode 100644
index 000000000..322e2bd76
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml
@@ -0,0 +1,179 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Test case for TC057: OpenStack Controller Cluster Management Service High
+ Availability;
+ This test case is written using the scenario-based HA testing framework.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set vip_mgmt = vip_mgmt or 'vip__management' %}
+{% set vip_vrouter = vip_vrouter or 'vip__vrouter' %}
+{% set attack_host = attack_host or 'node1' %}
+{% set check_host = check_host or 'node2' %}
+{% set monitor_time = monitor_time or 10 %}
+{% set monitor_number = monitor_number or 3 %}
+
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "general-attacker"
+ host: {{attack_host}}
+ key: "kill-process"
+ attack_key: "kill-corosync"
+ action_parameter:
+ process_name: "corosync"
+
+ monitors:
+ -
+ monitor_type: "openstack-cmd"
+ key: "check-nova-service"
+ command_name: "openstack image list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "check-neutron-service"
+ command_name: "openstack network list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "check-keystone-service"
+ command_name: "openstack user list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ -
+ monitor_type: "openstack-cmd"
+ key: "check-heat-service"
+ command_name: "openstack stack list"
+ monitor_time: {{monitor_time}}
+ monitor_number: {{monitor_number}}
+ sla:
+ max_outage_time: 5
+
+ operations:
+ -
+ operation_type: "general-operation"
+ key: "get-mgmt-vip-host"
+ operation_key: "get-vip-host"
+ host: {{check_host}}
+ action_parameter:
+ vip_name: {{vip_mgmt}}
+ return_parameter:
+ all: "$vip_mgmt_host"
+
+ -
+ operation_type: "general-operation"
+ key: "get-router-vip-host"
+ operation_key: "get-vip-host"
+ host: {{check_host}}
+ action_parameter:
+ vip_name: {{vip_vrouter}}
+ return_parameter:
+ all: "$vip_router_host"
+
+ resultCheckers:
+ -
+ checker_type: "general-result-checker"
+ key: "check-rabbitmq-master"
+ checker_key: "pacemaker-resource-checker"
+ host: {{check_host}}
+ parameter:
+ resource_name: "p_rabbitmq-server"
+ resource_host: "$vip_mgmt_host"
+ expectedValue: "Masters"
+ condition: "in"
+
+ -
+ checker_type: "general-result-checker"
+ key: "check-conntrackd-master"
+ checker_key: "pacemaker-resource-checker"
+ host: {{check_host}}
+ parameter:
+ resource_name: "p_conntrackd"
+ resource_host: "$vip_router_host"
+ expectedValue: "Masters"
+ condition: "in"
+
+ steps:
+ -
+ actionKey: "kill-process"
+ actionType: "attacker"
+ index: 1
+
+ -
+ actionKey: "check-nova-service"
+ actionType: "monitor"
+ index: 2
+
+ -
+ actionKey: "check-neutron-service"
+ actionType: "monitor"
+ index: 3
+
+ -
+ actionKey: "check-keystone-service"
+ actionType: "monitor"
+ index: 4
+
+ -
+ actionKey: "check-heat-service"
+ actionType: "monitor"
+ index: 5
+
+ -
+ actionKey: "get-mgmt-vip-host"
+ actionType: "operation"
+ index: 6
+
+ -
+ actionKey: "check-rabbitmq-master"
+ actionType: "resultchecker"
+ index: 7
+
+ -
+ actionKey: "get-router-vip-host"
+ actionType: "operation"
+ index: 8
+
+ -
+ actionKey: "check-conntrackd-master"
+ actionType: "resultchecker"
+ index: 9
+
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ {{check_host}}: {{check_host}}.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+context:
+ type: Node
+ name: LF
+ file: {{file}}
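The `steps` list is what ties the attackers, monitors, operations and result checkers above together: each step references an `actionKey` defined earlier, an `actionType` naming the collection it belongs to, and an `index` that fixes execution order. Purely as an illustration of that structure (not Yardstick's actual Director code), the dispatch can be pictured like this:

    # Hypothetical dispatcher: run configured actions in index order.
    def run_steps(steps, registry):
        # registry maps actionType -> {actionKey: callable}
        for step in sorted(steps, key=lambda s: s['index']):
            registry[step['actionType']][step['actionKey']]()

    registry = {
        'attacker':      {'kill-process': lambda: print('kill corosync on node1')},
        'monitor':       {'check-nova-service': lambda: print('openstack image list')},
        'operation':     {'get-mgmt-vip-host': lambda: print('resolve mgmt VIP host')},
        'resultchecker': {'check-rabbitmq-master': lambda: print('check p_rabbitmq-server')},
    }
    steps = [
        {'actionKey': 'kill-process',          'actionType': 'attacker',      'index': 1},
        {'actionKey': 'check-nova-service',    'actionType': 'monitor',       'index': 2},
        {'actionKey': 'get-mgmt-vip-host',     'actionType': 'operation',     'index': 3},
        {'actionKey': 'check-rabbitmq-master', 'actionType': 'resultchecker', 'index': 4},
    ]
    run_steps(steps, registry)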
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml
new file mode 100644
index 000000000..e9feb97f5
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Test case for TC058: OpenStack Controller Virtual Router Service High
+ Availability;
+ This test case is written using the scenario-based HA testing framework.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set image = image or 'yardstick-image' %}
+{% set flavor = flavor or 'yardstick-flavor' %}
+{% set attack_host = attack_host or 'node1' %}
+
+scenarios:
+ -
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ host: {{attack_host}}
+ key: "kill-process"
+ process_name: "neutron-l3-agent"
+
+ monitors:
+ -
+ monitor_type: "process"
+ process_name: "neutron-l3-agent"
+ host: {{attack_host}}
+ key: "monitor-recovery"
+ monitor_time: 20
+ sla:
+ max_recover_time: 20
+
+ -
+ monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "server-status"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ parameter:
+ ip_address: "$floating_ip"
+
+ operations:
+ -
+ operation_type: "general-operation"
+ key: "get-floatingip"
+ operation_key: "get-floatingip"
+ action_parameter:
+ server_name: "tc058"
+ return_parameter:
+ all: "$floating_ip"
+
+
+ steps:
+ -
+ actionKey: "get-floatingip"
+ actionType: "operation"
+ index: 1
+ -
+ actionKey: "kill-process"
+ actionType: "attacker"
+ index: 2
+
+ -
+ actionKey: "monitor-recovery"
+ actionType: "monitor"
+ index: 3
+
+ -
+ actionKey: "server-status"
+ actionType: "monitor"
+ index: 4
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+contexts:
+-
+ type: Node
+ name: LF
+ file: {{file}}
+
+-
+ name: demo
+ image: {{image}}
+ flavor: {{flavor}}
+ user: cirros
+
+ servers:
+ tc058:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml
new file mode 100644
index 000000000..b89f7674b
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC078 config file;
+ Measure CPU performance using SPEC CPU2006;
+
+{% set file = file or "/etc/yardstick/pod.yaml" %}
+
+scenarios:
+-
+ type: SpecCPU2006
+
+ options:
+ benchmark_subset: int
+
+ host: node1.yardstick-TC078
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Node
+ name: yardstick-TC078
+ file: {{ file }}
+
+ env:
+ type: ansible
+ setup: spec_cpu2006_install.yaml
+ teardown: spec_cpu2006_uninstall.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml
new file mode 100644
index 000000000..9c15acc9c
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC079 config file;
+ Measure storage and file system performance using bonnie++;
+
+{% set provider = provider or none %}
+{% set physical_network = physical_network or 'physnet1' %}
+{% set segmentation_id = segmentation_id or none %}
+
+scenarios:
+-
+ type: Bonnie++
+ options:
+ file_size: 1024
+ ram_size: 512
+ test_dir: /tmp
+ concurrency: 1
+
+ host: bonnie.yardstick-TC079
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ name: yardstick-TC079
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ servers:
+ bonnie:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+ {% if provider == "vlan" %}
+ provider: {{provider}}
+ physical_network: {{physical_network}}
+ {% if segmentation_id %}
+ segmentation_id: {{segmentation_id}}
+ {% endif %}
+ {% endif %}
diff --git a/samples/ping_k8s.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml
index 503fe6a45..503fe6a45 100644
--- a/samples/ping_k8s.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml
diff --git a/samples/container_ping_vm.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc081.yaml
index 4b7b64f68..d99757e47 100644
--- a/samples/container_ping_vm.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc081.yaml
@@ -9,7 +9,7 @@
---
# Sample benchmark task config file
-# measure network latency using ping in container
+# measure network latency using ping between container and VM
schema: "yardstick:task:0.1"
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml
new file mode 100644
index 000000000..08a075845
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn lb noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-lb-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index e69de29bb..a468b272b 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import mock
+
+
+STL_MOCKS = {
+ 'trex_stl_lib': mock.MagicMock(),
+ 'trex_stl_lib.base64': mock.MagicMock(),
+ 'trex_stl_lib.binascii': mock.MagicMock(),
+ 'trex_stl_lib.collections': mock.MagicMock(),
+ 'trex_stl_lib.copy': mock.MagicMock(),
+ 'trex_stl_lib.datetime': mock.MagicMock(),
+ 'trex_stl_lib.functools': mock.MagicMock(),
+ 'trex_stl_lib.imp': mock.MagicMock(),
+ 'trex_stl_lib.inspect': mock.MagicMock(),
+ 'trex_stl_lib.json': mock.MagicMock(),
+ 'trex_stl_lib.linecache': mock.MagicMock(),
+ 'trex_stl_lib.math': mock.MagicMock(),
+ 'trex_stl_lib.os': mock.MagicMock(),
+ 'trex_stl_lib.platform': mock.MagicMock(),
+ 'trex_stl_lib.pprint': mock.MagicMock(),
+ 'trex_stl_lib.random': mock.MagicMock(),
+ 'trex_stl_lib.re': mock.MagicMock(),
+ 'trex_stl_lib.scapy': mock.MagicMock(),
+ 'trex_stl_lib.socket': mock.MagicMock(),
+ 'trex_stl_lib.string': mock.MagicMock(),
+ 'trex_stl_lib.struct': mock.MagicMock(),
+ 'trex_stl_lib.sys': mock.MagicMock(),
+ 'trex_stl_lib.threading': mock.MagicMock(),
+ 'trex_stl_lib.time': mock.MagicMock(),
+ 'trex_stl_lib.traceback': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_client': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_ext': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_port': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_stats': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_streams': mock.MagicMock(),
+ 'trex_stl_lib.trex_stl_types': mock.MagicMock(),
+ 'trex_stl_lib.types': mock.MagicMock(),
+ 'trex_stl_lib.utils': mock.MagicMock(),
+ 'trex_stl_lib.utils.argparse': mock.MagicMock(),
+ 'trex_stl_lib.utils.collections': mock.MagicMock(),
+ 'trex_stl_lib.utils.common': mock.MagicMock(),
+ 'trex_stl_lib.utils.json': mock.MagicMock(),
+ 'trex_stl_lib.utils.os': mock.MagicMock(),
+ 'trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
+ 'trex_stl_lib.utils.pwd': mock.MagicMock(),
+ 'trex_stl_lib.utils.random': mock.MagicMock(),
+ 'trex_stl_lib.utils.re': mock.MagicMock(),
+ 'trex_stl_lib.utils.string': mock.MagicMock(),
+ 'trex_stl_lib.utils.sys': mock.MagicMock(),
+ 'trex_stl_lib.utils.text_opts': mock.MagicMock(),
+ 'trex_stl_lib.utils.text_tables': mock.MagicMock(),
+ 'trex_stl_lib.utils.texttable': mock.MagicMock(),
+ 'trex_stl_lib.warnings': mock.MagicMock(),
+ 'trex_stl_lib.yaml': mock.MagicMock(),
+ 'trex_stl_lib.zlib': mock.MagicMock(),
+ 'trex_stl_lib.zmq': mock.MagicMock(),
+}
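Moving STL_MOCKS into `tests/unit/__init__.py` gives every test module one shared definition instead of a private copy. The consuming pattern, mirrored by the patched files below, is simply to patch `sys.modules` before importing anything that pulls in the TRex client library:

    import mock
    from tests.unit import STL_MOCKS

    # Any "import trex_stl_lib..." executed while the patch is active resolves to
    # a MagicMock, so the tests run even where TRex itself is not installed.
    stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
    stl_patch.start()
    try:
        pass  # imports of code under test that depend on trex_stl_lib go here
    finally:
        stl_patch.stop()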
diff --git a/tests/unit/benchmark/runner/test_search.py b/tests/unit/benchmark/runner/test_search.py
index 9cfe6e154..8fab5a71f 100644
--- a/tests/unit/benchmark/runner/test_search.py
+++ b/tests/unit/benchmark/runner/test_search.py
@@ -14,70 +14,11 @@
#
from __future__ import absolute_import
-import unittest
-from contextlib import contextmanager
+import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index de2170b16..244a5e798 100644
--- a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -67,4 +67,5 @@ class ScenarioGeneralTestCase(unittest.TestCase):
ins.director = mock_obj
ins.director.data = {}
ins.run({})
+ ins.pass_flag = True
ins.teardown()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_network.py b/tests/unit/benchmark/scenarios/lib/test_create_network.py
new file mode 100644
index 000000000..8e7d8b5a1
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_create_network.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_network import CreateNetwork
+
+
+class CreateNetworkTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+ @mock.patch('yardstick.common.openstack_utils.create_neutron_net')
+ def test_create_network(self, mock_get_neutron_client, mock_create_neutron_net):
+ options = {
+ 'openstack_paras': {
+ 'name': 'yardstick_net',
+ 'admin_state_up': 'True'
+ }
+ }
+ args = {"options": options}
+ obj = CreateNetwork(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_neutron_client.called)
+ self.assertTrue(mock_create_neutron_net.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
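One detail worth noting while reading these new lib tests: stacked `@mock.patch` decorators are applied bottom-up, so the first mock argument after `self` belongs to the decorator closest to the method. In `test_create_network` above, the parameter named `mock_get_neutron_client` therefore actually receives the `create_neutron_net` mock (and vice versa); the assertions still hold because both mocks are only checked for `.called`. A small standalone sketch of the ordering rule:

    import os
    import mock

    @mock.patch('os.getcwd')   # outermost decorator -> last mock argument
    @mock.patch('os.listdir')  # innermost decorator -> first mock argument
    def show_order(mock_listdir, mock_getcwd):
        assert os.listdir is mock_listdir
        assert os.getcwd is mock_getcwd

    show_order()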
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_port.py b/tests/unit/benchmark/scenarios/lib/test_create_port.py
new file mode 100644
index 000000000..3b2aa2247
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_create_port.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_port import CreatePort
+
+
+class CreatePortTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+ def test_create_port(self, mock_get_neutron_client):
+ options = {
+ 'openstack_paras': {
+ 'name': 'yardstick_port'
+ }
+ }
+ args = {"options": options}
+ obj = CreatePort(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_neutron_client.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_router.py b/tests/unit/benchmark/scenarios/lib/test_create_router.py
new file mode 100644
index 000000000..b956a3634
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_create_router.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_router import CreateRouter
+
+
+class CreateRouterTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+ @mock.patch('yardstick.common.openstack_utils.create_neutron_router')
+ def test_create_router(self, mock_get_neutron_client, mock_create_neutron_router):
+ options = {
+ 'openstack_paras': {
+ 'admin_state_up': 'True',
+ 'name': 'yardstick_router'
+ }
+ }
+ args = {"options": options}
+ obj = CreateRouter(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_neutron_client.called)
+ self.assertTrue(mock_create_neutron_router.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py b/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
new file mode 100644
index 000000000..b962f7f0e
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup
+
+
+class CreateSecGroupTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+ @mock.patch('yardstick.common.openstack_utils.create_security_group_full')
+ def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full):
+ options = {
+ 'openstack_paras': {
+ 'sg_name': 'yardstick_sec_group',
+ 'description': 'security group for yardstick manual VM'
+ }
+ }
+ args = {"options": options}
+ obj = CreateSecgroup(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_neutron_client.called)
+ self.assertTrue(mock_create_security_group_full.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_subnet.py b/tests/unit/benchmark/scenarios/lib/test_create_subnet.py
new file mode 100644
index 000000000..0154755c4
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_create_subnet.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_subnet import CreateSubnet
+
+
+class CreateSubnetTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+ @mock.patch('yardstick.common.openstack_utils.create_neutron_subnet')
+ def test_create_subnet(self, mock_get_neutron_client, mock_create_neutron_subnet):
+ options = {
+ 'openstack_paras': {
+ 'network_id': '123-123-123',
+ 'name': 'yardstick_subnet',
+ 'cidr': '10.10.10.0/24',
+ 'ip_version': '4'
+ }
+ }
+ args = {"options": options}
+ obj = CreateSubnet(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_neutron_client.called)
+ self.assertTrue(mock_create_neutron_subnet.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py b/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
new file mode 100644
index 000000000..7592c8070
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.delete_floating_ip import DeleteFloatingIp
+
+
+class DeleteFloatingIpTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_nova_client')
+ @mock.patch('yardstick.common.openstack_utils.delete_floating_ip')
+ def test_delete_floating_ip(self, mock_get_nova_client, mock_delete_floating_ip):
+ options = {
+ 'floating_ip_id': '123-123-123'
+ }
+ args = {"options": options}
+ obj = DeleteFloatingIp(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_nova_client.called)
+ self.assertTrue(mock_delete_floating_ip.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py b/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
new file mode 100644
index 000000000..9663fe9fb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
+
+
+class DeleteKeypairTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_nova_client')
+ @mock.patch('yardstick.common.openstack_utils.delete_keypair')
+ def test_delete_keypair(self, mock_get_nova_client, mock_delete_keypair):
+ options = {
+ 'key_name': 'yardstick_key'
+ }
+ args = {"options": options}
+ obj = DeleteKeypair(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_nova_client.called)
+ self.assertTrue(mock_delete_keypair.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_volume.py b/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
new file mode 100644
index 000000000..a11d0121b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.delete_volume import DeleteVolume
+
+
+class DeleteVolumeTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
+ @mock.patch('yardstick.common.openstack_utils.delete_volume')
+ def test_delete_volume(self, mock_get_cinder_client, mock_delete_volume):
+ options = {
+ 'volume_id': '123-123-123'
+ }
+ args = {"options": options}
+ obj = DeleteVolume(args, {})
+ obj.run({})
+ self.assertTrue(mock_get_cinder_client.called)
+ self.assertTrue(mock_delete_volume.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_detach_volume.py b/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
new file mode 100644
index 000000000..0cffcba15
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.detach_volume import DetachVolume
+
+
+class DetachVolumeTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.common.openstack_utils.detach_volume')
+ def test_detach_volume(self, mock_detach_volume):
+ options = {
+ 'server_id': '321-321-321',
+ 'volume_id': '123-123-123'
+ }
+ args = {"options": options}
+ obj = DetachVolume(args, {})
+ obj.run({})
+ self.assertTrue(mock_detach_volume.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 32ba255b2..0ca31d484 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -132,7 +132,7 @@ class PktgenTestCase(unittest.TestCase):
p._iptables_get_result = mock_iptables_result
sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110}'
+ "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
p.run(result)
@@ -159,7 +159,7 @@ class PktgenTestCase(unittest.TestCase):
p._iptables_get_result = mock_iptables_result
sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149776, "packetsize": 60, "flows": 110}'
+ "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
p.run(result)
@@ -648,7 +648,7 @@ class PktgenTestCase(unittest.TestCase):
p._iptables_get_result = mock_iptables_result
sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110}'
+ "packets_sent": 149300, "flows": 110, "ppm": 0}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
p.run(result)
@@ -693,7 +693,7 @@ class PktgenTestCase(unittest.TestCase):
p._iptables_get_result = mock_iptables_result
sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110}'
+ "packets_sent": 149300, "flows": 110, "ppm": 0}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
p.run(result)
@@ -730,7 +730,7 @@ class PktgenTestCase(unittest.TestCase):
p._iptables_get_result = mock_iptables_result
sample_output = '{"packets_per_second": 9753, "errors": 0, \
- "packets_sent": 149300, "flows": 110}'
+ "packets_sent": 149300, "flows": 110, "ppm": 0}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
p.run(result)
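The `"ppm"` field added to these sample outputs appears to express packet loss in parts per million, computed from the pktgen sent counter and the received count obtained via iptables; the values are consistent with rounding `(sent - received) * 1e6 / sent` up to the next integer. A quick arithmetic check under that assumption:

    import math

    def loss_ppm(sent, received):
        # Assumed formula: lost packets per million sent, rounded up.
        return int(math.ceil((sent - received) * 1e6 / sent))

    print(loss_ppm(149776, 149300))  # 3179 -> matches the first two samples
    print(loss_ppm(149300, 149300))  # 0    -> matches the later samples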
diff --git a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 651614d3e..df5047a0d 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -24,6 +24,7 @@ import errno
import unittest
import mock
+from tests.unit import STL_MOCKS
from yardstick.benchmark.scenarios.networking.vnf_generic import \
SshManager, NetworkServiceTestCase, IncorrectConfig, \
open_relative_file
@@ -31,65 +32,6 @@ from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic.vnf.base import \
GenericTrafficGen, GenericVNF
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
COMPLETE_TREX_VNFD = {
'vnfd:vnfd-catalog': {
@@ -375,6 +317,9 @@ class TestNetworkServiceTestCase(unittest.TestCase):
'allowed_drop_rate': '0.8 - 1',
},
},
+ 'options': {
+ 'framesize': {'64B': 100}
+ },
'runner': {
'object': 'NetworkServiceTestCase',
'interval': 35,
@@ -414,17 +359,40 @@ class TestNetworkServiceTestCase(unittest.TestCase):
def test___init__(self):
assert self.topology
+ def test__get_ip_flow_range(self):
+ self.scenario_cfg["traffic_options"]["flow"] = \
+ self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
+ result = '152.16.100.1-152.16.100.254'
+ self.assertEqual(result, self.s._get_ip_flow_range({"tg__1": 'xe0'}))
+
def test___get_traffic_flow(self):
self.scenario_cfg["traffic_options"]["flow"] = \
self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
- result = {'flow': {'dstip4_range': '152.40.0.20',
- 'srcip4_range': '152.16.0.20', 'count': 1}}
+ self.scenario_cfg["options"] = {}
+ self.scenario_cfg['options'] = {
+ 'flow': {
+ 'src_ip': [
+ {
+ 'tg__1': 'xe0',
+ },
+ ],
+ 'dst_ip': [
+ {
+ 'tg__1': 'xe1',
+ },
+ ],
+ 'public_ip': ['1.1.1.1'],
+ },
+ }
+ result = {'flow': {'dst_ip0': '152.16.40.1-152.16.40.254',
+ 'src_ip0': '152.16.100.1-152.16.100.254'}}
+
self.assertEqual(result, self.s._get_traffic_flow())
def test___get_traffic_flow_error(self):
self.scenario_cfg["traffic_options"]["flow"] = \
"ipv4_1flow_Packets_vpe.yaml1"
- self.assertEqual({}, self.s._get_traffic_flow())
+ self.assertEqual({'flow': {}}, self.s._get_traffic_flow())
def test_get_vnf_imp(self):
vnfd = COMPLETE_TREX_VNFD['vnfd:vnfd-catalog']['vnfd'][0]['class-name']
@@ -586,7 +554,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
def test___get_traffic_imix_exception(self):
with mock.patch.dict(self.scenario_cfg["traffic_options"], {'imix': ''}):
- self.assertEqual({}, self.s._get_traffic_imix())
+ self.assertEqual({'imix': {'64B': 100}}, self.s._get_traffic_imix())
def test__fill_traffic_profile(self):
with mock.patch.dict("sys.modules", STL_MOCKS):
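The new `test__get_ip_flow_range` expectation ('152.16.100.1-152.16.100.254') together with the `src_ip0`/`dst_ip0` keys in `test___get_traffic_flow` suggests that flow ranges are now derived from the traffic generator port's network rather than copied verbatim from the profile. Purely as an illustration of that derivation (not the scenario's actual code), turning a port's /24 network into such a range could look like:

    import ipaddress

    def cidr_to_flow_range(cidr):
        # First and last usable host of the interface network, joined as a range.
        net = ipaddress.ip_network(cidr, strict=False)
        hosts = list(net.hosts())
        return "{}-{}".format(hosts[0], hosts[-1])

    print(cidr_to_flow_range("152.16.100.10/24"))  # 152.16.100.1-152.16.100.254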
diff --git a/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index 3b9f99b08..de5bae2f3 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -28,8 +28,6 @@ from yardstick.benchmark.scenarios.networking import vsperf_dpdk
@mock.patch('yardstick.benchmark.scenarios.networking.vsperf_dpdk.subprocess')
@mock.patch('yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
-@mock.patch("yardstick.benchmark.scenarios.networking.vsperf_dpdk.open",
- mock.mock_open())
class VsperfDPDKTestCase(unittest.TestCase):
def setUp(self):
diff --git a/tests/unit/network_services/helpers/test_samplevnf_helper.py b/tests/unit/network_services/helpers/test_samplevnf_helper.py
index b89668577..608f31747 100644
--- a/tests/unit/network_services/helpers/test_samplevnf_helper.py
+++ b/tests/unit/network_services/helpers/test_samplevnf_helper.py
@@ -198,6 +198,8 @@ class TestMultiPortConfig(unittest.TestCase):
opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
opnfv_vnf.get_netmask_gateway6 = mock.Mock(return_value=u'255.255.255.0')
opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
opnfv_vnf.rules = ''
self.assertIsNotNone(opnfv_vnf.generate_rule_config())
opnfv_vnf.rules = 'new'
diff --git a/tests/unit/network_services/traffic_profile/test_fixed.py b/tests/unit/network_services/traffic_profile/test_fixed.py
index 8b44719a1..84843178e 100644
--- a/tests/unit/network_services/traffic_profile/test_fixed.py
+++ b/tests/unit/network_services/traffic_profile/test_fixed.py
@@ -16,68 +16,11 @@
#
from __future__ import absolute_import
+
import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 6dba64af9..b2cb9dfea 100644
--- a/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -20,65 +20,7 @@ from __future__ import division
import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_prox_acl.py b/tests/unit/network_services/traffic_profile/test_prox_acl.py
index 252c655da..be172f26b 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_acl.py
+++ b/tests/unit/network_services/traffic_profile/test_prox_acl.py
@@ -14,69 +14,11 @@
#
from __future__ import absolute_import
-import unittest
+import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index 74e6121a7..72b86709c 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -14,69 +14,11 @@
#
from __future__ import absolute_import
-import unittest
+import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_prox_profile.py b/tests/unit/network_services/traffic_profile/test_prox_profile.py
index a2ad0333f..14223da0f 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_profile.py
+++ b/tests/unit/network_services/traffic_profile/test_prox_profile.py
@@ -14,69 +14,11 @@
#
from __future__ import absolute_import
-import unittest
+import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_prox_ramp.py b/tests/unit/network_services/traffic_profile/test_prox_ramp.py
index 19e6ff8cb..357298759 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_ramp.py
+++ b/tests/unit/network_services/traffic_profile/test_prox_ramp.py
@@ -14,69 +14,11 @@
#
from __future__ import absolute_import
-import unittest
+import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/traffic_profile/test_rfc2544.py b/tests/unit/network_services/traffic_profile/test_rfc2544.py
index dcaf43dc5..aef0b93de 100644
--- a/tests/unit/network_services/traffic_profile/test_rfc2544.py
+++ b/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -17,68 +17,12 @@
from __future__ import absolute_import
from __future__ import division
+
import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -115,9 +59,9 @@ class TestRFC2544Profile(unittest.TestCase):
'outer_l3v4': {'dstip4': '1.1.1.1-1.15.255.255',
'proto': 'udp',
'srcip4': '90.90.1.1-90.105.255.255',
- 'dscp': 0, 'ttl': 32},
+ 'dscp': 0, 'ttl': 32, 'count': 1},
'outer_l4': {'srcport': '2001',
- 'dsrport': '1234'}}},
+ 'dsrport': '1234', 'count': 1}}},
'private_1': {'ipv4':
{'outer_l2': {'framesize':
{'64B': '100', '1518B': '0',
@@ -127,9 +71,9 @@ class TestRFC2544Profile(unittest.TestCase):
'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
'proto': 'udp',
'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32},
+ 'dscp': 0, 'ttl': 32, 'count': 1},
'outer_l4': {'dstport': '2001',
- 'srcport': '1234'}}},
+ 'srcport': '1234', 'count': 1}}},
'schema': 'isb:traffic_profile:0.1'}
def test___init__(self):
diff --git a/tests/unit/network_services/traffic_profile/test_traffic_profile.py b/tests/unit/network_services/traffic_profile/test_traffic_profile.py
index fd769e6e0..9a78c36a3 100644
--- a/tests/unit/network_services/traffic_profile/test_traffic_profile.py
+++ b/tests/unit/network_services/traffic_profile/test_traffic_profile.py
@@ -16,69 +16,12 @@
#
from __future__ import absolute_import
-import unittest
+import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -117,9 +60,11 @@ class TestTrexProfile(unittest.TestCase):
'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
'proto': 'udp',
'srcip4': '9.9.1.1-90.1.2.2',
- 'dscp': 0, 'ttl': 32},
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
'outer_l4': {'srcport': '2001',
- 'dsrport': '1234'}}},
+ 'dsrport': '1234',
+ 'count': 1}}},
'private': {'ipv4':
{'outer_l2': {'framesize':
{'64B': '100', '1518B': '0',
@@ -131,9 +76,10 @@ class TestTrexProfile(unittest.TestCase):
'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
'proto': 'udp',
'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32},
+ 'dscp': 0, 'ttl': 32, 'count': 1},
'outer_l4': {'dstport': '2001',
- 'srcport': '1234'}}},
+ 'srcport': '1234',
+ 'count': 1}}},
'schema': 'isb:traffic_profile:0.1'}
PROFILE_v6 = {'description': 'Traffic profile to run RFC2544 latency',
'name': 'rfc2544',
@@ -149,9 +95,11 @@ class TestTrexProfile(unittest.TestCase):
'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
'proto': 'udp',
'srcip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
- 'dscp': 0, 'ttl': 32},
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
'outer_l4': {'srcport': '2001',
- 'dsrport': '1234'}}},
+ 'dsrport': '1234',
+ 'count': 1}}},
'private':
{'ipv6': {'outer_l2': {'framesize':
{'64B': '100', '1518B': '0',
@@ -163,9 +111,11 @@ class TestTrexProfile(unittest.TestCase):
'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
'proto': 'udp',
'srcip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
- 'dscp': 0, 'ttl': 32},
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
'outer_l4': {'dstport': '2001',
- 'srcport': '1234'}}},
+ 'srcport': '1234',
+ 'count': 1}}},
'schema': 'isb:traffic_profile:0.1'}
def test___init__(self):
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index a63a59d48..7570067b9 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -16,70 +16,13 @@
#
from __future__ import absolute_import
+
import unittest
import mock
import os
+from tests.unit import STL_MOCKS
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
index bf226d2c8..f214d66f6 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
@@ -19,68 +19,10 @@ from __future__ import absolute_import
import os
import unittest
-
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_iniparser.py b/tests/unit/network_services/vnf_generic/vnf/test_iniparser.py
index 53481ddd0..b74e5d9fd 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_iniparser.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_iniparser.py
@@ -14,70 +14,13 @@
#
from __future__ import absolute_import
+
import unittest
from contextlib import contextmanager
-
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py b/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
index 90ec3f374..98eccae4f 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
@@ -23,69 +23,10 @@ import unittest
from collections import OrderedDict
from itertools import repeat, chain
from contextlib import contextmanager
-
import mock
+from tests.unit import STL_MOCKS
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
index 453100b90..c727cb7fb 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -19,71 +19,13 @@ from __future__ import absolute_import
import os
import unittest
-
import mock
from copy import deepcopy
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+from tests.unit import STL_MOCKS
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index 07a862a8e..455e44ec9 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -18,11 +18,13 @@
# Unittest for yardstick.network_services.vnf_generic.vnf.sample_vnf
from __future__ import absolute_import
+
import unittest
import mock
from copy import deepcopy
from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
from yardstick.benchmark.contexts.base import Context
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.traffic_profile.base import TrafficProfile
@@ -34,66 +36,6 @@ class MockError(BaseException):
pass
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()
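
Across all of these files the surrounding pattern is unchanged: STL_MOCKS is handed to mock.patch.dict and installed into sys.modules before the yardstick modules under test are imported, so any import of the mocked 'stl' / 'stl.trex_stl_lib.*' names resolves to a MagicMock instead of requiring the real TRex client libraries. A minimal sketch of that lifecycle, using only the standard mock.patch.dict API; the matching stop() call is not visible in the hunks shown and is assumed here:

    import mock
    from tests.unit import STL_MOCKS

    stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)  # stage the mock module entries
    stl_patch.start()   # from here on, importing any STL_MOCKS key yields a MagicMock
    # ... import the module under test and run the test cases ...
    stl_patch.stop()    # assumed teardown: restores sys.modules to its original state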
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
index cda44127e..5c81aa886 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
@@ -16,69 +16,13 @@
#
from __future__ import absolute_import
+
import unittest
import mock
import subprocess
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
index 949bfb3d4..45bbfaea3 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
@@ -16,74 +16,16 @@
#
from __future__ import absolute_import
+
import unittest
import mock
from multiprocessing import Queue
from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
SSH_HELPER = "yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper"
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
index 1a01b9e15..12abadf98 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
@@ -14,76 +14,17 @@
#
from __future__ import absolute_import
+
import unittest
import mock
from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
NAME = 'vnf__1'
-
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index 8f7f05772..ca8150cb2 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -16,68 +16,13 @@
#
from __future__ import absolute_import
+
import os
import unittest
import mock
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
index 7dc303852..ad8c6494e 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -16,70 +16,14 @@
#
from __future__ import absolute_import
+
import unittest
import mock
-SSH_HELPER = "yardstick.ssh.SSH"
+from tests.unit import STL_MOCKS
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+
+SSH_HELPER = "yardstick.ssh.SSH"
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -289,7 +233,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
def test_collect_kpi(self, ssh):
mock_ssh(ssh)
trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
- self.assertIsNone(trex_traffic_gen.collect_kpi())
+ self.assertEqual(trex_traffic_gen.collect_kpi(), {})
@mock.patch(SSH_HELPER)
def test_listen_traffic(self, ssh):
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
index 6fb5d080f..65370dfa5 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
@@ -16,74 +16,16 @@
#
from __future__ import absolute_import
+
import unittest
import mock
from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
NAME = 'vnf_1'
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py b/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
index 08bf06b74..f0d75d57b 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
@@ -16,70 +16,13 @@
#
from __future__ import absolute_import
+
import unittest
import mock
import os
+from tests.unit import STL_MOCKS
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
index d817b164c..7dae89f40 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
@@ -16,69 +16,13 @@
#
from __future__ import absolute_import
+
import unittest
import mock
import os
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index 80b4a5108..5e66390e3 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -16,77 +16,18 @@
#
from __future__ import absolute_import
+import six.moves.configparser as configparser
import os
import unittest
-
-import six.moves.configparser as configparser
import mock
from multiprocessing import Process, Queue
+from tests.unit import STL_MOCKS
from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
-STL_MOCKS = {
- 'stl': mock.MagicMock(),
- 'stl.trex_stl_lib': mock.MagicMock(),
- 'stl.trex_stl_lib.base64': mock.MagicMock(),
- 'stl.trex_stl_lib.binascii': mock.MagicMock(),
- 'stl.trex_stl_lib.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.copy': mock.MagicMock(),
- 'stl.trex_stl_lib.datetime': mock.MagicMock(),
- 'stl.trex_stl_lib.functools': mock.MagicMock(),
- 'stl.trex_stl_lib.imp': mock.MagicMock(),
- 'stl.trex_stl_lib.inspect': mock.MagicMock(),
- 'stl.trex_stl_lib.json': mock.MagicMock(),
- 'stl.trex_stl_lib.linecache': mock.MagicMock(),
- 'stl.trex_stl_lib.math': mock.MagicMock(),
- 'stl.trex_stl_lib.os': mock.MagicMock(),
- 'stl.trex_stl_lib.platform': mock.MagicMock(),
- 'stl.trex_stl_lib.pprint': mock.MagicMock(),
- 'stl.trex_stl_lib.random': mock.MagicMock(),
- 'stl.trex_stl_lib.re': mock.MagicMock(),
- 'stl.trex_stl_lib.scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.socket': mock.MagicMock(),
- 'stl.trex_stl_lib.string': mock.MagicMock(),
- 'stl.trex_stl_lib.struct': mock.MagicMock(),
- 'stl.trex_stl_lib.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.threading': mock.MagicMock(),
- 'stl.trex_stl_lib.time': mock.MagicMock(),
- 'stl.trex_stl_lib.traceback': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
- 'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
- 'stl.trex_stl_lib.types': mock.MagicMock(),
- 'stl.trex_stl_lib.utils': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.common': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.json': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.os': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.random': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.re': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.string': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
- 'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
- 'stl.trex_stl_lib.warnings': mock.MagicMock(),
- 'stl.trex_stl_lib.yaml': mock.MagicMock(),
- 'stl.trex_stl_lib.zlib': mock.MagicMock(),
- 'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index c8d53e324..e52c1076c 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -163,7 +163,8 @@ class HeatContext(Context):
network.physical_network,
network.provider,
network.segmentation_id,
- network.port_security_enabled)
+ network.port_security_enabled,
+ network.network_type)
template.add_subnet(network.subnet_stack_name, network.stack_name,
network.subnet_cidr,
network.enable_dhcp,
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
index aa144ab50..ee7ea7d83 100644
--- a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -40,3 +40,7 @@ stress-cpu:
block-io:
inject_script: ha_tools/disk/block_io.bash
recovery_script: ha_tools/disk/recovery_disk_io.bash
+
+kill-corosync:
+ inject_script: ha_tools/fault_process_kill.bash
+ recovery_script: ha_tools/node/reboot_node.bash \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash b/yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash
new file mode 100644
index 000000000..1ee8c9c2f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# reboot node
+
+reboot \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash
new file mode 100644
index 000000000..78dd27628
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get floating ip of a server
+# parameter: $1 - server name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack ${SECURE} server list -f value | grep $1 | awk '{print $5}' \ No newline at end of file
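The awk field choice relies on how "openstack server list -f value" renders the Networks cell: a value such as "private=10.0.0.5, 172.24.4.10" splits into two whitespace fields, so the fifth field of the grepped row is the floating address. A sketch of the same split in Python, on a hypothetical output row:

# hypothetical 'openstack server list -f value' row for server "yardstick-vm"
row = "a1b2c3d4 yardstick-vm ACTIVE private=10.0.0.5, 172.24.4.10 cirros m1.small"
print(row.split()[4])   # -> 172.24.4.10, the field awk '{print $5}' selects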
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash
new file mode 100644
index 000000000..0f67c021e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# list servers
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack ${SECURE} server list \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash
new file mode 100644
index 000000000..68707cf4f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get pacemaker resource status
+
+pcs resource show \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash
new file mode 100644
index 000000000..7a02ccf29
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get pacemaker resource status of hosts
+# parameter: $1 - resource name, $2 - status
+
+pcs resource show | grep $1 -A 3 | grep $2 \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash
new file mode 100644
index 000000000..f4870fdae
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get vip host in pacemaker
+# parameter: $1 - virtual ip name
+
+pcs resource show| grep -w $1 | awk '{print $4}' \ No newline at end of file
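get_vip_host.bash assumes a "pcs resource show" line of the form "<resource> (<agent>): <status> <node>", so the fourth whitespace field is the node currently hosting the VIP. The same extraction in Python, on a hypothetical pcs output line:

# hypothetical 'pcs resource show' line for a virtual IP resource
line = "vip__management (ocf::heartbeat:IPaddr2): Started node-1.domain.tld"
print(line.split()[3])   # -> node-1.domain.tld, what awk '{print $4}' returns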
diff --git a/yardstick/benchmark/scenarios/availability/operation_conf.yaml b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
index 1c39385a9..dc5169196 100644
--- a/yardstick/benchmark/scenarios/availability/operation_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/operation_conf.yaml
@@ -25,4 +25,13 @@ swift-download-file:
nova-create-flavor:
action_script: ha_tools/nova/create_flavor.bash
- rollback_script: ha_tools/nova/delete_flavor.bash \ No newline at end of file
+ rollback_script: ha_tools/nova/delete_flavor.bash
+
+get-floatingip:
+ action_script: ha_tools/nova/get_server_floatingip.bash
+ rollback_script: ha_tools/nova/list_servers.bash
+
+get-vip-host:
+ action_script: ha_tools/pacemaker/get_vip_host.bash
+ rollback_script: ha_tools/pacemaker/get_resource_status.bash
+
diff --git a/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
index 0494a71a7..451cc0f11 100644
--- a/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
@@ -18,4 +18,6 @@ service-checker:
nova-instance-checker:
verify_script: ha_tools/nova/show_instances.bash
nova-flavor-checker:
- verify_script: ha_tools/nova/show_flavors.bash \ No newline at end of file
+ verify_script: ha_tools/nova/show_flavors.bash
+pacemaker-resource-checker:
+ verify_script: ha_tools/pacemaker/get_resource_status_host.bash \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 17ad79f29..c7ed1d6ec 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -26,6 +26,7 @@ class ScenarioGeneral(base.Scenario):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.intermediate_variables = {}
+ self.pass_flag = True
def setup(self):
self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -63,6 +64,7 @@ class ScenarioGeneral(base.Scenario):
if v == 0:
result['sla_pass'] = 0
verify_result = False
+ self.pass_flag = False
LOG.info(
"\033[92m The service process not found in the host \
envrioment, the HA test case NOT pass")
@@ -74,9 +76,12 @@ envrioment, the HA test case NOT pass")
"the HA test case PASS! \033[0m")
else:
result['sla_pass'] = 0
+ self.pass_flag = False
LOG.info(
"\033[91m Aoh, the HA test case FAIL,"
"please check the detail debug information! \033[0m")
def teardown(self):
self.director.knockoff()
+
+ assert self.pass_flag, "The HA test case NOT passed"
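Both HA scenarios now remember SLA failures in a pass_flag set during run() and only raise in teardown(), so recovery steps still execute before the test case is marked failed. A condensed sketch of the pattern, assuming nothing beyond what the hunks above add:

class HAScenarioSketch(object):          # illustrative only, not the real class
    def __init__(self):
        self.pass_flag = True

    def run(self, result):
        sla_ok = bool(result.get('sla_pass', 1))
        if not sla_ok:
            result['sla_pass'] = 0
            self.pass_flag = False       # record the failure, do not raise yet

    def teardown(self):
        # cleanup runs first (director.knockoff(), attacker.recover(), ...)
        assert self.pass_flag, "The HA test case NOT passed"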
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 2f0012ecf..d0f5e9e4d 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.data = {}
+ self.pass_flag = True
def setup(self):
"""scenario setup"""
@@ -73,6 +74,7 @@ class ServiceHA(base.Scenario):
for k, v in self.data.items():
if v == 0:
result['sla_pass'] = 0
+ self.pass_flag = False
LOG.info("The service process not found in the host envrioment, \
the HA test case NOT pass")
return
@@ -81,6 +83,7 @@ the HA test case NOT pass")
LOG.info("The HA test case PASS the SLA")
else:
result['sla_pass'] = 0
+ self.pass_flag = False
assert sla_pass is True, "The HA test case NOT pass the SLA"
return
@@ -90,6 +93,8 @@ the HA test case NOT pass")
for attacker in self.attackers:
attacker.recover()
+ assert self.pass_flag, "The HA test case NOT passed"
+
def _test(): # pragma: no cover
"""internal test function"""
diff --git a/yardstick/benchmark/scenarios/availability/util.py b/yardstick/benchmark/scenarios/availability/util.py
index 6fef622bd..d288fcbc1 100644
--- a/yardstick/benchmark/scenarios/availability/util.py
+++ b/yardstick/benchmark/scenarios/availability/util.py
@@ -51,6 +51,8 @@ def build_shell_command(param_config, remote=True, intermediate_variables=None):
def read_stdout_item(stdout, key):
+ if key == "all":
+ return stdout
for item in stdout.splitlines():
if key in item:
attributes = item.split("|")
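read_stdout_item now short-circuits when the key is "all" and hands back the raw command output instead of parsing it line by line. A minimal sketch of the new behaviour; the per-field parsing after the split is not shown in the hunk and is left as a placeholder:

def read_stdout_item_sketch(stdout, key):
    if key == "all":                     # new branch: return the whole output untouched
        return stdout
    for item in stdout.splitlines():
        if key in item:
            attributes = item.split("|")
            # ... existing per-field parsing continues here (not shown in the hunk)

stdout = "| id | 42 |\n| status | ACTIVE |"
assert read_stdout_item_sketch(stdout, "all") == stdout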
diff --git a/yardstick/benchmark/scenarios/compute/computecapacity.bash b/yardstick/benchmark/scenarios/compute/computecapacity.bash
index 68741a94f..d49638fe3 100644
--- a/yardstick/benchmark/scenarios/compute/computecapacity.bash
+++ b/yardstick/benchmark/scenarios/compute/computecapacity.bash
@@ -27,12 +27,11 @@ run_capacity()
# Number of logical cores
THREAD=$(grep 'processor' /proc/cpuinfo | sort -u | wc -l)
# Total memory size
- MEMORY=$(grep 'MemTotal' /proc/meminfo | sort -u)
- ME=$(echo $MEMORY | awk '/ /{printf "%s %s", $2, $3}')
+ MEMORY=$(grep 'MemTotal' /proc/meminfo | sort -u | awk '{print $2}')
+
# Cache size per CPU
- CACHE=$(grep 'cache size' /proc/cpuinfo | sort -u)
- CA=$(echo $CACHE | awk '/ /{printf "%s", $4}')
- CACHES=$[$CA * $CPU]
+ CACHE=$(grep 'cache size' /proc/cpuinfo | sort -u | awk '{print $4}')
+ CACHES=$[$CACHE * $CPU]
HT_Value=$[$HT_Para * $CORES]
if [ $HT_Value -eq $THREAD ]; then
HT_OPEN=1
@@ -48,8 +47,8 @@ output_json()
\"Cpu_number\":\"$CPU\", \
\"Core_number\":\"$CORES\", \
\"Thread_number\":\"$THREAD\", \
- \"Memory_size\": \"$ME\", \
- \"Cache_size\": \"$CACHES KB\", \
+ \"Memory_size\": \"$MEMORY\", \
+ \"Cache_size\": \"$CACHES\", \
\"HT_Open\": \"$HT_OPEN\" \
}"
}
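With awk pulling the numeric columns directly, Memory_size and Cache_size are now reported as bare numbers (kB) rather than "value unit" strings. The same extraction in Python, on a hypothetical /proc/meminfo line:

# hypothetical /proc/meminfo line; the revised script keeps only the numeric kB value
meminfo_line = "MemTotal:       16342012 kB"
print(meminfo_line.split()[1])   # -> 16342012, matching awk '{print $2}'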
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index cee87a545..6c0446bb7 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -41,10 +41,18 @@ class QemuMigrate(base.Scenario):
def _put_files(self, client):
setup_options = self.scenario_cfg["setup_options"]
+ rpm_dir = setup_options["rpm_dir"]
script_dir = setup_options["script_dir"]
+ image_dir = setup_options["image_dir"]
+ LOG.debug("Send RPMs from %s to workspace %s",
+ rpm_dir, self.WORKSPACE)
+ client.put(rpm_dir, self.WORKSPACE, recursive=True)
LOG.debug("Send scripts from %s to workspace %s",
script_dir, self.WORKSPACE)
client.put(script_dir, self.WORKSPACE, recursive=True)
+ LOG.debug("Send guest image from %s to workspace %s",
+ image_dir, self.WORKSPACE)
+ client.put(image_dir, self.WORKSPACE, recursive=True)
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
@@ -143,10 +151,17 @@ def _test(): # pragma: no cover
"qmp_sock_dst": "/tmp/qmp-sock-dst",
"max_down_time": 0.10
}
+ sla = {
+ "max_totaltime": 10,
+ "max_downtime": 0.10,
+ "max_setuptime": 0.50,
+ }
args = {
- "options": options
+ "options": options,
+ "sla": sla
}
result = {}
+
migrate = QemuMigrate(args, ctx)
migrate.run(result)
print(result)
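_put_files now pushes three directories to the remote workspace, so the scenario's setup_options must provide all of them; the updated _test() helper also shows the sla block the runner is expected to evaluate. A sketch of the corresponding setup_options, with illustrative paths (only the key names are fixed by the code above):

setup_options = {
    "rpm_dir": "/opt/qemu/rpms",        # hypothetical paths
    "script_dir": "/opt/qemu/scripts",
    "image_dir": "/opt/qemu/images",
}
# each directory is pushed recursively to the workspace:
#   client.put(setup_options["rpm_dir"], WORKSPACE, recursive=True)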
diff --git a/yardstick/benchmark/scenarios/lib/create_keypair.py b/yardstick/benchmark/scenarios/lib/create_keypair.py
index 5610de651..2185bfa5d 100644
--- a/yardstick/benchmark/scenarios/lib/create_keypair.py
+++ b/yardstick/benchmark/scenarios/lib/create_keypair.py
@@ -57,8 +57,10 @@ class CreateKeypair(base.Scenario):
self.key_filename + ".pub")
if keypair:
+ result.update({"keypair_create": 1})
LOG.info("Create keypair successful!")
else:
+ result.update({"keypair_create": 0})
LOG.info("Create keypair failed!")
try:
keys = self.scenario_cfg.get('output', '').split()
diff --git a/yardstick/benchmark/scenarios/lib/create_network.py b/yardstick/benchmark/scenarios/lib/create_network.py
new file mode 100644
index 000000000..cffff132a
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/create_network.py
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateNetwork(base.Scenario):
+ """Create an OpenStack network"""
+
+ __scenario_type__ = "CreateNetwork"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.openstack = self.options.get("openstack_paras", None)
+
+ self.neutron_client = op_utils.get_neutron_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ openstack_paras = {'network': self.openstack}
+ network_id = op_utils.create_neutron_net(self.neutron_client,
+ openstack_paras)
+ if network_id:
+ result.update({"network_create": 1})
+ LOG.info("Create network successful!")
+ else:
+ result.update({"network_create": 0})
+ LOG.error("Create network failed!")
+
+ try:
+ keys = self.scenario_cfg.get('output', '').split()
+ except KeyError:
+ pass
+ else:
+ values = [network_id]
+ return self._push_to_outputs(keys, values)
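CreateNetwork follows the same shape as the other lib scenarios: options.openstack_paras is wrapped into {'network': ...}, handed to create_neutron_net, and the returned id can be exported through 'output'. A hedged usage sketch; the attribute names inside openstack_paras are illustrative Neutron fields, not values taken from this diff:

scenario_cfg = {
    'options': {
        'openstack_paras': {'name': 'demo-net', 'admin_state_up': True},
    },
    'output': 'network_id',
}
# CreateNetwork(scenario_cfg, context_cfg).run(result) sets result['network_create']
# to 1/0 and pushes the new network id to the 'network_id' output key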
diff --git a/yardstick/benchmark/scenarios/lib/create_port.py b/yardstick/benchmark/scenarios/lib/create_port.py
new file mode 100644
index 000000000..6a3a23a10
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/create_port.py
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreatePort(base.Scenario):
+ """Create an OpenStack flavor"""
+
+ __scenario_type__ = "CreatePort"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.openstack = self.options.get("openstack_paras", None)
+
+ self.neutron_client = op_utils.get_neutron_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ openstack_paras = {'port': self.openstack}
+ port = self.neutron_client.create_port(openstack_paras)
+
+ if port:
+ result.update({"Port_Create": 1})
+ LOG.info("Create Port successful!")
+ else:
+ result.update({"Port_Create": 0})
+ LOG.error("Create Port failed!")
+
+ check_result = port['port']['id']
+
+ try:
+ keys = self.scenario_cfg.get('output', '').split()
+ except KeyError:
+ pass
+ else:
+ values = [check_result]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_router.py b/yardstick/benchmark/scenarios/lib/create_router.py
new file mode 100644
index 000000000..9aa57ebb2
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/create_router.py
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateRouter(base.Scenario):
+ """Create an OpenStack router"""
+
+ __scenario_type__ = "CreateRouter"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.openstack = self.options.get("openstack_paras", None)
+
+ self.neutron_client = op_utils.get_neutron_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ openstack_paras = {'router': self.openstack}
+ router_id = op_utils.create_neutron_router(self.neutron_client,
+ openstack_paras)
+ if router_id:
+ result.update({"network_create": 1})
+ LOG.info("Create router successful!")
+ else:
+ result.update({"network_create": 0})
+ LOG.error("Create router failed!")
+
+ check_result = router_id
+
+ try:
+ keys = self.scenario_cfg.get('output', '').split()
+ except KeyError:
+ pass
+ else:
+ values = [check_result]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_sec_group.py b/yardstick/benchmark/scenarios/lib/create_sec_group.py
new file mode 100644
index 000000000..3d1aec9e8
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/create_sec_group.py
@@ -0,0 +1,65 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateSecgroup(base.Scenario):
+ """Create an OpenStack security group"""
+
+ __scenario_type__ = "CreateSecgroup"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.sg_name = self.options.get("sg_name", "yardstick_sec_group")
+ self.description = self.options.get("description", None)
+ self.neutron_client = op_utils.get_neutron_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ sg_id = op_utils.create_security_group_full(self.neutron_client,
+ sg_name=self.sg_name,
+ sg_description=self.description)
+
+ if sg_id:
+ result.update({"sg_create": 1})
+ LOG.info("Create security group successful!")
+ else:
+ result.update({"sg_create": 0})
+ LOG.error("Create security group failed!")
+
+ try:
+ keys = self.scenario_cfg.get('output', '').split()
+ except KeyError:
+ pass
+ else:
+ values = [sg_id]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_server.py b/yardstick/benchmark/scenarios/lib/create_server.py
index 45c0bfde9..273b0045a 100644
--- a/yardstick/benchmark/scenarios/lib/create_server.py
+++ b/yardstick/benchmark/scenarios/lib/create_server.py
@@ -59,8 +59,10 @@ class CreateServer(base.Scenario):
vm = op_utils.create_instance_and_wait_for_active(self.openstack)
if vm:
+ result.update({"instance_create": 1})
LOG.info("Create server successful!")
else:
+ result.update({"instance_create": 0})
LOG.error("Create server failed!")
try:
diff --git a/yardstick/benchmark/scenarios/lib/create_subnet.py b/yardstick/benchmark/scenarios/lib/create_subnet.py
new file mode 100644
index 000000000..c34af8a9e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/create_subnet.py
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateSubnet(base.Scenario):
+ """Create an OpenStack flavor"""
+
+ __scenario_type__ = "CreateSubnet"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.openstack = self.options.get("openstack_paras", None)
+
+ self.neutron_client = op_utils.get_neutron_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ openstack_paras = {'subnets': [self.openstack]}
+ subnet_id = op_utils.create_neutron_subnet(self.neutron_client,
+ openstack_paras)
+ if subnet_id:
+ result.update({"subnet_create": 1})
+ LOG.info("Create subnet successful!")
+ else:
+ result.update({"subnet_create": 0})
+ LOG.error("Create subnet failed!")
+
+ check_result = subnet_id
+
+ try:
+ keys = self.scenario_cfg.get('output', '').split()
+ except KeyError:
+ pass
+ else:
+ values = [check_result]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/delete_floating_ip.py b/yardstick/benchmark/scenarios/lib/delete_floating_ip.py
new file mode 100644
index 000000000..4314952fb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/delete_floating_ip.py
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DeleteFloatingIp(base.Scenario):
+ """Delete an OpenStack floating ip """
+
+ __scenario_type__ = "DeleteFloatingIp"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.floating_ip_id = self.options.get("floating_ip_id", None)
+
+ self.nova_client = op_utils.get_nova_client()
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ status = op_utils.delete_floating_ip(nova_client=self.nova_client,
+ floatingip_id=self.floating_ip_id)
+ if status:
+ result.update({"delete_floating_ip": 1})
+ LOG.info("Delete floating ip successful!")
+ else:
+ result.update({"delete_floating_ip": 0})
+ LOG.error("Delete floating ip failed!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_keypair.py b/yardstick/benchmark/scenarios/lib/delete_keypair.py
new file mode 100644
index 000000000..135139959
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/delete_keypair.py
@@ -0,0 +1,56 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DeleteKeypair(base.Scenario):
+ """Delete an OpenStack keypair"""
+
+ __scenario_type__ = "DeleteKeypair"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.key_name = self.options.get("key_name", "yardstick_key")
+
+ self.nova_client = op_utils.get_nova_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ status = op_utils.delete_keypair(self.nova_client,
+ self.key_name)
+
+ if status:
+ result.update({"delete_keypair": 1})
+ LOG.info("Delete keypair successful!")
+ else:
+ result.update({"delete_keypair": 0})
+ LOG.info("Delete keypair failed!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_volume.py b/yardstick/benchmark/scenarios/lib/delete_volume.py
new file mode 100644
index 000000000..ea2b85812
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/delete_volume.py
@@ -0,0 +1,55 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DeleteVolume(base.Scenario):
+ """Delete an OpenStack volume"""
+
+ __scenario_type__ = "DeleteVolume"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.volume_id = self.options.get("volume_id", None)
+
+ self.cinder_client = op_utils.get_cinder_client()
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ status = op_utils.delete_volume(self.cinder_client, self.volume_id)
+
+ if status:
+ result.update({"delete_volume": 1})
+ LOG.info("Delete volume successful!")
+ else:
+ result.update({"delete_volume": 0})
+ LOG.info("Delete volume failed!")
diff --git a/yardstick/benchmark/scenarios/lib/detach_volume.py b/yardstick/benchmark/scenarios/lib/detach_volume.py
new file mode 100644
index 000000000..0b02a3a81
--- /dev/null
+++ b/yardstick/benchmark/scenarios/lib/detach_volume.py
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DetachVolume(base.Scenario):
+ """Detach a volume from an instance"""
+
+ __scenario_type__ = "DetachVolume"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = self.scenario_cfg['options']
+
+ self.server_id = self.options.get("server_id", "TestServer")
+ self.volume_id = self.options.get("volume_id", None)
+
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ status = op_utils.detach_volume(self.server_id, self.volume_id)
+
+ if status:
+ result.update({"detach_volume": 1})
+ LOG.info("Detach volume from server successful!")
+ else:
+ result.update({"detach_volume": 0})
+ LOG.info("Detach volume from server failed!")
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index 1e0a5fcbb..a9e7aa6a3 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -11,6 +11,7 @@ from __future__ import print_function
import os
import logging
+import math
import pkg_resources
from oslo_serialization import jsonutils
@@ -357,15 +358,15 @@ class Pktgen(base.Scenario):
result.update(jsonutils.loads(stdout))
- result['packets_received'] = self._iptables_get_result()
+ received = result['packets_received'] = self._iptables_get_result()
+ sent = result['packets_sent']
result['packetsize'] = packetsize
+    # use math.ceil so the round-up behaves the same under Python 3 true division
+ ppm = math.ceil(1000000.0 * (sent - received) / sent)
+
+ result['ppm'] = ppm
if "sla" in self.scenario_cfg:
- sent = result['packets_sent']
- received = result['packets_received']
- ppm = 1000000 * (sent - received) / sent
- # if ppm is 1, then 11 out of 10 million is no pass
- ppm += (sent - received) % sent > 0
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 9f8efa6dc..4510bcfba 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -14,13 +14,17 @@
""" NSPerf specific scenario definition """
from __future__ import absolute_import
-import logging
+import logging
import errno
-import os
+import ipaddress
+import os
+import sys
import re
from itertools import chain
+
+import six
from operator import itemgetter
from collections import defaultdict
@@ -31,8 +35,10 @@ from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.traffic_profile.base import TrafficProfile
+from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh
+
LOG = logging.getLogger(__name__)
@@ -126,19 +132,50 @@ class NetworkServiceTestCase(base.Scenario):
self.collector = None
self.traffic_profile = None
+ def _get_ip_flow_range(self, ip_start_range):
+
+ node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
+ if node_name is not None:
+ node = self.context_cfg["nodes"].get(node_name, {})
+ try:
+ # the ip_range is the interface name
+ interface = node.get("interfaces", {})[range_or_interface]
+ except KeyError:
+ ip = "0.0.0.0"
+ mask = "255.255.255.0"
+ else:
+ ip = interface["local_ip"]
+ # we can't default these values, they must both exist to be valid
+ mask = interface["netmask"]
+
+ ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
+ hosts = list(ipaddr.hosts())
+ ip_addr_range = "{}-{}".format(hosts[0], hosts[-1])
+ else:
+ # we are manually specifying the range
+ ip_addr_range = range_or_interface
+ return ip_addr_range
+
def _get_traffic_flow(self):
+ flow = {}
try:
- with open(self.scenario_cfg["traffic_options"]["flow"]) as fflow:
- flow = yaml_load(fflow)
- except (KeyError, IOError, OSError):
+ fflow = self.scenario_cfg["options"]["flow"]
+ for index, src in enumerate(fflow.get("src_ip", [])):
+ flow["src_ip{}".format(index)] = self._get_ip_flow_range(src)
+
+ for index, dst in enumerate(fflow.get("dst_ip", [])):
+ flow["dst_ip{}".format(index)] = self._get_ip_flow_range(dst)
+
+ for index, publicip in enumerate(fflow.get("publicip", [])):
+ flow["public_ip{}".format(index)] = publicip
+ except KeyError:
flow = {}
- return flow
+ return {"flow": flow}
def _get_traffic_imix(self):
try:
- with open(self.scenario_cfg["traffic_options"]["imix"]) as fimix:
- imix = yaml_load(fimix)
- except (KeyError, IOError, OSError):
+ imix = {"imix": self.scenario_cfg['options']['framesize']}
+ except KeyError:
imix = {}
return imix
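
With this change the flow and imix definitions are read directly from the scenario options instead of separate YAML files. A sketch of the mapping, using made-up option values; the node and interface names are placeholders resolved by _get_ip_flow_range() above:

    scenario_cfg = {
        'options': {
            'flow': {
                'src_ip': [{'tg__1': 'xe0'}],      # node/interface reference, placeholder
                'dst_ip': [{'tg__1': 'xe1'}],
                'publicip': ['10.0.2.15'],
            },
            'framesize': {'64B': 100},
        }
    }
    # _get_traffic_flow() would then return something like:
    # {'flow': {'src_ip0': '10.0.0.1-10.0.0.254',
    #           'dst_ip0': '10.0.1.1-10.0.1.254',
    #           'public_ip0': '10.0.2.15'}}
    # and _get_traffic_imix() returns {'imix': {'64B': 100}}
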
@@ -404,6 +441,9 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
:param context_cfg:
:return:
"""
+ trex_lib_path = get_nsb_option('trex_client_lib')
+ sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
+
if scenario_cfg is None:
scenario_cfg = self.scenario_cfg
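
The sys.path rewrite prepends the configured trex_client_lib directory and drops any earlier occurrence, so the un-prefixed TRex imports used elsewhere in this patch (trex_stl_lib.* instead of stl.trex_stl_lib.*) resolve consistently. The same idiom in isolation, with an assumed example path:

    import sys
    from itertools import chain

    trex_lib_path = '/opt/nsb_bin/trex_client/stl'   # assumed example path
    sys.path[:] = list(chain([trex_lib_path],
                             (p for p in sys.path if p != trex_lib_path)))
    # trex_lib_path is now first in sys.path and appears exactly once
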
@@ -451,7 +491,6 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
for vnf in chain(traffic_runners, non_traffic_runners):
LOG.info("Instantiating %s", vnf.name)
vnf.instantiate(self.scenario_cfg, self.context_cfg)
- for vnf in chain(traffic_runners, non_traffic_runners):
LOG.info("Waiting for %s to instantiate", vnf.name)
vnf.wait_for_instantiate()
except RuntimeError:
@@ -484,7 +523,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
for vnf in self.vnfs:
# Result example:
# {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("vnf")
+ LOG.debug("collect KPI for %s", vnf.name)
result.update(self.collector.get_kpi(vnf))
def teardown(self):
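
The interface-to-range conversion added in _get_ip_flow_range() leans entirely on the ipaddress module: the interface's local_ip and netmask are folded into a network, and the first and last usable hosts bound the flow range. A standalone sketch with example addresses:

    import ipaddress
    import six

    ip, mask = '192.168.10.5', '255.255.255.0'        # example interface values
    network = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
    hosts = list(network.hosts())
    ip_addr_range = '{}-{}'.format(hosts[0], hosts[-1])
    # ip_addr_range == '192.168.10.1-192.168.10.254'
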
diff --git a/yardstick/benchmark/scenarios/storage/storagecapacity.bash b/yardstick/benchmark/scenarios/storage/storagecapacity.bash
index f963782d8..96db6e1be 100644
--- a/yardstick/benchmark/scenarios/storage/storagecapacity.bash
+++ b/yardstick/benchmark/scenarios/storage/storagecapacity.bash
@@ -17,7 +17,7 @@ OUTPUT_FILE=/tmp/storagecapacity-out.log
# run disk_size test
run_disk_size()
{
- fdisk -l | grep '^Disk.*bytes$' | awk -F [:,\ ] '{print $2,$7}' > $OUTPUT_FILE
+ fdisk -l | grep '^Disk.*bytes' | awk -F [:,\ ] '{print $2,$7}' > $OUTPUT_FILE
}
# write the disk size to stdout in json format
@@ -35,7 +35,7 @@ output_disk_size()
run_block_size()
{
echo -n "" > $OUTPUT_FILE
- blkdevices=`fdisk -l | grep '^Disk.*bytes$' | awk -F [:,\ ] '{print $2}'`
+ blkdevices=`fdisk -l | grep '^Disk.*bytes' | awk -F [:,\ ] '{print $2}'`
blkdevices=($blkdevices)
for bd in "${blkdevices[@]}";do
blk_size=`blockdev --getbsz $bd`
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index fe394fd4d..ec683e94b 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -59,6 +59,7 @@ if not SERVER_IP:
# dir
CONF_DIR = get_param('dir.conf', '/etc/yardstick')
+IMAGE_DIR = get_param('dir.images', '/home/opnfv/images/')
REPOS_DIR = get_param('dir.repos', '/home/opnfv/repos/yardstick')
RELENG_DIR = get_param('dir.releng', '/home/opnfv/repos/releng')
LOG_DIR = get_param('dir.log', '/tmp/yardstick/')
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 540d8d641..c862a6ba2 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -11,6 +11,7 @@ from __future__ import absolute_import
import os
import time
+import sys
import logging
from keystoneauth1 import loading
@@ -423,6 +424,15 @@ def delete_flavor(flavor_id): # pragma: no cover
return True
+def delete_keypair(nova_client, key): # pragma: no cover
+ try:
+ nova_client.keypairs.delete(key=key)
+ return True
+ except Exception:
+ log.exception("Error [delete_keypair(nova_client)]")
+ return False
+
+
# *********************************************
# NEUTRON
# *********************************************
@@ -437,6 +447,36 @@ def get_port_id_by_ip(neutron_client, ip_address): # pragma: no cover
'fixed_ips') if j['ip_address'] == ip_address), None)
+def create_neutron_net(neutron_client, json_body): # pragma: no cover
+ try:
+ network = neutron_client.create_network(body=json_body)
+ return network['network']['id']
+ except Exception:
+ log.error("Error [create_neutron_net(neutron_client)]")
+ raise Exception("operation error")
+ return None
+
+
+def create_neutron_subnet(neutron_client, json_body): # pragma: no cover
+ try:
+ subnet = neutron_client.create_subnet(body=json_body)
+ return subnet['subnets'][0]['id']
+ except Exception:
+        log.error("Error [create_neutron_subnet(neutron_client)]")
+ raise Exception("operation error")
+ return None
+
+
+def create_neutron_router(neutron_client, json_body): # pragma: no cover
+ try:
+ router = neutron_client.create_router(json_body)
+ return router['router']['id']
+ except Exception:
+ log.error("Error [create_neutron_router(neutron_client)]")
+ raise Exception("operation error")
+ return None
+
+
def create_floating_ip(neutron_client, extnet_id): # pragma: no cover
props = {'floating_network_id': extnet_id}
try:
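
The new create_neutron_net/create_neutron_subnet/create_neutron_router helpers are thin pass-throughs: the caller supplies the full json_body. A hedged sketch of what such bodies could look like (names and CIDR are placeholders, neutron_client is assumed to be an initialized client, and error handling is omitted); note that create_neutron_subnet expects the bulk 'subnets' form because it indexes ['subnets'][0]:

    net_body = {'network': {'name': 'yardstick-net', 'admin_state_up': True}}
    net_id = create_neutron_net(neutron_client, net_body)

    subnet_body = {'subnets': [{'name': 'yardstick-subnet',
                                'network_id': net_id,
                                'cidr': '10.0.0.0/24',
                                'ip_version': 4}]}
    subnet_id = create_neutron_subnet(neutron_client, subnet_body)

    router_body = {'router': {'name': 'yardstick-router', 'admin_state_up': True}}
    router_id = create_neutron_router(neutron_client, router_body)
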
@@ -449,6 +489,129 @@ def create_floating_ip(neutron_client, extnet_id): # pragma: no cover
return {'fip_addr': fip_addr, 'fip_id': fip_id}
+def delete_floating_ip(nova_client, floatingip_id): # pragma: no cover
+ try:
+ nova_client.floating_ips.delete(floatingip_id)
+ return True
+ except Exception:
+ log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+ return False
+
+
+def get_security_groups(neutron_client): # pragma: no cover
+ try:
+ security_groups = neutron_client.list_security_groups()[
+ 'security_groups']
+ return security_groups
+ except Exception:
+ log.error("Error [get_security_groups(neutron_client)]")
+ return None
+
+
+def get_security_group_id(neutron_client, sg_name): # pragma: no cover
+ security_groups = get_security_groups(neutron_client)
+ id = ''
+ for sg in security_groups:
+ if sg['name'] == sg_name:
+ id = sg['id']
+ break
+ return id
+
+
+def create_security_group(neutron_client, sg_name, sg_description): # pragma: no cover
+ json_body = {'security_group': {'name': sg_name,
+ 'description': sg_description}}
+ try:
+ secgroup = neutron_client.create_security_group(json_body)
+ return secgroup['security_group']
+ except Exception:
+ log.error("Error [create_security_group(neutron_client, '%s', "
+ "'%s')]" % (sg_name, sg_description))
+ return None
+
+
+def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
+ port_range_min=None, port_range_max=None,
+ **json_body): # pragma: no cover
+    # We create a security group rule in two steps:
+    # 1 - check the format and set the json body accordingly
+    # 2 - call the neutron client to create the security group rule
+
+ # Format check
+ json_body.update({'security_group_rule': {'direction': direction,
+ 'security_group_id': sg_id, 'protocol': protocol}})
+ # parameters may be
+ # - both None => we do nothing
+ # - both Not None => we add them to the json description
+    # but one cannot be None if the other is not None
+ if (port_range_min is not None and port_range_max is not None):
+ # add port_range in json description
+ json_body['security_group_rule']['port_range_min'] = port_range_min
+ json_body['security_group_rule']['port_range_max'] = port_range_max
+ log.debug("Security_group format set (port range included)")
+ else:
+ # either both port range are set to None => do nothing
+ # or one is set but not the other => log it and return False
+ if port_range_min is None and port_range_max is None:
+ log.debug("Security_group format set (no port range mentioned)")
+ else:
+            log.error("Bad security group format. "
+                      "One of the port range bounds is not properly set: "
+                      "range min: {}, "
+                      "range max: {}".format(port_range_min,
+                                             port_range_max))
+ return False
+
+ # Create security group using neutron client
+ try:
+ neutron_client.create_security_group_rule(json_body)
+ return True
+ except Exception:
+        log.exception("Unable to create security group rule; "
+                      "it probably already exists")
+ return False
+
+
+def create_security_group_full(neutron_client,
+ sg_name, sg_description): # pragma: no cover
+ sg_id = get_security_group_id(neutron_client, sg_name)
+ if sg_id != '':
+ log.info("Using existing security group '%s'..." % sg_name)
+ else:
+ log.info("Creating security group '%s'..." % sg_name)
+ SECGROUP = create_security_group(neutron_client,
+ sg_name,
+ sg_description)
+ if not SECGROUP:
+ log.error("Failed to create the security group...")
+ return None
+
+ sg_id = SECGROUP['id']
+
+ log.debug("Security group '%s' with ID=%s created successfully."
+ % (SECGROUP['name'], sg_id))
+
+ log.debug("Adding ICMP rules in security group '%s'..."
+ % sg_name)
+ if not create_secgroup_rule(neutron_client, sg_id,
+ 'ingress', 'icmp'):
+ log.error("Failed to create the security group rule...")
+ return None
+
+ log.debug("Adding SSH rules in security group '%s'..."
+ % sg_name)
+ if not create_secgroup_rule(
+ neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
+ log.error("Failed to create the security group rule...")
+ return None
+
+ if not create_secgroup_rule(
+ neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
+ log.error("Failed to create the security group rule...")
+ return None
+ return sg_id
+
+
# *********************************************
# GLANCE
# *********************************************
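
create_security_group_full wraps the lookup/create/rule helpers above: it reuses an existing group by name, otherwise creates one and adds ICMP ingress plus TCP/22 ingress and egress rules, returning the group ID (or None on failure). A short usage sketch with placeholder names:

    sg_id = create_security_group_full(neutron_client,
                                       'yardstick-secgroup',                     # placeholder name
                                       'security group for yardstick test VMs')  # placeholder description
    if sg_id is None:
        raise RuntimeError('security group setup failed')
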
@@ -523,3 +686,33 @@ def create_volume(cinder_client, volume_name, volume_size,
log.exception("Error [create_volume(cinder_client, %s)]",
(volume_name, volume_size))
return None
+
+
+def delete_volume(cinder_client, volume_id, forced=False): # pragma: no cover
+ try:
+ if forced:
+ try:
+ cinder_client.volumes.detach(volume_id)
+            except Exception:
+ log.error(sys.exc_info()[0])
+ cinder_client.volumes.force_delete(volume_id)
+ else:
+ while True:
+ volume = get_cinder_client().volumes.get(volume_id)
+ if volume.status.lower() == 'available':
+ break
+ cinder_client.volumes.delete(volume_id)
+ return True
+ except Exception:
+ log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+ return False
+
+
+def detach_volume(server_id, volume_id): # pragma: no cover
+ try:
+ get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+ return True
+ except Exception:
+        log.exception("Error [detach_volume(server_id='%s', volume_id='%s')]",
+                      server_id, volume_id)
+ return False
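
Together, detach_volume and delete_volume above give the volume scenarios their backend. A minimal cleanup sketch, assuming the IDs were produced by an earlier attach/create step and that the nova and cinder clients are reachable:

    server_id = 'example-server-uuid'     # placeholder
    volume_id = 'example-volume-uuid'     # placeholder
    if detach_volume(server_id, volume_id):
        delete_volume(get_cinder_client(), volume_id)
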
diff --git a/yardstick/network_services/helpers/cpu.py b/yardstick/network_services/helpers/cpu.py
index a5ba6c31e..8c21754ff 100644
--- a/yardstick/network_services/helpers/cpu.py
+++ b/yardstick/network_services/helpers/cpu.py
@@ -13,6 +13,9 @@
# limitations under the License.
+import io
+
+
class CpuSysCores(object):
def __init__(self, connection=""):
@@ -20,8 +23,9 @@ class CpuSysCores(object):
self.connection = connection
def _open_cpuinfo(self):
- lines = []
- lines = self.connection.execute("cat /proc/cpuinfo")[1].split(u'\n')
+ cpuinfo = io.BytesIO()
+ self.connection.get_file_obj("/proc/cpuinfo", cpuinfo)
+ lines = cpuinfo.getvalue().decode('utf-8').splitlines()
return lines
def _get_core_details(self, lines):
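
Reading /proc/cpuinfo through SFTP (get_file_obj, added to yardstick/ssh.py later in this patch) avoids spawning a remote cat and keeps the transfer binary-safe. The pattern in isolation, assuming connection is an established yardstick.ssh connection object:

    import io

    cpuinfo = io.BytesIO()
    connection.get_file_obj('/proc/cpuinfo', cpuinfo)
    lines = cpuinfo.getvalue().decode('utf-8').splitlines()
    n_cpus = sum(1 for line in lines if line.startswith('processor'))
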
diff --git a/yardstick/network_services/helpers/samplevnf_helper.py b/yardstick/network_services/helpers/samplevnf_helper.py
index dbaa47c19..ae5451020 100644
--- a/yardstick/network_services/helpers/samplevnf_helper.py
+++ b/yardstick/network_services/helpers/samplevnf_helper.py
@@ -36,13 +36,13 @@ link {0} up
ACTION_TEMPLATE = """\
p action add {0} accept
-p action add {0} fwd
+p action add {0} fwd {0}
p action add {0} count
"""
FW_ACTION_TEMPLATE = """\
p action add {0} accept
-p action add {0} fwd
+p action add {0} fwd {0}
p action add {0} count
p action add {0} conntrack
"""
@@ -87,9 +87,18 @@ class MultiPortConfig(object):
return default
@staticmethod
- def make_ip_addr(ip, mask_len):
+ def make_ip_addr(ip, mask):
+ """
+        :param ip: IP address
+        :type ip: str
+        :param mask: prefix length (e.g. "24") or dotted netmask (e.g. "255.255.255.0")
+ :type mask: str
+ :return: interface
+ :rtype: IPv4Interface
+ """
+
try:
- return ipaddress.ip_interface(six.text_type('/'.join([ip, mask_len])))
+ return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
except (TypeError, ValueError):
# None so we can skip later
return None
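
ipaddress.ip_interface accepts either a prefix length or a dotted netmask after the slash, which is why the parameter is renamed from mask_len to the more general mask. Both forms below yield the same interface (illustrative values):

    import ipaddress
    import six

    ipaddress.ip_interface(six.text_type('/'.join(['10.0.0.5', '24'])))
    # IPv4Interface('10.0.0.5/24')
    ipaddress.ip_interface(six.text_type('/'.join(['10.0.0.5', '255.255.255.0'])))
    # IPv4Interface('10.0.0.5/24')
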
@@ -213,7 +222,7 @@ class MultiPortConfig(object):
return
try:
- self.start_core = 'h{}'.format(int(self.start_core))
+ self.start_core = '{}h'.format(int(self.start_core))
except ValueError:
self.start_core = int(self.start_core[:-1]) + 1
@@ -279,18 +288,19 @@ class MultiPortConfig(object):
for port in port_pair:
port_num = int(port[-1])
interface = self.interfaces[port_num]
- # port0_ip = ipaddress.ip_interface(six.text_type(
- # "%s/%s" % (interface["virtual-interface"]["local_ip"],
- # interface["virtual-interface"]["netmask"])))
+ # We must use the dst because we are on the VNF and we need to
+ # reach the TG.
dst_port0_ip = \
ipaddress.ip_interface(six.text_type(
"%s/%s" % (interface["virtual-interface"]["dst_ip"],
interface["virtual-interface"]["netmask"])))
arp_vars = {
- "port0_dst_ip_hex": ip_to_hex(dst_port0_ip.ip.exploded),
+ "port0_dst_ip_hex": ip_to_hex(dst_port0_ip.network.network_address.exploded),
"port0_netmask_hex": ip_to_hex(dst_port0_ip.network.netmask.exploded),
+            # port_num identifies the port whose subnet contains next_hop_ip_hex
"port_num": port_num,
# next hop is dst in this case
+ # must be within subnet
"next_hop_ip_hex": ip_to_hex(dst_port0_ip.ip.exploded),
}
arp_config.append(arp_route_tbl_tmpl.format(**arp_vars))
@@ -302,20 +312,25 @@ class MultiPortConfig(object):
self.swq += self.lb_count
swq_out_str = self.make_range_str('SWQ{}', self.swq, offset=self.lb_count)
self.swq += self.lb_count
- mac_iter = (self.interfaces[int(x[-1])]['virtual-interface']['local_mac']
- for port_pair in self.port_pair_list for x in port_pair)
+        # ports_mac_list is disabled below, so mac_iter is kept only for reference
+ # mac_iter = (self.interfaces[int(x[-1])]['virtual-interface']['local_mac']
+ # for port_pair in self.port_pair_list for x in port_pair)
pktq_in_iter = ('RXQ{}'.format(float(x[0][-1])) for x in self.port_pair_list)
arpicmp_data = {
'core': self.gen_core(self.start_core),
'pktq_in': swq_in_str,
'pktq_out': swq_out_str,
- 'ports_mac_list': ' '.join(mac_iter),
+            # ports_mac_list looks like it is no longer required, so it is left disabled
+            # 'ports_mac_list': ' '.join(mac_iter),
'pktq_in_prv': ' '.join(pktq_in_iter),
'prv_to_pub_map': self.set_priv_to_pub_mapping(),
'arp_route_tbl': self.generate_arp_route_tbl(),
- # can't use empty string, defaul to ()
- 'nd_route_tbl': "()",
+            # nd_route_tbl must be set or we get a segfault on stray OpenStack IPv6 traffic
+            # 'nd_route_tbl': "(0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)"
+            # safe default: route the IPv6 discard prefix to localhost
+ 'nd_route_tbl': "(0100::,64,0,::1)"
}
self.pktq_out_os = swq_out_str.split(' ')
# why?
@@ -520,12 +535,13 @@ class MultiPortConfig(object):
arp_config = []
for port_pair in self.port_pair_list:
for port in port_pair:
- gateway = self.get_ports_gateway(port)
- # omit entries with no gateway
- if not gateway:
- continue
+ # ignore gateway, always use TG IP
+ # gateway = self.get_ports_gateway(port)
dst_mac = self.interfaces[int(port[-1])]["virtual-interface"]["dst_mac"]
- arp_config.append((port[-1], gateway, dst_mac, self.txrx_pipeline))
+ dst_ip = self.interfaces[int(port[-1])]["virtual-interface"]["dst_ip"]
+ # arp_config.append((port[-1], gateway, dst_mac, self.txrx_pipeline))
+            # dst_mac is the TG destination MAC, so we also need the TG destination IP.
+ arp_config.append((port[-1], dst_ip, dst_mac, self.txrx_pipeline))
return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config))
@@ -533,12 +549,12 @@ class MultiPortConfig(object):
arp_config6 = []
for port_pair in self.port_pair_list:
for port in port_pair:
- gateway6 = self.get_ports_gateway6(port)
- # omit entries with no gateway
- if not gateway6:
- continue
+ # ignore gateway, always use TG IP
+ # gateway6 = self.get_ports_gateway6(port)
dst_mac6 = self.interfaces[int(port[-1])]["virtual-interface"]["dst_mac"]
- arp_config6.append((port[-1], gateway6, dst_mac6, self.txrx_pipeline))
+ dst_ip6 = self.interfaces[int(port[-1])]["virtual-interface"]["dst_ip"]
+ # arp_config6.append((port[-1], gateway6, dst_mac6, self.txrx_pipeline))
+ arp_config6.append((port[-1], dst_ip6, dst_mac6, self.txrx_pipeline))
return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config6))
@@ -556,13 +572,17 @@ class MultiPortConfig(object):
return ''.join((template.format(port) for port in port_list))
def get_ip_from_port(self, port):
- return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
+        # we can't use the gateway because in OpenStack gateways interfere with floating IP routing
+ # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
+ ip = self.interfaces[port]["virtual-interface"]["local_ip"]
+ netmask = self.interfaces[port]["virtual-interface"]["netmask"]
+ return self.make_ip_addr(ip, netmask)
- def get_ip_and_prefixlen_from_ip_of_port(self, port):
+ def get_network_and_prefixlen_from_ip_of_port(self, port):
ip_addr = self.get_ip_from_port(port)
# handle cases with no gateway
if ip_addr:
- return ip_addr.ip.exploded, ip_addr.network.prefixlen
+ return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
else:
return None, None
@@ -576,25 +596,25 @@ class MultiPortConfig(object):
src_port = int(port_pair[0][-1])
dst_port = int(port_pair[1][-1])
- src_ip, src_prefix_len = self.get_ip_and_prefixlen_from_ip_of_port(port_pair[0])
- dst_ip, dst_prefix_len = self.get_ip_and_prefixlen_from_ip_of_port(port_pair[1])
- # ignore entires with empty values
- if all((src_ip, src_prefix_len, dst_ip, dst_prefix_len)):
- new_rules.append((cmd, self.txrx_pipeline, src_ip, src_prefix_len,
- dst_ip, dst_prefix_len, dst_port))
- new_rules.append((cmd, self.txrx_pipeline, dst_ip, dst_prefix_len,
- src_ip, src_prefix_len, src_port))
-
- src_ip = self.get_ports_gateway6(port_pair[0])
- src_prefix_len = self.get_netmask_gateway6(port_pair[0])
- dst_ip = self.get_ports_gateway6(port_pair[1])
- dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
+ src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_port)
+ dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_port)
# ignore entires with empty values
- if all((src_ip, src_prefix_len, dst_ip, dst_prefix_len)):
- new_ipv6_rules.append((cmd, self.txrx_pipeline, src_ip, src_prefix_len,
- dst_ip, dst_prefix_len, dst_port))
- new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_ip, dst_prefix_len,
- src_ip, src_prefix_len, src_port))
+ if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
+ new_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
+ dst_net, dst_prefix_len, dst_port))
+ new_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
+ src_net, src_prefix_len, src_port))
+
+ # src_net = self.get_ports_gateway6(port_pair[0])
+ # src_prefix_len = self.get_netmask_gateway6(port_pair[0])
+ # dst_net = self.get_ports_gateway6(port_pair[1])
+ # dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
+            # # ignore entries with empty values
+ # if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
+ # new_ipv6_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
+ # dst_net, dst_prefix_len, dst_port))
+ # new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
+ # src_net, src_prefix_len, src_port))
acl_apply = "\np %s applyruleset" % cmd
new_rules_config = '\n'.join(pattern.format(*values) for values
@@ -607,7 +627,9 @@ class MultiPortConfig(object):
script_data = {
'link_config': self.generate_link_config(),
'arp_config': self.generate_arp_config(),
- 'arp_config6': self.generate_arp_config6(),
+ # disable IPv6 for now
+ # 'arp_config6': self.generate_arp_config6(),
+ 'arp_config6': "",
'actions': '',
'rules': '',
}
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index 2fb4a8e8e..48bcd3118 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -27,7 +27,7 @@ from oslo_config import cfg
from yardstick import ssh
from yardstick.network_services.nfvi.collectd import AmqpConsumer
-from yardstick.network_services.utils import provision_tool
+from yardstick.network_services.utils import get_nsb_option
LOG = logging.getLogger(__name__)
@@ -196,10 +196,21 @@ class ResourceProfile(object):
self._provide_config_file(bin_path, 'collectd.conf', kwargs)
def _start_collectd(self, connection, bin_path):
- LOG.debug("Starting collectd to collect NFVi stats")
connection.execute('sudo pkill -9 collectd')
- collectd = os.path.join(bin_path, "collectd.sh")
- provision_tool(connection, collectd)
+ bin_path = get_nsb_option("bin_path")
+ collectd_path = os.path.join(bin_path, "collectd", "collectd")
+ exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
+ if exit_status != 0:
+            LOG.warning("%s is not present, disabling collectd", collectd_path)
+ # disable auto-provisioning because it requires Internet access
+ # collectd_installer = os.path.join(bin_path, "collectd.sh")
+ # provision_tool(connection, collectd)
+ # http_proxy = os.environ.get('http_proxy', '')
+ # https_proxy = os.environ.get('https_proxy', '')
+ # connection.execute("sudo %s '%s' '%s'" % (
+ # collectd_installer, http_proxy, https_proxy))
+ return
+ LOG.debug("Starting collectd to collect NFVi stats")
self._prepare_collectd_conf(bin_path)
# Reset amqp queue
@@ -211,15 +222,8 @@ class ResourceProfile(object):
connection.execute("sudo rabbitmqctl start_app")
connection.execute("sudo service rabbitmq-server restart")
- # Run collectd
-
- http_proxy = os.environ.get('http_proxy', '')
- https_proxy = os.environ.get('https_proxy', '')
- connection.execute("sudo %s '%s' '%s'" %
- (collectd, http_proxy, https_proxy))
LOG.debug("Start collectd service.....")
- connection.execute(
- "sudo %s" % os.path.join(bin_path, "collectd", "collectd"))
+ connection.execute("sudo %s" % collectd_path)
LOG.debug("Done")
def initiate_systemagent(self, bin_path):
diff --git a/yardstick/network_services/traffic_profile/fixed.py b/yardstick/network_services/traffic_profile/fixed.py
index ebc1e61f2..b7cd03773 100644
--- a/yardstick/network_services/traffic_profile/fixed.py
+++ b/yardstick/network_services/traffic_profile/fixed.py
@@ -16,10 +16,10 @@
from __future__ import absolute_import
from yardstick.network_services.traffic_profile.base import TrafficProfile
-from stl.trex_stl_lib.trex_stl_streams import STLTXCont
-from stl.trex_stl_lib.trex_stl_client import STLStream
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from stl.trex_stl_lib import api as Pkt
+from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib.trex_stl_client import STLStream
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
+from trex_stl_lib import api as Pkt
class FixedProfile(TrafficProfile):
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index b07bc9d5a..a3b803673 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -17,9 +17,9 @@ from __future__ import absolute_import
from __future__ import division
import logging
-from stl.trex_stl_lib.trex_stl_client import STLStream
-from stl.trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from stl.trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib.trex_stl_client import STLStream
+from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
+from trex_stl_lib.trex_stl_streams import STLTXCont
from yardstick.network_services.traffic_profile.traffic_profile \
import TrexProfile
diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/traffic_profile.py
index 3e1f8d89f..7bbe89268 100644
--- a/yardstick/network_services/traffic_profile/traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/traffic_profile.py
@@ -21,16 +21,17 @@ from random import SystemRandom
import six
from yardstick.network_services.traffic_profile.base import TrafficProfile
-from stl.trex_stl_lib.trex_stl_client import STLStream
-from stl.trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from stl.trex_stl_lib.trex_stl_streams import STLTXCont
-from stl.trex_stl_lib.trex_stl_streams import STLProfile
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
-from stl.trex_stl_lib import api as Pkt
+from trex_stl_lib.trex_stl_client import STLStream
+from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
+from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib.trex_stl_streams import STLProfile
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
+from trex_stl_lib import api as Pkt
class TrexProfile(TrafficProfile):
@@ -132,7 +133,7 @@ class TrexProfile(TrafficProfile):
pkt_offset='Ether.dst')
self.vm_flow_vars.append(stl_vm_wr_flow_var)
- def set_src_ip4(self, src_ip4):
+ def set_src_ip4(self, src_ip4, count=1):
""" set source ipv4 address fields """
src_ips = src_ip4.split('-')
min_value = src_ips[0]
@@ -141,12 +142,12 @@ class TrexProfile(TrafficProfile):
src_ip4 = min_value
self._set_ip_fields(src=src_ip4)
else:
- stl_vm_flow_var = STLVmFlowVar(name="ip4_src",
- min_value=min_value,
- max_value=max_value,
- size=4,
- op='random',
- step=1)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_src",
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_src',
pkt_offset='IP.src')
@@ -154,7 +155,7 @@ class TrexProfile(TrafficProfile):
stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
self.vm_flow_vars.append(stl_vm_fix_ipv4)
- def set_dst_ip4(self, dst_ip4):
+ def set_dst_ip4(self, dst_ip4, count=1):
""" set destination ipv4 address fields """
dst_ips = dst_ip4.split('-')
min_value = dst_ips[0]
@@ -163,12 +164,12 @@ class TrexProfile(TrafficProfile):
dst_ip4 = min_value
self._set_ip_fields(dst=dst_ip4)
else:
- stl_vm_flow_var = STLVmFlowVar(name="dst_ip4",
- min_value=min_value,
- max_value=max_value,
- size=4,
- op='random',
- step=1)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="dst_ip4",
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dst_ip4',
pkt_offset='IP.dst')
@@ -240,7 +241,7 @@ class TrexProfile(TrafficProfile):
pkt_offset='IP.tos')
self.vm_flow_vars.append(stl_vm_wr_flow_var)
- def set_src_port(self, src_port):
+ def set_src_port(self, src_port, count=1):
""" set packet source port """
src_ports = str(src_port).split('-')
min_value = int(src_ports[0])
@@ -250,18 +251,18 @@ class TrexProfile(TrafficProfile):
self._set_udp_fields(sport=src_port)
else:
max_value = int(src_ports[1])
- stl_vm_flow_var = STLVmFlowVar(name="port_src",
- min_value=min_value,
- max_value=max_value,
- size=2,
- op='random',
- step=1)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_src",
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_src',
pkt_offset=self.udp_sport)
self.vm_flow_vars.append(stl_vm_wr_flow_var)
- def set_dst_port(self, dst_port):
+ def set_dst_port(self, dst_port, count=1):
""" set packet destnation port """
dst_ports = str(dst_port).split('-')
min_value = int(dst_ports[0])
@@ -271,12 +272,13 @@ class TrexProfile(TrafficProfile):
self._set_udp_fields(dport=dst_port)
else:
max_value = int(dst_ports[1])
- stl_vm_flow_var = STLVmFlowVar(name="port_dst",
- min_value=min_value,
- max_value=max_value,
- size=2,
- op='random',
- step=1)
+ stl_vm_flow_var = \
+ STLVmFlowVarRepeatableRandom(name="port_dst",
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_dst',
pkt_offset=self.udp_dport)
@@ -335,9 +337,9 @@ class TrexProfile(TrafficProfile):
if 'dscp' in outer_l3v4:
self.set_dscp(outer_l3v4['dscp'])
if 'srcip4' in outer_l3v4:
- self.set_src_ip4(outer_l3v4['srcip4'])
+ self.set_src_ip4(outer_l3v4['srcip4'], outer_l3v4['count'])
if 'dstip4' in outer_l3v4:
- self.set_dst_ip4(outer_l3v4['dstip4'])
+ self.set_dst_ip4(outer_l3v4['dstip4'], outer_l3v4['count'])
def set_outer_l3v6_fields(self, outer_l3v6):
""" setup outer l3v6 fields from traffic profile """
@@ -367,9 +369,9 @@ class TrexProfile(TrafficProfile):
def set_outer_l4_fields(self, outer_l4):
""" setup outer l4 fields from traffic profile """
if 'srcport' in outer_l4:
- self.set_src_port(outer_l4['srcport'])
+ self.set_src_port(outer_l4['srcport'], outer_l4['count'])
if 'dstport' in outer_l4:
- self.set_dst_port(outer_l4['dstport'])
+ self.set_dst_port(outer_l4['dstport'], outer_l4['count'])
def generate_imix_data(self, packet_definition):
""" generate packet size for a given traffic profile """
diff --git a/yardstick/network_services/utils.py b/yardstick/network_services/utils.py
index 0264bbc1c..d52e27c15 100644
--- a/yardstick/network_services/utils.py
+++ b/yardstick/network_services/utils.py
@@ -30,7 +30,10 @@ OPTS = [
help='bin_path for VNFs location.'),
cfg.StrOpt('trex_path',
default=os.path.join(NSB_ROOT, 'trex/scripts'),
- help='trex automation lib pathh.'),
+ help='trex automation lib path.'),
+ cfg.StrOpt('trex_client_lib',
+ default=os.path.join(NSB_ROOT, 'trex_client/stl'),
+ help='trex python library path.'),
]
CONF.register_opts(OPTS, group="nsb")
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 0434f6aef..7a756837e 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -37,9 +37,9 @@ from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
from yardstick.network_services.utils import get_nsb_option
-from stl.trex_stl_lib.trex_stl_client import STLClient
-from stl.trex_stl_lib.trex_stl_client import LoggerApi
-from stl.trex_stl_lib.trex_stl_exceptions import STLError
+from trex_stl_lib.trex_stl_client import STLClient
+from trex_stl_lib.trex_stl_client import LoggerApi
+from trex_stl_lib.trex_stl_exceptions import STLError
from yardstick.ssh import AutoConnectSSH
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index d94a9a6e6..15c9c0e1d 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -98,7 +98,7 @@ class TrexRfcResourceHelper(TrexResourceHelper):
def collect_kpi(self):
self.rfc2544_helper.iteration.value += 1
- super(TrexRfcResourceHelper, self).collect_kpi()
+ return super(TrexRfcResourceHelper, self).collect_kpi()
class TrexTrafficGenRFC(TrexTrafficGen):
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index c21a47473..8c7b1e429 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -265,7 +265,7 @@ name (i.e. %s).\
self.resources[name]['properties']['mountpoint'] = mountpoint
def add_network(self, name, physical_network='physnet1', provider=None,
- segmentation_id=None, port_security_enabled=None):
+ segmentation_id=None, port_security_enabled=None, network_type=None):
"""add to the template a Neutron Net"""
log.debug("adding Neutron::Net '%s'", name)
if provider is None:
@@ -280,12 +280,14 @@ name (i.e. %s).\
'type': 'OS::Neutron::ProviderNet',
'properties': {
'name': name,
- 'network_type': 'vlan',
+ 'network_type': 'flat' if network_type is None else network_type,
'physical_network': physical_network,
},
}
if segmentation_id:
self.resources[name]['properties']['segmentation_id'] = segmentation_id
+ if network_type is None:
+ self.resources[name]['properties']['network_type'] = 'vlan'
# if port security is not defined then don't add to template:
# some deployments don't have port security plugin installed
if port_security_enabled is not None:
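
With the new network_type parameter, a provider network defaults to 'flat'; if no type is given but a segmentation_id is, it falls back to the previous 'vlan' behaviour. A sketch of the resulting resource properties, assuming the HeatTemplate class defined in this module (resource name and physnet are placeholders):

    template = HeatTemplate('sample')     # name is a placeholder
    template.add_network('provider-net', physical_network='physnet2',
                         provider=True, network_type='vlan', segmentation_id=1000)
    # template.resources['provider-net']['properties'] ->
    # {'name': 'provider-net', 'network_type': 'vlan',
    #  'physical_network': 'physnet2', 'segmentation_id': 1000}
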
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 8ac3eaa3a..a024cf64a 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -423,6 +423,12 @@ class SSH(object):
if mode is not None:
sftp.chmod(remotepath, mode)
+ def get_file_obj(self, remotepath, file_obj):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.getfo(remotepath, file_obj)
+
class AutoConnectSSH(SSH):
@@ -471,6 +477,10 @@ class AutoConnectSSH(SSH):
self._connect()
return super(AutoConnectSSH, self).put_file_obj(file_obj, remote_path, mode)
+ def get_file_obj(self, remote_path, file_obj):
+ self._connect()
+ return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
def provision_tool(self, tool_path, tool_file=None):
self._connect()
return super(AutoConnectSSH, self).provision_tool(tool_path, tool_file)