-rw-r--r--  INFO | 1
-rw-r--r--  INFO.yaml | 84
-rw-r--r--  ansible/infra_deploy.yml | 3
-rw-r--r--  ansible/nsb_setup.yml | 24
-rw-r--r--  ansible/roles/create_samplevnfs_image/tasks/main.yml | 2
-rw-r--r--  ansible/roles/infra_create_network/tasks/create_xml.yaml | 106
-rw-r--r--  ansible/roles/infra_create_network/tasks/main.yml | 22
-rw-r--r--  ansible/roles/infra_create_vms/tasks/configure_vm.yml | 342
-rw-r--r--  ansible/roles/infra_create_vms/tasks/create_interfaces.yml | 65
-rw-r--r--  ansible/roles/infra_create_vms/tasks/main.yml | 28
-rw-r--r--  ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml | 48
-rw-r--r--  ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml | 29
-rw-r--r--  ansible/roles/infra_destroy_previous_configuration/tasks/main.yml | 40
-rw-r--r--  ansible/roles/install_dpdk/tasks/main.yml | 8
-rw-r--r--  ansible/roles/install_trex/tasks/main.yml | 3
-rw-r--r--  dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json | 5817
-rw-r--r--  docker/Dockerfile | 2
-rw-r--r--  docker/Dockerfile.aarch64.patch | 2
-rwxr-xr-x  docs/testing/developer/devguide/devguide_nsb_prox.rst | 1226
-rw-r--r--  docs/testing/developer/devguide/images/PROX_BNG_QOS.png | bin 0 -> 134443 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Baremetal_config.png | bin 0 -> 89189 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png | bin 0 -> 83907 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Gen_GUI.png | bin 0 -> 236854 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png | bin 0 -> 105591 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Hardware_Arch.png | bin 0 -> 322529 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png | bin 0 -> 11178 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png | bin 0 -> 189101 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png | bin 0 -> 143152 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_SUT_GUI.png | bin 0 -> 150147 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Software_Arch.png | bin 0 -> 38458 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Test_BM_Script.png | bin 0 -> 76705 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png | bin 0 -> 90040 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Traffic_profile.png | bin 0 -> 95758 bytes
-rw-r--r--  docs/testing/developer/devguide/images/PROX_Yardstick_config.png | bin 0 -> 86567 bytes
-rw-r--r--  docs/testing/user/userguide/04-installation.rst | 6
-rw-r--r--  docs/testing/user/userguide/15-list-of-tcs.rst | 3
-rw-r--r--  docs/testing/user/userguide/opnfv_yardstick_tc056.rst | 2
-rw-r--r--  etc/infra/infra_deploy.yaml.sample | 9
-rwxr-xr-x  nsb_setup.sh | 2
-rw-r--r--  requirements.txt | 1
-rw-r--r--  samples/storage_bottlenecks.yaml | 77
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg | 10
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg | 16
-rw-r--r--  samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml | 4
-rw-r--r--  samples/vnf_samples/nsut/prox/tc_prox_baremetal_lw_aftr-4.yaml | 2
-rw-r--r--  samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml | 18
-rw-r--r--  samples/vnf_samples/traffic_profiles/prox_binsearch.yaml | 6
-rw-r--r--  samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml | 48
-rw-r--r--  samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml | 48
-rw-r--r--  samples/vnf_samples/vnf_descriptors/prox_vnf.yaml (renamed from samples/vnf_samples/vnf_descriptors/prox_vnf-1.yaml) | 0
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml | 46
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml | 47
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_prox_tpl.yaml (renamed from samples/vnf_samples/vnf_descriptors/tg_prox_tpl-2.yaml) | 0
-rwxr-xr-x  setup.py | 1
-rw-r--r--  test-requirements.txt | 3
-rwxr-xr-x  tests/ci/prepare_env.sh | 27
-rw-r--r--  tests/unit/benchmark/core/test_plugin.py | 102
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py | 34
-rw-r--r--  tests/unit/benchmark/scenarios/lib/test_delete_network.py | 36
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 244
-rw-r--r--  tests/unit/benchmark/scenarios/test_base.py | 53
-rw-r--r--  tests/unit/network_services/helpers/test_dpdkbindnic_helper.py | 2
-rw-r--r--  tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 58
-rw-r--r--  tests/unit/network_services/traffic_profile/test_prox_binsearch.py | 94
-rw-r--r--  tests/unit/network_services/traffic_profile/test_traffic_profile.py | 237
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py | 2
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py | 16
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py | 26
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py | 10
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 53
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py | 11
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py | 12
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py | 15
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py | 7
-rwxr-xr-x  tools/virt_ci_rampup.sh | 2
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 6
-rw-r--r--  yardstick/benchmark/contexts/standalone/model.py | 6
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py | 23
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py | 13
-rw-r--r--  yardstick/benchmark/core/task.py | 25
-rwxr-xr-x  yardstick/benchmark/runners/base.py | 24
-rw-r--r--  yardstick/benchmark/scenarios/base.py | 36
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_floating_ip.py | 26
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_network.py | 9
-rw-r--r--  yardstick/common/ansible_common.py | 5
-rw-r--r--  yardstick/common/constants.py | 11
-rw-r--r--  yardstick/common/exceptions.py | 13
-rw-r--r--  yardstick/common/openstack_utils.py | 193
-rw-r--r--  yardstick/dispatcher/influxdb.py | 34
-rw-r--r--  yardstick/network_services/helpers/dpdkbindnic_helper.py | 24
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 27
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 51
-rw-r--r--  yardstick/network_services/traffic_profile/traffic_profile.py | 92
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py | 42
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py | 26
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 18
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py | 39
-rw-r--r--  yardstick/orchestrator/heat.py | 247
-rwxr-xr-x  yardstick/resources/scripts/install/ovs_deploy.bash | 23
-rw-r--r--  yardstick/tests/fixture.py | 47
-rw-r--r--  yardstick/tests/unit/benchmark/__init__.py (renamed from tests/unit/benchmark/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/__init__.py (renamed from tests/unit/benchmark/contexts/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml (renamed from tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml (renamed from tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml (renamed from tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_sample.yaml (renamed from tests/unit/benchmark/contexts/nodes_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_sample_new.yaml (renamed from tests/unit/benchmark/contexts/nodes_sample_new.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml (renamed from tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml (renamed from tests/unit/benchmark/contexts/nodes_sample_ovs.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml (renamed from tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/__init__.py (renamed from tests/unit/benchmark/contexts/standalone/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/nodes_duplicate_sample.yaml (renamed from tests/unit/benchmark/contexts/standalone/nodes_duplicate_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/nodes_ovs_dpdk_sample.yaml (renamed from tests/unit/benchmark/contexts/standalone/nodes_ovs_dpdk_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/nodes_sample.yaml (renamed from tests/unit/benchmark/contexts/standalone/nodes_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/nodes_sriov_sample.yaml (renamed from tests/unit/benchmark/contexts/standalone/nodes_sriov_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_model.py (renamed from tests/unit/benchmark/contexts/standalone/test_model.py) | 83
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py (renamed from tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py) | 105
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py (renamed from tests/unit/benchmark/contexts/standalone/test_sriov.py) | 72
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone_duplicate_sample.yaml (renamed from tests/unit/benchmark/contexts/standalone_duplicate_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone_sample.yaml (renamed from tests/unit/benchmark/contexts/standalone_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_dummy.py (renamed from tests/unit/benchmark/contexts/test_dummy.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_heat.py (renamed from tests/unit/benchmark/contexts/test_heat.py) | 52
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_kubernetes.py (renamed from tests/unit/benchmark/contexts/test_kubernetes.py) | 121
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_model.py (renamed from tests/unit/benchmark/contexts/test_model.py) | 2
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_node.py (renamed from tests/unit/benchmark/contexts/test_node.py) | 5
-rw-r--r--  yardstick/tests/unit/benchmark/core/__init__.py (renamed from tests/unit/benchmark/core/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml (renamed from tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml (renamed from tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_plugin.py | 142
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_report.py (renamed from tests/unit/benchmark/core/test_report.py) | 4
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py (renamed from tests/unit/benchmark/core/test_task.py) | 139
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_testcase.py (renamed from tests/unit/benchmark/core/test_testcase.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml (renamed from tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml (renamed from tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/runner/__init__.py (renamed from tests/unit/benchmark/runner/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_base.py (renamed from tests/unit/benchmark/runner/test_base.py) | 53
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_search.py (renamed from tests/unit/benchmark/runner/test_search.py) | 31
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/__init__.py (renamed from tests/unit/benchmark/scenarios/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/__init__.py (renamed from tests/unit/benchmark/scenarios/availability/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py (renamed from tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py) | 8
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_general.py (renamed from tests/unit/benchmark/scenarios/availability/test_attacker_general.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_process.py (renamed from tests/unit/benchmark/scenarios/availability/test_attacker_process.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py (renamed from tests/unit/benchmark/scenarios/availability/test_basemonitor.py) | 28
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_baseoperation.py (renamed from tests/unit/benchmark/scenarios/availability/test_baseoperation.py) | 38
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py (renamed from tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py) | 54
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_director.py (renamed from tests/unit/benchmark/scenarios/availability/test_director.py) | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_command.py (renamed from tests/unit/benchmark/scenarios/availability/test_monitor_command.py) | 55
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_general.py (renamed from tests/unit/benchmark/scenarios/availability/test_monitor_general.py) | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py (renamed from tests/unit/benchmark/scenarios/availability/test_monitor_multi.py) | 12
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py (renamed from tests/unit/benchmark/scenarios/availability/test_monitor_process.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_operation_general.py (renamed from tests/unit/benchmark/scenarios/availability/test_operation_general.py) | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py (renamed from tests/unit/benchmark/scenarios/availability/test_result_checker_general.py) | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py (renamed from tests/unit/benchmark/scenarios/availability/test_scenario_general.py) | 52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py (renamed from tests/unit/benchmark/scenarios/availability/test_serviceha.py) | 35
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_util.py (renamed from tests/unit/benchmark/scenarios/availability/test_util.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/__init__.py (renamed from tests/unit/benchmark/scenarios/compute/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt (renamed from tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt (renamed from tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt (renamed from tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt (renamed from tests/unit/benchmark/scenarios/compute/memload_sample_output.txt) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py (renamed from tests/unit/benchmark/scenarios/compute/test_cachestat.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py (renamed from tests/unit/benchmark/scenarios/compute/test_computecapacity.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py (renamed from tests/unit/benchmark/scenarios/compute/test_cpuload.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py (renamed from tests/unit/benchmark/scenarios/compute/test_cyclictest.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py (renamed from tests/unit/benchmark/scenarios/compute/test_lmbench.py) | 4
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py (renamed from tests/unit/benchmark/scenarios/compute/test_memload.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py (renamed from tests/unit/benchmark/scenarios/compute/test_plugintest.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py (renamed from tests/unit/benchmark/scenarios/compute/test_qemumigrate.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py (renamed from tests/unit/benchmark/scenarios/compute/test_ramspeed.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py (renamed from tests/unit/benchmark/scenarios/compute/test_spec_cpu.py) | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py (renamed from tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py (renamed from tests/unit/benchmark/scenarios/compute/test_unixbench.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/dummy/__init__.py (renamed from tests/unit/benchmark/scenarios/dummy/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/dummy/test_dummy.py (renamed from tests/unit/benchmark/scenarios/dummy/test_dummy.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/__init__.py (renamed from tests/unit/benchmark/scenarios/lib/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_add_memory_load.py (renamed from tests/unit/benchmark/scenarios/lib/test_add_memory_load.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py (renamed from tests/unit/benchmark/scenarios/lib/test_attach_volume.py) | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_check_connectivity.py (renamed from tests/unit/benchmark/scenarios/lib/test_check_connectivity.py) | 14
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_check_numa_info.py (renamed from tests/unit/benchmark/scenarios/lib/test_check_numa_info.py) | 7
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py (renamed from tests/unit/benchmark/scenarios/lib/test_check_value.py) | 24
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_flavor.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_flavor.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py | 58
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_image.py) | 30
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_keypair.py) | 17
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_network.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_network.py) | 9
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_port.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_port.py) | 7
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_router.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_router.py) | 9
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_sec_group.py) | 9
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_server.py) | 8
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_subnet.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_subnet.py) | 13
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py (renamed from tests/unit/benchmark/scenarios/lib/test_create_volume.py) | 41
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_flavor.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_flavor.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_image.py) | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_keypair.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py | 49
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_port.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_port.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_router.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_gateway.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_router_gateway.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_server.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py (renamed from tests/unit/benchmark/scenarios/lib/test_delete_volume.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py (renamed from tests/unit/benchmark/scenarios/lib/test_detach_volume.py) | 1
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py (renamed from tests/unit/benchmark/scenarios/lib/test_get_flavor.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_migrate_target_host.py (renamed from tests/unit/benchmark/scenarios/lib/test_get_migrate_target_host.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_numa_info.py (renamed from tests/unit/benchmark/scenarios/lib/test_get_numa_info.py) | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py (renamed from tests/unit/benchmark/scenarios/lib/test_get_server.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_server_ip.py (renamed from tests/unit/benchmark/scenarios/lib/test_get_server_ip.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/__init__.py (renamed from tests/unit/benchmark/scenarios/networking/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/imix_voice.yaml (renamed from tests/unit/benchmark/scenarios/networking/imix_voice.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/iperf3_sample_output.json (renamed from tests/unit/benchmark/scenarios/networking/iperf3_sample_output.json) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/iperf3_sample_output_udp.json (renamed from tests/unit/benchmark/scenarios/networking/iperf3_sample_output_udp.json) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/ipv4_1flow_Packets_vpe.yaml (renamed from tests/unit/benchmark/scenarios/networking/ipv4_1flow_Packets_vpe.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/ipv4_throughput_vpe.yaml (renamed from tests/unit/benchmark/scenarios/networking/ipv4_throughput_vpe.yaml) | 0
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json (renamed from tests/unit/benchmark/scenarios/networking/netperf_sample_output.json) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/netutilization_sample_output1.txt (renamed from tests/unit/benchmark/scenarios/networking/netutilization_sample_output1.txt) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/netutilization_sample_output2.txt (renamed from tests/unit/benchmark/scenarios/networking/netutilization_sample_output2.txt) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py (renamed from tests/unit/benchmark/scenarios/networking/test_iperf3.py) | 0
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py (renamed from tests/unit/benchmark/scenarios/networking/test_netperf.py) | 0
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py (renamed from tests/unit/benchmark/scenarios/networking/test_netperf_node.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_netutilization.py (renamed from tests/unit/benchmark/scenarios/networking/test_netutilization.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py (renamed from tests/unit/benchmark/scenarios/networking/test_networkcapacity.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_nstat.py (renamed from tests/unit/benchmark/scenarios/networking/test_nstat.py) | 9
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py (renamed from tests/unit/benchmark/scenarios/networking/test_ping.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py (renamed from tests/unit/benchmark/scenarios/networking/test_ping6.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py (renamed from tests/unit/benchmark/scenarios/networking/test_pktgen.py) | 149
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py (renamed from tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py) | 8
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py (renamed from tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py) | 6
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_sfc.py (renamed from tests/unit/benchmark/scenarios/networking/test_sfc.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py (renamed from tests/unit/benchmark/scenarios/networking/test_vnf_generic.py) | 63
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py (renamed from tests/unit/benchmark/scenarios/networking/test_vsperf.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 221
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/tg_trex_tpl.yaml (renamed from tests/unit/benchmark/scenarios/networking/tg_trex_tpl.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml (renamed from tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/parser/__init__.py (renamed from tests/unit/benchmark/scenarios/parser/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/parser/test_parser.py (renamed from tests/unit/benchmark/scenarios/parser/test_parser.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/__init__.py (renamed from tests/unit/benchmark/scenarios/storage/__init__.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/fio_read_sample_output.json (renamed from tests/unit/benchmark/scenarios/storage/fio_read_sample_output.json) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/fio_rw_sample_output.json (renamed from tests/unit/benchmark/scenarios/storage/fio_rw_sample_output.json) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/fio_write_sample_output.json (renamed from tests/unit/benchmark/scenarios/storage/fio_write_sample_output.json) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_bonnie.py (renamed from tests/unit/benchmark/scenarios/storage/test_bonnie.py) | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py (renamed from tests/unit/benchmark/scenarios/storage/test_fio.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py (renamed from tests/unit/benchmark/scenarios/storage/test_storagecapacity.py) | 0
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py (renamed from tests/unit/benchmark/scenarios/storage/test_storperf.py) | 18
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/test_base.py | 106
-rw-r--r--  yardstick/tests/unit/common/test_openstack_utils.py | 57
-rw-r--r--  yardstick/tests/unit/orchestrator/test_heat.py | 584
250 files changed, 10393 insertions, 2576 deletions
diff --git a/INFO b/INFO
index 35b282845..1a49af295 100644
--- a/INFO
+++ b/INFO
@@ -22,6 +22,7 @@ ross.b.brattain@intel.com
chenjiankun1@huawei.com
rodolfo.alonso.hernandez@intel.com
emma.l.foley@intel.com
+abhijit.sinha@intel.com
Link to TSC approval: http://meetbot.opnfv.org/meetings/
Link to approval of additional submitters:
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 000000000..730cd4a6b
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,84 @@
+---
+project: 'Test framework for verifying infrastructure compliance (yardstick)'
+project_creation_date: 'April 28th, 2015'
+project_category: 'Integration & Testing'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_yardstick_ptl
+ name: 'Ross Brattain'
+ email: 'ross.b.brattain@intel.com'
+ id: 'rbbratta'
+ company: 'intel.com'
+ timezone: 'PST'
+primary_contact: *opnfv_yardstick_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/Yardstick'
+ key: 'Yardstick'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[yardstick]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-yardstick'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: 'https://wiki.opnfv.org/display/yardstick/Yardstick+Meetings'
+ url: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-yardstick'
+ repeats: 'weekly'
+ time: '08:30 UTC'
+repositories:
+ - 'yardstick'
+committers:
+ - <<: *opnfv_yardstick_ptl
+ - name: 'Jörgen Karlsson'
+ email: 'jorgen.w.karlsson@ericsson.com'
+ company: 'ericsson.com'
+ id: 'jnon'
+ - name: 'Kubi'
+ email: 'jean.gaoliang@huawei.com'
+ company: 'huawei.com'
+ id: 'kubi'
+ - name: 'Rex Lee'
+ email: 'limingjiang@huawei.com'
+ company: 'huawei.com'
+ id: 'rexlee8776'
+ - name: 'Jing Lu'
+ email: 'lvjing5@huawei.com'
+ company: 'huawei.com'
+ id: 'JingLu5'
+ - name: 'zhihui wu'
+ email: 'wu.zhihui1@zte.com.cn'
+ company: 'zte.com.cn'
+ id: 'wu.zhihui'
+ - name: 'Trevor Cooper'
+ email: 'trevor.cooper@intel.com'
+ company: 'intel.com'
+ id: 'trev'
+ - name: 'Jack Chan'
+ email: 'chenjiankun1@huawei.com'
+ company: 'huawei.com'
+ id: 'chenjiankun'
+ - name: 'Emma Foley'
+ email: 'emma.l.foley@intel.com'
+ company: 'intel.com'
+ id: 'elfoley'
+ - name: 'Rodolfo Alonso Hernandez'
+ email: 'rodolfo.alonso.hernandez@intel.com'
+ company: 'intel.com'
+ id: 'rodolfo.ah'
+ - name: 'Kanglin Yin'
+ email: '14_ykl@tongji.edu.cn'
+ company: 'tongji.edu.cn'
+ id: 'tjuyinkanglin'
+ - name: 'Abhijit Sinha'
+ email: 'abhijit.sinha@intel.com'
+ company: 'intel.com'
+ id: 'abhijitsinha'
+tsc:
+ # yamllint disable rule:line-length
+ approval: 'http://meetbot.opnfv.org/meetings/'
+ # yamllint enable rule:line-length
diff --git a/ansible/infra_deploy.yml b/ansible/infra_deploy.yml
index 10f53fbad..4ad21af00 100644
--- a/ansible/infra_deploy.yml
+++ b/ansible/infra_deploy.yml
@@ -16,3 +16,6 @@
roles:
- infra_check_requirements
+ - infra_destroy_previous_configuration
+ - infra_create_network
+ - infra_create_vms
diff --git a/ansible/nsb_setup.yml b/ansible/nsb_setup.yml
index bfe5d2349..98a59f984 100644
--- a/ansible/nsb_setup.yml
+++ b/ansible/nsb_setup.yml
@@ -12,18 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-#- name: Prepare baremetal machine
-# include: ubuntu_server_baremetal_deploy_samplevnfs.yml
-# vars:
-# YARD_IMG_ARCH: amd64
-#
-#- name: Install jumphost dependencies and configure docker
-# hosts: jumphost
-# environment:
-# "{{ proxy_env }}"
-# roles:
-# - install_dependencies
-# - docker
+- name: Prepare baremetal machine
+ include: ubuntu_server_baremetal_deploy_samplevnfs.yml
+ vars:
+ YARD_IMG_ARCH: amd64
+
+- name: Install jumphost dependencies and configure docker
+ hosts: jumphost
+ environment:
+ "{{ proxy_env }}"
+ roles:
+ - install_dependencies
+ - docker
- name: "handle all openstack stuff when: openrc_file is defined"
include: prepare_openstack.yml
diff --git a/ansible/roles/create_samplevnfs_image/tasks/main.yml b/ansible/roles/create_samplevnfs_image/tasks/main.yml
index c83cccab5..ab7371a12 100644
--- a/ansible/roles/create_samplevnfs_image/tasks/main.yml
+++ b/ansible/roles/create_samplevnfs_image/tasks/main.yml
@@ -19,6 +19,6 @@
is_public: yes
disk_format: qcow2
container_format: bare
- filename: "{{ raw_imgfile }}"
+ filename: "{{ imgfile }}"
properties:
hw_vif_multiqueue_enabled: true
diff --git a/ansible/roles/infra_create_network/tasks/create_xml.yaml b/ansible/roles/infra_create_network/tasks/create_xml.yaml
new file mode 100644
index 000000000..a01c7974a
--- /dev/null
+++ b/ansible/roles/infra_create_network/tasks/create_xml.yaml
@@ -0,0 +1,106 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define XML file name
+ set_fact:
+ xml_file: "{{ '/tmp/'+item.name+'.xml' }}"
+
+- name: Delete the file, if exists
+ file:
+ path: "{{ xml_file }}"
+ state: absent
+
+- name: Create a new empty file
+ file:
+ path: "{{ xml_file }}"
+ state: touch
+
+- name: Add root "network" node
+ blockinfile:
+ path: "{{ xml_file }}"
+ marker: ""
+ content: |
+ <network>
+ </network>
+
+- name: Add new children nodes to "network" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network
+ add_children:
+ - name: "{{ item.name }}"
+ - bridge
+ - ip
+ pretty_print: yes
+
+- name: Add "name" attribute to "bridge" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/bridge
+ attribute: name
+ value: "{{ item.name }}"
+ pretty_print: yes
+
+- name: Add "stp" attribute to "bridge" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/bridge
+ attribute: stp
+ value: "on"
+ pretty_print: yes
+
+- name: Add "delay" attribute to "bridge" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/bridge
+ attribute: delay
+ value: "0"
+ pretty_print: yes
+
+- name: Add "address" attribute to "ip" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/ip
+ attribute: address
+ value: "{{ item.host_ip }}"
+ pretty_print: yes
+
+- name: Add "netmask" attribute to "ip" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/ip
+ attribute: netmask
+ value: "{{ item.netmask }}"
+ pretty_print: yes
+
+- name: Define the networks
+ virt_net:
+ command: define
+ name: "{{ item.name }}"
+ xml: "{{ lookup('file', xml_file) }}"
+
+- name: Set autostart to yes
+ virt_net:
+ autostart: yes
+ name: "{{ item.name }}"
+
+- name: Start the networks
+ virt_net:
+ command: start
+ name: "{{ item.name }}"
+
+- name: Remove XML file
+ file:
+ path: "{{ xml_file }}"
+ state: absent
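
For reference, the create_xml.yaml tasks above render a libvirt network definition of roughly the following shape before it is passed to virt_net (the network name and addresses below are illustrative placeholders, not values from this patch):

    <network>
      <name>example-net</name>
      <bridge name="example-net" stp="on" delay="0"/>
      <ip address="192.168.110.1" netmask="255.255.255.0"/>
    </network>
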
diff --git a/ansible/roles/infra_create_network/tasks/main.yml b/ansible/roles/infra_create_network/tasks/main.yml
new file mode 100644
index 000000000..eba4a3a49
--- /dev/null
+++ b/ansible/roles/infra_create_network/tasks/main.yml
@@ -0,0 +1,22 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include
+ include_vars:
+ file: "{{ rs_file }}"
+ name: infra_deploy_vars
+
+- name: Create XML file
+ include_tasks: create_xml.yaml
+ with_items: "{{ infra_deploy_vars.networks }}"
diff --git a/ansible/roles/infra_create_vms/tasks/configure_vm.yml b/ansible/roles/infra_create_vms/tasks/configure_vm.yml
new file mode 100644
index 000000000..10201cf2a
--- /dev/null
+++ b/ansible/roles/infra_create_vms/tasks/configure_vm.yml
@@ -0,0 +1,342 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Remove directory
+ file:
+ path: "{{ '/tmp/'+node_item.hostname }}"
+ state: absent
+
+- name: Create directory
+ file:
+ path: "{{ '/tmp/'+node_item.hostname }}"
+ state: directory
+ mode: 0755
+
+- name: Define user-data file name
+ set_fact:
+ user_data: "{{ '/tmp/'+node_item.hostname+'/user-data' }}"
+
+- name: Define image-dir
+ set_fact:
+ image_dir: "{{ '/var/lib/libvirt/images/' }}"
+
+- name: Create a new empty file for user-data
+ file:
+ path: "{{ user_data }}"
+ state: touch
+
+- name: Add user-data
+ blockinfile:
+ path: "{{ user_data }}"
+ marker: "MARKER"
+ content: |
+ #cloud-config
+ preserve_hostname: False
+ hostname: {{ node_item.hostname }}
+ output:
+ all: ">> /var/log/cloud-init.log"
+ ssh_pwauth: True
+ bootcmd:
+ - echo 127.0.0.1 {{ node_item.hostname }} >> /etc/hosts
+ users:
+ - name: {{ node_item.user }}
+ lock-passwd: False
+ plain_text_passwd: {{ node_item.password }}
+ chpasswd: { expire: False }
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ ssh_pwauth: True
+
+- name: Remove the marker
+ lineinfile:
+ dest: "{{ user_data }}"
+ state: absent
+ regexp: "MARKER"
+
+- name: Define network-config file name
+ set_fact:
+ network_config: "{{ '/tmp/'+node_item.hostname+'/network-config' }}"
+
+- name: Create a new empty file for network-config
+ file:
+ path: "{{ network_config }}"
+ state: touch
+
+- name: Add network-data
+ blockinfile:
+ path: "{{ network_config }}"
+ marker: "MARKER"
+ content: |
+ version: 2
+ ethernets:
+
+- name: Define meta-data file name
+ set_fact:
+ meta_data: "{{ '/tmp/'+node_item.hostname+'/meta-data' }}"
+
+- name: Create a new empty file for meta-data
+ file:
+ path: "{{ meta_data }}"
+ state: touch
+
+- name: Add meta-data
+ blockinfile:
+ path: "{{ meta_data }}"
+ marker: "MARKER"
+ content: |
+ instance-id: {{ node_item.hostname }}
+ local-hostname: {{ node_item.hostname }}
+
+- name: Remove the marker
+ lineinfile:
+ dest: "{{ meta_data }}"
+ state: absent
+ regexp: "MARKER"
+
+- name: Define xml file name
+ set_fact:
+ xml_file: "{{ '/tmp/'+node_item.hostname+'/'+node_item.hostname+'.xml' }}"
+
+- name: Create a new empty file for xml file
+ file:
+ path: "{{ xml_file }}"
+ state: touch
+
+- name: Add root "domain" node
+ blockinfile:
+ path: "{{ xml_file }}"
+ marker: ""
+ content: |
+ <domain>
+ </domain>
+
+- name: Add "type" attribute to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain
+ attribute: type
+ value: "kvm"
+ pretty_print: yes
+
+- name: Add new children nodes to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain
+ add_children:
+ - name: "{{ node_item.hostname }}"
+ - memory: "{{ node_item.ram }}"
+ - vcpu: "{{ node_item.vcpus }}"
+ - os
+ - cpu
+ - devices
+ pretty_print: yes
+
+- name: Add "unit" attribute to "memory" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/memory
+ attribute: unit
+ value: "MB"
+ pretty_print: yes
+
+- name: Add "placement" attribute to "vcpu" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/vcpu
+ attribute: placement
+ value: "static"
+ pretty_print: yes
+
+- name: Add new children nodes to "os" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/os
+ add_children:
+ - type: "hvm"
+ - boot
+ pretty_print: yes
+
+- name: Add "arch" attribute to "type" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/os/type
+ attribute: arch
+ value: "x86_64"
+ pretty_print: yes
+
+- name: Add "dev" attribute to "boot" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/os/boot
+ attribute: dev
+ value: "hd"
+ pretty_print: yes
+
+- name: Add new children nodes to "cpu" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/cpu
+ add_children:
+ - cache
+ pretty_print: yes
+
+- name: Add "mode" attribute to "cpu" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/cpu
+ attribute: mode
+ value: "host-passthrough"
+ pretty_print: yes
+
+- name: Add "mode" attribute to "cache" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/cpu/cache
+ attribute: mode
+ value: "passthrough"
+ pretty_print: yes
+
+- name: Add new children nodes to "devices" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices
+ add_children:
+ - disk:
+ type: file
+ device: disk
+ - controller:
+ type: virtio-serial
+ index: '0'
+ - serial:
+ type: pty
+ - console:
+ type: pty
+ tty: '/dev/pts/14'
+ pretty_print: yes
+
+- name: Add new children nodes to "disk" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/disk
+ add_children:
+ - driver:
+ name: qemu
+ type: qcow2
+ - source:
+ file: "{{ '/var/lib/libvirt/images/'+node_item.hostname+'.qcow2' }}"
+ - target:
+ dev: vda
+ bus: virtio
+ - alias:
+ name: virtio-disk0
+ pretty_print: yes
+
+- name: Add new children nodes to "devices" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices
+ add_children:
+ - disk:
+ type: file
+ device: cdrom
+ pretty_print: yes
+
+- name: Add new children nodes to "disk" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/disk
+ add_children:
+ - source:
+ file: "{{ '/var/lib/libvirt/images/'+node_item.hostname+'-ci-data.img' }}"
+ - target:
+ dev: hdb
+ bus: ide
+ - readonly
+ pretty_print: yes
+
+- name: Configure controller
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/controller
+ add_children:
+ - alias:
+ name: virtio-serial0
+ pretty_print: yes
+
+- name: Configure serial
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/serial
+ add_children:
+ - source:
+ path: '/dev/pts/14'
+ - target:
+ port: '0'
+ - alias:
+ name: 'serial0'
+ pretty_print: yes
+
+- name: Configure console
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/console
+ add_children:
+ - source:
+ path: '/dev/pts/14'
+ - target:
+ port: '0'
+ type: 'serial'
+ - alias:
+ name: 'serial0'
+ pretty_print: yes
+
+- set_fact:
+ slot_address: 5
+
+- name: Populate network-config and add interface to xml file
+ include_tasks: create_interfaces.yml
+ extra_vars: "{{ network_config, xml_file , slot_address, mac_address_counter }}"
+ loop_control:
+ loop_var: interface_item
+ with_items: "{{ node_item.interfaces }}"
+
+- name: Create directory
+ file:
+ path: "{{ '/tmp/'+node_item.hostname }}"
+ state: directory
+ mode: 0755
+
+- name: Generate iso image
+ shell: >
+ genisoimage -output {{ image_dir+node_item.hostname+'-ci-data.img' }} -volid cidata -joliet
+ -r {{ '/tmp/'+node_item.hostname+'/network-config' }} {{ '/tmp/'+node_item.hostname+'/user-data' }} {{ '/tmp/'+node_item.hostname+'/meta-data' }}
+ &>> {{ '/tmp/'+node_item.hostname+'/hostname.log' }}
+
+- name: Copy and convert the ubuntu image
+ shell: >
+ qemu-img convert -O qcow2 {{ node_item.image }} {{ image_dir+node_item.hostname+'.qcow2' }}
+
+- name: Resize the ubuntu image
+ shell: >
+ qemu-img resize {{ image_dir+node_item.hostname+'.qcow2' }} {{ node_item.disk }}MB
+
+- name: Define the VMs
+ virt:
+ command: define
+ name: "{{ node_item.hostname }}"
+ xml: "{{ lookup('file', '/tmp/'+node_item.hostname+'/'+node_item.hostname+'.xml') }}"
+
+- name: Start the VMs
+ virt:
+ command: create
+ name: "{{ node_item.hostname }}"
diff --git a/ansible/roles/infra_create_vms/tasks/create_interfaces.yml b/ansible/roles/infra_create_vms/tasks/create_interfaces.yml
new file mode 100644
index 000000000..124421b56
--- /dev/null
+++ b/ansible/roles/infra_create_vms/tasks/create_interfaces.yml
@@ -0,0 +1,65 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Add network-data
+ blockinfile:
+ path: "{{ network_config }}"
+ insertafter: 'ethernets:'
+ marker: "MARKER"
+ block: |2
+ {{ 'enp0s%d:'| format( slot_address | int) }}
+ match:
+ mac_address: {{ '52:54:00:5d:7d:%02x'| format( mac_address_counter | int) }}
+ addresses:
+ - {{ interface_item.ip }}/{{ interface_item.netmask }}
+
+
+- name: Remove the marker introduced in network-data
+ lineinfile:
+ dest: "{{ network_config }}"
+ state: absent
+ regexp: "MARKER"
+
+- name: Add new children nodes to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices
+ add_children:
+ - interface:
+ type: 'bridge'
+ pretty_print: yes
+
+- name: Add new children nodes to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/interface
+ add_children:
+ - source:
+ bridge: "{{ interface_item.network }}"
+ - model:
+ type: 'virtio'
+ - address:
+ type: 'pci'
+ domain: '0x0000'
+ bus: '0x00'
+ slot: "{{ '0x%02x'| format( slot_address | int) }}"
+ function: '0x0'
+ - mac:
+ address: "{{ '52:54:00:5d:7d:%02x'| format( mac_address_counter | int) }}"
+ pretty_print: yes
+
+- set_fact:
+ slot_address: "{{ slot_address | int + 1 }}"
+- set_fact:
+ mac_address_counter: "{{ mac_address_counter | int + 1 }}"
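
For the first interface of a node (slot_address starts at 5 and mac_address_counter at 0), the fragment inserted into network-config would render roughly as below, while a matching <interface type="bridge"> element with the same MAC address and PCI slot 0x05 is added to the domain XML (the address is an illustrative placeholder):

    version: 2
    ethernets:
        enp0s5:
            match:
                mac_address: 52:54:00:5d:7d:00
            addresses:
                - 192.168.110.11/24
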
diff --git a/ansible/roles/infra_create_vms/tasks/main.yml b/ansible/roles/infra_create_vms/tasks/main.yml
new file mode 100644
index 000000000..62a023e7e
--- /dev/null
+++ b/ansible/roles/infra_create_vms/tasks/main.yml
@@ -0,0 +1,28 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include
+ include_vars:
+ file: "{{ rs_file }}"
+ name: infra_deploy_vars
+
+- set_fact:
+ mac_address_counter: 0
+
+- name: Create XML file
+ include_tasks: configure_vm.yml
+ extra_vars: "{{ mac_address_counter }}"
+ loop_control:
+ loop_var: node_item
+ with_items: "{{ infra_deploy_vars.nodes }}"
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
new file mode 100644
index 000000000..314ee30af
--- /dev/null
+++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
@@ -0,0 +1,48 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Destroy old networks created by virt
+ virt_net:
+ name: "{{ network_item.name }}"
+ command: destroy
+ when: network_item.name in virt_nets.list_nets
+
+# Ignoring errors as a network can be created without being defined.
+# This can happen if a user manually creates a network using the virsh command.
+# If the network is not defined the undefine code will throw an error.
+- name: Undefine old networks defined by virt
+ virt_net:
+ name: "{{ network_item.name }}"
+ command: undefine
+ when: network_item.name in virt_nets.list_nets
+ ignore_errors: yes
+
+- name: Check if "ovs-vsctl" command is present
+ command: which ovs-vsctl
+ register: ovs_vsctl_present
+ ignore_errors: yes
+
+- name: Destroy OVS bridge if it exists
+ command: ovs-vsctl --if-exists -- del-br "{{ network_item.name }}"
+ when: ovs_vsctl_present.rc == 0
+
+- name: Check if linux bridge is present
+ stat: path="{{ '/sys/class/net/'+network_item.name+'/brif/' }}"
+ register: check_linux_bridge
+
+- name: Remove linux bridge if it exists
+ shell: |
+ ifconfig "{{ network_item.name }}" down
+ brctl delbr "{{ network_item.name }}"
+ when: check_linux_bridge.stat.exists
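
The cleanup above is roughly equivalent to running the following by hand for each network in the deployment file (the network name is an illustrative placeholder; each step is skipped when its corresponding check fails):

    virsh net-destroy example-net                          # only when the network is listed as active
    virsh net-undefine example-net                         # may fail for transient networks, hence ignore_errors
    ovs-vsctl --if-exists -- del-br example-net            # only when ovs-vsctl is installed
    ifconfig example-net down && brctl delbr example-net   # only when the Linux bridge exists
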
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml
new file mode 100644
index 000000000..5e43ee81e
--- /dev/null
+++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml
@@ -0,0 +1,29 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Ignore errors as a VM can be destroyed without being undefined.
+- name: Destroy old VMs
+ virt:
+ command: destroy
+ name: "{{ node_item.hostname }}"
+ when: node_item.hostname in virt_vms.list_vms
+ ignore_errors: yes
+
+# Ignore errors as a VM can be running while undefined.
+- name: Undefine old VMs
+ virt:
+ command: undefine
+ name: "{{ node_item.hostname }}"
+ when: node_item.hostname in virt_vms.list_vms
+ ignore_errors: yes
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
new file mode 100644
index 000000000..e6c2c0229
--- /dev/null
+++ b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
@@ -0,0 +1,40 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include
+ include_vars:
+ file: "{{ rs_file }}"
+ name: infra_deploy_vars
+
+- name: List virt-nets
+ virt_net: command=list_nets
+ register: virt_nets
+
+- name: List VMs
+ virt: command=list_vms
+ register: virt_vms
+
+- name: Destroy old VMs
+ include_tasks: delete_vm.yml
+ extra_vars: "{{ virt_vms }}"
+ loop_control:
+ loop_var: node_item
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Delete old networks
+ include_tasks: delete_network.yml
+ extra_vars: "{{ virt_nets }}"
+ loop_control:
+ loop_var: network_item
+ with_items: "{{ infra_deploy_vars.networks }}"
diff --git a/ansible/roles/install_dpdk/tasks/main.yml b/ansible/roles/install_dpdk/tasks/main.yml
index 01ad4baf1..e82ad8363 100644
--- a/ansible/roles/install_dpdk/tasks/main.yml
+++ b/ansible/roles/install_dpdk/tasks/main.yml
@@ -121,11 +121,3 @@
remote_src: yes
force: yes
mode: 0755
-
-- name: make dpdk_nic_bind.py for backwards compatibility
- copy:
- src: "{{ dpdk_devbind[dpdk_version] }}"
- dest: "{{ INSTALL_BIN_PATH }}/dpdk_nic_bind.py"
- remote_src: yes
- force: yes
- mode: 0755
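
With the backwards-compatibility copy removed, callers are expected to use the upstream DPDK script name directly; a typical invocation looks like this (the PCI address is illustrative, assuming dpdk-devbind.py is available on the PATH):

    dpdk-devbind.py --status
    dpdk-devbind.py --bind=igb_uio 0000:05:00.0
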
diff --git a/ansible/roles/install_trex/tasks/main.yml b/ansible/roles/install_trex/tasks/main.yml
index 7ba1fc833..9113c887f 100644
--- a/ansible/roles/install_trex/tasks/main.yml
+++ b/ansible/roles/install_trex/tasks/main.yml
@@ -31,9 +31,6 @@
dest: "{{ INSTALL_BIN_PATH }}/trex_client"
state: link
-# Don't use trex/scripts/dpdk_nic_bind.py use DPDK usertools/dpdk-devbind.py
-#- command: cp "{{ INSTALL_BIN_PATH }}/trex/scripts/dpdk_nic_bind.py" "{{ INSTALL_BIN_PATH }}"
-
- name: add scripts to PYTHONPATH
lineinfile:
dest: /etc/environment
diff --git a/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json b/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json
new file mode 100644
index 000000000..3c78ab18d
--- /dev/null
+++ b/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json
@@ -0,0 +1,5817 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_YARDSTICK",
+ "label": "yardstick",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "influxdb",
+ "pluginName": "InfluxDB"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "4.4.3"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "influxdb",
+ "name": "InfluxDB",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "singlestat",
+ "name": "Singlestat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "text",
+ "name": "Text",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": []
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "refresh": false,
+ "rows": [
+ {
+ "collapse": false,
+ "height": "100px",
+ "panels": [
+ {
+ "content": "<h5 style=\"font-family:Verdana\"> <a style=\"color:#31A7D3\"><a style=\"font: 32px '#31A7D3'\"><center>OPNFV_Yardstick_NSB_PROX_BM_L2FWD_4Port_Test</center> </a></h5>\n<center>\n<p>The application does Port forwarding without touching packets. It will take packets in from one port and forward them unmodified to another port </p>\n<p>The KPI is the number of packets per second for a specified packet size with an accepted minimal packet loss </p>\n</center>",
+ "editable": true,
+ "error": false,
+ "id": 3,
+ "links": [],
+ "mode": "html",
+ "span": 12,
+ "title": "",
+ "type": "text"
+ },
+ {
+ "content": "<h5 style=\"font-family:Verdana\"> <a style=\"color:#31A7D3\"><a style=\"font: 22px '#31A7D3'\"><center>Throughput</center> </a></h5>\n",
+ "editable": true,
+ "error": false,
+ "height": "40",
+ "id": 7,
+ "links": [],
+ "minSpan": 12,
+ "mode": "html",
+ "span": 12,
+ "title": "",
+ "type": "text"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "300px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 6,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+              "alias": "Cumulative Packets Sent",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.xe0.out_packets\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.packets_fwd"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cumulative Load Sent by Generator",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets Per Second",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 9,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "TG xe-0 in packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe0.in_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "TG xe-1 in packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe1.in_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "TG xe-2 in packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe2.in_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "TG xe-3 in packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "D",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe3.in_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Load Received by Generator",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets Per Second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 43,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "TG xe-0 Out packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe0.out_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "TG xe-1 Out packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe1.out_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "TG xe-2 Out packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe2.out_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "TG xe-3 Out packets",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "D",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.xe3.out_packets"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Load Sent by Generator",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets Per Second",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "New row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "300px",
+ "panels": [
+ {
+ "content": "<h5 style=\"font-family:Verdana\"> <a style=\"color:#31A7D3\"><a style=\"font: 22px '#31A7D3'\"><center>Prox L2Fwd Traffic Gen stats</center> </a></h5>\n",
+ "editable": true,
+ "error": false,
+ "height": "40",
+ "id": 8,
+ "links": [],
+ "minSpan": 12,
+ "mode": "html",
+ "span": 12,
+ "title": "",
+ "type": "text"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "height": "300",
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "SUT Packets Received",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.curr_packets_in"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "SUT Stats - Load Received By SUT",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets per Second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "height": "300",
+ "id": 39,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "SUT Packets Sent",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.curr_packets_in"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "SUT Stats - Load Forwarded By SUT",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets per Second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "New row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Load Requested by Generator",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.TxThroughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "Rx Throughput",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "hide": true,
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.RxThroughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Theoretical Throughput",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets Per Second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Packet Size",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.PktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(216, 200, 27, 0.27)",
+ "op": "gt",
+ "value": 2
+ },
+ {
+ "colorMode": "custom",
+ "fill": true,
+ "fillColor": "rgba(234, 112, 112, 0.22)",
+ "op": "gt",
+ "value": 2
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Packet size",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": "Packet Size",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "New row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "fill": 1,
+ "id": 10,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "SUCCESS Tx Total",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Success_tx_total"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "SUCCESS Rx Total",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Success_rx_total"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "SUCCESS ALLOWABLE LOST PACKETS",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Success_can_be_lost"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "SUCCESS CRITERIA: TX Total = Rx Total + Tolerated Loss",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets Per Second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "35",
+ "id": 12,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.duration"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "Test Interval",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30",
+ "id": 11,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "alias": "Test Duration",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.test_duration"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "Test Duration",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30",
+ "id": 13,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "alias": "Test Precision",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.test_precision"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "Test Precision",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30",
+ "id": 14,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "alias": "Tolerated Loss",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.tolerated_loss"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "Tolerated Loss",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "New row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "30",
+ "panels": [
+ {
+ "content": "<center>Packet size</center>",
+ "height": "30px",
+ "id": 15,
+ "links": [],
+ "mode": "html",
+ "span": 4,
+ "title": "",
+ "type": "text"
+ },
+ {
+ "content": "<center>Theoretical Max Throughput (Million Packets Per Second)</center>",
+ "height": "30px",
+ "id": 16,
+ "links": [],
+ "mode": "html",
+ "span": 4,
+ "title": "",
+ "type": "text"
+ },
+ {
+ "content": "<center>Max Actual Throughput (Million Packets Per Second)</center>",
+ "height": "30px",
+ "id": 17,
+ "links": [],
+ "mode": "html",
+ "span": 4,
+ "title": "",
+ "type": "text"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "30px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 0,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 18,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+              "alias": "Packet Size",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_pktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "64"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 19,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+              "alias": "Theoretical Max Throughput (Mpps)",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_theor_max_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "64"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 20,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_Actual_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "64"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": "30",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": null,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 21,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_pktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "128"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 22,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_theor_max_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "128"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 23,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_Actual_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "128"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "30px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 0,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 24,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_pktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+                  "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "256"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 25,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_theor_max_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "256"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 26,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_Actual_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "256"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": -82,
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 27,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_pktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "512"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 28,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_theor_max_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "512"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 29,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_Actual_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "512"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": "30px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 30,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_pktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "1024"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 31,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_theor_max_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "1024"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 32,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_Actual_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "1024"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": "30px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 33,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_pktSize"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "1280"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 34,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_theor_max_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "1280"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "30px",
+ "id": 35,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.Result_Actual_throughput"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "tg__0.Result_pktSize",
+ "operator": "=",
+ "value": "1280"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "30",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 44,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 45,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 4,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 46,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "40px",
+ "panels": [
+ {
+ "content": "<h5 style=\"font-family:Verdana\"> <a style=\"color:#31A7D3\"><a style=\"font: 22px '#31A7D3'\"><center>Latency</center> </a></h5>",
+ "height": "40",
+ "id": 41,
+ "links": [],
+ "mode": "html",
+ "span": 12,
+ "title": "",
+ "type": "text"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "fill": 1,
+ "height": "300px",
+ "id": 47,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "xe0 Latency Avg",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyAvg.5"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "xe0 Latency Max",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyMax.5"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "xe0 Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "usec",
+ "logBase": 1,
+ "max": "65000",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "fill": 1,
+ "height": "300px",
+ "id": 48,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "xe1 Latency Avg",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyAvg.6"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "xe1 Latency Max",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyMax.6"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "xe1 Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "usec",
+ "logBase": 1,
+ "max": "65000",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "fill": 1,
+ "height": "300px",
+ "id": 49,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "xe2 Latency Avg",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyAvg.7"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "xe2 Latency Max",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyMax.7"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "xe2 Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "usec",
+ "logBase": 1,
+ "max": "65000",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "fill": 1,
+ "height": "300px",
+ "id": 50,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "xe3 Latency Avg",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyAvg.8"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "xe3 Latency Max",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "tg__0.LatencyMax.8"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "xe3 Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "usec",
+ "logBase": 1,
+ "max": "65000",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "40px",
+ "panels": [
+ {
+ "content": "<h5 style=\"font-family:Verdana\"> <a style=\"color:#31A7D3\"><a style=\"font: 22px '#31A7D3'\"><center>SUT CPU Utilization</center> </a></h5>",
+ "height": "40px",
+ "id": 51,
+ "links": [],
+ "mode": "html",
+ "span": 12,
+ "title": "",
+ "type": "text"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 5,
+ "fill": 1,
+ "height": "300px",
+ "id": 52,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "CPU 0 Utilization",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.collect_stats.core.cpu.0.percent-user"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "distinct"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU 0 Utilization - Master Core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "% Utilization",
+ "logBase": 1,
+ "max": "100",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 5,
+ "fill": 1,
+ "height": "300px",
+ "id": 53,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "CPU 1 Utilization - L2FWD XE0 to XE1",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.collect_stats.core.cpu.1.percent-user"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "distinct"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU 1 Utilization - L2FWD XE0 to XE1",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "% Utilization",
+ "logBase": 1,
+ "max": "100",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 5,
+ "fill": 1,
+ "height": "300px",
+ "id": 54,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "CPU 2 Utilization",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.collect_stats.core.cpu.2.percent-user"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "distinct"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU 2 Utilization - L2FWD XE1 to XE0",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "% Utilization",
+ "logBase": 1,
+ "max": "100",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 5,
+ "fill": 1,
+ "height": "300px",
+ "id": 55,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "CPU 3 Utilization",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.collect_stats.core.cpu.3.percent-user"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "distinct"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU 3 Utilization - L2FWD XE2 to XE3",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "% Utilization",
+ "logBase": 1,
+ "max": "100",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 5,
+ "fill": 1,
+ "height": "300px",
+ "id": 56,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "CPU 4 Utilization",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.collect_stats.core.cpu.4.percent-user"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "distinct"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU 4 Utilization - L2FWD XE3 to XE2",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "% Utilization",
+ "logBase": 1,
+ "max": "100",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_YARDSTICK}",
+ "decimals": 5,
+ "fill": 1,
+ "height": "300px",
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "CPU 5 Utilization",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tc_prox_baremetal_l2fwd-4",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "vnf__0.collect_stats.core.cpu.5.percent-user"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "distinct"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU 5 Utilization",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "% Utilization",
+ "logBase": 1,
+ "max": "100",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "yardstick",
+ "NSB",
+ "Prox",
+ "L2fwd",
+ "4Port",
+ "BM"
+ ],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "2018-02-12T15:17:27.733Z",
+ "to": "2018-02-12T16:44:28.270Z"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Prox_BM_L2FWD-4Port_MultiSize",
+ "version": 29
+} \ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
index ddd8dfaf8..46e52d557 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -26,7 +26,7 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \
RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
RUN easy_install -U setuptools==30.0.0
-RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0
+RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0
RUN mkdir -p ${REPOS_DIR}
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index ca933514a..24e3952fb 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -39,7 +39,7 @@ index 2ee5b4c..23e5ea5 100644
+RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && \
+ apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean
RUN easy_install -U setuptools==30.0.0
- RUN pip install appdirs==1.4.0 pyopenssl==17.5.0
+ RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0
@@ -43,8 +44,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
diff --git a/docs/testing/developer/devguide/devguide_nsb_prox.rst b/docs/testing/developer/devguide/devguide_nsb_prox.rst
new file mode 100755
index 000000000..fc533b2cf
--- /dev/null
+++ b/docs/testing/developer/devguide/devguide_nsb_prox.rst
@@ -0,0 +1,1226 @@
+Introduction
+=============
+
+This document describes the steps to create a new NSB PROX test based on
+existing PROX functionalities. NSB PROX provides a simple approximation
+of an operation and can be used to develop best practices and TCO models
+for Telco customers, investigate the impact of new Intel compute,
+network and storage technologies, characterize performance, and develop
+optimal system architectures and configurations.
+
+.. contents::
+
+Prerequisites
+=============
+
+In order to integrate PROX tests into NSB, the following prerequisites are required.
+
+.. _`dpdk wiki page`: http://dpdk.org/
+.. _`yardstick wiki page`: https://wiki.opnfv.org/display/yardstick/
+.. _`Prox documentation`: https://01.org/intel-data-plane-performance-demonstrators/documentation/prox-documentation
+.. _`openstack wiki page`: https://wiki.openstack.org/wiki/Main_Page
+.. _`grafana getting started`: http://docs.grafana.org/guides/gettingstarted/
+.. _`opnfv grafana dashboard`: https://wiki.opnfv.org/display/yardstick/How+to+work+with+grafana+dashboard
+.. _`Prox command line`: https://01.org/intel-data-plane-performance-demonstrators/documentation/prox-documentation#Command_line_options
+.. _`grafana deployment`: https://wiki.opnfv.org/display/yardstick/How+to+deploy+InfluxDB+and+Grafana+locally
+.. _`Prox options`: https://01.org/intel-data-plane-performance-demonstrators/documentation/prox-documentation#.5Beal_options.5D
+.. _`NSB Installation`: http://artifacts.opnfv.org/yardstick/docs/userguide/index.html#document-09-installation
+
+* A working knowledge of Yardstick. See `yardstick wiki page`_.
+* A working knowledge of PROX. See `Prox documentation`_.
+* Knowledge of Openstack. See `openstack wiki page`_.
+* Knowledge of how to use Grafana. See `grafana getting started`_.
+* How to Deploy InfluxDB & Grafana. See `grafana deployment`_.
+* How to use Grafana in OPNFV/Yardstick. See `opnfv grafana dashboard`_.
+* How to install NSB. See `NSB Installation`_
+
+Sample Prox Test Hardware Architecture
+======================================
+
+The following is a diagram of a sample NSB PROX Hardware Architecture
+for both NSB PROX on Bare metal and on Openstack.
+
+In this example, when running yardstick on baremetal, yardstick and the
+traffic generator run on the deployment node and the System Under Test
+(SUT) runs on the Controller Node.
+
+
+.. image:: images/PROX_Hardware_Arch.png
+ :width: 800px
+ :alt: Sample NSB PROX Hardware Architecture
+
+Prox Test Architecture
+======================
+
+In order to create a new test, one must understand the architecture of
+the test.
+
+An NSB Prox test architecture is composed of:
+
+* A traffic generator. This provides blocks of data on 1 or more ports
+ to the SUT.
+ The traffic generator also consumes the result packets from the system
+ under test.
+* A SUT. This consumes the packets generated by the packet
+ generator, applies one or more tasks to the packets and returns the
+ modified packets to the traffic generator.
+
+ This is an example of a sample NSB PROX test architecture.
+
+.. image:: images/PROX_Software_Arch.png
+ :width: 800px
+ :alt: NSB PROX test Architecture
+
+This diagram is of a sample NSB PROX test application.
+
+* Traffic Generator
+
+ * Generator Tasks - Composed of 1 or more tasks (it is possible to
+ have multiple tasks sending packets to the same port number. See Tasks
+ Ai and Aii plus Di and Dii)
+
+ * Task Ai - Generates Packets on Port 0 of Traffic Generator
+ and sends them to Port 0 of the SUT
+ * Task Aii - Generates Packets on Port 0 of Traffic Generator
+ and sends them to Port 0 of the SUT
+ * Task B - Generates Packets on Port 1 of Traffic Generator
+ and sends them to Port 1 of the SUT
+ * Task C - Generates Packets on Port 2 of Traffic Generator
+ and sends them to Port 2 of the SUT
+ * Task Di - Generates Packets on Port 3 of Traffic Generator
+ and sends them to Port 3 of the SUT
+ * Task Dii - Generates Packets on Port 0 of Traffic Generator
+ and sends them to Port 0 of the SUT
+
+ * Verifier Tasks - Composed of 1 or more tasks which receive
+ packets from the SUT
+
+ * Task E - Receives packets on Port 0 of Traffic Generator sent
+ from Port 0 of the SUT
+ * Task F - Receives packets on Port 1 of Traffic Generator sent
+ from Port 1 of the SUT
+ * Task G - Receives packets on Port 2 of Traffic Generator sent
+ from Port 2 of the SUT
+ * Task H - Receives packets on Port 3 of Traffic Generator sent
+ from Port 3 of the SUT
+
+* SUT
+
+ * Receiver Tasks - Receive packets from the generator - Composed of 1 or
+ more tasks which consume the packets sent from the Traffic Generator
+
+ * Task A - Receives Packets on Port 0 of System-Under-Test from
+ Traffic Generator Port 0, and forwards packets to Task E
+ * Task B - Receives Packets on Port 1 of System-Under-Test from
+ Traffic Generator Port 1, and forwards packets to Task E
+ * Task C - Receives Packets on Port 2 of System-Under-Test from
+ Traffic Generator Port 2, and forwards packets to Task E
+ * Task D - Receives Packets on Port 3 of System-Under-Test from
+ Traffic Generator Port 3, and forwards packets to Task E
+
+ * Processing Tasks - Composed of multiple tasks in series which carry
+ out some processing on received packets before forwarding them to the
+ next task.
+
+ * Task E - This receives packets from the Receiver Tasks,
+ carries out some operation on the data and forwards the resulting
+ packets to the next task in the sequence - Task F
+ * Task F - This receives packets from the previous task - Task
+ E, carries out some operation on the data and forwards the resulting
+ packets to the next task in the sequence - Task G
+ * Task G - This receives packets from the previous task - Task F
+ and distributes the resulting packets to the Transmitter tasks
+
+ * Transmitter Tasks - Composed of 1 or more tasks which send the
+ processed packets back to the Traffic Generator
+
+ * Task H - Receives Packets from Task G of System-Under-Test and
+ sends packets to Traffic Generator Port 0
+ * Task I - Receives Packets from Task G of System-Under-Test and
+ sends packets to Traffic Generator Port 1
+ * Task J - Receives Packets from Task G of System-Under-Test and
+ sends packets to Traffic Generator Port 2
+ * Task K - Receives Packets From Task G of System-Under-Test and
+ sends packets to Traffic Generator Port 3
+
+NSB Prox Test
+=============
+
+An NSB Prox test is composed of the following components:
+
+* Test Description File. Usually called
+ ``tc_prox_<context>_<test>-<ports>.yaml`` where
+
+ * <context> is either ``baremetal`` or ``heat_context``
+ * <test> is a one or two word description of the test.
+ * <ports> is the number of ports used
+
+ Example tests ``tc_prox_baremetal_l2fwd-2.yaml`` or
+ ``tc_prox_heat_context_vpe-4.yaml``. This file describes the components
+ of the test, in the case of openstack the network description and
+ server descriptions, in the case of baremetal the hardware
+ description location. It also contains the name of the Traffic Generator, the SUT config file
+ and the traffic profile description, all described below. See nsb-test-description-label_
+
+* Traffic Profile file. Example ``prox_binsearch.yaml``. This describes the packet size, tolerated
+ loss, initial line rate to start traffic at, test interval etc. See nsb-traffic-profile-label_
+
+* Traffic Generator Config file. Usually called ``gen_<test>-<ports>.cfg``.
+
+ This describes the activity of the traffic generator
+
+ * What each core of the traffic generator does,
+ * The packet of data sent by a core on a port of the traffic generator
+ to the system under test
+ * What core is used to wait on what port for data from the system
+ under test.
+
+ Example traffic generator config file ``gen_l2fwd-4.cfg``
+ See nsb-traffic-generator-label_
+
+* SUT Config file. Usually called ``handle_<test>-<ports>.cfg``.
+
+ This describes the activity of the SUT
+
+ * What each core of the SUT does,
+ * What cores receives packets from what ports
+ * What cores perform operations on the packets and pass the packets onto
+ another core
+ * What cores receives packets from what cores and transmit the packets on
+ the ports to the Traffic Verifier tasks of the Traffic Generator.
+
+ Example SUT config file ``handle_l2fwd-4.cfg``
+ See nsb-sut-generator-label_
+
+* NSB PROX Baremetal Configuration file. Usually called
+ ``prox-baremetal-<ports>.yaml``
+
+ * <ports> is the number of ports used
+
+ This is required for baremetal only. This describes hardware, NICs,
+ IP addresses, Network drivers, usernames and passwords.
+ See baremetal-config-label_
+
+* Grafana Dashboard. Usually called
+ ``Prox_<context>_<test>-<port>-<DateAndTime>.json`` where
+
+ * <context> Is either ``BM`` or ``heat``
+ * <test> Is a one or two word description of the test.
+ * <port> is the number of ports used expressed as ``2Port`` or ``4Port``
+ * <DateAndTime> is the Date and Time expressed as a string.
+
+ Example grafana dashboard ``Prox_BM_L2FWD-4Port-1507804504588.json``
+
+Other files may be required. These are test specific files and will be
+covered later.
+
+.. _nsb-test-description-label:
+
+**Test Description File**
+
+Here we will discuss the test description for both
+baremetal and openstack.
+
+*Test Description File for Baremetal*
+-------------------------------------
+
+This section will introduce the meaning of the Test case description
+file. We will use ``tc_prox_baremetal_l2fwd-2.yaml`` as an example to
+show you how to understand the test description file.
+
+.. image:: images/PROX_Test_BM_Script.png
+ :width: 800px
+ :alt: NSB PROX Test Description File
+
+Now let's examine the components of the file in detail
+
+1. ``traffic_profile`` - This specifies the traffic profile for the
+ test. In this case ``prox_binsearch.yaml`` is used. See nsb-traffic-profile-label_
+
+2. ``topology`` - This is either ``prox-tg-topology-1.yaml`` or
+ ``prox-tg-topology-2.yaml`` or ``prox-tg-topology-4.yaml``
+ depending on number of ports required.
+
+3. ``nodes`` - This names the Traffic Generator and the System
+ under Test. Does not need to change.
+
+4. ``prox_path`` - Location of the Prox executable on the traffic
+ generator (Either baremetal or Openstack Virtual Machine)
+
+5. ``prox_config`` - This is the ``SUT Config File``.
+ In this case it is ``handle_l2fwd-2.cfg``
+
+ A number of additional parameters can be added. This example
+ is taken from VPE::
+
+ options:
+ vnf__0:
+ prox_path: /opt/nsb_bin/prox
+ prox_config: ``configs/handle_vpe-4.cfg``
+ prox_args:
+ ``-t``: ````
+ prox_files:
+ ``configs/vpe_ipv4.lua`` : ````
+ ``configs/vpe_dscp.lua`` : ````
+ ``configs/vpe_cpe_table.lua`` : ````
+ ``configs/vpe_user_table.lua`` : ````
+ ``configs/vpe_rules.lua`` : ````
+ prox_generate_parameter: True
+
+ ``prox_files`` - this specifies that a number of additional files
+ need to be provided for the test to run correctly. These files
+ could provide routing information, hashing information or a
+ hashing algorithm and ip/mac information.
+
+ ``prox_generate_parameter`` - this specifies that the NSB application
+ is required to provide information to NSB Prox in the form
+ of a file called ``parameters.lua``, which contains information
+ retrieved from either the hardware or the openstack configuration.
+
+6. ``prox_args`` - this specifies the command line arguments to start
+ prox. See `prox command line`_.
+
+7. ``prox_config`` - This specifies the Traffic Generator config file.
+
+8. ``runner`` - This is set to ``Duration``. This specifies that the
+ test runs for a set duration. Other runner types are available
+ but it is recommended to use ``Duration``.
+
+9. ``context`` - This is the ``context`` for a 2 port Baremetal configuration.
+ If a 4 port configuration were required then the file
+ ``prox-baremetal-4.yaml`` would be used. This is the NSB Prox
+ baremetal configuration file.
+
+.. _nsb-traffic-profile-label:
+
+*Traffic Profile file*
+----------------------
+
+This describes the details of the traffic flow. In this case ``prox_binsearch.yaml`` is used.
+
+.. image:: images/PROX_Traffic_profile.png
+ :width: 800px
+ :alt: NSB PROX Traffic Profile
+
+
+1. ``name`` - The name of the traffic profile. This name should match the name specified in the
+ ``traffic_profile`` field in the Test Description File.
+
+2. ``traffic_type`` - This specifies the type of traffic pattern generated. This name matches the
+ class name of the traffic profile. See::
+
+ network_services/traffic_profile/prox_binsearch.py class ProxBinSearchProfile(ProxProfile)
+
+ In this case it lowers the traffic rate until the number of packets
+ sent is equal to the number of packets received (plus a
+ tolerated loss). Once it achieves this it increases the traffic
+ rate in order to find the highest rate with no traffic loss.
+
+ Custom traffic types can be created by creating a new traffic profile class.
+
+3. ``tolerated_loss`` - This specifies the percentage of packets that can be lost/dropped before
+ we declare failure. Success means the number of packets received by the Traffic Generator plus
+ the tolerated loss is greater than or equal to the number of packets transmitted by the Traffic
+ Generator. (The interaction of these parameters is illustrated in the sketch after this list.)
+
+4. ``test_precision`` - This specifies the precision of the test results. For some tests the success criteria
+ may never be achieved because the test precision may be greater than the successful throughput. For finer
+ results increase the precision by making this value smaller.
+
+5. ``packet_sizes`` - This specifies the range of packet sizes this test is run for.
+
+6. ``duration`` - This specifies the sample duration that the test uses to check for success or failure.
+
+7. ``lower_bound`` - This specifies the test initial lower bound sample rate. On success this value is increased.
+
+8. ``upper_bound`` - This specifies the test initial upper bound sample rate. On failure this value is decreased.
+
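+To make the interaction of ``tolerated_loss``, ``test_precision``,
+``lower_bound`` and ``upper_bound`` concrete, the short stand-alone Python
+sketch below mimics the binary search that ``ProxBinSearchProfile`` performs
+over the traffic rate. It is an illustration only, not the Yardstick
+implementation; ``run_sample()`` is a hypothetical stand-in for one timed
+traffic run at the given rate::
+
+  # Illustrative only: a simplified version of the binary search over the
+  # traffic rate (in percent) described above.
+  def binary_search(run_sample, tolerated_loss=0.001,
+                    test_precision=1.0, lower_bound=0.0, upper_bound=100.0):
+      best_rate = 0.0
+      while (upper_bound - lower_bound) >= test_precision:
+          rate = (lower_bound + upper_bound) / 2.0
+          tx, rx = run_sample(rate)        # hypothetical traffic run at 'rate'
+          loss_pct = 100.0 * (tx - rx) / tx if tx else 100.0
+          if loss_pct <= tolerated_loss:   # success: raise the lower bound
+              best_rate = rate
+              lower_bound = rate
+          else:                            # failure: lower the upper bound
+              upper_bound = rate
+      return best_rate                     # highest rate within tolerated loss
+
+The search narrows the ``lower_bound``/``upper_bound`` window until it is
+smaller than ``test_precision`` and reports the highest rate whose packet
+loss stayed within ``tolerated_loss``.
+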
+Other traffic profiles exist, e.g. ``prox_ACL.yaml``, which does not
+compare what is received with what is transmitted. It just
+sends packets at the maximum rate.
+
+It is possible to create custom traffic profiles by
+creating a new file in the same folder as ``prox_binsearch.yaml``.
+See this ``prox_vpe.yaml`` as an example::
+
+ schema: ``nsb:traffic_profile:0.1``
+
+ name: prox_vpe
+ description: Prox vPE traffic profile
+
+ traffic_profile:
+ traffic_type: ProxBinSearchProfile
+ tolerated_loss: 100.0 #0.001
+ test_precision: 0.01
+ # The minimum size of the Ethernet frame for the vPE test is 68 bytes.
+ packet_sizes: [68]
+ duration: 5
+ lower_bound: 0.0
+ upper_bound: 100.0
+
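+As a rough sanity check on the numbers such a profile produces, the
+theoretical maximum frame rate for a given packet size can be computed
+directly. The sketch below assumes a 10 Gbit/s port and the standard 20
+bytes of per-frame overhead (preamble plus inter-frame gap); the
+``lower_bound``/``upper_bound`` values above (0 to 100) are then read as a
+percentage of these line-rate figures::
+
+  # Illustrative helper: theoretical maximum rate for one Ethernet frame size.
+  # Assumes a 10 Gbit/s port; overhead = 8 byte preamble + 12 byte gap.
+  def max_pps(frame_size_bytes, line_rate_bps=10e9, overhead_bytes=20):
+      bits_per_frame = (frame_size_bytes + overhead_bytes) * 8
+      return line_rate_bps / bits_per_frame
+
+  for size in (64, 128, 256, 512, 1024, 1280, 1518):
+      print("%4d byte frames: %6.3f Mpps at 100%% of line rate"
+            % (size, max_pps(size) / 1e6))
+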
+*Test Description File for Openstack*
+-------------------------------------
+
+We will use ``tc_prox_heat_context_l2fwd-2.yaml`` as an example to show
+you how to understand the test description file.
+
+.. image:: images/PROX_Test_HEAT_Script.png
+ :width: 800px
+ :alt: NSB PROX Test Description File
+
+Now let's examine the components of the file in detail
+
+Sections 1 to 8 are exactly the same in Baremetal and in Heat. Section
+``9`` is replaced with sections A to F. Section 9 was for a baremetal
+configuration file. This has no place in a heat configuration.
+
+A. ``image`` - yardstick-samplevnfs. This is the name of the image
+ created during the installation of NSB. This is fixed.
+
+B. ``flavor`` - The flavor is created dynamically. However we could
+ use an already existing flavor if required. In that case the
+ flavor would be named::
+
+ flavor: yardstick-flavor
+
+C. ``extra_specs`` - This allows us to specify the number of
+ cores, sockets and hyperthreading assigned to it. In this case
+ we have 1 socket with 10 cores and no hyperthreading enabled.
+
+D. ``placement_groups`` - default. Do not change for NSB PROX.
+
+E. ``servers`` - ``tg_0`` is the traffic generator and ``vnf_0``
+ is the system under test.
+
+F. ``networks`` - This is composed of a management network labeled ``mgmt``,
+ one uplink network labeled ``uplink_0`` and one downlink
+ network labeled ``downlink_0`` for 2 ports. If this were a 4 port
+ configuration there would be 2 extra downlink networks. See this
+ example from a 4 port l2fwd test::
+
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ uplink_0:
+ cidr: '10.0.2.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+ downlink_0:
+ cidr: '10.0.3.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+ downlink_1:
+ cidr: '10.0.4.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+ downlink_2:
+ cidr: '10.0.5.0/24'
+ gateway_ip: 'null'
+ port_security_enabled: False
+ enable_dhcp: 'false'
+
+.. _nsb-traffic-generator-label:
+
+*Traffic Generator Config file*
+-------------------------------
+
+This section will describe the traffic generator config file.
+This is the same for both baremetal and heat. See this example
+of ``gen_l2fwd_multiflow-2.cfg`` to explain the options.
+
+.. image:: images/PROX_Gen_2port_cfg.png
+ :width: 1400px
+ :alt: NSB PROX Gen Config File
+
+The configuration file is divided into multiple sections, each
+of which is used to define some parameters and options::
+
+ [eal options]
+ [variables]
+ [port 0]
+ [port 1]
+ [port .]
+ [port Z]
+ [defaults]
+ [global]
+ [core 0]
+ [core 1]
+ [core 2]
+ [core .]
+ [core Z]
+
+See `prox options`_ for details
+
+Now let's examine the components of the file in detail
+
+1. ``[eal options]`` - This specifies the EAL (Environment
+ Abstraction Layer) options. These are default values and
+ are not changed. See `dpdk wiki page`_.
+
+2. ``[variables]`` - This section contains variables, as
+ the name suggests. Variables for Core numbers, mac
+ addresses, ip addresses etc. They are assigned as a
+ ``key = value`` where the key is used in place of the value.
+
+ .. caution::
+ A special case exists for variables with a value beginning with
+ ``@@``. These values are dynamically updated by the NSB
+ application at run time. Values like MAC address,
+ IP Address etc.
+
+3. ``[port 0]`` - This section describes the DPDK Port. The number
+ following the keyword ``port`` usually refers to the DPDK Port
+ Id, usually starting from ``0``. Because you can have multiple
+ ports this entry is usually repeated. E.g. for a 2 port setup
+ ``[port 0]`` and ``[port 1]``, and for a 4 port setup ``[port 0]``,
+ ``[port 1]``, ``[port 2]`` and ``[port 3]``::
+
+ [port 0]
+ name=p0
+ mac=hardware
+ rx desc=2048
+ tx desc=2048
+ promiscuous=yes
+
+ a. In this example ``name = p0`` assigns the name ``p0`` to the
+ port. Any name can be assigned to a port.
+ b. ``mac=hardware`` sets the MAC address assigned by the hardware
+ to data from this port.
+ c. ``rx desc=2048`` sets the number of available descriptors to
+ allocate for receive packets. This can be changed and can
+ affect performance.
+ d. ``tx desc=2048`` sets the number of available descriptors to
+ allocate for transmit packets. This can be changed and can
+ affect performance.
+ e. ``promiscuous=yes`` enables promiscuous mode for this port.
+
+4. ``[defaults]`` - Here default operations and settings can be
+ overwritten. In this example ``mempool size=4K`` alters the number
+ of mbufs per task. Altering this value could affect
+ performance. See `prox options`_ for details.
+
+5. ``[global]`` - Here application wide settings are supported. Things
+ like application name, start time, duration and memory
+ configurations can be set here. In this example::
+
+ [global]
+ start time=5
+ name=Basic Gen
+
+ a. ``start time=5`` Time in seconds after which average
+ stats will be started.
+ b. ``name=Basic Gen`` Name of the configuration.
+
+6. ``[core 0]`` - This core is designated the master core. Every
+ Prox application must have a master core. The master mode must
+ be assigned to exactly one task, running alone on one core.::
+
+ [core 0]
+ mode=master
+
+7. ``[core 1]`` - This describes the activity on core 1. Cores can
+ be configured by means of a set of [core #] sections, where
+ # represents either:
+
+ a. an absolute core number: e.g. on a 10-core, dual socket
+ system with hyper-threading,
+ cores are numbered from 0 to 39.
+
+ b. PROX allows a core to be identified by a core number, the
+ letter 's', and a socket number.
+
+ It is possible to write a baremetal and an openstack test which use
+ the same traffic generator config file and SUT config file.
+ In this case it is advisable not to use physical
+ core numbering.
+
+ However it is also possible to write NSB Prox tests that
+ have been optimized for a particular hardware configuration.
+ In this case it is advisable to use the core numbering.
+ It is up to the user to make sure that cores from
+ the right sockets are used (i.e. from the socket to which the NIC
+ is attached), to ensure good performance (EPA).
+
+ Each core can be assigned with a set of tasks, each running
+ one of the implemented packet processing modes.::
+
+ [core 1]
+ name=p0
+ task=0
+ mode=gen
+ tx port=p0
+ bps=1250000000
+ ; Ethernet + IP + UDP
+ pkt inline=${sut_mac0} 70 00 00 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d 98 10 64 01 98 10 64 02 13 88 13 88 00 08 55 7b
+ ; src_ip: 152.16.100.0/8
+ random=0000XXX1
+ rand_offset=29
+ ; dst_ip: 152.16.100.0/8
+ random=0000XXX0
+ rand_offset=33
+ random=0001001110001XXX0001001110001XXX
+ rand_offset=34
+
+   a. ``name=p0`` - Name assigned to the core.
+   b. ``task=0`` - Each core can run a set of tasks, starting with ``0``.
+      Task 1 can be defined later in this core or
+      can be defined in another ``[core 1]`` section with ``task=1``
+      later in the configuration file. Sometimes running
+      multiple tasks related to the same packet on the same physical
+      core improves performance, however sometimes it
+      is optimal to move a task to a separate core. This is best
+      decided by checking performance.
+   c. ``mode=gen`` - Specifies the action carried out by this task on
+      this core. Supported modes are: classify, drop, gen, lat, genl4, nop, l2fwd, gredecap,
+      greencap, lbpos, lbnetwork, lbqinq, lb5tuple, ipv6_decap, ipv6_encap,
+      qinqdecapv4, qinqencapv4, qos, routing, impair,
+      mirror, unmpls, tagmpls, nat, decapnsh, encapnsh, police and acl.
+      These include:
+
+ * Classify
+ * Drop
+ * Basic Forwarding (no touch)
+ * L2 Forwarding (change MAC)
+ * GRE encap/decap
+ * Load balance based on packet fields
+ * Symmetric load balancing
+ * QinQ encap/decap IPv4/IPv6
+ * ARP
+ * QoS
+ * Routing
+ * Unmpls
+ * Nsh encap/decap
+ * Policing
+ * ACL
+
+      In the traffic generator we expect a core to generate packets (``gen``)
+      and to receive packets & calculate latency (``lat``).
+      This core does ``gen``, i.e. it is a traffic generator.
+
+ To understand what each of the modes support please see
+ `prox documentation`_.
+
+   d. ``tx port=p0`` - This specifies that the generated packets are
+      transmitted to port ``p0``.
+   e. ``bps=1250000000`` - This indicates the rate, in bytes per
+      second, at which packets are generated.
+   f. ``; Ethernet + IP + UDP`` - This is a comment. Lines starting with
+      ``;`` are ignored.
+   g. ``pkt inline=${sut_mac0} 70 00 00 00 ...`` - Defines the packet
+      format as a sequence of bytes (each
+      expressed in hexadecimal notation). This defines the packet
+      that is generated. The packet begins
+      with the hexadecimal sequence assigned to ``sut_mac`` followed by
+      the remainder of the bytes in the string.
+      This packet can be sent as-is or modified by the ``random=..``
+      fields described below before being sent to the target.
+ h. ``; src_ip: 152.16.100.0/8`` - Comment
+   i. ``random=0000XXX1`` - This describes a field of the packet
+      containing random data. This string can be
+      8, 16, 24 or 32 characters long and represents 1, 2, 3 or 4
+      bytes of data. In this case it describes a byte of
+      data. Each character in the string can be ``0``, ``1`` or ``X``.
+      ``0`` and ``1`` are fixed bit values in the data packet and
+      ``X`` is a random bit. So ``random=0000XXX1`` generates the
+      combinations 00000001(1), 00000011(3), 00000101(5), 00000111(7),
+      00001001(9), 00001011(11), 00001101(13) and 00001111(15).
+ j. ``rand_offset=29`` - Defines where to place the previously
+ defined random field.
+ k. ``; dst_ip: 152.16.100.0/8`` - Comment
+ l. ``random=0000XXX0`` - This is another random field which
+ generates a byte of 00000000(0), 00000010(2),
+ 00000100(4), 00000110(6), 00001000(8), 00001010(10),
+ 00001100(12) and 00001110(14) combinations.
+ m. ``rand_offset=33`` - Defines where to place the previously
+ defined random field.
+ n. ``random=0001001110001XXX0001001110001XXX`` - This is
+ another random field which generates 4 bytes.
+ o. ``rand_offset=34`` - Defines where to place the previously
+ defined 4 byte random field.
+
+   Core 2 executes the same scenario as Core 1. The only difference
+   in this case is that the packets are generated
+   for Port 1, as sketched below.
+
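+   A minimal sketch of the corresponding section (the ``random`` fields
+   shown for core 1 are omitted here for brevity; the name is
+   illustrative)::
+
+     [core 2]
+     name=p1
+     task=0
+     mode=gen
+     tx port=p1
+     bps=1250000000
+     pkt inline=${sut_mac1} 70 00 00 00 00 02 08 00 45 00 ...
+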
+8. ``[core 3]`` - This defines the activities on core 3. The purpose
+   of ``core 3`` and ``core 4`` is to receive packets
+   sent by the SUT::
+
+ [core 3]
+ name=rec 0
+ task=0
+ mode=lat
+ rx port=p0
+ lat pos=42
+
+   a. ``name=rec 0`` - Name assigned to the core.
+   b. ``task=0`` - Each core can run a set of tasks, starting with
+      ``0``. Task 1 can be defined later in this core or
+      can be defined in another ``[core 3]`` section with
+      ``task=1`` later in the configuration file. Sometimes running
+      multiple tasks related to the same packet on the same
+      physical core improves performance, however sometimes it
+      is optimal to move a task to a separate core. This is
+      best decided by checking performance.
+   c. ``mode=lat`` - Specifies the action carried out by this task on this core. Supported modes are: acl,
+      classify, drop, gredecap, greencap, ipv6_decap, ipv6_encap, l2fwd, lbnetwork, lbpos, lbqinq, nop,
+      police, qinqdecapv4, qinqencapv4, qos, routing, impair, lb5tuple, mirror, unmpls, tagmpls,
+      nat, decapnsh, encapnsh, gen, genl4 and lat. This task (0) on core 3 receives packets on a port
+      and measures latency.
+   d. ``rx port=p0`` - The port to receive packets on, here ``Port 0``. Core 4 will receive packets on ``Port 1``.
+   e. ``lat pos=42`` - Defines where in the packet the 4-byte timestamp is placed by the generator and read by
+      this latency task. For the Ethernet + IP + UDP packet above, offset 42 is the first byte after the headers
+      (14 + 20 + 8 = 42). Note that the packet length should be longer than ``lat pos`` + 4 bytes to avoid
+      truncation of the timestamp, and that the SUT workload might cause the position of the timestamp to change
+      (e.g. due to encapsulation).
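+
+   A minimal sketch of the matching ``[core 4]`` section, receiving on
+   ``Port 1`` (the name is illustrative)::
+
+     [core 4]
+     name=rec 1
+     task=0
+     mode=lat
+     rx port=p1
+     lat pos=42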
+
+.. _nsb-sut-generator-label:
+
+*SUT Config file*
+-------------------------------
+
+This section describes the SUT (VNF) config file. This is the same for both
+baremetal and Heat. See this example of ``handle_l2fwd_multiflow-2.cfg`` to explain the options.
+
+.. image:: images/PROX_Handle_2port_cfg.png
+ :width: 1400px
+ :alt: NSB PROX Handle Config File
+
+See `prox options`_ for details.
+
+Now let's examine the components of the file in detail.
+
+1. ``[eal options]`` - same as in the Generator config file. This specifies the EAL (Environment Abstraction Layer)
+   options. These are default values and are not changed.
+   See `dpdk wiki page`_.
+
+2. ``[port 0]`` - This section describes the DPDK port. The number following the keyword ``port`` refers to the DPDK port ID, usually starting from ``0``.
+   Because you can have multiple ports this entry is usually repeated, e.g. for a 2 port setup ``[port 0]`` and ``[port 1]``, and for a 4 port setup ``[port 0]``, ``[port 1]``,
+   ``[port 2]`` and ``[port 3]``::
+
+ [port 0]
+ name=if0
+ mac=hardware
+ rx desc=2048
+ tx desc=2048
+ promiscuous=yes
+
+   a. In this example ``name=if0`` assigns the name ``if0`` to the port. Any name can be assigned to a port.
+   b. ``mac=hardware`` uses the MAC address assigned to this port by the hardware.
+   c. ``rx desc=2048`` sets the number of descriptors to allocate for receive packets. This can be changed and can affect performance.
+   d. ``tx desc=2048`` sets the number of descriptors to allocate for transmit packets. This can be changed and can affect performance.
+   e. ``promiscuous=yes`` enables promiscuous mode for this port.
+
+3. ``[defaults]`` - Here default operations and settings can be overwritten::
+
+ [defaults]
+ mempool size=8K
+ memcache size=512
+
+   a. In this example ``mempool size=8K`` alters the number of mbufs per task. Altering this value could affect performance. See `prox options`_ for details.
+   b. ``memcache size=512`` - the number of mbufs cached per core (the cache_size); the default is 256. Altering this value could affect performance.
+
+4. ``[global]`` - Here application-wide settings are supported. Things like application name, start time, duration and memory configurations can be set here.
+   In this example::
+
+ [global]
+ start time=5
+     name=Handle L2FWD Multiflow (2x)
+
+   a. ``start time=5`` - Time in seconds after which collection of average stats is started.
+   b. ``name=Handle L2FWD Multiflow (2x)`` - Name of the configuration.
+
+5. ``[core 0]`` - This core is designated the master core. Every PROX application must have a master core. The master mode must be assigned to
+   exactly one task, running alone on one core::
+
+ [core 0]
+ mode=master
+
+6. ``[core 1]`` - This describes the activity on core 1. Cores can be configured by means of a set of [core #] sections, where # represents either:
+
+ a. an absolute core number: e.g. on a 10-core, dual socket system with hyper-threading,
+ cores are numbered from 0 to 39.
+
+   b. PROX allows a core to be identified by a core number, the letter 's', and a socket number.
+      However, since NSB PROX is hardware agnostic (physical and virtual configurations are the same), it
+      is advisable not to use physical core numbering.
+
+   Each core can be assigned a set of tasks, each running one of the implemented packet processing modes::
+
+ [core 1]
+ name=none
+ task=0
+ mode=l2fwd
+ dst mac=@@tester_mac1
+ rx port=if0
+ tx port=if1
+
+ a. ``name=none`` - No name assigned to the core.
+   b. ``task=0`` - Each core can run a set of tasks, starting with ``0``. Task 1 can be defined later in this core or
+      can be defined in another ``[core 1]`` section with ``task=1`` later in the configuration file. Sometimes running
+      multiple tasks related to the same packet on the same physical core improves performance, however sometimes it
+      is optimal to move a task to a separate core. This is best decided by checking performance.
+   c. ``mode=l2fwd`` - Specifies the action carried out by this task on this core. Supported modes are: acl,
+      classify, drop, gredecap, greencap, ipv6_decap, ipv6_encap, l2fwd, lbnetwork, lbpos, lbqinq, nop,
+      police, qinqdecapv4, qinqencapv4, qos, routing, impair, lb5tuple, mirror, unmpls, tagmpls,
+      nat, decapnsh, encapnsh, gen, genl4 and lat. This task does ``l2fwd``, i.e. it performs L2 forwarding.
+
+   d. ``dst mac=@@tester_mac1`` - The destination MAC address of the packet will be set to the MAC address of ``Port 1`` of the destination device (the Traffic Generator/Verifier).
+   e. ``rx port=if0`` - This specifies that the packets are received from ``Port 0``, called ``if0``.
+   f. ``tx port=if1`` - This specifies that the packets are transmitted to ``Port 1``, called ``if1``.
+
+   In this example we receive a packet on a port, carry out an operation on the packet on the core, and transmit it on another port, still using the same task on the same core.
+
+   In some implementations you may wish to use multiple tasks, like this::
+
+ [core 1]
+ name=rx_task
+ task=0
+ mode=l2fwd
+ dst mac=@@tester_p0
+ rx port=if0
+ tx cores=1t1
+ drop=no
+
+ name=l2fwd_if0
+ task=1
+ mode=nop
+ rx ring=yes
+ tx port=if0
+ drop=no
+
+   In this example you can see that Core 1/Task 0, called ``rx_task``, receives the packet from ``if0`` and performs the l2fwd. However instead of sending the packet to a
+   port it sends it to a core, see ``tx cores=1t1``. In this case it sends it to Core 1/Task 1.
+
+   Core 1/Task 1, called ``l2fwd_if0``, receives the packet not from a port but from the ring, see ``rx ring=yes``. It does not perform any operation on the packet, see ``mode=nop``,
+   and sends the packets to ``if0``, see ``tx port=if0``.
+
+   It is also possible to implement more complex operations by chaining multiple operations in sequence and using rings to pass packets from one core to another.
+
+   In the example below we show a Broadband Network Gateway (BNG) with Quality of Service (QoS). Communication from task to task is via rings.
+
+ .. image:: images/PROX_BNG_QOS.png
+ :width: 1000px
+ :alt: NSB PROX Config File for BNG_QOS
+
+*Baremetal Configuration file*
+------------------------------
+
+.. _baremetal-config-label:
+
+This is required for baremetal testing. It describes the IP addresses of the various ports, the network device drivers, the MAC addresses and the network
+configuration.
+
+In this example we will describe a 2 port configuration. This file is the same for all 2 port NSB Prox tests on the same platforms/configuration.
+
+ .. image:: images/PROX_Baremetal_config.png
+ :width: 1000px
+ :alt: NSB PROX Yardstick Config
+
+Now let's describe the sections of the file.
+
+ 1. ``TrafficGen`` - This section describes the Traffic Generator node of the test configuration. The name of the node ``trafficgen_1`` must match the node name
+ in the ``Test Description File for Baremetal`` mentioned earlier. The password attribute of the test needs to be configured. All other parameters
+ can remain as default settings.
+ 2. ``interfaces`` - This defines the DPDK interfaces on the Traffic Generator.
+ 3. ``xe0`` is DPDK Port 0. ``lspci`` and ``./dpdk-devbind.py -s`` can be used to provide the interface information. ``netmask`` and ``local_ip`` should not be changed.
+ 4. ``xe1`` is DPDK Port 1. If more than 2 ports are required then the ``xe1`` section needs to be repeated and modified accordingly.
+ 5. ``vnf`` - This section describes the SUT of the test configuration. The name of the node ``vnf`` must match the node name in the
+ ``Test Description File for Baremetal`` mentioned earlier. The password attribute of the test needs to be configured. All other parameters
+    can remain as default settings.
+ 6. ``interfaces`` - This defines the DPDK interfaces on the SUT.
+ 7. ``xe0`` - Same as 3 but for the ``SUT``.
+ 8. ``xe1`` - Same as 4 but for the ``SUT`` also.
+ 9. ``routing_table`` - All parameters should remain unchanged.
+ 10. ``nd_route_tbl`` - All parameters should remain unchanged.
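+
+An abbreviated, illustrative sketch of such a file is shown below. All
+values are placeholders only; the image above is the authoritative
+reference for the full layout::
+
+    nodes:
+    -
+        name: "trafficgen_1"
+        role: TrafficGen
+        ip: 10.10.10.10
+        user: root
+        password: r00t
+        interfaces:
+            xe0:
+                vpci: "0000:05:00.0"
+                driver: i40e
+                dpdk_port_num: 0
+                local_ip: "152.16.100.19"
+                netmask: "255.255.255.0"
+                local_mac: "00:00:00:00:00:01"
+            # xe1: second DPDK port, same structure as xe0
+    -
+        name: "vnf"
+        role: vnf
+        # ip, user, password and interfaces as above, plus
+        # routing_table and nd_route_tbl entries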
+
+*Grafana Dashboard*
+-------------------
+
+The Grafana dashboard visually displays the results of the tests. The steps required to produce a Grafana dashboard are described here.
+
+.. _yardstick-config-label:
+
+  a. Configure ``yardstick`` to use InfluxDB to store test results. See file ``/etc/yardstick/yardstick.conf``.
+
+ .. image:: images/PROX_Yardstick_config.png
+ :width: 1000px
+ :alt: NSB PROX Yardstick Config
+
+     1. Specify the dispatcher to use InfluxDB to store results.
+     2. ``target = ...`` - Specify the location of the InfluxDB instance used to store results.
+        ``db_name = yardstick`` - name of the database. Do not change.
+        ``username = root`` - username to use to store results. (Many tests are run as root.)
+        ``password = ...`` - Please set to the root user password.
+
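+     The relevant part of ``/etc/yardstick/yardstick.conf`` typically
+     looks similar to this sketch (the target IP is a placeholder)::
+
+        [DEFAULT]
+        debug = False
+        dispatcher = influxdb
+
+        [dispatcher_influxdb]
+        timeout = 5
+        target = http://<influxdb_ip>:8086
+        db_name = yardstick
+        username = root
+        password = <root user password>
+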
+  b. Deploy InfluxDB & Grafana. See `grafana deployment`_ for how to deploy them.
+  c. Generate the test data. Run the tests as follows::
+
+ yardstick --debug task start tc_prox_<context>_<test>-ports.yaml
+
+ eg.::
+
+ yardstick --debug task start tc_prox_heat_context_l2fwd-4.yaml
+
+  d. Now build the dashboard for the test you just ran. The easiest way to do this is to copy an existing dashboard and rename the
+     test and the field names. The procedure to do so is described in `opnfv grafana dashboard`_.
+
+How to run NSB Prox Test on a baremetal environment
+====================================================
+
+In order to run the NSB PROX test:
+
+ 1. Install NSB on the Traffic Generator node and PROX on the SUT. See `NSB Installation`_.
+
+ 2. To enter container::
+
+ docker exec -it yardstick /bin/bash
+
+ 3. Install baremetal configuration file (POD files)
+
+    a. Go to the location of the PROX tests in the container::
+
+ cd /home/opnfv/repos/yardstick/samples/vnf_samples/nsut/prox
+
+    b. Install ``prox-baremetal-2.yaml`` and ``prox-baremetal-4.yaml`` for that topology
+       into this directory as per baremetal-config-label_.
+
+ c. Install and configure ``yardstick.conf`` ::
+
+ cd /etc/yardstick/
+
+ Modify /etc/yardstick/yardstick.conf as per yardstick-config-label_
+
+ 4. Execute the test, e.g.::
+
+ yardstick --debug task start ./tc_prox_baremetal_l2fwd-4.yaml
+
+How to run NSB Prox Test on an Openstack environment
+====================================================
+
+In order to run the NSB PROX test:
+
+ 1. Install NSB on the Openstack deployment node. See `NSB Installation`_.
+
+ 2. To enter container::
+
+ docker exec -it yardstick /bin/bash
+
+ 3. Install configuration file
+
+    a. Go to the location of the PROX tests in the container::
+
+ cd /home/opnfv/repos/yardstick/samples/vnf_samples/nsut/prox
+
+ b. Install and configure ``yardstick.conf`` ::
+
+ cd /etc/yardstick/
+
+ Modify /etc/yardstick/yardstick.conf as per yardstick-config-label_
+
+
+ 4. Execute the test, e.g.::
+
+ yardstick --debug task start ./tc_prox_heat_context_l2fwd-4.yaml
+
+Frequently Asked Questions
+==========================
+
+Here is a list of frequently asked questions.
+
+*NSB Prox does not work on Baremetal. How do I resolve this?*
+-------------------------------------------------------------
+
+If NSB PROX does not work on baremetal, the problem is either in the network configuration or in the test file.
+
+*Solution*
+
+1. Verify the network configuration. Execute an existing baremetal test::
+
+ yardstick --debug task start ./tc_prox_baremetal_l2fwd-4.yaml
+
+   If the test does not work then there is an error in the network configuration.
+
+   a. Check DPDK on the Traffic Generator and the SUT via::
+
+ /root/dpdk-17./usertools/dpdk-devbind.py
+
+   b. Verify that the MAC addresses match ``prox-baremetal-<ports>.yaml`` via ``ifconfig`` and ``dpdk-devbind``.
+
+   c. Check your eth port is what you expect. You would not be the first person to think that
+      the port your cable is plugged into is ethX when in fact it is ethY. Use
+      ethtool to visually confirm that the eth is where you expect::
+
+        ethtool -p ethX
+
+      An LED should start blinking on the port. (On both the System-Under-Test and the Traffic Generator.)
+
+ d. Check cable.
+
+      Install the Linux kernel network driver and ensure your ports are
+      ``bound`` to the driver via ``dpdk-devbind``. Bring up the port on both
+      the SUT and the Traffic Generator and check the connection.
+
+      i) On the SUT and on the Traffic Generator::
+
+           ifconfig ethX/enoX up
+
+      ii) Check the link::
+
+           ethtool ethX/enoX
+
+      Look at ``Link detected``: if ``yes``, the cable is good; if ``no``, you have an issue with your cable/port.
+
+2. If the existing baremetal test works then the issue is with your test. Check the traffic generator ``gen_<test>-<ports>.cfg`` to ensure
+   it is producing a valid packet.
+
+*How do I debug NSB Prox on Baremetal?*
+---------------------------------------
+
+*Solution*
+
+1. Execute the test as follows::
+
+ yardstick --debug task start ./tc_prox_baremetal_l2fwd-4.yaml
+
+2. Login to the Traffic Generator as ``root``::
+
+ cd
+ /opt/nsb_bin/prox -f /tmp/gen_<test>-<ports>.cfg
+
+3. Login to the SUT as ``root``::
+
+ cd
+ /opt/nsb_bin/prox -f /tmp/handle_<test>-<ports>.cfg
+
+4. Now let's examine the Generator Output. In this case the output of gen_l2fwd-4.cfg.
+
+ .. image:: images/PROX_Gen_GUI.png
+ :width: 1000px
+ :alt: NSB PROX Traffic Generator GUI
+
+ Now let's examine the output
+
+ 1. Indicates the amount of data successfully transmitted on Port 0
+ 2. Indicates the amount of data successfully received on port 1
+ 3. Indicates the amount of data successfully handled for port 1
+
+   It appears that what is transmitted is received.
+
+ .. Caution::
+ The number of packets MAY not exactly match because the ports are read in sequence.
+
+ .. Caution::
+ What is transmitted on PORT X may not always be received on same port. Please check the Test scenario.
+
+5. Now let's examine the SUT output.
+
+ .. image:: images/PROX_SUT_GUI.png
+ :width: 1400px
+ :alt: NSB PROX SUT GUI
+
+   Now let's examine the output
+
+ 1. What is received on 0 is transmitted on 1, received on 1 transmitted on 0,
+ received on 2 transmitted on 3 and received on 3 transmitted on 2.
+ 2. No packets are Failed.
+ 3. No Packets are discarded.
+
+   We can also dump the packets being received or transmitted via the following commands::
+
+ dump Arguments: <core id> <task id> <nb packets>
+ Create a hex dump of <nb_packets> from <task_id> on <core_id> showing how
+ packets have changed between RX and TX.
+ dump_rx Arguments: <core id> <task id> <nb packets>
+ Create a hex dump of <nb_packets> from <task_id> on <core_id> at RX
+ dump_tx Arguments: <core id> <task id> <nb packets>
+ Create a hex dump of <nb_packets> from <task_id> on <core_id> at TX
+
+ eg.::
+
+ dump_tx 1 0 1
+
+*NSB Prox works on Baremetal but not in Openstack. How do I resolve this?*
+--------------------------------------------------------------------------
+
+NSB Prox on Baremetal is a lot more forgiving than NSB Prox on Openstack. A badly
+formed packet may still work with PROX on Baremetal. However on
+Openstack the packet must be correct and all fields of the header must be correct.
+E.g. a packet with an invalid Protocol ID might still work on Baremetal
+but would be rejected by Openstack.
+
+*Solution*
+
+ 1. Check the validity of the packet.
+ 2. Use a known good packet in your test.
+ 3. If using ``Random`` fields in the traffic generator, disable them and retry.
+
+
+*How do I debug NSB Prox on Openstack?*
+---------------------------------------
+
+*Solution*
+
+1. Execute the test as follows::
+
+ yardstick --debug task start --keep-deploy ./tc_prox_heat_context_l2fwd-4.yaml
+
+2. Access docker image if required via::
+
+ docker exec -it yardstick /bin/bash
+
+3. Install openstack credentials.
+
+ Depending on your openstack deployment, the location of these credentials may vary.
+ On this platform I do this via::
+
+ scp root@10.237.222.55:/etc/kolla/admin-openrc.sh .
+ source ./admin-openrc.sh
+
+4. List Stack details
+
+ a. Get the name of the Stack.
+
+ .. image:: images/PROX_Openstack_stack_list.png
+ :width: 1000px
+ :alt: NSB PROX openstack stack list
+
+ b. Get the Floating IP of the Traffic Generator & SUT
+
+      This generates a lot of information. Please note the floating IP of the VNF and
+      the Traffic Generator.
+
+ .. image:: images/PROX_Openstack_stack_show_a.png
+ :width: 1000px
+ :alt: NSB PROX openstack stack show (Top)
+
+ From here you can see the floating IP Address of the SUT / VNF
+
+ .. image:: images/PROX_Openstack_stack_show_b.png
+ :width: 1000px
+         :alt: NSB PROX openstack stack show (Bottom)
+
+ From here you can see the floating IP Address of the Traffic Generator
+
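+   The information shown in the screenshots above can also be obtained with
+   the standard OpenStack CLI (the stack name below is a placeholder)::
+
+      openstack stack list
+      openstack stack show <stack_name>
+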
+ c. Get ssh identity file
+
+ In the docker container locate the identity file.::
+
+ cd /home/opnfv/repos/yardstick/yardstick/resources/files
+ ls -lt
+
+5. Login to the SUT as ``ubuntu``::
+
+ ssh -i ./yardstick_key-01029d1d ubuntu@172.16.2.158
+
+ Change to root::
+
+ sudo su
+
+ Now continue as baremetal.
+
+6. Login to the Traffic Generator as ``ubuntu``::
+
+ ssh -i ./yardstick_key-01029d1d ubuntu@172.16.2.156
+
+ Change to root::
+
+ sudo su
+
+ Now continue as baremetal.
+
+*How do I resolve "Quota exceeded for resources"*
+-------------------------------------------------
+
+*Solution*
+
+This usually occurs for one of two reasons when executing an openstack test.
+
+1. One or more stacks already exist and are consuming all resources. To resolve::
+
+ openstack stack list
+
+ Response::
+
+ +--------------------------------------+--------------------+-----------------+----------------------+--------------+
+ | ID | Stack Name | Stack Status | Creation Time | Updated Time |
+ +--------------------------------------+--------------------+-----------------+----------------------+--------------+
+ | acb559d7-f575-4266-a2d4-67290b556f15 | yardstick-e05ba5a4 | CREATE_COMPLETE | 2017-12-06T15:00:05Z | None |
+ | 7edf21ce-8824-4c86-8edb-f7e23801a01b | yardstick-08bda9e3 | CREATE_COMPLETE | 2017-12-06T14:56:43Z | None |
+ +--------------------------------------+--------------------+-----------------+----------------------+--------------+
+
+ In this case 2 stacks already exist.
+
+ To remove stack::
+
+ openstack stack delete yardstick-08bda9e3
+ Are you sure you want to delete this stack(s) [y/N]? y
+
+2. The openstack configuration quotas are too small.
+
+   The solution is to increase the quota. Use the command below to query existing quotas::
+
+      openstack quota show
+
+   And to set a quota::
+
+ openstack quota set <resource>
+
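+   For example, to raise the instance, vCPU and RAM quotas (the values and
+   the project name are illustrative only)::
+
+      openstack quota set --instances 20 --cores 64 --ram 204800 <project>
+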
+*Openstack Cli fails or hangs. How do I resolve this?*
+------------------------------------------------------
+
+*Solution*
+
+If it fails due to ::
+
+ Missing value auth-url required for auth plugin password
+
+Check your shell environment for Openstack variables. One of them should contain the authentication URL::
+
+   OS_AUTH_URL=https://192.168.72.41:5000/v3
+
+Or similar. Ensure that the openstack configuration is exported::
+
+ cat /etc/kolla/admin-openrc.sh
+
+Result ::
+
+ export OS_PROJECT_DOMAIN_NAME=default
+ export OS_USER_DOMAIN_NAME=default
+ export OS_PROJECT_NAME=admin
+ export OS_TENANT_NAME=admin
+ export OS_USERNAME=admin
+ export OS_PASSWORD=BwwSEZqmUJA676klr9wa052PFjNkz99tOccS9sTc
+ export OS_AUTH_URL=http://193.168.72.41:35357/v3
+ export OS_INTERFACE=internal
+ export OS_IDENTITY_API_VERSION=3
+ export EXTERNAL_NETWORK=yardstick-public
+
+and that the variables are visible in your shell environment.
+
+If the Openstack CLI appears to hang, then verify that the proxies and ``no_proxy`` are set correctly.
+They should be similar to::
+
+ FTP_PROXY="http://proxy.ir.intel.com:911/"
+ HTTPS_PROXY="http://proxy.ir.intel.com:911/"
+ HTTP_PROXY="http://proxy.ir.intel.com:911/"
+ NO_PROXY="localhost,127.0.0.1,10.237.222.55,10.237.223.80,10.237.222.134,.ir.intel.com"
+ ftp_proxy="http://proxy.ir.intel.com:911/"
+ http_proxy="http://proxy.ir.intel.com:911/"
+ https_proxy="http://proxy.ir.intel.com:911/"
+ no_proxy="localhost,127.0.0.1,10.237.222.55,10.237.223.80,10.237.222.134,.ir.intel.com"
+
+Where
+
+ 1) 10.237.222.55 = IP Address of deployment node
+ 2) 10.237.223.80 = IP Address of Controller node
+ 3) 10.237.222.134 = IP Address of Compute Node
+ 4) ir.intel.com = local no proxy
+
+
+
+
+
+
diff --git a/docs/testing/developer/devguide/images/PROX_BNG_QOS.png b/docs/testing/developer/devguide/images/PROX_BNG_QOS.png
new file mode 100644
index 000000000..3c720945c
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_BNG_QOS.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Baremetal_config.png b/docs/testing/developer/devguide/images/PROX_Baremetal_config.png
new file mode 100644
index 000000000..5cd914035
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Baremetal_config.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png b/docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png
new file mode 100644
index 000000000..07731cabc
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Gen_2port_cfg.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Gen_GUI.png b/docs/testing/developer/devguide/images/PROX_Gen_GUI.png
new file mode 100644
index 000000000..e96aea3de
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Gen_GUI.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png b/docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png
new file mode 100644
index 000000000..6505bedfd
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Handle_2port_cfg.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Hardware_Arch.png b/docs/testing/developer/devguide/images/PROX_Hardware_Arch.png
new file mode 100644
index 000000000..6e69dd6e3
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Hardware_Arch.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png b/docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png
new file mode 100644
index 000000000..f67d10e6d
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Openstack_stack_list.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png
new file mode 100644
index 000000000..00e7620e7
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_a.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png
new file mode 100644
index 000000000..bbe9b8631
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Openstack_stack_show_b.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_SUT_GUI.png b/docs/testing/developer/devguide/images/PROX_SUT_GUI.png
new file mode 100644
index 000000000..204083d1d
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_SUT_GUI.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Software_Arch.png b/docs/testing/developer/devguide/images/PROX_Software_Arch.png
new file mode 100644
index 000000000..c31f1e24a
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Software_Arch.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png
new file mode 100644
index 000000000..32530eb15
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png
new file mode 100644
index 000000000..754973b4e
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Traffic_profile.png b/docs/testing/developer/devguide/images/PROX_Traffic_profile.png
new file mode 100644
index 000000000..660bca342
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Traffic_profile.png
Binary files differ
diff --git a/docs/testing/developer/devguide/images/PROX_Yardstick_config.png b/docs/testing/developer/devguide/images/PROX_Yardstick_config.png
new file mode 100644
index 000000000..8d346b03a
--- /dev/null
+++ b/docs/testing/developer/devguide/images/PROX_Yardstick_config.png
Binary files differ
diff --git a/docs/testing/user/userguide/04-installation.rst b/docs/testing/user/userguide/04-installation.rst
index caebecc09..5bb64e3bb 100644
--- a/docs/testing/user/userguide/04-installation.rst
+++ b/docs/testing/user/userguide/04-installation.rst
@@ -464,7 +464,7 @@ Thirdly, create and configure Grafana container::
yardstick env grafana
-Then you can run a test case and visit http://host_ip:3000
+Then you can run a test case and visit http://host_ip:1948
(``admin``/``admin``) to see the results.
.. note:: Executing ``yardstick env`` command to deploy InfluxDB and Grafana
@@ -502,9 +502,9 @@ Configure influxDB::
Run Grafana::
- sudo -EH docker run -d --name grafana -p 3000:3000 grafana/grafana
+ sudo -EH docker run -d --name grafana -p 1948:3000 grafana/grafana
-Log on http://{YOUR_IP_HERE}:3000 using ``admin``/``admin`` and configure
+Log on http://{YOUR_IP_HERE}:1948 using ``admin``/``admin`` and configure
database resource to be ``{YOUR_IP_HERE}:8086``.
.. image:: images/Grafana_config.png
diff --git a/docs/testing/user/userguide/15-list-of-tcs.rst b/docs/testing/user/userguide/15-list-of-tcs.rst
index b62bf6390..47526cdda 100644
--- a/docs/testing/user/userguide/15-list-of-tcs.rst
+++ b/docs/testing/user/userguide/15-list-of-tcs.rst
@@ -80,6 +80,9 @@ H A
opnfv_yardstick_tc052.rst
opnfv_yardstick_tc053.rst
opnfv_yardstick_tc054.rst
+ opnfv_yardstick_tc056.rst
+ opnfv_yardstick_tc057.rst
+ opnfv_yardstick_tc058.rst
IPv6
----
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc056.rst b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst
index 01aa99ac2..e6e06df57 100644
--- a/docs/testing/user/userguide/opnfv_yardstick_tc056.rst
+++ b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst
@@ -98,7 +98,7 @@ Yardstick Test Case Description TC056
| | |
+--------------+--------------------------------------------------------------+
|configuration | This test case needs two configuration files: |
-| | 1) test case file:opnfv_yardstick_tc056.yaml |
+| | 1) test case file: opnfv_yardstick_tc056.yaml |
| | -Attackers: see above "attackers" description |
| | -waiting_time: which is the time (seconds) from the process |
| | being killed to stoping monitors the monitors |
diff --git a/etc/infra/infra_deploy.yaml.sample b/etc/infra/infra_deploy.yaml.sample
index fb162d35b..bf07a01bc 100644
--- a/etc/infra/infra_deploy.yaml.sample
+++ b/etc/infra/infra_deploy.yaml.sample
@@ -6,13 +6,13 @@ nodes:
ip: 192.168.1.10
netmask: 255.255.255.0
user: ubuntu
- pasword: password
+ password: password
image: /tmp/image1.qcow
disk: 50000
ram: 8192
vcpus: 4
- - name Controller_Compute VM
+ - name: Controller_Compute VM
openstack_node: controller_compute
hostname: controller_compute
interfaces:
@@ -23,7 +23,7 @@ nodes:
ip: 192.20.1.20
netmask: 255.255.255.0
user: ubuntu
- pasword: password
+ password: password
image: /tmp/image_2.qcow
disk: 40000
ram: 32768
@@ -31,4 +31,5 @@ nodes:
networks:
- name: management
- host_ip: 192.168.1.1 # not mandatory
+ host_ip: 192.168.1.1
+ netmask: 255.255.255.0
diff --git a/nsb_setup.sh b/nsb_setup.sh
index 4a8e4db93..50fc017d1 100755
--- a/nsb_setup.sh
+++ b/nsb_setup.sh
@@ -63,7 +63,7 @@ for i in "${pkg[@]}"; do
fi
done
-pip install ansible==2.3.2 shade==1.17.0 docker-py==1.10.6
+pip install ansible==2.4.2 shade==1.22.2 docker-py==1.10.6
ANSIBLE_SCRIPTS="ansible"
diff --git a/requirements.txt b/requirements.txt
index 88c0e659a..aacafdf93 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -49,7 +49,6 @@ pyroute2==0.4.21 # dual license GPLv2+ and Apache v2; OSI Approved GNU G
pyrsistent==0.14.1 # LICENSE.mit; OSI Approved MIT License
python-cinderclient==3.1.0 # OSI Approved Apache Software License
python-glanceclient==2.8.0 # OSI Approved Apache Software License
-python-heatclient==1.11.1 # OSI Approved Apache Software License
python-keystoneclient==3.13.0 # OSI Approved Apache Software License
python-neutronclient==6.5.0 # OSI Approved Apache Software License
python-novaclient==9.1.1 # OSI Approved Apache Software License
diff --git a/samples/storage_bottlenecks.yaml b/samples/storage_bottlenecks.yaml
new file mode 100644
index 000000000..1aa0d7e35
--- /dev/null
+++ b/samples/storage_bottlenecks.yaml
@@ -0,0 +1,77 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# Sample benchmark task config file
+# measure storage performance using fio
+#
+# For this sample just like running the command below on the test vm and
+# getting benchmark info back to the yardstick.
+#
+# sudo fio -filename=/home/ubuntu/data.raw -bs=4k -ipdepth=1 -rw=rw \
+# -ramp_time=10 -runtime=60 -name=yardstick-fio -ioengine=libaio \
+# -direct=1 -group_reporting -numjobs=1 -time_based \
+# --output-format=json
+
+schema: "yardstick:task:0.1"
+run_in_parallel: true
+
+{% set directory = directory or '/FIO_Test' %}
+{% set stack_num = stack_num or 1 %}
+{% set volume_num = volume_num or "1" %}
+{% set rw = rw or "randrw" %}
+{% set bs = bs or "4k" %}
+{% set size = size or "30g" %}
+{% set rwmixwrite = rwmixwrite or "50" %}
+{% set numjobs = numjobs or "1" %}
+{% set direct = direct or "1" %}
+{% set volume_size = volume_size or 50 %}
+
+scenarios:
+{% for num in range(stack_num) %}
+-
+ type: Fio
+ options:
+ filename: {{ directory }}/test
+ directory: {{ directory }}
+ bs: {{bs}}
+ rw: {{rw}}
+ size: {{size}}
+ rwmixwrite: {{rwmixwrite}}
+ numjobs: {{numjobs}}
+ direct: {{direct}}
+ ramp_time: 10
+
+ host: demo.storage_bottlenecks-{{num}}-{{volume_num}}
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+{% endfor %}
+
+contexts:
+{% for context_num in range(stack_num) %}
+-
+ name: storage_bottlenecks-{{context_num}}-{{volume_num}}
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ servers:
+ demo:
+ volume:
+ size: {{volume_size}}
+ volume_mountpoint: "/dev/vdb"
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: "10.0.1.0/24"
+ port_security_enabled: true
+{% endfor %} \ No newline at end of file
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
index e7fad98bf..192f2f89a 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
@@ -49,7 +49,8 @@ mode=gen
tx port=p0
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac0} 70 00 00 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d 00 00 00 01 00 00 00 02 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+lat pos=38
[core 2]
name=gen 1
@@ -58,19 +59,20 @@ mode=gen
tx port=p1
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac1} 70 00 00 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d 00 00 00 01 00 00 00 03 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+lat pos=38
[core 3]
name=rec 0
task=0
mode=lat
rx port=p0
-lat pos=42
+lat pos=38
[core 4]
name=rec 0
task=0
mode=lat
rx port=p1
-lat pos=42
+lat pos=38
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
index 5b79185a7..0db21b681 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
@@ -61,7 +61,8 @@ mode=gen
tx port=p0
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac0} 3c fd fe 9f a3 a0 08 a0 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+lat pos=38
[core 2]
name=gen 1
@@ -70,7 +71,8 @@ mode=gen
tx port=p1
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac1} 3c fd fe 9f a5 50 08 a0 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+lat pos=38
[core 3]
name=gen 2
@@ -79,7 +81,8 @@ mode=gen
tx port=p2
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac2} 3c fd fe 9f a5 50 08 a0 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac2} 3c fd fe 9f a5 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+lat pos=38
[core 4]
name=gen 3
@@ -88,28 +91,33 @@ mode=gen
tx port=p3
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac3} 3c fd fe 9f a5 50 08 a0 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac3} 3c fd fe 9f a5 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+lat pos=38
[core 5]
name=rec 0
task=0
mode=lat
rx port=p0
+lat pos=38
[core 6]
name=rec 1
task=0
mode=lat
rx port=p1
+lat pos=38
[core 7]
name=rec 2
task=0
mode=lat
rx port=p2
+lat pos=38
[core 8]
name=rec 3
task=0
mode=lat
rx port=p3
+lat pos=38 \ No newline at end of file
diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml
index 10902a7b8..f59146c0b 100644
--- a/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml
+++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml
@@ -21,10 +21,10 @@ nsd:nsd-catalog:
constituent-vnfd:
- member-vnf-index: '1'
vnfd-id-ref: tg__0
- VNF model: ../../vnf_descriptors/tg_prox_tpl-1.yaml
+ VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
- member-vnf-index: '2'
vnfd-id-ref: vnf__0
- VNF model: ../../vnf_descriptors/prox_vnf-1.yaml
+ VNF model: ../../vnf_descriptors/prox_vnf.yaml
vld:
- id: uplink_0
name: tg__0 to vnf__0 link 1
diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml
index 11eed52fc..63d0acc91 100644
--- a/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml
+++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml
@@ -21,10 +21,10 @@ nsd:nsd-catalog:
constituent-vnfd:
- member-vnf-index: '1'
vnfd-id-ref: tg__0
- VNF model: ../../vnf_descriptors/tg_prox_tpl-2.yaml
+ VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
- member-vnf-index: '2'
vnfd-id-ref: vnf__0
- VNF model: ../../vnf_descriptors/prox_vnf-2.yaml
+ VNF model: ../../vnf_descriptors/prox_vnf.yaml
vld:
- id: uplink_0
name: tg__0 to vnf__0 link 1
diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
index eda239e3b..b4b003680 100644
--- a/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
+++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
@@ -21,10 +21,10 @@ nsd:nsd-catalog:
constituent-vnfd:
- member-vnf-index: '1'
vnfd-id-ref: tg__0
- VNF model: ../../vnf_descriptors/tg_prox_tpl-4.yaml
+ VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
- member-vnf-index: '2'
vnfd-id-ref: vnf__0
- VNF model: ../../vnf_descriptors/prox_vnf-4.yaml
+ VNF model: ../../vnf_descriptors/prox_vnf.yaml
vld:
- id: uplink_0
name: tg__0 to vnf__0 link 1
diff --git a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_lw_aftr-4.yaml b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_lw_aftr-4.yaml
index 37af37dcc..c190910c9 100644
--- a/samples/vnf_samples/nsut/prox/tc_prox_baremetal_lw_aftr-4.yaml
+++ b/samples/vnf_samples/nsut/prox/tc_prox_baremetal_lw_aftr-4.yaml
@@ -50,5 +50,5 @@ context:
type: Node
name: yardstick
nfvi_type: baremetal
- file: /etc/yardstick/nodes/prox-baremetal-4.yml
+ file: prox-baremetal-4.yaml
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
index 2799a7ee9..2e096a126 100644
--- a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
+++ b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
@@ -13,6 +13,10 @@
# limitations under the License.
---
+{% set provider = provider or none %}
+{% set physical_networks = physical_networks or ['physnet1', 'physnet2'] %}
+{% set segmentation_id = segmentation_id or none %}
+
schema: yardstick:task:0.1
scenarios:
- type: NSPerf
@@ -68,10 +72,24 @@ context:
xe0:
cidr: '10.0.2.0/24'
gateway_ip: 'null'
+ {% if provider %}
+ provider: {{ provider }}
+ physical_network: {{ physical_networks[0] }}
+ {% if segmentation_id %}
+ segmentation_id: {{ segmentation_id }}
+ {% endif %}
+ {% endif %}
port_security_enabled: False
enable_dhcp: 'false'
xe1:
cidr: '10.0.3.0/24'
gateway_ip: 'null'
+ {% if provider %}
+ provider: {{ provider }}
+ physical_network: {{ physical_networks[1] }}
+ {% if segmentation_id %}
+ segmentation_id: {{ segmentation_id }}
+ {% endif %}
+ {% endif %}
port_security_enabled: False
enable_dhcp: 'false'
diff --git a/samples/vnf_samples/traffic_profiles/prox_binsearch.yaml b/samples/vnf_samples/traffic_profiles/prox_binsearch.yaml
index 805250ee3..e1a4f59de 100644
--- a/samples/vnf_samples/traffic_profiles/prox_binsearch.yaml
+++ b/samples/vnf_samples/traffic_profiles/prox_binsearch.yaml
@@ -21,9 +21,9 @@ traffic_profile:
traffic_type: ProxBinSearchProfile
tolerated_loss: 0.001
test_precision: 0.1
-# packet_sizes: [64, 128, 256, 512, 1024, 1280, 1518]
- packet_sizes: [64]
- duration: 10
+ packet_sizes: [64, 128, 256, 512, 1024, 1280, 1518]
+ # packet_sizes: [64]
+ duration: 30
lower_bound: 0.0
upper_bound: 100.0
diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml
deleted file mode 100644
index 13c4e9db7..000000000
--- a/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
- vnfd:
- - id: ProxApproxVnf
- name: ProxVnf
- short-name: ProxVnf
- description: PROX approximation using DPDK
- mgmt-interface:
- vdu-id: prox-baremetal
- {% if user is defined %}
- user: '{{user}}' # Value filled by vnfdgen
- {% endif %}
- {% if password is defined %}
- password: '{{password}}' # Value filled by vnfdgen
- {% endif %}
- {% if ip is defined %}
- ip: '{{ip}}' # Value filled by vnfdgen
- {% endif %}
- {% if key_filename is defined %}
- key_filename: '{{key_filename}}' # Value filled by vnfdgen
- {% endif %}
- vdu:
- - id: proxvnf-baremetal
- name: proxvnf-baremetal
- description: PROX approximation using DPDK
- vm-flavor:
- vcpu-count: '4'
- memory-mb: '4096'
- routing_table: {{ routing_table }}
- nd_route_tbl: {{ nd_route_tbl }}
- benchmark:
- kpi:
- - packets_in
- - packets_fwd
- - packets_dropped
diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml
deleted file mode 100644
index 13c4e9db7..000000000
--- a/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
- vnfd:
- - id: ProxApproxVnf
- name: ProxVnf
- short-name: ProxVnf
- description: PROX approximation using DPDK
- mgmt-interface:
- vdu-id: prox-baremetal
- {% if user is defined %}
- user: '{{user}}' # Value filled by vnfdgen
- {% endif %}
- {% if password is defined %}
- password: '{{password}}' # Value filled by vnfdgen
- {% endif %}
- {% if ip is defined %}
- ip: '{{ip}}' # Value filled by vnfdgen
- {% endif %}
- {% if key_filename is defined %}
- key_filename: '{{key_filename}}' # Value filled by vnfdgen
- {% endif %}
- vdu:
- - id: proxvnf-baremetal
- name: proxvnf-baremetal
- description: PROX approximation using DPDK
- vm-flavor:
- vcpu-count: '4'
- memory-mb: '4096'
- routing_table: {{ routing_table }}
- nd_route_tbl: {{ nd_route_tbl }}
- benchmark:
- kpi:
- - packets_in
- - packets_fwd
- - packets_dropped
diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-1.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf.yaml
index 13c4e9db7..13c4e9db7 100644
--- a/samples/vnf_samples/vnf_descriptors/prox_vnf-1.yaml
+++ b/samples/vnf_samples/vnf_descriptors/prox_vnf.yaml
diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml
deleted file mode 100644
index 730143972..000000000
--- a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
- vnfd:
- - id: ProxTrafficGen # nsb class mapping
- name: proxverifier
- short-name: proxverifier
- description: prox stateless traffic verifier
- mgmt-interface:
- vdu-id: proxgen-baremetal
- {% if user is defined %}
- user: '{{user}}' # Value filled by vnfdgen
- {% endif %}
- {% if password is defined %}
- password: '{{password}}' # Value filled by vnfdgen
- {% endif %}
- {% if ip is defined %}
- ip: '{{ip}}' # Value filled by vnfdgen
- {% endif %}
- {% if key_filename is defined %}
- key_filename: '{{key_filename}}' # Value filled by vnfdgen
- {% endif %}
- vdu:
- - id: proxgen-baremetal
- name: proxgen-baremetal
- description: prox stateless traffic verifier
- benchmark:
- kpi:
- - rx_throughput_fps
- - tx_throughput_fps
- - tx_throughput_mbps
- - rx_throughput_mbps
- - in_packets
- - out_packets
diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml
deleted file mode 100644
index 20bd12ca2..000000000
--- a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
- vnfd:
- - id: ProxTrafficGen # nsb class mapping
- name: proxverifier
- short-name: proxverifier
- description: prox stateless traffic verifier
- mgmt-interface:
- vdu-id: proxgen-baremetal
- {% if user is defined %}
- user: '{{user}}' # Value filled by vnfdgen
- {% endif %}
- {% if password is defined %}
- password: '{{password}}' # Value filled by vnfdgen
- {% endif %}
- {% if ip is defined %}
- ip: '{{ip}}' # Value filled by vnfdgen
- {% endif %}
- {% if key_filename is defined %}
- key_filename: '{{key_filename}}' # Value filled by vnfdgen
- {% endif %}
- vdu:
- - id: proxgen-baremetal
- name: proxgen-baremetal
- description: prox stateless traffic verifier
-
- benchmark:
- kpi:
- - rx_throughput_fps
- - tx_throughput_fps
- - tx_throughput_mbps
- - rx_throughput_mbps
- - in_packets
- - out_packets
diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-2.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl.yaml
index 20bd12ca2..20bd12ca2 100644
--- a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-2.yaml
+++ b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl.yaml
diff --git a/setup.py b/setup.py
index 7f6571d61..881ef9272 100755
--- a/setup.py
+++ b/setup.py
@@ -53,6 +53,7 @@ setup(
'yardstick=yardstick.main:main',
'yardstick-plot=yardstick.plot.plotter:main [plot]'
],
+ 'yardstick.scenario': []
},
scripts=[
'tools/yardstick-img-modify',
diff --git a/test-requirements.txt b/test-requirements.txt
index f933df29a..ee9815c45 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -12,6 +12,9 @@ testrepository==0.0.20 # OSI Approved BSD License; OSI Approved Apache So
testtools==2.3.0 # OSI Approved MIT License
unittest2==1.1.0 # OSI Approved BSD License
+# NOTE(ralonsoh): to be removed, only for coverage support
+python-heatclient==1.8.1 # OSI Approved Apache Software License
+
# Yardstick F release <-> OpenStack Pike release
openstack_requirements==1.1.0 # OSI Approved Apache Software License
-e git+https://github.com/openstack/requirements.git@stable/pike#egg=os_requirements
diff --git a/tests/ci/prepare_env.sh b/tests/ci/prepare_env.sh
index 262b74c09..caef8acce 100755
--- a/tests/ci/prepare_env.sh
+++ b/tests/ci/prepare_env.sh
@@ -85,37 +85,36 @@ verify_connectivity() {
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
if [ "$INSTALLER_TYPE" == "fuel" ]; then
- #ip_fuel="10.20.0.2"
+
+ # check the connection
verify_connectivity $INSTALLER_IP
+ pod_yaml="$YARDSTICK_REPO_DIR/etc/yardstick/nodes/fuel_baremetal/pod.yaml"
+
+ # update "ip" according to the CI env
ssh -l ubuntu ${INSTALLER_IP} -i ${SSH_KEY} ${ssh_options} \
"sudo salt -C 'ctl* or cmp*' grains.get fqdn_ip4 --out yaml">node_info
- # update node ip info according to the CI env
controller_ips=($(cat node_info|awk '/ctl/{getline; print $2}'))
compute_ips=($(cat node_info|awk '/cmp/{getline; print $2}'))
- pod_yaml="./etc/yardstick/nodes/fuel_baremetal/pod.yaml"
- node_line_num=($(grep -n node[1-5] $pod_yaml | awk -F: '{print $1}'))
- node_ID=0;
-
- # update 'user' and 'key_filename' according to CI env
- sed -i "s|node_username|${USER_NAME}|;s|node_keyfile|${SSH_KEY}|" $pod_yaml;
-
if [[ ${controller_ips[0]} ]]; then
- sed -i "${node_line_num[0]}s/node1/node$((++node_ID))/;s/ip1/${controller_ips[0]}/" $pod_yaml;
+ sed -i "s|ip1|${controller_ips[0]}|" $pod_yaml;
fi
if [[ ${controller_ips[1]} ]]; then
- sed -i "${node_line_num[1]}s/node2/node$((++node_ID))/;s/ip2/${controller_ips[1]}/" $pod_yaml;
+ sed -i "s|ip2|${controller_ips[1]}|" $pod_yaml;
fi
if [[ ${controller_ips[2]} ]]; then
- sed -i "${node_line_num[2]}s/node3/node$((++node_ID))/;s/ip3/${controller_ips[2]}/" $pod_yaml;
+ sed -i "s|ip3|${controller_ips[2]}|" $pod_yaml;
fi
if [[ ${compute_ips[0]} ]]; then
- sed -i "${node_line_num[3]}s/node4/node$((++node_ID))/;s/ip4/${compute_ips[0]}/" $pod_yaml;
+ sed -i "s|ip4|${compute_ips[0]}|" $pod_yaml;
fi
if [[ ${compute_ips[1]} ]]; then
- sed -i "${node_line_num[4]}s/node5/node$((++node_ID))/;s/ip5/${compute_ips[1]}/" $pod_yaml;
+ sed -i "s|ip5|${compute_ips[1]}|" $pod_yaml;
fi
+ # update 'user' and 'key_filename' according to the CI env
+ sed -i "s|node_username|${USER_NAME}|;s|node_keyfile|${SSH_KEY}|" $pod_yaml;
+
fi
diff --git a/tests/unit/benchmark/core/test_plugin.py b/tests/unit/benchmark/core/test_plugin.py
deleted file mode 100644
index f9c076159..000000000
--- a/tests/unit/benchmark/core/test_plugin.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python
-
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Unittest for yardstick.benchmark.core.plugin
-from __future__ import absolute_import
-import os
-from os.path import dirname as dirname
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-import unittest
-
-from yardstick.benchmark.core import plugin
-
-
-class Arg(object):
-
- def __init__(self):
- # self.input_file = ('plugin/sample_config.yaml',)
- self.input_file = [
- os.path.join(os.path.abspath(
- dirname(dirname(dirname(dirname(dirname(__file__)))))),
- 'plugin/sample_config.yaml')]
-
-
-@mock.patch('yardstick.benchmark.core.plugin.ssh')
-class pluginTestCase(unittest.TestCase):
-
- def setUp(self):
- self.result = {}
-
- def test_install(self, mock_ssh):
- p = plugin.Plugin()
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- input_file = Arg()
- p.install(input_file)
- expected_result = {}
- self.assertEqual(self.result, expected_result)
-
- def test_remove(self, mock_ssh):
- p = plugin.Plugin()
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- input_file = Arg()
- p.remove(input_file)
- expected_result = {}
- self.assertEqual(self.result, expected_result)
-
- def test_install_setup_run(self, mock_ssh):
- p = plugin.Plugin()
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- plugins = {
- "name": "sample"
- }
- deployment = {
- "ip": "10.1.0.50",
- "user": "root",
- "password": "root"
- }
- plugin_name = plugins.get("name")
- p._install_setup(plugin_name, deployment)
- self.assertIsNotNone(p.client)
-
- p._run(plugin_name)
- expected_result = {}
- self.assertEqual(self.result, expected_result)
-
- def test_remove_setup_run(self, mock_ssh):
- p = plugin.Plugin()
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- plugins = {
- "name": "sample"
- }
- deployment = {
- "ip": "10.1.0.50",
- "user": "root",
- "password": "root"
- }
- plugin_name = plugins.get("name")
- p._remove_setup(plugin_name, deployment)
- self.assertIsNotNone(p.client)
-
- p._run(plugin_name)
- expected_result = {}
- self.assertEqual(self.result, expected_result)
-
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py b/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py
deleted file mode 100644
index 72dbcd7cd..000000000
--- a/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-
-from yardstick.benchmark.scenarios.lib.create_floating_ip import CreateFloatingIp
-
-
-class CreateFloatingIpTestCase(unittest.TestCase):
-
- @mock.patch('yardstick.common.openstack_utils.create_floating_ip')
- @mock.patch('yardstick.common.openstack_utils.get_network_id')
- @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
- def test_create_floating_ip(self, mock_create_floating_ip, mock_get_network_id, mock_get_neutron_client):
- options = {}
- args = {"options": options}
- obj = CreateFloatingIp(args, {})
- obj.run({})
- self.assertTrue(mock_create_floating_ip.called)
- self.assertTrue(mock_get_network_id.called)
- self.assertTrue(mock_get_neutron_client.called)
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_network.py b/tests/unit/benchmark/scenarios/lib/test_delete_network.py
deleted file mode 100644
index 9ccaa8232..000000000
--- a/tests/unit/benchmark/scenarios/lib/test_delete_network.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-import paramiko
-
-from yardstick.benchmark.scenarios.lib.delete_network import DeleteNetwork
-
-
-class DeleteNetworkTestCase(unittest.TestCase):
-
- @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
- @mock.patch('yardstick.common.openstack_utils.delete_neutron_net')
- def test_delete_network(self, mock_get_neutron_client, mock_delete_neutron_net):
- options = {
- 'network_id': '123-123-123'
- }
- args = {"options": options}
- obj = DeleteNetwork(args, {})
- obj.run({})
- self.assertTrue(mock_get_neutron_client.called)
- self.assertTrue(mock_delete_neutron_net.called)
-
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
deleted file mode 100644
index fbe3ed804..000000000
--- a/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2017 Nokia
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Unittest for yardstick.benchmark.scenarios.networking.vsperf.VsperfDPDK
-
-from __future__ import absolute_import
-try:
- from unittest import mock
-except ImportError:
- import mock
-import unittest
-
-from yardstick.benchmark.scenarios.networking import vsperf_dpdk
-
-
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf_dpdk.subprocess')
-class VsperfDPDKTestCase(unittest.TestCase):
-
- def setUp(self):
- self.ctx = {
- "host": {
- "ip": "10.229.47.137",
- "user": "ubuntu",
- "password": "ubuntu",
- },
- }
- self.args = {
- 'task_id': "1234-5678",
- 'options': {
- 'testname': 'pvp_tput',
- 'traffic_type': 'rfc2544_throughput',
- 'frame_size': '64',
- 'test_params': 'TRAFFICGEN_DURATION=30;',
- 'trafficgen_port1': 'ens4',
- 'trafficgen_port2': 'ens5',
- 'conf_file': 'vsperf-yardstick.conf',
- 'setup_script': 'setup_yardstick.sh',
- 'moongen_helper_file': '~/moongen.py',
- 'moongen_host_ip': '10.5.201.151',
- 'moongen_port1_mac': '8c:dc:d4:ae:7c:5c',
- 'moongen_port2_mac': '8c:dc:d4:ae:7c:5d',
- 'trafficgen_port1_nw': 'test2',
- 'trafficgen_port2_nw': 'test3',
- },
- 'sla': {
- 'metrics': 'throughput_rx_fps',
- 'throughput_rx_fps': 500000,
- 'action': 'monitor',
- }
- }
-
- self._mock_ssh = mock.patch(
- 'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
- self.mock_ssh = self._mock_ssh.start()
-
- self.addCleanup(self._cleanup)
-
- def _cleanup(self):
- self._mock_ssh.stop()
-
- def test_vsperf_dpdk_setup(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- def test_vsperf_dpdk_teardown(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- p.teardown()
- self.assertFalse(p.setup_done)
-
- def test_vsperf_dpdk_is_dpdk_setup_no(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
-
- result = p._is_dpdk_setup()
- self.assertFalse(result)
-
- def test_vsperf_dpdk_is_dpdk_setup_yes(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-
- result = p._is_dpdk_setup()
- self.assertTrue(result)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_dpdk_setup_first(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
-
- p.dpdk_setup()
- self.assertFalse(p._is_dpdk_setup())
- self.assertTrue(p.dpdk_setup_done)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_dpdk_setup_next(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- p.dpdk_setup()
- self.assertTrue(p._is_dpdk_setup())
- self.assertTrue(p.dpdk_setup_done)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_dpdk_setup_fail(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
- self.assertTrue(p.setup_done)
-
- self.assertRaises(RuntimeError, p.dpdk_setup)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_run_ok(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # run() specific mocks
- mock_subprocess.call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
- result = {}
- p.run(result)
-
- self.assertEqual(result['throughput_rx_fps'], '14797660.000')
-
- def test_vsperf_dpdk_run_falied_vsperf_execution(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # run() specific mocks
- mock_subprocess.call().execute.return_value = None
- mock_subprocess.call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
-
- def test_vsperf_dpdk_run_falied_csv_report(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # run() specific mocks
- mock_subprocess.call().execute.return_value = None
- mock_subprocess.call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
diff --git a/tests/unit/benchmark/scenarios/test_base.py b/tests/unit/benchmark/scenarios/test_base.py
deleted file mode 100644
index 78e342978..000000000
--- a/tests/unit/benchmark/scenarios/test_base.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2017: Intel Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-
-from yardstick.benchmark.scenarios import base
-
-
-class ScenarioTestCase(unittest.TestCase):
-
- def test_get_scenario_type(self):
- scenario_type = 'dummy scenario'
-
- class DummyScenario(base.Scenario):
- __scenario_type__ = scenario_type
-
- self.assertEqual(scenario_type, DummyScenario.get_scenario_type())
-
- def test_get_scenario_type_not_defined(self):
- class DummyScenario(base.Scenario):
- pass
-
- self.assertEqual(str(None), DummyScenario.get_scenario_type())
-
- def test_get_description(self):
- docstring = """First line
- Second line
- Third line
- """
-
- class DummyScenario(base.Scenario):
- __doc__ = docstring
-
- self.assertEqual(docstring.splitlines()[0],
- DummyScenario.get_description())
-
- def test_get_description_empty(self):
- class DummyScenario(base.Scenario):
- pass
-
- self.assertEqual(str(None), DummyScenario.get_description())
diff --git a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py b/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
index 9bb5ed3a7..e30aee854 100644
--- a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
+++ b/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
@@ -117,7 +117,7 @@ Other crypto devices
self.assertEqual(conn, dpdk_bind_helper.ssh_helper)
self.assertEqual(self.CLEAN_STATUS, dpdk_bind_helper.dpdk_status)
self.assertIsNone(dpdk_bind_helper.status_nic_row_re)
- self.assertIsNone(dpdk_bind_helper._dpdk_nic_bind_attr)
+ self.assertIsNone(dpdk_bind_helper._dpdk_devbind)
self.assertIsNone(dpdk_bind_helper._status_cmd_attr)
def test__dpdk_execute(self):
diff --git a/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 616921e33..f13945abf 100644
--- a/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -179,9 +179,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"dst_mac_0": "00:00:00:00:00:03",
"dst_mac_1": "00:00:00:00:00:04",
"dst_mac_2": "00:00:00:00:00:04"}
- result = r_f_c2544_profile._get_ixia_traffic_profile(
- self.PROFILE, mac, xfile="tmp",
- static_traffic=STATIC_TRAFFIC)
+ result = r_f_c2544_profile._get_ixia_traffic_profile(self.PROFILE, mac)
self.assertIsNotNone(result)
def test_get_ixia_traffic_profile(self):
@@ -225,7 +223,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"proto": "udp",
"srcip4": "152.16.40.20",
"ttl": 32,
- "count": "1"
},
"outer_l4": {
"dstport": "2001",
@@ -260,7 +257,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"proto": "udp",
"srcip4": "152.16.40.20",
"ttl": 32,
- "count": "1"
},
"outer_l3v6": {
"count": 1024,
@@ -269,7 +265,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"proto": "udp",
"srcip4": "152.16.40.20",
"ttl": 32,
- "count": "1"
},
"outer_l4": {
"dstport": "1234",
@@ -289,12 +284,11 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"dst_mac_0": "00:00:00:00:00:03",
"dst_mac_1": "00:00:00:00:00:04",
"dst_mac_2": "00:00:00:00:00:04"}
- result = r_f_c2544_profile._get_ixia_traffic_profile(
- self.PROFILE, mac, xfile="tmp", static_traffic=STATIC_TRAFFIC)
+ result = r_f_c2544_profile._get_ixia_traffic_profile(self.PROFILE, mac)
self.assertIsNotNone(result)
@mock.patch("yardstick.network_services.traffic_profile.ixia_rfc2544.open")
- def test_get_ixia_traffic_profile_v6(self, mock_open):
+ def test_get_ixia_traffic_profile_v6(self, *args):
traffic_generator = mock.Mock(autospec=TrexProfile)
traffic_generator.my_ports = [0, 1]
traffic_generator.uplink_ports = [-1]
@@ -435,8 +429,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
'outer_l4': {'dstport': '2001',
'srcport': '1234'}}},
'schema': 'isb:traffic_profile:0.1'}
- result = r_f_c2544_profile._get_ixia_traffic_profile(
- profile_data, mac, static_traffic=STATIC_TRAFFIC)
+ result = r_f_c2544_profile._get_ixia_traffic_profile(profile_data, mac)
self.assertIsNotNone(result)
def test__get_ixia_traffic_profile_default_args(self):
@@ -459,8 +452,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
ixia_obj = mock.MagicMock()
r_f_c2544_profile = IXIARFC2544Profile(self.TRAFFIC_PROFILE)
r_f_c2544_profile.rate = 100
- result = r_f_c2544_profile._ixia_traffic_generate(traffic_generator,
- traffic, ixia_obj)
+ result = r_f_c2544_profile._ixia_traffic_generate(traffic, ixia_obj)
self.assertIsNone(result)
def test_execute(self):
@@ -511,13 +503,6 @@ class TestIXIARFC2544Profile(unittest.TestCase):
self.assertEqual(r_f_c2544_profile.ports, ports_expected)
def test_get_drop_percentage(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.networks = {
- "uplink_0": ["xe0"],
- "downlink_0": ["xe1"],
- }
- traffic_generator.client = \
- mock.Mock(return_value=True)
r_f_c2544_profile = IXIARFC2544Profile(self.TRAFFIC_PROFILE)
r_f_c2544_profile.params = self.PROFILE
ixia_obj = mock.MagicMock()
@@ -541,17 +526,11 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"out_packets": 1000}
tol_min = 100.0
tolerance = 0.0
- self.assertIsNotNone(r_f_c2544_profile.get_drop_percentage(
- traffic_generator, samples,
- tol_min, tolerance, ixia_obj))
+ self.assertIsNotNone(
+ r_f_c2544_profile.get_drop_percentage(samples, tol_min, tolerance,
+ ixia_obj))
def test_get_drop_percentage_update(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.my_ports = [0, 1]
- traffic_generator.uplink_ports = [0]
- traffic_generator.downlink_ports = [1]
- traffic_generator.client = \
- mock.Mock(return_value=True)
r_f_c2544_profile = IXIARFC2544Profile(self.TRAFFIC_PROFILE)
r_f_c2544_profile.params = self.PROFILE
ixia_obj = mock.MagicMock()
@@ -575,17 +554,11 @@ class TestIXIARFC2544Profile(unittest.TestCase):
"out_packets": 1002}
tol_min = 0.0
tolerance = 1.0
- self.assertIsNotNone(r_f_c2544_profile.get_drop_percentage(
- traffic_generator, samples,
- tol_min, tolerance, ixia_obj))
+ self.assertIsNotNone(
+ r_f_c2544_profile.get_drop_percentage(samples, tol_min, tolerance,
+ ixia_obj))
def test_get_drop_percentage_div_zero(self):
- traffic_generator = mock.Mock(autospec=TrexProfile)
- traffic_generator.my_ports = [0, 1]
- traffic_generator.uplink_ports = [0]
- traffic_generator.downlink_ports = [1]
- traffic_generator.client = \
- mock.Mock(return_value=True)
r_f_c2544_profile = IXIARFC2544Profile(self.TRAFFIC_PROFILE)
r_f_c2544_profile.params = self.PROFILE
ixia_obj = mock.MagicMock()
@@ -610,9 +583,9 @@ class TestIXIARFC2544Profile(unittest.TestCase):
tol_min = 0.0
tolerance = 0.0
r_f_c2544_profile.tmp_throughput = 0
- self.assertIsNotNone(r_f_c2544_profile.get_drop_percentage(
- traffic_generator, samples,
- tol_min, tolerance, ixia_obj))
+ self.assertIsNotNone(
+ r_f_c2544_profile.get_drop_percentage(samples, tol_min, tolerance,
+ ixia_obj))
def test_get_multiplier(self):
r_f_c2544_profile = IXIARFC2544Profile(self.TRAFFIC_PROFILE)
@@ -638,8 +611,7 @@ class TestIXIARFC2544Profile(unittest.TestCase):
r_f_c2544_profile._ixia_traffic_generate = mock.Mock()
self.assertEqual(
None,
- r_f_c2544_profile.start_ixia_latency(traffic_generator,
- ixia_obj))
+ r_f_c2544_profile.start_ixia_latency(traffic_generator, ixia_obj))
if __name__ == '__main__':
diff --git a/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index c1f1c825b..1b4189b48 100644
--- a/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -32,7 +32,7 @@ if stl_patch:
class TestProxBinSearchProfile(unittest.TestCase):
def test_execute_1(self):
- def target(*args, **kwargs):
+ def target(*args, **_):
runs.append(args[2])
if args[2] < 0 or args[2] > 100:
raise RuntimeError(' '.join([str(args), str(runs)]))
@@ -43,6 +43,8 @@ class TestProxBinSearchProfile(unittest.TestCase):
tp_config = {
'traffic_profile': {
'packet_sizes': [200],
+ 'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
},
}
@@ -61,11 +63,47 @@ class TestProxBinSearchProfile(unittest.TestCase):
profile.execute_traffic(traffic_generator)
self.assertEqual(round(profile.current_lower, 2), 74.69)
- self.assertEqual(round(profile.current_upper, 2), 75.39)
- self.assertEqual(len(runs), 8)
+ self.assertEqual(round(profile.current_upper, 2), 76.09)
+ self.assertEqual(len(runs), 7)
+
+ # Result Samples inc theor_max
+ result_tuple = {"Result_Actual_throughput": 7.5e-07,
+ "Result_theor_max_throughput": 0.00012340000000000002,
+ "Result_pktSize": 200}
+ profile.queue.put.assert_called_with(result_tuple)
+
+ success_result_tuple = {"Success_CurrentDropPackets": 0.5,
+ "Success_DropPackets": 0.5,
+ "Success_LatencyAvg": 5.3,
+ "Success_LatencyMax": 5.2,
+ "Success_LatencyMin": 5.1,
+ "Success_PktSize": 200,
+ "Success_RxThroughput": 7.5e-07,
+ "Success_Throughput": 7.5e-07,
+ "Success_TxThroughput": 0.00012340000000000002}
+
+ calls = profile.queue.put(success_result_tuple)
+ profile.queue.put.assert_has_calls(calls)
+
+ success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
+ "Success_DropPackets": 0.5,
+ "Success_LatencyAvg": 5.3,
+ "Success_LatencyMax": 5.2,
+ "Success_LatencyMin": 5.1,
+ "Success_PktSize": 200,
+ "Success_RxThroughput": 7.5e-07,
+ "Success_Throughput": 7.5e-07,
+ "Success_TxThroughput": 123.4,
+ "Success_can_be_lost": 409600,
+ "Success_drop_total": 20480,
+ "Success_rx_total": 4075520,
+ "Success_tx_total": 4096000}
+
+ calls = profile.queue.put(success_result_tuple2)
+ profile.queue.put.assert_has_calls(calls)
def test_execute_2(self):
- def target(*args, **kwargs):
+ def target(*args, **_):
runs.append(args[2])
if args[2] < 0 or args[2] > 100:
raise RuntimeError(' '.join([str(args), str(runs)]))
@@ -77,6 +115,7 @@ class TestProxBinSearchProfile(unittest.TestCase):
'traffic_profile': {
'packet_sizes': [200],
'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
},
}
@@ -97,3 +136,50 @@ class TestProxBinSearchProfile(unittest.TestCase):
self.assertEqual(round(profile.current_lower, 2), 24.06)
self.assertEqual(round(profile.current_upper, 2), 25.47)
self.assertEqual(len(runs), 7)
+
+ def test_execute_3(self):
+ def target(*args, **_):
+ runs.append(args[2])
+ if args[2] < 0 or args[2] > 100:
+ raise RuntimeError(' '.join([str(args), str(runs)]))
+ if args[2] > 75.0:
+ return fail_tuple, {}
+ return success_tuple, {}
+
+ tp_config = {
+ 'traffic_profile': {
+ 'packet_sizes': [200],
+ 'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
+ },
+ }
+
+ runs = []
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ traffic_generator = mock.MagicMock()
+
+ profile_helper = mock.MagicMock()
+ profile_helper.run_test = target
+
+ profile = ProxBinSearchProfile(tp_config)
+ profile.init(mock.MagicMock())
+ profile._profile_helper = profile_helper
+
+ profile.upper_bound = 100.0
+ profile.lower_bound = 99.0
+ profile.execute_traffic(traffic_generator)
+
+
+ # Result Samples
+ result_tuple = {"Result_theor_max_throughput": 0, "Result_pktSize": 200}
+ profile.queue.put.assert_called_with(result_tuple)
+
+ # Check for success_ tuple (None expected)
+ calls = profile.queue.put.mock_calls
+ for call in calls:
+ for call_detail in call[1]:
+ for k in call_detail:
+ if "Success_" in k:
+ self.assertRaises(AttributeError)
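
The new assertions above pin down the number of search iterations and the reported bounds. As a rough standalone sketch of the strategy these tests exercise (not the ProxBinSearchProfile code itself; run_test, the bounds and the defaults here are illustrative), the rate search is a plain binary search that keeps the highest rate whose drop ratio stays within tolerated_loss and stops once the interval is narrower than test_precision:

def binary_search_rate(run_test, lower=0.0, upper=100.0,
                       test_precision=2.0, tolerated_loss=0.001):
    """Return the highest tested rate whose loss stays within tolerance."""
    best = None
    while (upper - lower) >= test_precision:
        rate = (upper + lower) / 2.0
        drop_ratio = run_test(rate)      # fraction of packets lost at 'rate'
        if drop_ratio <= tolerated_loss:
            best, lower = rate, rate     # success: search higher rates
        else:
            upper = rate                 # failure: search lower rates
    return best

# Example: a DUT that starts dropping traffic above 75% of line rate.
print(binary_search_rate(lambda rate: 0.0 if rate <= 75.0 else 0.1))
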
diff --git a/tests/unit/network_services/traffic_profile/test_traffic_profile.py b/tests/unit/network_services/traffic_profile/test_traffic_profile.py
index 8355c85b6..37b9a08d0 100644
--- a/tests/unit/network_services/traffic_profile/test_traffic_profile.py
+++ b/tests/unit/network_services/traffic_profile/test_traffic_profile.py
@@ -13,15 +13,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-from __future__ import absolute_import
+import ipaddress
-import unittest
import mock
+import six
+import unittest
from tests.unit import STL_MOCKS
-
+from yardstick.common import exceptions as y_exc
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -56,79 +56,90 @@ class TestTrexProfile(unittest.TestCase):
EXAMPLE_IP_ADDR = "10.0.0.1"
EXAMPLE_IPv6_ADDR = "0064:ff9b:0:0:0:0:9810:6414"
- PROFILE = {'description': 'Traffic profile to run RFC2544 latency',
- 'name': 'rfc2544',
- 'traffic_profile': {'traffic_type': 'RFC2544Profile',
- 'frame_rate': 100},
- TrafficProfile.DOWNLINK: {'ipv4': {'outer_l2': {'framesize': {'64B': '100',
- '1518B': '0',
- '128B': '0',
- '1400B': '0',
- '256B': '0',
- '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:02",
- "dstmac": "00:00:00:00:00:01"},
- 'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
- 'proto': 'udp',
- 'srcip4': '9.9.1.1-90.1.2.2',
- 'dscp': 0, 'ttl': 32,
- 'count': 1},
- 'outer_l4': {'srcport': '2001',
- 'dsrport': '1234',
- 'count': 1}}},
- TrafficProfile.UPLINK: {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:01",
- "dstmac": "00:00:00:00:00:02"},
- 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
- 'proto': 'udp',
- 'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32, 'count': 1},
- 'outer_l4': {'dstport': '2001',
- 'srcport': '1234',
- 'count': 1}}},
- 'schema': 'isb:traffic_profile:0.1'}
- PROFILE_v6 = {'description': 'Traffic profile to run RFC2544 latency',
- 'name': 'rfc2544',
- 'traffic_profile': {'traffic_type': 'RFC2544Profile',
- 'frame_rate': 100},
- TrafficProfile.DOWNLINK: {'ipv6': {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:02",
- "dstmac": "00:00:00:00:00:01"},
- 'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
- 'proto': 'udp',
- 'srcip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
- 'dscp': 0, 'ttl': 32,
- 'count': 1},
- 'outer_l4': {'srcport': '2001',
- 'dsrport': '1234',
- 'count': 1}}},
- TrafficProfile.UPLINK:
- {'ipv6': {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:01",
- "dstmac": "00:00:00:00:00:02"},
- 'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
- 'proto': 'udp',
- 'srcip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
- 'dscp': 0, 'ttl': 32,
- 'count': 1},
- 'outer_l4': {'dstport': '2001',
- 'srcport': '1234',
- 'count': 1}}},
- 'schema': 'isb:traffic_profile:0.1'}
+ PROFILE = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ TrafficProfile.DOWNLINK: {
+ 'ipv4': {'outer_l2': {'framesize': {'64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:02",
+ "dstmac": "00:00:00:00:00:01"},
+ 'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
+ 'proto': 'udp',
+ 'srcip4': '9.9.1.1-90.1.2.2',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'srcport': '2001',
+ 'dsrport': '1234',
+ 'count': 1}}},
+ TrafficProfile.UPLINK: {
+ 'ipv4':
+ {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:01",
+ "dstmac": "00:00:00:00:00:02"},
+ 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234',
+ 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+ PROFILE_v6 = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ TrafficProfile.DOWNLINK: {
+ 'ipv6': {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:02",
+ "dstmac": "00:00:00:00:00:01"},
+ 'outer_l3v4': {
+ 'dstip6':
+ '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
+ 'proto': 'udp',
+ 'srcip6':
+ '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'srcport': '2001',
+ 'dsrport': '1234',
+ 'count': 1}}},
+ TrafficProfile.UPLINK: {
+ 'ipv6': {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:01",
+ "dstmac": "00:00:00:00:00:02"},
+ 'outer_l3v4': {
+ 'dstip6':
+ '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
+ 'proto': 'udp',
+ 'srcip6':
+ '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234',
+ 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
def test___init__(self):
TrafficProfile.params = self.PROFILE
@@ -205,22 +216,76 @@ class TestTrexProfile(unittest.TestCase):
TrexProfile(TrafficProfile)
self.assertEqual({}, trex_profile.generate_imix_data(False))
- def test__get_start_end_ipv6(self):
- trex_profile = \
- TrexProfile(TrafficProfile)
- self.assertRaises(SystemExit, trex_profile._get_start_end_ipv6,
- "1.1.1.3", "1.1.1.1")
+ def test__count_ip_ipv4(self):
+ start, end, count = TrexProfile._count_ip('1.1.1.1', '1.2.3.4')
+ self.assertEqual('1.1.1.1', str(start))
+ self.assertEqual('1.2.3.4', str(end))
+ diff = (int(ipaddress.IPv4Address(six.u('1.2.3.4'))) -
+ int(ipaddress.IPv4Address(six.u('1.1.1.1'))))
+ self.assertEqual(diff, count)
+
+ def test__count_ip_ipv6(self):
+ start_ip = '0064:ff9b:0:0:0:0:9810:6414'
+ end_ip = '0064:ff9b:0:0:0:0:9810:6420'
+ start, end, count = TrexProfile._count_ip(start_ip, end_ip)
+ self.assertEqual(0x98106414, start)
+ self.assertEqual(0x98106420, end)
+ self.assertEqual(0x98106420 - 0x98106414, count)
+
+ def test__count_ip_ipv6_exception(self):
+ start_ip = '0064:ff9b:0:0:0:0:9810:6420'
+ end_ip = '0064:ff9b:0:0:0:0:9810:6414'
+ with self.assertRaises(y_exc.IPv6RangeError):
+ TrexProfile._count_ip(start_ip, end_ip)
+
+ def test__dscp_range_action_partial_actual_count_zero(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ dscp_partial = traffic_profile._dscp_range_action_partial()
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ dscp_partial('1', '1', 'unneeded')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__dscp_range_action_partial_count_greater_than_actual(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ dscp_partial = traffic_profile._dscp_range_action_partial()
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ dscp_partial('1', '10', '100')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__udp_range_action_partial_actual_count_zero(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile.udp['field1'] = 'value1'
+ udp_partial = traffic_profile._udp_range_action_partial('field1')
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ udp_partial('1', '1', 'unneeded')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__udp_range_action_partial_count_greater_than_actual(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile.udp['field1'] = 'value1'
+ udp_partial = traffic_profile._udp_range_action_partial('field1', 'not_used_count')
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ udp_partial('1', '10', '100')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
def test__general_single_action_partial(self):
trex_profile = TrexProfile(TrafficProfile)
- trex_profile._general_single_action_partial(ETHERNET)(SRC)(self.EXAMPLE_ETHERNET_ADDR)
- self.assertEqual(self.EXAMPLE_ETHERNET_ADDR, trex_profile.ether_packet.src)
+ trex_profile._general_single_action_partial(ETHERNET)(SRC)(
+ self.EXAMPLE_ETHERNET_ADDR)
+ self.assertEqual(self.EXAMPLE_ETHERNET_ADDR,
+ trex_profile.ether_packet.src)
- trex_profile._general_single_action_partial(IP)(DST)(self.EXAMPLE_IP_ADDR)
+ trex_profile._general_single_action_partial(IP)(DST)(
+ self.EXAMPLE_IP_ADDR)
self.assertEqual(self.EXAMPLE_IP_ADDR, trex_profile.ip_packet.dst)
- trex_profile._general_single_action_partial(IPv6)(DST)(self.EXAMPLE_IPv6_ADDR)
+ trex_profile._general_single_action_partial(IPv6)(DST)(
+ self.EXAMPLE_IPv6_ADDR)
self.assertEqual(self.EXAMPLE_IPv6_ADDR, trex_profile.ip6_packet.dst)
trex_profile._general_single_action_partial(UDP)(SRC_PORT)(5060)
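
The new _count_ip tests above fix the expected return values for IPv4 and IPv6 ranges and the inverted-range error. A rough sketch of the behaviour they pin down (the real TrexProfile._count_ip may differ in detail; the exception class below is a stand-in for yardstick.common.exceptions.IPv6RangeError):

import ipaddress
import six

class IPv6RangeError(Exception):
    """Stand-in for yardstick.common.exceptions.IPv6RangeError."""

def count_ip(start_ip, end_ip):
    start = ipaddress.ip_address(six.u(start_ip))
    end = ipaddress.ip_address(six.u(end_ip))
    if start.version == 4:
        # IPv4: return the address objects and the size of the range.
        return start, end, int(end) - int(start)
    # IPv6: only the low 32 bits are ranged, and the range must not be inverted.
    low, high = int(start) & 0xffffffff, int(end) & 0xffffffff
    if low > high:
        raise IPv6RangeError('%s > %s' % (start_ip, end_ip))
    return low, high, high - low

print(count_ip('1.1.1.1', '1.2.3.4'))
print(count_ip('0064:ff9b::9810:6414', '0064:ff9b::9810:6420'))
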
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
index 2a2647a91..f9a10149e 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -343,6 +343,6 @@ class TestAclApproxVnf(unittest.TestCase):
acl_approx_vnf.used_drivers = {"01:01.0": "i40e",
"01:01.1": "i40e"}
acl_approx_vnf.vnf_execute = mock.MagicMock()
- acl_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
+ acl_approx_vnf.dpdk_devbind = "dpdk-devbind.py"
acl_approx_vnf._resource_collect_stop = mock.Mock()
self.assertEqual(None, acl_approx_vnf.terminate())
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
index f2ce18fb3..62b3c7440 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
@@ -392,22 +392,6 @@ class TestCgnaptApproxVnf(unittest.TestCase):
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
@mock.patch(SSH_HELPER)
- def test_terminate(self, ssh, *args):
- mock_ssh(ssh)
-
- vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
- cgnapt_approx_vnf._vnf_process = mock.MagicMock()
- cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
- cgnapt_approx_vnf.used_drivers = {"01:01.0": "i40e",
- "01:01.1": "i40e"}
- cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
- cgnapt_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
- cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
- self.assertEqual(None, cgnapt_approx_vnf.terminate())
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
- @mock.patch(SSH_HELPER)
def test__vnf_up_post(self, ssh, *args):
mock_ssh(ssh)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py b/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
index 0ac46c632..ac67cc52c 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
@@ -1730,7 +1730,7 @@ class TestProxProfileHelper(unittest.TestCase):
}
self.assertIsNone(helper._test_cores)
- expected = [12, 23]
+ expected = [3, 4]
result = helper.test_cores
self.assertEqual(result, expected)
self.assertIs(result, helper._test_cores)
@@ -1787,7 +1787,7 @@ class TestProxProfileHelper(unittest.TestCase):
}
self.assertIsNone(helper._latency_cores)
- expected = [12, 23]
+ expected = [3, 4]
result = helper.latency_cores
self.assertEqual(result, expected)
self.assertIs(result, helper._latency_cores)
@@ -1842,7 +1842,7 @@ class TestProxProfileHelper(unittest.TestCase):
}
}
- expected = [7, 8]
+ expected = [3, 4]
result = helper.get_cores(helper.PROX_CORE_GEN_MODE)
self.assertEqual(result, expected)
@@ -1984,8 +1984,8 @@ class TestProxMplsProfileHelper(unittest.TestCase):
}
}
- expected_tagged = [7]
- expected_plain = [8]
+ expected_tagged = [3]
+ expected_plain = [4]
self.assertIsNone(helper._cores_tuple)
self.assertEqual(helper.tagged_cores, expected_tagged)
self.assertEqual(helper.plain_cores, expected_plain)
@@ -2060,10 +2060,10 @@ class TestProxBngProfileHelper(unittest.TestCase):
}
}
- expected_cpe = [7]
- expected_inet = [8]
- expected_arp = [4, 3]
- expected_arp_task = [0, 4]
+ expected_cpe = [3]
+ expected_inet = [4]
+ expected_arp = [6, 9]
+ expected_arp_task = [0, 6]
expected_combined = (expected_cpe, expected_inet, expected_arp, expected_arp_task)
self.assertIsNone(helper._cores_tuple)
@@ -2131,8 +2131,8 @@ class TestProxVpeProfileHelper(unittest.TestCase):
}
}
- expected_cpe = [7]
- expected_inet = [8]
+ expected_cpe = [3]
+ expected_inet = [4]
expected_combined = (expected_cpe, expected_inet)
self.assertIsNone(helper._cores_tuple)
@@ -2245,8 +2245,8 @@ class TestProxlwAFTRProfileHelper(unittest.TestCase):
}
}
- expected_tun = [7]
- expected_inet = [8]
+ expected_tun = [3]
+ expected_inet = [4]
expected_combined = (expected_tun, expected_inet)
self.assertIsNone(helper._cores_tuple)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
index 769279066..08be4865b 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -131,6 +131,8 @@ class TestProxApproxVnf(unittest.TestCase):
'packets_in',
'packets_fwd',
'packets_dropped',
+ 'curr_packets_fwd',
+ 'curr_packets_in'
],
},
'connection-point': [
@@ -329,7 +331,7 @@ class TestProxApproxVnf(unittest.TestCase):
'packets_in': 0,
'packets_dropped': 0,
'packets_fwd': 0,
- 'collect_stats': {'core': {}},
+ 'collect_stats': {'core': {}}
}
result = prox_approx_vnf.collect_kpi()
self.assertEqual(result, expected)
@@ -352,7 +354,11 @@ class TestProxApproxVnf(unittest.TestCase):
'collect_stats': {'core': {'result': 234}},
}
result = prox_approx_vnf.collect_kpi()
- self.assertEqual(result, expected)
+ self.assertEqual(result['packets_in'], expected['packets_in'])
+ self.assertEqual(result['packets_dropped'], expected['packets_dropped'])
+ self.assertEqual(result['packets_fwd'], expected['packets_fwd'])
+ self.assertNotEqual(result['packets_fwd'], 0)
+        self.assertNotEqual(result['packets_in'], 0)
@mock.patch(SSH_HELPER)
def test_collect_kpi_error(self, ssh, *args):
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index 1799353c4..cc4ffa5f7 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -609,37 +609,34 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
dpdk_vnf_setup_env_helper.setup_vnf_environment(),
ResourceProfile)
- def test__setup_dpdk_early_success(self):
- vnfd_helper = VnfdHelper(self.VNFD_0)
+ def test__setup_dpdk(self):
ssh_helper = mock.Mock()
- ssh_helper.execute.return_value = 0, 'output', ''
- ssh_helper.join_bin_path.return_value = 'joined_path'
- ssh_helper.provision_tool.return_value = 'provision string'
- scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
- dpdk_setup_helper._setup_hugepages = mock.Mock()
-
- self.assertIsNone(dpdk_setup_helper._setup_dpdk())
- self.assertEqual(dpdk_setup_helper.ssh_helper.execute.call_count, 2)
-
- @mock.patch('yardstick.ssh.SSH')
- def test__setup_dpdk_short(self, _):
- def execute_side(cmd):
- if 'joined_path' in cmd:
- return 0, 'output', ''
- return 1, 'bad output', 'error output'
+ ssh_helper.execute = mock.Mock()
+ ssh_helper.execute.return_value = (0, 0, 0)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY)
+ with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \
+ mock_setup_hp:
+ dpdk_setup_helper._setup_dpdk()
+ mock_setup_hp.assert_called_once()
+ ssh_helper.execute.assert_has_calls([
+ mock.call('sudo modprobe uio && sudo modprobe igb_uio'),
+ mock.call('lsmod | grep -i igb_uio')
+ ])
- vnfd_helper = VnfdHelper(self.VNFD_0)
+ def test__setup_dpdk_igb_uio_not_loaded(self):
ssh_helper = mock.Mock()
- ssh_helper.execute.side_effect = execute_side
- ssh_helper.join_bin_path.return_value = 'joined_path'
- ssh_helper.provision_tool.return_value = 'provision string'
- scenario_helper = mock.Mock()
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
- dpdk_setup_helper._setup_hugepages = mock.Mock()
-
- self.assertIsNone(dpdk_setup_helper._setup_dpdk())
- self.assertEqual(dpdk_setup_helper.ssh_helper.execute.call_count, 3)
+ ssh_helper.execute = mock.Mock()
+ ssh_helper.execute.side_effect = [(0, 0, 0), (1, 0, 0)]
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY)
+ with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \
+ mock_setup_hp:
+ with self.assertRaises(y_exceptions.DPDKSetupDriverError):
+ dpdk_setup_helper._setup_dpdk()
+ mock_setup_hp.assert_called_once()
+ ssh_helper.execute.assert_has_calls([
+ mock.call('sudo modprobe uio && sudo modprobe igb_uio'),
+ mock.call('lsmod | grep -i igb_uio')
+ ])
@mock.patch('yardstick.ssh.SSH')
def test__setup_resources(self, _):
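
The reworked _setup_dpdk tests above expect the helper to set up hugepages, load the uio/igb_uio modules and fail fast when igb_uio does not show up in lsmod. A hedged sketch of that flow (names follow the tests; FakeSSH and the stand-in exception are illustrative, not Yardstick code):

class DPDKSetupDriverError(Exception):
    """Stand-in for yardstick.common.exceptions.DPDKSetupDriverError."""

def setup_dpdk(ssh_helper, setup_hugepages):
    """Mirror the behaviour asserted by the tests above."""
    setup_hugepages()
    ssh_helper.execute('sudo modprobe uio && sudo modprobe igb_uio')
    exit_status, _, _ = ssh_helper.execute('lsmod | grep -i igb_uio')
    if exit_status != 0:
        # igb_uio never loaded; abort before trying to bind NICs to it.
        raise DPDKSetupDriverError('igb_uio module is not loaded')

class FakeSSH(object):
    """Trivial stub so the sketch can be executed stand-alone."""
    def execute(self, cmd):
        print('would run: %s' % cmd)
        return 0, '', ''

setup_dpdk(FakeSSH(), setup_hugepages=lambda: None)
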
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
index e6e4b882e..d77068137 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
@@ -151,6 +151,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
def test_instantiate(self, call, shutil, mock_makedirs):
+ # pylint: disable=unused-argument
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
@@ -174,7 +175,8 @@ class TestIxLoadTrafficGen(unittest.TestCase):
'1C/1T',
'worker_threads': 1}}
}})
- with mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', create=True) as mock_open:
+ with mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open',
+ create=True) as mock_open:
mock_open.return_value = mock.MagicMock()
ixload_traffic_gen.instantiate(scenario_cfg, {})
@@ -185,6 +187,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
def test_run_traffic(self, call, shutil, main_open, min, max, len):
+ # pylint: disable=unused-argument
mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
@@ -216,6 +219,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
def test_run_traffic_csv(self, call, shutil, main_open, min, max, len):
+ # pylint: disable=unused-argument
mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
@@ -243,7 +247,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
self.assertIsNone(result)
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
- def test_terminate(self, call):
+ def test_terminate(self, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -256,6 +260,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch("yardstick.ssh.SSH")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
def test_parse_csv_read(self, mock_call, mock_ssh):
+ # pylint: disable=unused-argument
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
kpi_data = {
'HTTP Total Throughput (Kbps)': 1,
@@ -280,6 +285,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch("yardstick.ssh.SSH")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
def test_parse_csv_read_value_error(self, mock_call, mock_ssh):
+ # pylint: disable=unused-argument
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
http_reader = [{
'HTTP Total Throughput (Kbps)': 1,
@@ -302,6 +308,7 @@ class TestIxLoadTrafficGen(unittest.TestCase):
@mock.patch("yardstick.ssh.SSH")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
def test_parse_csv_read_error(self, mock_call, mock_ssh):
+ # pylint: disable=unused-argument
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
http_reader = [{
'HTTP Total Throughput (Kbps)': 1,
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py b/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
index cda3852fe..472052b0a 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
@@ -462,15 +462,3 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
self.assertIsNone(udp_replay_approx_vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG))
with self.assertRaises(RuntimeError):
udp_replay_approx_vnf.wait_for_instantiate()
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
- @mock.patch(SSH_HELPER)
- def test_terminate(self, ssh, *args):
- mock_ssh(ssh)
-
- udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
- udp_replay_approx_vnf._vnf_process = mock.MagicMock()
- udp_replay_approx_vnf._vnf_process.terminate = mock.Mock()
- udp_replay_approx_vnf.used_drivers = {"01:01.0": "i40e", "01:01.1": "i40e"}
- udp_replay_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
- self.assertEqual(None, udp_replay_approx_vnf.terminate())
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
index d128db0b4..f0a56665c 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
@@ -348,18 +348,3 @@ pipeline>
'rules': ""}}
self.scenario_cfg.update({"nodes": {"vnf__1": ""}})
self.assertIsNone(vfw_approx_vnf.instantiate(self.scenario_cfg, self.context_cfg))
-
- @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
- @mock.patch(SSH_HELPER)
- def test_terminate(self, ssh, *args):
- mock_ssh(ssh)
-
- vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- vfw_approx_vnf = FWApproxVnf(name, vnfd)
- vfw_approx_vnf._vnf_process = mock.MagicMock()
- vfw_approx_vnf.used_drivers = {"01:01.0": "i40e",
- "01:01.1": "i40e"}
- vfw_approx_vnf.vnf_execute = mock.Mock()
- vfw_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
- vfw_approx_vnf._resource_collect_stop = mock.Mock()
- self.assertIsNone(vfw_approx_vnf.terminate())
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index 1abc53688..c074dfb4c 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -163,6 +163,11 @@ class TestConfigCreate(unittest.TestCase):
self.assertEqual(config_create.downlink_ports, ['xe1'])
self.assertEqual(config_create.socket, 2)
+ def test_dpdk_port_to_link_id(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1})
+
def test_vpe_initialize(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
config_create = ConfigCreate(vnfd_helper, 2)
@@ -633,7 +638,7 @@ class TestVpeApproxVnf(unittest.TestCase):
def test_build_config(self, ssh, *args):
mock_ssh(ssh)
vpe_approx_vnf = VpeApproxSetupEnvHelper(mock.MagicMock(),
- mock.MagicMock, mock.MagicMock)
+ mock.MagicMock(), mock.MagicMock())
vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML)
vpe_approx_vnf.generate_port_pairs = mock.Mock()
vpe_approx_vnf.vnf_cfg = {
diff --git a/tools/virt_ci_rampup.sh b/tools/virt_ci_rampup.sh
index 210e6ed40..6a9f2e7cb 100755
--- a/tools/virt_ci_rampup.sh
+++ b/tools/virt_ci_rampup.sh
@@ -16,6 +16,6 @@
ANSIBLE_SCRIPTS="${0%/*}/../ansible"
cd ${ANSIBLE_SCRIPTS} &&\
-ansible-playbook \
+sudo -EH ansible-playbook \
-e rs_file='../etc/infra/infra_deploy.yaml' \
-i inventory.ini infra_deploy.yml
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 4ba543b9e..7b7f1be32 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -95,13 +95,15 @@ class HeatContext(Context):
return sorted_networks
def init(self, attrs):
- self.check_environment()
"""initializes itself from the supplied arguments"""
+ self.check_environment()
self.name = attrs["name"]
self._user = attrs.get("user")
self.template_file = attrs.get("heat_template")
+
+ self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
if self.template_file:
self.heat_parameters = attrs.get("heat_parameters")
return
@@ -113,8 +115,6 @@ class HeatContext(Context):
self._flavor = attrs.get("flavor")
- self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
-
self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
for name, pg_attrs in attrs.get(
"placement_groups", {}).items()]
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index 30170832a..14738da8a 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -310,7 +310,7 @@ class StandaloneContextHelper(object):
return driver
@classmethod
- def get_nic_details(cls, connection, networks, dpdk_nic_bind):
+ def get_nic_details(cls, connection, networks, dpdk_devbind):
for key, ports in networks.items():
if key == "mgmt":
continue
@@ -320,11 +320,11 @@ class StandaloneContextHelper(object):
driver = cls.get_kernel_module(connection, phy_ports, phy_driver)
# Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
- bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
+ bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
lshw_cmd = "lshw -c network -businfo | grep '{port}'"
link_show_cmd = "ip -s link show {interface}"
- cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
+ cmd = bind_cmd.format(dpdk_devbind=dpdk_devbind,
driver=driver, port=ports['phy_port'])
connection.execute(cmd)
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index 3755b84e9..c931d85d0 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -57,7 +57,7 @@ class OvsDpdkContext(Context):
self.file_path = None
self.sriov = []
self.first_run = True
- self.dpdk_nic_bind = ""
+ self.dpdk_devbind = ''
self.vm_names = []
self.name = None
self.nfvi_host = []
@@ -116,12 +116,12 @@ class OvsDpdkContext(Context):
]
for cmd in cmd_list:
self.connection.execute(cmd)
- bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
+ bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
phy_driver = "vfio-pci"
- for _, port in self.networks.items():
+ for port in self.networks.values():
vpci = port.get("phy_port")
- self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
- driver=phy_driver, port=vpci))
+ self.connection.execute(bind_cmd.format(
+ dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))
def start_ovs_serverswitch(self):
vpath = self.ovs_properties.get("vpath")
@@ -241,7 +241,7 @@ class OvsDpdkContext(Context):
return
self.connection = ssh.SSH.from_node(self.host_mgmt)
- self.dpdk_nic_bind = provision_tool(
+ self.dpdk_devbind = provision_tool(
self.connection,
os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
@@ -249,9 +249,8 @@ class OvsDpdkContext(Context):
self.check_ovs_dpdk_env()
# Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
StandaloneContextHelper.install_req_libs(self.connection)
- self.networks = StandaloneContextHelper.get_nic_details(self.connection,
- self.networks,
- self.dpdk_nic_bind)
+ self.networks = StandaloneContextHelper.get_nic_details(
+ self.connection, self.networks, self.dpdk_devbind)
self.setup_ovs()
self.start_ovs_serverswitch()
@@ -271,12 +270,12 @@ class OvsDpdkContext(Context):
self.cleanup_ovs_dpdk_env()
# Bind nics back to kernel
- bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
+ bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
for port in self.networks.values():
vpci = port.get("phy_port")
phy_driver = port.get("driver")
- self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
- driver=phy_driver, port=vpci))
+ self.connection.execute(bind_cmd.format(
+ dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))
# Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
for vm in self.vm_names:
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 9d8423b5f..9cca3e15c 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -41,7 +41,7 @@ class SriovContext(Context):
self.file_path = None
self.sriov = []
self.first_run = True
- self.dpdk_nic_bind = ""
+ self.dpdk_devbind = ''
self.vm_names = []
self.name = None
self.nfvi_host = []
@@ -83,15 +83,14 @@ class SriovContext(Context):
return
self.connection = ssh.SSH.from_node(self.host_mgmt)
- self.dpdk_nic_bind = provision_tool(
+ self.dpdk_devbind = provision_tool(
self.connection,
- os.path.join(get_nsb_option("bin_path"), "dpdk_nic_bind.py"))
+ os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
# Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
StandaloneContextHelper.install_req_libs(self.connection)
- self.networks = StandaloneContextHelper.get_nic_details(self.connection,
- self.networks,
- self.dpdk_nic_bind)
+ self.networks = StandaloneContextHelper.get_nic_details(
+ self.connection, self.networks, self.dpdk_devbind)
self.nodes = self.setup_sriov_context()
LOG.debug("Waiting for VM to come up...")
@@ -138,7 +137,7 @@ class SriovContext(Context):
except StopIteration:
pass
else:
- raise ValueError("Duplicate nodes!!! Nodes: %s %s",
+ raise ValueError("Duplicate nodes!!! Nodes: %s %s" %
(node, duplicate))
node["name"] = attr_name
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 9b1b3f851..f5d2b18ac 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -57,7 +57,7 @@ class Task(object): # pragma: no cover
out_types = [s.strip() for s in dispatchers.split(',')]
output_config['DEFAULT']['dispatcher'] = out_types
- def start(self, args, **kwargs):
+ def start(self, args):
"""Start a benchmark scenario."""
atexit.register(self.atexit_handler)
@@ -69,7 +69,7 @@ class Task(object): # pragma: no cover
try:
output_config = utils.parse_ini_file(CONF_FILE)
- except Exception:
+ except Exception: # pylint: disable=broad-except
# all error will be ignore, the default value is {}
output_config = {}
@@ -120,10 +120,10 @@ class Task(object): # pragma: no cover
case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
try:
- data = self._run(scenarios, run_in_parallel, args.output_file)
+ data = self._run(scenarios, run_in_parallel, output_config)
except KeyboardInterrupt:
raise
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
else:
@@ -232,11 +232,12 @@ class Task(object): # pragma: no cover
def _do_output(self, output_config, result):
dispatchers = DispatcherBase.get(output_config)
+ dispatchers = (d for d in dispatchers if d.__dispatcher_type__ != 'Influxdb')
for dispatcher in dispatchers:
dispatcher.flush_result_data(result)
- def _run(self, scenarios, run_in_parallel, output_file):
+ def _run(self, scenarios, run_in_parallel, output_config):
"""Deploys context and calls runners"""
for context in self.contexts:
context.deploy()
@@ -247,14 +248,14 @@ class Task(object): # pragma: no cover
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
scenario["runner"] = dict(type="Duration", duration=1000000000)
- runner = self.run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_config)
background_runners.append(runner)
runners = []
if run_in_parallel:
for scenario in scenarios:
if not _is_background_scenario(scenario):
- runner = self.run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_config)
runners.append(runner)
# Wait for runners to finish
@@ -263,12 +264,12 @@ class Task(object): # pragma: no cover
if status != 0:
raise RuntimeError(
"{0} runner status {1}".format(runner.__execution_type__, status))
- LOG.info("Runner ended, output in %s", output_file)
+ LOG.info("Runner ended")
else:
# run serially
for scenario in scenarios:
if not _is_background_scenario(scenario):
- runner = self.run_one_scenario(scenario, output_file)
+ runner = self.run_one_scenario(scenario, output_config)
status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
LOG.error('Scenario NO.%s: "%s" ERROR!',
@@ -276,7 +277,7 @@ class Task(object): # pragma: no cover
scenario.get('type'))
raise RuntimeError(
"{0} runner status {1}".format(runner.__execution_type__, status))
- LOG.info("Runner ended, output in %s", output_file)
+ LOG.info("Runner ended")
# Abort background runners
for runner in background_runners:
@@ -313,10 +314,10 @@ class Task(object): # pragma: no cover
else:
return op
- def run_one_scenario(self, scenario_cfg, output_file):
+ def run_one_scenario(self, scenario_cfg, output_config):
"""run one scenario using context"""
runner_cfg = scenario_cfg["runner"]
- runner_cfg['output_filename'] = output_file
+ runner_cfg['output_config'] = output_config
options = scenario_cfg.get('options', {})
scenario_cfg['options'] = self._parse_options(options)
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index a887fa5b3..99386a440 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -23,6 +23,7 @@ import multiprocessing
import subprocess
import time
import traceback
+from subprocess import CalledProcessError
import importlib
@@ -30,6 +31,7 @@ from six.moves.queue import Empty
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.dispatcher.base import Base as DispatcherBase
log = logging.getLogger(__name__)
@@ -39,7 +41,7 @@ def _execute_shell_command(command):
exitcode = 0
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except CalledProcessError:
exitcode = -1
output = traceback.format_exc()
log.error("exec command '%s' error:\n ", command)
@@ -137,6 +139,8 @@ class Runner(object):
Runner.release(runner)
def __init__(self, config):
+ self.task_id = None
+ self.case_name = None
self.config = config
self.periodic_action_process = None
self.output_queue = multiprocessing.Queue()
@@ -170,6 +174,8 @@ class Runner(object):
cls = getattr(module, path_split[-1])
self.config['object'] = class_name
+ self.case_name = scenario_cfg['tc']
+ self.task_id = scenario_cfg['task_id']
self.aborted.clear()
# run a potentially configured pre-start action
@@ -245,10 +251,24 @@ class Runner(object):
def get_result(self):
result = []
+
+ dispatcher = self.config['output_config']['DEFAULT']['dispatcher']
+ output_in_influxdb = 'influxdb' in dispatcher
+
while not self.result_queue.empty():
log.debug("result_queue size %s", self.result_queue.qsize())
try:
- result.append(self.result_queue.get(True, 1))
+ one_record = self.result_queue.get(True, 1)
except Empty:
pass
+ else:
+ if output_in_influxdb:
+ self._output_to_influxdb(one_record)
+
+ result.append(one_record)
return result
+
+ def _output_to_influxdb(self, record):
+ dispatchers = DispatcherBase.get(self.config['output_config'])
+ dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
+ dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
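
With this change the result handling is split in two paths: the runner streams each record to InfluxDB as soon as it is taken off the result queue, while Task._do_output flushes the remaining dispatchers once at the end and filters Influxdb out to avoid double writes. A minimal sketch of that split, with an assumed output_config shape (illustrative, not copied from the tree):

# Sketch only; the output_config layout below is an assumption.
output_config = {'DEFAULT': {'dispatcher': ['influxdb', 'file']}}

def stream_record(record, dispatchers, case_name, task_id):
    # Per-record path (Runner._output_to_influxdb): pick the InfluxDB
    # dispatcher and upload the single record immediately.
    influx = next(d for d in dispatchers
                  if d.__dispatcher_type__ == 'Influxdb')
    influx.upload_one_record(record, case_name, '', task_id=task_id)

def flush_rest(result, dispatchers):
    # End-of-run path (Task._do_output): every dispatcher except InfluxDB
    # gets the aggregated result exactly once.
    for dispatcher in (d for d in dispatchers
                       if d.__dispatcher_type__ != 'Influxdb'):
        dispatcher.flush_result_data(result)
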
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 7af85834c..10a728828 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -16,20 +16,34 @@
# yardstick comment: this is a modified copy of
# rally/rally/benchmark/scenarios/base.py
-""" Scenario base class
-"""
+from stevedore import extension
-from __future__ import absolute_import
import yardstick.common.utils as utils
+def _iter_scenario_classes(scenario_type=None):
+ """Generator over all 'Scenario' subclasses
+
+    This function iterates over all 'Scenario' subclasses defined in this
+    project and also loads any classes introduced by installed plugin projects
+    that declare them under 'yardstick.scenarios' in their 'entry_points'.
+ """
+ extension.ExtensionManager(namespace='yardstick.scenarios',
+ invoke_on_load=False)
+ for scenario in utils.itersubclasses(Scenario):
+ if not scenario_type:
+ yield scenario
+ elif getattr(scenario, '__scenario_type__', None) == scenario_type:
+ yield scenario
+
+
class Scenario(object):
def setup(self):
""" default impl for scenario setup """
pass
- def run(self, args):
+ def run(self, *args):
""" catcher for not implemented run methods in subclasses """
raise RuntimeError("run method not implemented")
@@ -41,16 +55,15 @@ class Scenario(object):
def get_types():
"""return a list of known runner type (class) names"""
scenarios = []
- for scenario in utils.itersubclasses(Scenario):
+ for scenario in _iter_scenario_classes():
scenarios.append(scenario)
return scenarios
@staticmethod
def get_cls(scenario_type):
"""return class of specified type"""
- for scenario in utils.itersubclasses(Scenario):
- if scenario_type == scenario.__scenario_type__:
- return scenario
+ for scenario in _iter_scenario_classes(scenario_type):
+ return scenario
raise RuntimeError("No such scenario type %s" % scenario_type)
@@ -58,11 +71,8 @@ class Scenario(object):
def get(scenario_type):
"""Returns instance of a scenario runner for execution type.
"""
- for scenario in utils.itersubclasses(Scenario):
- if scenario_type == scenario.__scenario_type__:
- return scenario.__module__ + "." + scenario.__name__
-
- raise RuntimeError("No such scenario type %s" % scenario_type)
+ scenario = Scenario.get_cls(scenario_type)
+ return scenario.__module__ + "." + scenario.__name__
@classmethod
def get_scenario_type(cls):
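
Scenario plugins are discovered through the 'yardstick.scenarios' entry-point namespace: loading the entry points imports the plugin modules, so their Scenario subclasses become visible to utils.itersubclasses(). A hypothetical out-of-tree plugin could register itself like this (project, package and class names are illustrative only):

# setup.py of a hypothetical plugin project (illustrative only).
from setuptools import setup, find_packages

setup(
    name='yardstick-sample-plugin',   # assumed project name
    version='0.1',
    packages=find_packages(),
    entry_points={
        'yardstick.scenarios': [
            # Loading this entry point imports the module, so the Scenario
            # subclass is then found by _iter_scenario_classes() via
            # itersubclasses().
            'sample_scenario = yardstick_sample_plugin.scenarios.sample:SampleScenario',
        ],
    },
)
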
diff --git a/yardstick/benchmark/scenarios/lib/create_floating_ip.py b/yardstick/benchmark/scenarios/lib/create_floating_ip.py
index 328566d48..7108722af 100644
--- a/yardstick/benchmark/scenarios/lib/create_floating_ip.py
+++ b/yardstick/benchmark/scenarios/lib/create_floating_ip.py
@@ -7,15 +7,13 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
import os
from yardstick.benchmark.scenarios import base
import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
@@ -30,6 +28,7 @@ class CreateFloatingIp(base.Scenario):
self.ext_net_id = os.getenv("EXTERNAL_NETWORK", "external")
self.neutron_client = op_utils.get_neutron_client()
+ self.shade_client = op_utils.get_shade_client()
self.setup_done = False
def setup(self):
@@ -37,24 +36,21 @@ class CreateFloatingIp(base.Scenario):
self.setup_done = True
- def run(self, result):
+ def run(self, *args):
"""execute the test"""
if not self.setup_done:
self.setup()
- net_id = op_utils.get_network_id(self.neutron_client, self.ext_net_id)
+ net_id = op_utils.get_network_id(self.shade_client, self.ext_net_id)
floating_info = op_utils.create_floating_ip(self.neutron_client,
extnet_id=net_id)
- if floating_info:
- LOG.info("Creating floating ip successful!")
- else:
+
+ if not floating_info:
LOG.error("Creating floating ip failed!")
+ return
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [floating_info["fip_id"], floating_info["fip_addr"]]
- return self._push_to_outputs(keys, values)
+ LOG.info("Creating floating ip successful!")
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [floating_info["fip_id"], floating_info["fip_addr"]]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/delete_network.py b/yardstick/benchmark/scenarios/lib/delete_network.py
index e8796bf82..2e8b595f9 100644
--- a/yardstick/benchmark/scenarios/lib/delete_network.py
+++ b/yardstick/benchmark/scenarios/lib/delete_network.py
@@ -7,14 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
@@ -30,7 +28,7 @@ class DeleteNetwork(base.Scenario):
self.network_id = self.options.get("network_id", None)
- self.neutron_client = op_utils.get_neutron_client()
+ self.shade_client = op_utils.get_shade_client()
self.setup_done = False
@@ -45,7 +43,7 @@ class DeleteNetwork(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_neutron_net(self.neutron_client,
+ status = op_utils.delete_neutron_net(self.shade_client,
network_id=self.network_id)
if status:
result.update({"delete_network": 1})
@@ -53,3 +51,4 @@ class DeleteNetwork(base.Scenario):
else:
result.update({"delete_network": 0})
LOG.error("Delete network failed!")
+ return status
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index 9a4426bf9..be262c215 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -33,7 +33,7 @@ from six import StringIO
from chainmap import ChainMap
from yardstick.common.utils import Timer
-
+from yardstick.common import constants as consts
cgitb.enable(format="text")
@@ -435,6 +435,7 @@ class AnsibleCommon(object):
ansible_dict = dict(os.environ, **{
"ANSIBLE_LOG_PATH": os.path.join(directory, log_file),
"ANSIBLE_LOG_BASE": directory,
+ "ANSIBLE_ROLES_PATH": consts.ANSIBLE_ROLES_PATH,
# # required for SSH to work
# "ANSIBLE_SSH_ARGS": "-o UserKnownHostsFile=/dev/null "
# "-o GSSAPIAuthentication=no "
@@ -516,7 +517,7 @@ class AnsibleCommon(object):
# playbook dir: use include to point to files in consts.ANSIBLE_DIR
if not os.path.isdir(directory):
- raise OSError("Not a directory, %s", directory)
+ raise OSError("Not a directory, %s" % directory)
timeout = self.get_timeout(timeout, self.default_timeout)
self.counter += 1
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 32ed746df..43c2c19cb 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -7,9 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
-import os
-import errno
+import errno
+import os
from functools import reduce
import pkg_resources
@@ -40,10 +40,8 @@ def get_param(key, default=''):
try:
with open(conf_file) as f:
value = yaml_load(f)
- except IOError:
- pass
- except OSError as e:
- if e.errno != errno.EEXIST:
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
raise
else:
CONF.update(value)
@@ -85,6 +83,7 @@ YARDSTICK_ROOT_PATH = dirname(
TASK_LOG_DIR = get_param('dir.tasklog', '/var/log/yardstick/')
CONF_SAMPLE_DIR = join(REPOS_DIR, 'etc/yardstick/')
ANSIBLE_DIR = join(REPOS_DIR, 'ansible')
+ANSIBLE_ROLES_PATH = join(REPOS_DIR, 'ansible/roles/')
SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples')
TESTCASE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_cases/')
TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/')
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 4780822a4..3e0635e46 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -57,3 +57,16 @@ class YardstickException(Exception):
class FunctionNotImplemented(YardstickException):
message = ('The function "%(function_name)s" is not implemented in '
'"%(class_name)s" class.')
+
+
+class HeatTemplateError(YardstickException):
+ """Error in Heat during the stack deployment"""
+ message = ('Error in Heat during the creation of the OpenStack stack '
+               '"%(stack_name)s"')
+
+
+class IPv6RangeError(YardstickException):
+ message = 'Start IP "%(start_ip)s" is greater than end IP "%(end_ip)s"'
+
+class DPDKSetupDriverError(YardstickException):
+ message = '"igb_uio" driver is not loaded'
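
Assuming the usual YardstickException behaviour of interpolating the message attribute with the raise-time keyword arguments, callers of the new exceptions only supply the named fields. A hedged usage sketch (the call sites are illustrative, not part of the patch):

# Illustrative only: how the new exceptions are expected to be raised.
from yardstick.common import exceptions

def validate_ip_range(start_ip, end_ip, start_int, end_int):
    # Keyword arguments fill the %(...)s placeholders in the message.
    if start_int > end_int:
        raise exceptions.IPv6RangeError(start_ip=start_ip, end_ip=end_ip)

def require_igb_uio(lsmod_exit_status):
    # Non-zero exit status means "lsmod | grep -i igb_uio" found nothing.
    if lsmod_exit_status:
        raise exceptions.DPDKSetupDriverError()
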
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index d1223edd2..8f666e268 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -7,8 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-
import os
import time
import sys
@@ -16,11 +14,15 @@ import logging
from keystoneauth1 import loading
from keystoneauth1 import session
+import shade
+from shade import exc
+
from cinderclient import client as cinderclient
from novaclient import client as novaclient
from glanceclient import client as glanceclient
from neutronclient.neutron import client as neutronclient
+
log = logging.getLogger(__name__)
DEFAULT_HEAT_API_VERSION = '1'
@@ -170,27 +172,31 @@ def get_glance_client(): # pragma: no cover
return glanceclient.Client(get_glance_client_version(), session=sess)
+def get_shade_client():
+ return shade.openstack_cloud()
+
+
# *********************************************
# NOVA
# *********************************************
-def get_instances(nova_client): # pragma: no cover
+def get_instances(nova_client):
try:
return nova_client.servers.list(search_opts={'all_tenants': 1})
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_instances(nova_client)]")
def get_instance_status(nova_client, instance): # pragma: no cover
try:
return nova_client.servers.get(instance.id).status
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_instance_status(nova_client)]")
def get_instance_by_name(nova_client, instance_name): # pragma: no cover
try:
return nova_client.servers.find(name=instance_name)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_instance_by_name(nova_client, '%s')]",
instance_name)
@@ -198,28 +204,28 @@ def get_instance_by_name(nova_client, instance_name): # pragma: no cover
def get_aggregates(nova_client): # pragma: no cover
try:
return nova_client.aggregates.list()
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_aggregates(nova_client)]")
def get_availability_zones(nova_client): # pragma: no cover
try:
return nova_client.availability_zones.list()
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_availability_zones(nova_client)]")
def get_availability_zone_names(nova_client): # pragma: no cover
try:
return [az.zoneName for az in get_availability_zones(nova_client)]
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_availability_zone_names(nova_client)]")
def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover
try:
nova_client.aggregates.create(aggregate_name, av_zone)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_aggregate(nova_client, %s, %s)]",
aggregate_name, av_zone)
return False
@@ -231,7 +237,7 @@ def get_aggregate_id(nova_client, aggregate_name): # pragma: no cover
try:
aggregates = get_aggregates(nova_client)
_id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_aggregate_id(nova_client, %s)]",
aggregate_name)
else:
@@ -243,7 +249,7 @@ def add_host_to_aggregate(nova_client, aggregate_name,
try:
aggregate_id = get_aggregate_id(nova_client, aggregate_name)
nova_client.aggregates.add_host(aggregate_id, compute_host)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
aggregate_name, compute_host)
return False
@@ -256,7 +262,7 @@ def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
try:
create_aggregate(nova_client, aggregate_name, av_zone)
add_host_to_aggregate(nova_client, aggregate_name, compute_host)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_aggregate_with_host("
"nova_client, %s, %s, %s)]",
aggregate_name, av_zone, compute_host)
@@ -265,19 +271,20 @@ def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
return True
-def create_keypair(nova_client, name, key_path=None): # pragma: no cover
+def create_keypair(name, key_path=None): # pragma: no cover
try:
with open(key_path) as fpubkey:
- keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read())
+ keypair = get_nova_client().keypairs.create(
+ name=name, public_key=fpubkey.read())
return keypair
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_keypair(nova_client)]")
def create_instance(json_body): # pragma: no cover
try:
return get_nova_client().servers.create(**json_body)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error create instance failed")
return None
@@ -288,7 +295,7 @@ def create_instance_and_wait_for_active(json_body): # pragma: no cover
nova_client = get_nova_client()
instance = create_instance(json_body)
count = VM_BOOT_TIMEOUT / SLEEP
- for n in range(count, -1, -1):
+ for _ in range(count, -1, -1):
status = get_instance_status(nova_client, instance)
if status.lower() == "active":
return instance
@@ -300,10 +307,12 @@ def create_instance_and_wait_for_active(json_body): # pragma: no cover
return None
-def attach_server_volume(server_id, volume_id, device=None): # pragma: no cover
+def attach_server_volume(server_id, volume_id,
+ device=None): # pragma: no cover
try:
- get_nova_client().volumes.create_server_volume(server_id, volume_id, device)
- except Exception:
+ get_nova_client().volumes.create_server_volume(server_id,
+ volume_id, device)
+ except Exception: # pylint: disable=broad-except
log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
server_id, volume_id)
return False
@@ -314,7 +323,7 @@ def attach_server_volume(server_id, volume_id, device=None): # pragma: no cov
def delete_instance(nova_client, instance_id): # pragma: no cover
try:
nova_client.servers.force_delete(instance_id)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_instance(nova_client, '%s')]",
instance_id)
return False
@@ -327,7 +336,7 @@ def remove_host_from_aggregate(nova_client, aggregate_name,
try:
aggregate_id = get_aggregate_id(nova_client, aggregate_name)
nova_client.aggregates.remove_host(aggregate_id, compute_host)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
aggregate_name, compute_host)
return False
@@ -348,7 +357,7 @@ def delete_aggregate(nova_client, aggregate_name): # pragma: no cover
try:
remove_hosts_from_aggregate(nova_client, aggregate_name)
nova_client.aggregates.delete(aggregate_name)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_aggregate(nova_client, %s)]",
aggregate_name)
return False
@@ -366,8 +375,9 @@ def get_server_by_name(name): # pragma: no cover
def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
try:
- return get_nova_client().flavors.create(name, ram, vcpus, disk, **kwargs)
- except Exception:
+ return get_nova_client().flavors.create(name, ram, vcpus,
+ disk, **kwargs)
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_flavor(nova_client, %s, %s, %s, %s, %s)]",
name, ram, disk, vcpus, kwargs['is_public'])
return None
@@ -400,7 +410,7 @@ def get_flavor_by_name(name): # pragma: no cover
def check_status(status, name, iterations, interval): # pragma: no cover
- for i in range(iterations):
+ for _ in range(iterations):
try:
server = get_server_by_name(name)
except IndexError:
@@ -417,7 +427,7 @@ def check_status(status, name, iterations, interval): # pragma: no cover
def delete_flavor(flavor_id): # pragma: no cover
try:
get_nova_client().flavors.delete(flavor_id)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_flavor(nova_client, %s)]", flavor_id)
return False
else:
@@ -428,7 +438,7 @@ def delete_keypair(nova_client, key): # pragma: no cover
try:
nova_client.keypairs.delete(key=key)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_keypair(nova_client)]")
return False
@@ -436,33 +446,26 @@ def delete_keypair(nova_client, key): # pragma: no cover
# *********************************************
# NEUTRON
# *********************************************
-def get_network_id(neutron_client, network_name): # pragma: no cover
- networks = neutron_client.list_networks()['networks']
- return next((n['id'] for n in networks if n['name'] == network_name), None)
-
-
-def get_port_id_by_ip(neutron_client, ip_address): # pragma: no cover
- ports = neutron_client.list_ports()['ports']
- return next((i['id'] for i in ports for j in i.get(
- 'fixed_ips') if j['ip_address'] == ip_address), None)
+def get_network_id(shade_client, network_name):
+ networks = shade_client.list_networks({'name': network_name})
+ if networks:
+ return networks[0]['id']
def create_neutron_net(neutron_client, json_body): # pragma: no cover
try:
network = neutron_client.create_network(body=json_body)
return network['network']['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_neutron_net(neutron_client)]")
raise Exception("operation error")
- return None
-def delete_neutron_net(neutron_client, network_id): # pragma: no cover
+def delete_neutron_net(shade_client, network_id):
try:
- neutron_client.delete_network(network_id)
- return True
- except Exception:
- log.error("Error [delete_neutron_net(neutron_client, '%s')]" % network_id)
+ return shade_client.delete_network(network_id)
+ except exc.OpenStackCloudException:
+ log.error("Error [delete_neutron_net(shade_client, '%s')]", network_id)
return False
@@ -470,28 +473,27 @@ def create_neutron_subnet(neutron_client, json_body): # pragma: no cover
try:
subnet = neutron_client.create_subnet(body=json_body)
return subnet['subnets'][0]['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_neutron_subnet")
raise Exception("operation error")
- return None
def create_neutron_router(neutron_client, json_body): # pragma: no cover
try:
router = neutron_client.create_router(json_body)
return router['router']['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_neutron_router(neutron_client)]")
raise Exception("operation error")
- return None
def delete_neutron_router(neutron_client, router_id): # pragma: no cover
try:
neutron_client.delete_router(router=router_id)
return True
- except Exception:
- log.error("Error [delete_neutron_router(neutron_client, '%s')]" % router_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [delete_neutron_router(neutron_client, '%s')]",
+ router_id)
return False
@@ -499,8 +501,9 @@ def remove_gateway_router(neutron_client, router_id): # pragma: no cover
try:
neutron_client.remove_gateway_router(router_id)
return True
- except Exception:
- log.error("Error [remove_gateway_router(neutron_client, '%s')]" % router_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [remove_gateway_router(neutron_client, '%s')]",
+ router_id)
return False
@@ -511,9 +514,9 @@ def remove_interface_router(neutron_client, router_id, subnet_id,
neutron_client.remove_interface_router(router=router_id,
body=json_body)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [remove_interface_router(neutron_client, '%s', "
- "'%s')]" % (router_id, subnet_id))
+ "'%s')]", router_id, subnet_id)
return False
@@ -523,7 +526,7 @@ def create_floating_ip(neutron_client, extnet_id): # pragma: no cover
ip_json = neutron_client.create_floatingip({'floatingip': props})
fip_addr = ip_json['floatingip']['floating_ip_address']
fip_id = ip_json['floatingip']['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_floating_ip(neutron_client)]")
return None
return {'fip_addr': fip_addr, 'fip_id': fip_id}
@@ -533,8 +536,9 @@ def delete_floating_ip(nova_client, floatingip_id): # pragma: no cover
try:
nova_client.floating_ips.delete(floatingip_id)
return True
- except Exception:
- log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [delete_floating_ip(nova_client, '%s')]",
+ floatingip_id)
return False
@@ -543,7 +547,7 @@ def get_security_groups(neutron_client): # pragma: no cover
security_groups = neutron_client.list_security_groups()[
'security_groups']
return security_groups
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [get_security_groups(neutron_client)]")
return None
@@ -558,15 +562,16 @@ def get_security_group_id(neutron_client, sg_name): # pragma: no cover
return id
-def create_security_group(neutron_client, sg_name, sg_description): # pragma: no cover
+def create_security_group(neutron_client, sg_name,
+ sg_description): # pragma: no cover
json_body = {'security_group': {'name': sg_name,
'description': sg_description}}
try:
secgroup = neutron_client.create_security_group(json_body)
return secgroup['security_group']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_security_group(neutron_client, '%s', "
- "'%s')]" % (sg_name, sg_description))
+ "'%s')]", sg_name, sg_description)
return None
@@ -597,28 +602,27 @@ def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
else:
log.error("Bad security group format."
"One of the port range is not properly set:"
- "range min: {},"
- "range max: {}".format(port_range_min,
- port_range_max))
+ "range min: %s, range max: %s", port_range_min,
+ port_range_max)
return False
# Create security group using neutron client
try:
neutron_client.create_security_group_rule(json_body)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Impossible to create_security_group_rule,"
"security group rule probably already exists")
return False
-def create_security_group_full(neutron_client,
- sg_name, sg_description): # pragma: no cover
+def create_security_group_full(neutron_client, sg_name,
+ sg_description): # pragma: no cover
sg_id = get_security_group_id(neutron_client, sg_name)
if sg_id != '':
- log.info("Using existing security group '%s'..." % sg_name)
+ log.info("Using existing security group '%s'...", sg_name)
else:
- log.info("Creating security group '%s'..." % sg_name)
+ log.info("Creating security group '%s'...", sg_name)
SECGROUP = create_security_group(neutron_client,
sg_name,
sg_description)
@@ -628,18 +632,16 @@ def create_security_group_full(neutron_client,
sg_id = SECGROUP['id']
- log.debug("Security group '%s' with ID=%s created successfully."
- % (SECGROUP['name'], sg_id))
+ log.debug("Security group '%s' with ID=%s created successfully.",
+ SECGROUP['name'], sg_id)
- log.debug("Adding ICMP rules in security group '%s'..."
- % sg_name)
+ log.debug("Adding ICMP rules in security group '%s'...", sg_name)
if not create_secgroup_rule(neutron_client, sg_id,
'ingress', 'icmp'):
log.error("Failed to create the security group rule...")
return None
- log.debug("Adding SSH rules in security group '%s'..."
- % sg_name)
+ log.debug("Adding SSH rules in security group '%s'...", sg_name)
if not create_secgroup_rule(
neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
log.error("Failed to create the security group rule...")
@@ -664,31 +666,27 @@ def create_image(glance_client, image_name, file_path, disk_format,
container_format, min_disk, min_ram, protected, tag,
public, **kwargs): # pragma: no cover
if not os.path.isfile(file_path):
- log.error("Error: file %s does not exist." % file_path)
+ log.error("Error: file %s does not exist.", file_path)
return None
try:
image_id = get_image_id(glance_client, image_name)
if image_id is not None:
- log.info("Image %s already exists." % image_name)
+ log.info("Image %s already exists.", image_name)
else:
log.info("Creating image '%s' from '%s'...", image_name, file_path)
- image = glance_client.images.create(name=image_name,
- visibility=public,
- disk_format=disk_format,
- container_format=container_format,
- min_disk=min_disk,
- min_ram=min_ram,
- tags=tag,
- protected=protected,
- **kwargs)
+ image = glance_client.images.create(
+ name=image_name, visibility=public, disk_format=disk_format,
+ container_format=container_format, min_disk=min_disk,
+ min_ram=min_ram, tags=tag, protected=protected, **kwargs)
image_id = image.id
with open(file_path) as image_data:
glance_client.images.upload(image_id, image_data)
return image_id
- except Exception:
- log.error("Error [create_glance_image(glance_client, '%s', '%s', '%s')]",
- image_name, file_path, public)
+ except Exception: # pylint: disable=broad-except
+ log.error(
+ "Error [create_glance_image(glance_client, '%s', '%s', '%s')]",
+ image_name, file_path, public)
return None
@@ -696,7 +694,7 @@ def delete_image(glance_client, image_id): # pragma: no cover
try:
glance_client.images.delete(image_id)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_flavor(glance_client, %s)]", image_id)
return False
else:
@@ -722,18 +720,19 @@ def create_volume(cinder_client, volume_name, volume_size,
volume = cinder_client.volumes.create(name=volume_name,
size=volume_size)
return volume
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_volume(cinder_client, %s)]",
(volume_name, volume_size))
return None
-def delete_volume(cinder_client, volume_id, forced=False): # pragma: no cover
+def delete_volume(cinder_client, volume_id,
+ forced=False): # pragma: no cover
try:
if forced:
try:
cinder_client.volumes.detach(volume_id)
- except:
+ except Exception: # pylint: disable=broad-except
log.error(sys.exc_info()[0])
cinder_client.volumes.force_delete(volume_id)
else:
@@ -743,8 +742,8 @@ def delete_volume(cinder_client, volume_id, forced=False): # pragma: no cov
break
cinder_client.volumes.delete(volume_id)
return True
- except Exception:
- log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+ except Exception: # pylint: disable=broad-except
+ log.exception("Error [delete_volume(cinder_client, '%s')]", volume_id)
return False
@@ -752,7 +751,7 @@ def detach_volume(server_id, volume_id): # pragma: no cover
try:
get_nova_client().volumes.delete_server_volume(server_id, volume_id)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
server_id, volume_id)
return False
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index 632b433b5..e8c7cf57b 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -11,8 +11,10 @@ from __future__ import absolute_import
import logging
import time
+import os
import requests
+from requests import ConnectionError
from yardstick.common import utils
from third_party.influxdb.influxdb_line_protocol import make_lines
@@ -38,7 +40,8 @@ class InfluxdbDispatcher(DispatchBase):
self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name)
- self.task_id = -1
+ self.task_id = None
+ self.tags = None
def flush_result_data(self, data):
LOG.debug('Test result all : %s', data)
@@ -57,28 +60,41 @@ class InfluxdbDispatcher(DispatchBase):
for record in data['tc_data']:
# skip results with no data because we influxdb encode empty dicts
if record.get("data"):
- self._upload_one_record(record, case, tc_criteria)
+ self.upload_one_record(record, case, tc_criteria)
return 0
- def _upload_one_record(self, data, case, tc_criteria):
+ def upload_one_record(self, data, case, tc_criteria, task_id=None):
+ if task_id:
+ self.task_id = task_id
+
+ line = self._data_to_line_protocol(data, case, tc_criteria)
+ LOG.debug('Test result line format : %s', line)
+
try:
- line = self._data_to_line_protocol(data, case, tc_criteria)
- LOG.debug('Test result line format : %s', line)
res = requests.post(self.influxdb_url,
data=line,
auth=(self.username, self.password),
timeout=self.timeout)
+ except ConnectionError as err:
+ LOG.exception('Failed to record result data: %s', err)
+ else:
if res.status_code != 204:
LOG.error('Test result posting finished with status code'
' %d.', res.status_code)
LOG.error(res.text)
- except Exception as err:
- LOG.exception('Failed to record result data: %s', err)
-
def _data_to_line_protocol(self, data, case, criteria):
msg = {}
+
+ if not self.tags:
+ self.tags = {
+ 'deploy_scenario': os.environ.get('DEPLOY_SCENARIO', 'unknown'),
+ 'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
+ 'pod_name': os.environ.get('NODE_NAME', 'unknown'),
+ 'version': os.environ.get('YARDSTICK_BRANCH', 'unknown')
+ }
+
point = {
"measurement": case,
"fields": utils.flatten_dict_key(data["data"]),
@@ -93,7 +109,7 @@ class InfluxdbDispatcher(DispatchBase):
def _get_nano_timestamp(self, results):
try:
timestamp = results["timestamp"]
- except Exception:
+ except KeyError:
timestamp = time.time()
return str(int(float(timestamp) * 1000000000))
diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py
index c07613147..8c44b26c2 100644
--- a/yardstick/network_services/helpers/dpdkbindnic_helper.py
+++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py
@@ -34,11 +34,11 @@ class DpdkBindHelperException(Exception):
class DpdkBindHelper(object):
- DPDK_STATUS_CMD = "{dpdk_nic_bind} --status"
- DPDK_BIND_CMD = "sudo {dpdk_nic_bind} {force} -b {driver} {vpci}"
+ DPDK_STATUS_CMD = "{dpdk_devbind} --status"
+ DPDK_BIND_CMD = "sudo {dpdk_devbind} {force} -b {driver} {vpci}"
- NIC_ROW_RE = re.compile("([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
- "unused=([^ ]*)(?: (\*Active\*))?")
+ NIC_ROW_RE = re.compile(r"([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
+ r"unused=([^ ]*)(?: (\*Active\*))?")
SKIP_RE = re.compile('(====|<none>|^$)')
NIC_ROW_FIELDS = ['vpci', 'dev_type', 'iface', 'driver', 'unused', 'active']
@@ -64,7 +64,7 @@ class DpdkBindHelper(object):
def __init__(self, ssh_helper):
self.dpdk_status = None
self.status_nic_row_re = None
- self._dpdk_nic_bind_attr = None
+ self._dpdk_devbind = None
self._status_cmd_attr = None
self.ssh_helper = ssh_helper
@@ -74,19 +74,19 @@ class DpdkBindHelper(object):
res = self.ssh_helper.execute(*args, **kwargs)
if res[0] != 0:
raise DpdkBindHelperException('{} command failed with rc={}'.format(
- self._dpdk_nic_bind, res[0]))
+ self.dpdk_devbind, res[0]))
return res
@property
- def _dpdk_nic_bind(self):
- if self._dpdk_nic_bind_attr is None:
- self._dpdk_nic_bind_attr = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
- return self._dpdk_nic_bind_attr
+ def dpdk_devbind(self):
+ if self._dpdk_devbind is None:
+ self._dpdk_devbind = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
+ return self._dpdk_devbind
@property
def _status_cmd(self):
if self._status_cmd_attr is None:
- self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind)
+ self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_devbind=self.dpdk_devbind)
return self._status_cmd_attr
def _addline(self, active_list, line):
@@ -139,7 +139,7 @@ class DpdkBindHelper(object):
# accept single PCI or list of PCI
if isinstance(pci_addresses, six.string_types):
pci_addresses = [pci_addresses]
- cmd = self.DPDK_BIND_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind,
+ cmd = self.DPDK_BIND_CMD.format(dpdk_devbind=self.dpdk_devbind,
driver=driver,
vpci=' '.join(list(pci_addresses)),
force='--force' if force else '')
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 7881131a7..3ab157dc7 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -26,7 +26,7 @@ class IXIARFC2544Profile(TrexProfile):
UPLINK = 'uplink'
DOWNLINK = 'downlink'
- def _get_ixia_traffic_profile(self, profile_data, mac=None, xfile=None, static_traffic=None):
+ def _get_ixia_traffic_profile(self, profile_data, mac=None):
if mac is None:
mac = {}
@@ -74,12 +74,12 @@ class IXIARFC2544Profile(TrexProfile):
},
'outer_l4': value['outer_l4'],
}
- except Exception:
+ except KeyError:
continue
return result
- def _ixia_traffic_generate(self, traffic_generator, traffic, ixia_obj):
+ def _ixia_traffic_generate(self, traffic, ixia_obj):
for key, value in traffic.items():
if key.startswith((self.UPLINK, self.DOWNLINK)):
value["iload"] = str(self.rate)
@@ -106,7 +106,7 @@ class IXIARFC2544Profile(TrexProfile):
self.ports = [port for port in port_generator()]
- def execute_traffic(self, traffic_generator, ixia_obj, mac=None, xfile=None):
+ def execute_traffic(self, traffic_generator, ixia_obj, mac=None):
if mac is None:
mac = {}
if self.first_run:
@@ -114,28 +114,27 @@ class IXIARFC2544Profile(TrexProfile):
self.pg_id = 0
self.update_traffic_profile(traffic_generator)
traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
+ self._get_ixia_traffic_profile(self.full_profile, mac)
self.max_rate = self.rate
self.min_rate = 0
self.get_multiplier()
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
+ self._ixia_traffic_generate(traffic, ixia_obj)
def get_multiplier(self):
self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
multiplier = round(self.rate / self.pps, 2)
return str(multiplier)
- def start_ixia_latency(self, traffic_generator, ixia_obj,
- mac=None, xfile=None):
+ def start_ixia_latency(self, traffic_generator, ixia_obj, mac=None):
if mac is None:
mac = {}
self.update_traffic_profile(traffic_generator)
traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
+ self._get_ixia_traffic_profile(self.full_profile, mac)
+ self._ixia_traffic_generate(traffic, ixia_obj)
- def get_drop_percentage(self, traffic_generator, samples, tol_min,
- tolerance, ixia_obj, mac=None, xfile=None):
+ def get_drop_percentage(self, samples, tol_min, tolerance, ixia_obj,
+ mac=None):
if mac is None:
mac = {}
status = 'Running'
@@ -179,6 +178,6 @@ class IXIARFC2544Profile(TrexProfile):
samples['DropPercentage'] = drop_percent
return status, samples
self.get_multiplier()
- traffic = self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
- self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
+ traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
+ self._ixia_traffic_generate(traffic, ixia_obj)
return status, samples
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index 1fd6ec41a..5700f98e5 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -16,6 +16,8 @@
from __future__ import absolute_import
import logging
+import datetime
+import time
from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
@@ -81,19 +83,66 @@ class ProxBinSearchProfile(ProxProfile):
# success, the binary search will complete on an integer multiple
# of the precision, rather than on a fraction of it.
+ theor_max_thruput = 0
+
+ result_samples = {}
+
+        # Store one-time-only values in InfluxDB
+ single_samples = {
+            "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
+            "test_precision": self.params["traffic_profile"]["test_precision"],
+            "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
+            "duration": duration
+ }
+ self.queue.put(single_samples)
+ self.prev_time = time.time()
+
# throughput and packet loss from the most recent successful test
successful_pkt_loss = 0.0
for test_value in self.bounds_iterator(LOG):
result, port_samples = self._profile_helper.run_test(pkt_size, duration,
test_value, self.tolerated_loss)
+ self.curr_time = time.time()
+ diff_time = self.curr_time - self.prev_time
+ self.prev_time = self.curr_time
if result.success:
LOG.debug("Success! Increasing lower bound")
self.current_lower = test_value
successful_pkt_loss = result.pkt_loss
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+ samples["TxThroughput"] = samples["TxThroughput"] * 1000 * 1000
+
+ # store results with success tag in influxdb
+ success_samples = {'Success_' + key: value for key, value in samples.items()}
+
+ success_samples["Success_rx_total"] = int(result.rx_total / diff_time)
+ success_samples["Success_tx_total"] = int(result.tx_total / diff_time)
+ success_samples["Success_can_be_lost"] = int(result.can_be_lost / diff_time)
+ success_samples["Success_drop_total"] = int(result.drop_total / diff_time)
+ self.queue.put(success_samples)
+
+ # Store Actual throughput for result samples
+ result_samples["Result_Actual_throughput"] = \
+ success_samples["Success_RxThroughput"]
else:
LOG.debug("Failure... Decreasing upper bound")
self.current_upper = test_value
+ samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+
+ for k in samples:
+ tmp = samples[k]
+ if isinstance(tmp, dict):
+ for k2 in tmp:
+ samples[k][k2] = int(samples[k][k2] / diff_time)
- samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+ if theor_max_thruput < samples["TxThroughput"]:
+ theor_max_thruput = samples['TxThroughput']
+ self.queue.put({'theor_max_throughput': theor_max_thruput})
+
+ LOG.debug("Collect TG KPIs %s %s", datetime.datetime.now(), samples)
self.queue.put(samples)
+
+ result_samples["Result_pktSize"] = pkt_size
+        result_samples["Result_theor_max_throughput"] = theor_max_thruput / (1000 * 1000)
+ self.queue.put(result_samples)
diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/traffic_profile.py
index 2f97945c0..8cde5e4a7 100644
--- a/yardstick/network_services/traffic_profile/traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/traffic_profile.py
@@ -11,16 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex Traffic Profile definitions """
-from __future__ import absolute_import
import struct
import socket
import logging
from random import SystemRandom
-import six
import ipaddress
+import six
+
+from yardstick.common import exceptions as y_exc
from yardstick.network_services.traffic_profile.base import TrafficProfile
from trex_stl_lib.trex_stl_client import STLStream
from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
@@ -70,6 +70,7 @@ class TrexProfile(TrafficProfile):
def _ethernet_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="mac_{}".format(direction),
min_value=1,
max_value=30,
@@ -77,30 +78,32 @@ class TrexProfile(TrafficProfile):
op='inc',
step=1)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_{}'.format(direction),
- pkt_offset='Ether.{}'.format(direction))
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='mac_{}'.format(direction),
+ pkt_offset='Ether.{}'.format(direction))
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
def _ip_range_action_partial(self, direction, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
- ip1 = int(ipaddress.IPv4Address(min_value))
- ip2 = int(ipaddress.IPv4Address(max_value))
- actual_count = (ip2 - ip1)
+ _, _, actual_count = self._count_ip(min_value, max_value)
if not actual_count:
count = 1
elif actual_count < int(count):
count = actual_count
- stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_{}".format(direction),
- min_value=min_value,
- max_value=max_value,
- size=4,
- limit=int(count),
- seed=0x1235)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(
+ name="ip4_{}".format(direction),
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_{}'.format(direction),
- pkt_offset='IP.{}'.format(direction))
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='ip4_{}'.format(direction),
+ pkt_offset='IP.{}'.format(direction))
self.vm_flow_vars.append(stl_vm_wr_flow_var)
stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
self.vm_flow_vars.append(stl_vm_fix_ipv4)
@@ -108,7 +111,8 @@ class TrexProfile(TrafficProfile):
def _ip6_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
- min_value, max_value = self._get_start_end_ipv6(min_value, max_value)
+ # pylint: disable=unused-argument
+ min_value, max_value, _ = self._count_ip(min_value, max_value)
stl_vm_flow_var = STLVmFlowVar(name="ip6_{}".format(direction),
min_value=min_value,
max_value=max_value,
@@ -116,14 +120,16 @@ class TrexProfile(TrafficProfile):
op='random',
step=1)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip6_{}'.format(direction),
- pkt_offset='IPv6.{}'.format(direction),
- offset_fixup=8)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='ip6_{}'.format(direction),
+ pkt_offset='IPv6.{}'.format(direction),
+ offset_fixup=8)
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
def _dscp_range_action_partial(self, *_):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="dscp",
min_value=min_value,
max_value=max_value,
@@ -134,8 +140,10 @@ class TrexProfile(TrafficProfile):
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dscp',
pkt_offset='IP.tos')
self.vm_flow_vars.append(stl_vm_wr_flow_var)
+ return partial
def _udp_range_action_partial(self, field, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
actual_count = int(max_value) - int(min_value)
if not actual_count:
@@ -143,15 +151,17 @@ class TrexProfile(TrafficProfile):
elif int(count) > actual_count:
count = actual_count
- stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_{}".format(field),
- min_value=min_value,
- max_value=max_value,
- size=2,
- limit=int(count),
- seed=0x1235)
+ stl_vm_flow_var = STLVmFlowVarRepeatableRandom(
+ name="port_{}".format(field),
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ limit=int(count),
+ seed=0x1235)
self.vm_flow_vars.append(stl_vm_flow_var)
- stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_{}'.format(field),
- pkt_offset=self.udp[field])
+ stl_vm_wr_flow_var = STLVmWrFlowVar(
+ fv_name='port_{}'.format(field),
+ pkt_offset=self.udp[field])
self.vm_flow_vars.append(stl_vm_wr_flow_var)
return partial
@@ -442,20 +452,18 @@ class TrexProfile(TrafficProfile):
self.profile = STLProfile(self.streams)
@classmethod
- def _get_start_end_ipv6(cls, start_ip, end_ip):
- try:
- ip1 = socket.inet_pton(socket.AF_INET6, start_ip)
- ip2 = socket.inet_pton(socket.AF_INET6, end_ip)
- hi1, lo1 = struct.unpack('!QQ', ip1)
- hi2, lo2 = struct.unpack('!QQ', ip2)
- if ((hi1 << 64) | lo1) > ((hi2 << 64) | lo2):
- raise SystemExit("IPv6: start_ip is greater then end_ip")
- max_p1 = abs(int(lo1) - int(lo2))
- base_p1 = lo1
- except Exception as ex_error:
- raise SystemExit(ex_error)
- else:
- return base_p1, max_p1 + base_p1
+ def _count_ip(cls, start_ip, end_ip):
+ start = ipaddress.ip_address(six.u(start_ip))
+ end = ipaddress.ip_address(six.u(end_ip))
+ if start.version == 4:
+ return start, end, int(end) - int(start)
+ elif start.version == 6:
+ if int(start) > int(end):
+ raise y_exc.IPv6RangeError(start_ip=str(start),
+ end_ip=str(end))
+ _, lo1 = struct.unpack('!QQ', start.packed)
+ _, lo2 = struct.unpack('!QQ', end.packed)
+ return lo1, lo2, lo2 - lo1
@classmethod
def _get_random_value(cls, min_port, max_port):
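
The new _count_ip() helper handles both address families in one place: for IPv4 it returns the parsed addresses plus the size of the range, for IPv6 it compares the full 128-bit values (raising IPv6RangeError for an inverted range) but returns only the low 64 bits of each address. A standalone sketch of the same logic (not the in-tree classmethod, which additionally wraps the inputs with six.u() for Python 2 strings):

# Standalone sketch of the _count_ip logic, runnable on Python 3.
import ipaddress
import struct

def count_ip(start_ip, end_ip):
    start = ipaddress.ip_address(start_ip)
    end = ipaddress.ip_address(end_ip)
    if start.version == 4:
        return start, end, int(end) - int(start)
    # IPv6: validate against the full addresses, return the low 64 bits only.
    if int(start) > int(end):
        raise ValueError('start IP greater than end IP')  # IPv6RangeError in-tree
    _, lo1 = struct.unpack('!QQ', start.packed)
    _, lo2 = struct.unpack('!QQ', end.packed)
    return lo1, lo2, lo2 - lo1

# '10.0.0.1'..'10.0.0.10' spans 9 addresses; '::1'..'::ff' yields 64-bit tails.
assert count_ip(u'10.0.0.1', u'10.0.0.10')[2] == 9
assert count_ip(u'::1', u'::ff') == (1, 255, 254)
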
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index 285ead3b6..61775b963 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -929,6 +929,7 @@ class ProxResourceHelper(ClientResourceHelper):
func = getattr(self.sut, cmd, None)
if func:
return func(*args, **kwargs)
+ return None
def _connect(self, client=None):
"""Run and connect to prox on the remote system """
@@ -1005,11 +1006,18 @@ class ProxDataHelper(object):
def samples(self):
samples = {}
for port_name, port_num in self.vnfd_helper.ports_iter():
- port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8]
- samples[port_name] = {
- "in_packets": port_rx_total,
- "out_packets": port_tx_total,
- }
+ try:
+ port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8]
+ samples[port_name] = {
+ "in_packets": port_rx_total,
+ "out_packets": port_tx_total,
+ }
+ except (KeyError, TypeError, NameError, MemoryError, ValueError,
+ SystemError, BufferError):
+ samples[port_name] = {
+ "in_packets": 0,
+ "out_packets": 0,
+ }
return samples
def __enter__(self):
@@ -1127,7 +1135,7 @@ class ProxProfileHelper(object):
for key, value in section:
if key == "mode" and value == mode:
core_tuple = CoreSocketTuple(section_name)
- core = core_tuple.find_in_topology(self.cpu_topology)
+ core = core_tuple.core_id
cores.append(core)
return cores
@@ -1149,6 +1157,10 @@ class ProxProfileHelper(object):
:return: return lat_min, lat_max, lat_avg
:rtype: list
"""
+
+ if not self._latency_cores:
+ self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
+
if self._latency_cores:
return self.sut.lat_stats(self._latency_cores)
return []
@@ -1198,12 +1210,12 @@ class ProxMplsProfileHelper(ProxProfileHelper):
if item_value.startswith("tag"):
core_tuple = CoreSocketTuple(section_name)
- core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ core_tag = core_tuple.core_id
cores_tagged.append(core_tag)
elif item_value.startswith("udp"):
core_tuple = CoreSocketTuple(section_name)
- core_udp = core_tuple.find_in_topology(self.cpu_topology)
+ core_udp = core_tuple.core_id
cores_plain.append(core_udp)
return cores_tagged, cores_plain
@@ -1276,23 +1288,23 @@ class ProxBngProfileHelper(ProxProfileHelper):
if item_value.startswith("cpe"):
core_tuple = CoreSocketTuple(section_name)
- cpe_core = core_tuple.find_in_topology(self.cpu_topology)
+ cpe_core = core_tuple.core_id
cpe_cores.append(cpe_core)
elif item_value.startswith("inet"):
core_tuple = CoreSocketTuple(section_name)
- inet_core = core_tuple.find_in_topology(self.cpu_topology)
+ inet_core = core_tuple.core_id
inet_cores.append(inet_core)
elif item_value.startswith("arp"):
core_tuple = CoreSocketTuple(section_name)
- arp_core = core_tuple.find_in_topology(self.cpu_topology)
+ arp_core = core_tuple.core_id
arp_cores.append(arp_core)
# We check the tasks/core separately
if item_value.startswith("arp_task"):
core_tuple = CoreSocketTuple(section_name)
- arp_task_core = core_tuple.find_in_topology(self.cpu_topology)
+ arp_task_core = core_tuple.core_id
arp_tasks_core.append(arp_task_core)
return cpe_cores, inet_cores, arp_cores, arp_tasks_core
@@ -1455,12 +1467,12 @@ class ProxVpeProfileHelper(ProxProfileHelper):
if item_value.startswith("cpe"):
core_tuple = CoreSocketTuple(section_name)
- core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ core_tag = core_tuple.core_id
cpe_cores.append(core_tag)
elif item_value.startswith("inet"):
core_tuple = CoreSocketTuple(section_name)
- inet_core = core_tuple.find_in_topology(self.cpu_topology)
+ inet_core = core_tuple.core_id
inet_cores.append(inet_core)
return cpe_cores, inet_cores
@@ -1639,7 +1651,7 @@ class ProxlwAFTRProfileHelper(ProxProfileHelper):
continue
core_tuple = CoreSocketTuple(section_name)
- core_tag = core_tuple.find_in_topology(self.cpu_topology)
+ core_tag = core_tuple.core_id
for item_value in (v for k, v in section if k == 'name'):
if item_value.startswith('tun'):
tun_cores.append(core_tag)
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index b7d295eee..ee7735972 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -14,6 +14,8 @@
import errno
import logging
+import datetime
+import time
from yardstick.common.process import check_if_process_failed
@@ -39,6 +41,9 @@ class ProxApproxVnf(SampleVNF):
if resource_helper_type is None:
resource_helper_type = ProxResourceHelper
+ self.prev_packets_in = 0
+ self.prev_packets_sent = 0
+ self.prev_time = time.time()
super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
resource_helper_type)
@@ -79,12 +84,13 @@ class ProxApproxVnf(SampleVNF):
raise RuntimeError("Failed ..Invalid no of ports .. "
"1, 2 or 4 ports only supported at this time")
- port_stats = self.vnf_execute('port_stats', range(port_count))
+ self.port_stats = self.vnf_execute('port_stats', range(port_count))
+ curr_time = time.time()
try:
- rx_total = port_stats[6]
- tx_total = port_stats[7]
+ rx_total = self.port_stats[6]
+ tx_total = self.port_stats[7]
except IndexError:
- LOG.error("port_stats parse fail %s", port_stats)
+ LOG.debug("port_stats parse fail ")
# return empty dict so we don't mess up existing KPIs
return {}
@@ -96,7 +102,17 @@ class ProxApproxVnf(SampleVNF):
# collectd KPIs here and not TG KPIs, so use a different method name
"collect_stats": self.resource_helper.collect_collectd_kpi(),
}
- LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
+ curr_packets_in = int((rx_total - self.prev_packets_in) / (curr_time - self.prev_time))
+ curr_packets_fwd = int((tx_total - self.prev_packets_sent) / (curr_time - self.prev_time))
+
+ result["curr_packets_in"] = curr_packets_in
+ result["curr_packets_fwd"] = curr_packets_fwd
+
+ self.prev_packets_in = rx_total
+ self.prev_packets_sent = tx_total
+ self.prev_time = curr_time
+
+ LOG.debug("%s collect KPIs %s %s", self.APP_NAME, datetime.datetime.now(), result)
return result
def _tear_down(self):
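
The new curr_packets_in and curr_packets_fwd KPIs are per-interval rates: the delta of the cumulative port counters divided by the time elapsed since the previous collect_kpi() call. A minimal sketch of that bookkeeping, outside the VNF class (class and attribute names are illustrative):

# Sketch of the rate calculation added above; not the in-tree implementation.
import time

class PacketRateTracker(object):
    def __init__(self):
        self.prev_rx = 0
        self.prev_tx = 0
        self.prev_time = time.time()

    def update(self, rx_total, tx_total):
        """Return (rx_pps, tx_pps) since the previous sample."""
        now = time.time()
        interval = max(now - self.prev_time, 1e-6)  # guard against a zero interval
        rx_pps = int((rx_total - self.prev_rx) / interval)
        tx_pps = int((tx_total - self.prev_tx) / interval)
        self.prev_rx, self.prev_tx, self.prev_time = rx_total, tx_total, now
        return rx_pps, tx_pps
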
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 18b4f0b12..200930322 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -252,20 +252,12 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
self.ssh_helper.execute("sudo killall %s" % self.APP_NAME)
def _setup_dpdk(self):
- """ setup dpdk environment needed for vnf to run """
-
+ """Setup DPDK environment needed for VNF to run"""
self._setup_hugepages()
- self.ssh_helper.execute("sudo modprobe uio && sudo modprobe igb_uio")
-
- exit_status = self.ssh_helper.execute("lsmod | grep -i igb_uio")[0]
- if exit_status == 0:
- return
-
- dpdk = self.ssh_helper.join_bin_path(DPDK_VERSION)
- dpdk_setup = self.ssh_helper.provision_tool(tool_file="nsb_setup.sh")
- exit_status = self.ssh_helper.execute("which {} >/dev/null 2>&1".format(dpdk))[0]
- if exit_status != 0:
- self.ssh_helper.execute("bash %s dpdk >/dev/null 2>&1" % dpdk_setup)
+ self.ssh_helper.execute('sudo modprobe uio && sudo modprobe igb_uio')
+ exit_status = self.ssh_helper.execute('lsmod | grep -i igb_uio')[0]
+ if exit_status:
+ raise y_exceptions.DPDKSetupDriverError()
def get_collectd_options(self):
options = self.scenario_helper.all_options.get("collectd", {})
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 630c8b9c0..12510db96 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -149,7 +149,7 @@ class IxiaResourceHelper(ClientResourceHelper):
self.client.ix_stop_traffic()
samples = self.generate_samples(traffic_profile.ports)
self._queue.put(samples)
- status, samples = traffic_profile.get_drop_percentage(self, samples, min_tol,
+ status, samples = traffic_profile.get_drop_percentage(samples, min_tol,
max_tol, self.client, mac)
current = samples['CurrentDropPercentage']
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index c02c0eb27..077ce2385 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -34,9 +34,9 @@ LOG = logging.getLogger(__name__)
VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
VPE_COLLECT_KPI = """\
-Pkts in:\s(\d+)\r\n\
-\tPkts dropped by AH:\s(\d+)\r\n\
-\tPkts dropped by other:\s(\d+)\
+Pkts in:\\s(\\d+)\r\n\
+\tPkts dropped by AH:\\s(\\d+)\r\n\
+\tPkts dropped by other:\\s(\\d+)\
"""
@@ -61,6 +61,25 @@ class ConfigCreate(object):
self.downlink_ports = self.vnfd_helper.port_pairs.downlink_ports
self.pipeline_per_port = 9
self.socket = socket
+ self._dpdk_port_to_link_id_map = None
+
+ @property
+ def dpdk_port_to_link_id_map(self):
+ # we need interface name -> DPDK port num (PMD ID) -> LINK ID
+ # LINK ID -> PMD ID is governed by the port mask
+        # LINK instances are created implicitly from the PORT_MASK application startup
+        # argument: LINK0 is the first port enabled in the PORT_MASK, LINK1 is the next one,
+        # and so on. The LINK ID differs from the DPDK PMD-level NIC port ID, which is the
+        # actual bit position in that mask. For example, if bit 5 is the first bit set in
+        # the mask, then LINK0 has a PMD ID of 5. This mechanism creates a contiguous LINK ID
+        # space and isolates the configuration file from changes in the board PCIe slots
+        # where the NICs are plugged in.
+ if self._dpdk_port_to_link_id_map is None:
+ self._dpdk_port_to_link_id_map = {}
+ for link_id, port_name in enumerate(sorted(self.vnfd_helper.port_pairs.all_ports,
+ key=self.vnfd_helper.port_num)):
+ self._dpdk_port_to_link_id_map[port_name] = link_id
+ return self._dpdk_port_to_link_id_map
def vpe_initialize(self, config):
config.add_section('EAL')
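A small sketch of the PORT_MASK mechanism described in the comment above: walking the mask from the least-significant bit, every set bit (a PMD port ID) receives the next contiguous LINK ID. The helper name and mask value are illustrative only, not part of the patch:

    def pmd_to_link_id(port_mask):
        """Map PMD port IDs (set bits in PORT_MASK) to contiguous LINK IDs."""
        mapping = {}
        link_id = 0
        for pmd_id in range(port_mask.bit_length()):
            if port_mask & (1 << pmd_id):
                mapping[pmd_id] = link_id
                link_id += 1
        return mapping

    # With PORT_MASK 0xA0 (bits 5 and 7 set), PMD port 5 becomes LINK0
    # and PMD port 7 becomes LINK1.
    assert pmd_to_link_id(0xA0) == {5: 0, 7: 1}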
@@ -79,7 +98,7 @@ class ConfigCreate(object):
def vpe_rxq(self, config):
for port in self.downlink_ports:
- new_section = 'RXQ{0}.0'.format(self.vnfd_helper.port_num(port))
+ new_section = 'RXQ{0}.0'.format(self.dpdk_port_to_link_id_map[port])
config.add_section(new_section)
config.set(new_section, 'mempool', 'MEMPOOL1')
@@ -104,7 +123,7 @@ class ConfigCreate(object):
for k, v in parser.items(pipeline):
if k == "pktq_in":
if "RXQ" in v:
- port = self.vnfd_helper.port_num(self.uplink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
value = "RXQ{0}.0".format(port)
else:
value = self.get_sink_swq(parser, pipeline, k, index)
@@ -113,7 +132,7 @@ class ConfigCreate(object):
elif k == "pktq_out":
if "TXQ" in v:
- port = self.vnfd_helper.port_num(self.downlink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
value = "TXQ{0}.0".format(port)
else:
self.sw_q += 1
@@ -135,7 +154,7 @@ class ConfigCreate(object):
for k, v in parser.items(pipeline):
if k == "pktq_in":
- port = self.vnfd_helper.port_num(self.downlink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
if "RXQ" not in v:
value = self.get_sink_swq(parser, pipeline, k, index)
elif "TM" in v:
@@ -146,7 +165,7 @@ class ConfigCreate(object):
parser.set(pipeline, k, value)
if k == "pktq_out":
- port = self.vnfd_helper.port_num(self.uplink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
if "TXQ" not in v:
self.sw_q += 1
value = self.get_sink_swq(parser, pipeline, k, index)
@@ -171,7 +190,7 @@ class ConfigCreate(object):
config = self.vpe_initialize(config)
config = self.vpe_rxq(config)
config.write(cfg_file)
- for index in range(0, len(self.uplink_ports)):
+ for index, _ in enumerate(self.uplink_ports):
config = self.vpe_upstream(vnf_cfg, index)
config.write(cfg_file)
config = self.vpe_downstream(vnf_cfg, index)
@@ -209,7 +228,7 @@ class ConfigCreate(object):
return rules.get_string()
- def generate_tm_cfg(self, vnf_cfg, index=0):
+ def generate_tm_cfg(self, vnf_cfg):
vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg")
if os.path.exists(vnf_cfg):
return open(vnf_cfg).read()
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index d58ae5618..3c3d28146 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -10,23 +10,21 @@
"""Heat template and stack management"""
from __future__ import absolute_import
-from __future__ import print_function
-from six.moves import range
-
import collections
import datetime
import getpass
import logging
-
+import pkg_resources
import socket
+import tempfile
import time
-import heatclient.client
-import pkg_resources
-
+from oslo_serialization import jsonutils
from oslo_utils import encodeutils
+import shade
import yardstick.common.openstack_utils as op_utils
+from yardstick.common import exceptions
from yardstick.common import template_format
log = logging.getLogger(__name__)
@@ -36,123 +34,89 @@ HEAT_KEY_UUID_LENGTH = 8
PROVIDER_SRIOV = "sriov"
+_DEPLOYED_STACKS = {}
+
def get_short_key_uuid(uuid):
return str(uuid)[:HEAT_KEY_UUID_LENGTH]
-class HeatObject(object):
- """base class for template and stack"""
-
- def __init__(self):
- self._heat_client = None
- self.uuid = None
-
- @property
- def heat_client(self):
- """returns a heat client instance"""
-
- if self._heat_client is None:
- sess = op_utils.get_session()
- heat_endpoint = op_utils.get_endpoint(service_type='orchestration')
- self._heat_client = heatclient.client.Client(
- op_utils.get_heat_api_version(),
- endpoint=heat_endpoint, session=sess)
-
- return self._heat_client
-
- def status(self):
- """returns stack state as a string"""
- heat_client = self.heat_client
- stack = heat_client.stacks.get(self.uuid)
- return stack.stack_status
-
-
-class HeatStack(HeatObject):
+class HeatStack(object):
"""Represents a Heat stack (deployed template) """
- stacks = []
def __init__(self, name):
- super(HeatStack, self).__init__()
- self.uuid = None
self.name = name
- self.outputs = None
- HeatStack.stacks.append(self)
+ self.outputs = {}
+ self._cloud = shade.openstack_cloud()
+ self._stack = None
+
+ def create(self, template, heat_parameters, wait, timeout):
+ """Creates an OpenStack stack from a template"""
+ with tempfile.NamedTemporaryFile('wb', delete=False) as template_file:
+ template_file.write(jsonutils.dump_as_bytes(template))
+ template_file.close()
+ self._stack = self._cloud.create_stack(
+ self.name, template_file=template_file.name, wait=wait,
+ timeout=timeout, **heat_parameters)
+ outputs = self._stack.outputs
+ self.outputs = {output['output_key']: output['output_value'] for output
+ in outputs}
+ if self.uuid:
+ _DEPLOYED_STACKS[self.uuid] = self._stack
@staticmethod
def stacks_exist():
- """check if any stack has been deployed"""
- return len(HeatStack.stacks) > 0
+ """Check if any stack has been deployed"""
+ return len(_DEPLOYED_STACKS) > 0
- def _delete(self):
- """deletes a stack from the target cloud using heat"""
+ def delete(self, wait=True):
+ """Deletes a stack in the target cloud"""
if self.uuid is None:
return
- log.info("Deleting stack '%s' START, uuid:%s", self.name, self.uuid)
- heat = self.heat_client
- template = heat.stacks.get(self.uuid)
- start_time = time.time()
- template.delete()
+ try:
+ ret = self._cloud.delete_stack(self.uuid, wait=wait)
+ except TypeError:
+ # NOTE(ralonsoh): this exception catch solves a bug in Shade, which
+ # tries to retrieve and read the stack status when it's already
+ # deleted.
+ ret = True
- for status in iter(self.status, u'DELETE_COMPLETE'):
- log.debug("Deleting stack state: %s", status)
- if status == u'DELETE_FAILED':
- raise RuntimeError(
- heat.stacks.get(self.uuid).stack_status_reason)
-
- time.sleep(2)
-
- end_time = time.time()
- log.info("Deleting stack '%s' DONE in %d secs", self.name,
- end_time - start_time)
- self.uuid = None
-
- def delete(self, block=True, retries=3):
- """deletes a stack in the target cloud using heat (with retry)
- Sometimes delete fail with "InternalServerError" and the next attempt
- succeeds. So it is worthwhile to test a couple of times.
- """
- if self.uuid is None:
- return
-
- if not block:
- self._delete()
- return
-
- for _ in range(retries):
- try:
- self._delete()
- break
- except RuntimeError as err:
- log.warning(err.args)
- time.sleep(2)
-
- # if still not deleted try once more and let it fail everything
- if self.uuid is not None:
- self._delete()
-
- HeatStack.stacks.remove(self)
+ _DEPLOYED_STACKS.pop(self.uuid)
+ self._stack = None
+ return ret
@staticmethod
def delete_all():
- for stack in HeatStack.stacks[:]:
+ """Delete all deployed stacks"""
+ for stack in _DEPLOYED_STACKS:
stack.delete()
- def update(self):
- """update a stack"""
- raise RuntimeError("not implemented")
+ @property
+ def status(self):
+ """Retrieve the current stack status"""
+ if self._stack:
+ return self._stack.status
+
+ @property
+ def uuid(self):
+ """Retrieve the current stack ID"""
+ if self._stack:
+ return self._stack.id
-class HeatTemplate(HeatObject):
+class HeatTemplate(object):
"""Describes a Heat template and a method to deploy template to a stack"""
- DESCRIPTION_TEMPLATE = """\
+ DESCRIPTION_TEMPLATE = """
Stack built by the yardstick framework for %s on host %s %s.
All referred generated resources are prefixed with the template
-name (i.e. %s).\
+name (i.e. %s).
"""
+ HEAT_WAIT_LOOP_INTERVAL = 2
+ HEAT_STATUS_COMPLETE = 'COMPLETE'
+
def _init_template(self):
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self._template = {
@@ -171,9 +135,7 @@ name (i.e. %s).\
self.resources = self._template['resources']
def __init__(self, name, template_file=None, heat_parameters=None):
- super(HeatTemplate, self).__init__()
self.name = name
- self.state = "NOT_CREATED"
self.keystone_client = None
self.heat_parameters = {}
@@ -184,16 +146,13 @@ name (i.e. %s).\
if template_file:
with open(template_file) as stream:
- print("Parsing external template:", template_file)
+ log.info('Parsing external template: %s', template_file)
template_str = stream.read()
self._template = template_format.parse(template_str)
self._parameters = heat_parameters
else:
self._init_template()
- # holds results of requested output after deployment
- self.outputs = {}
-
log.debug("template object '%s' created", name)
def add_flavor(self, name, vcpus=1, ram=1024, disk=1, ephemeral=0,
@@ -202,9 +161,9 @@ name (i.e. %s).\
"""add to the template a Flavor description"""
if name is None:
name = 'auto'
- log.debug("adding Nova::Flavor '%s' vcpus '%d' ram '%d' disk '%d' " +
- "ephemeral '%d' is_public '%s' rxtx_factor '%d' " +
- "swap '%d' extra_specs '%s' ",
+ log.debug("adding Nova::Flavor '%s' vcpus '%d' ram '%d' disk '%d' "
+ "ephemeral '%d' is_public '%s' rxtx_factor '%d' "
+ "swap '%d' extra_specs '%s'",
name, vcpus, ram, disk, ephemeral, is_public,
rxtx_factor, swap, str(extra_specs))
@@ -521,7 +480,36 @@ name (i.e. %s).\
'port_range_max': '65535'},
{'remote_ip_prefix': '::/0',
'ethertype': 'IPv6',
- 'protocol': 'ipv6-icmp'}
+ 'protocol': 'ipv6-icmp'},
+ {'remote_ip_prefix': '0.0.0.0/0',
+ 'direction': 'egress',
+ 'protocol': 'tcp',
+ 'port_range_min': '1',
+ 'port_range_max': '65535'},
+ {'remote_ip_prefix': '0.0.0.0/0',
+ 'direction': 'egress',
+ 'protocol': 'udp',
+ 'port_range_min': '1',
+ 'port_range_max': '65535'},
+ {'remote_ip_prefix': '0.0.0.0/0',
+ 'direction': 'egress',
+ 'protocol': 'icmp'},
+ {'remote_ip_prefix': '::/0',
+ 'direction': 'egress',
+ 'ethertype': 'IPv6',
+ 'protocol': 'tcp',
+ 'port_range_min': '1',
+ 'port_range_max': '65535'},
+ {'remote_ip_prefix': '::/0',
+ 'direction': 'egress',
+ 'ethertype': 'IPv6',
+ 'protocol': 'udp',
+ 'port_range_min': '1',
+ 'port_range_max': '65535'},
+ {'remote_ip_prefix': '::/0',
+ 'direction': 'egress',
+ 'ethertype': 'IPv6',
+ 'protocol': 'ipv6-icmp'},
]
}
}
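The six new entries simply mirror the existing ingress rules for egress traffic over IPv4 and IPv6. Purely as an illustration of the structure (the patch keeps the dictionaries written out explicitly), the same list could be generated as:

    def egress_rules():
        """Illustrative helper that rebuilds the six egress rules above."""
        rules = []
        for prefix, ethertype in (('0.0.0.0/0', None), ('::/0', 'IPv6')):
            icmp = 'icmp' if ethertype is None else 'ipv6-icmp'
            for protocol in ('tcp', 'udp', icmp):
                rule = {'remote_ip_prefix': prefix,
                        'direction': 'egress',
                        'protocol': protocol}
                if ethertype:
                    rule['ethertype'] = ethertype
                if protocol in ('tcp', 'udp'):
                    rule.update(port_range_min='1', port_range_max='65535')
                rules.append(rule)
        return rules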
@@ -600,57 +588,28 @@ name (i.e. %s).\
'value': {'get_resource': name}
}
- HEAT_WAIT_LOOP_INTERVAL = 2
- HEAT_CREATE_COMPLETE_STATUS = u'CREATE_COMPLETE'
-
def create(self, block=True, timeout=3600):
- """
- creates a template in the target cloud using heat
- returns a dict with the requested output values from the template
+ """Creates a stack in the target based on the stored template
- :param block: Wait for Heat create to finish
- :type block: bool
- :param: timeout: timeout in seconds for Heat create, default 3600s
- :type timeout: int
+ :param block: (bool) Wait for Heat create to finish
+ :param timeout: (int) Timeout in seconds for Heat create,
+ default 3600s
+ :return: (HeatStack) The deployed stack, exposing the requested template output values
"""
log.info("Creating stack '%s' START", self.name)
- # create stack early to support cleanup, e.g. ctrl-c while waiting
- stack = HeatStack(self.name)
-
- heat_client = self.heat_client
start_time = time.time()
- stack.uuid = self.uuid = heat_client.stacks.create(
- stack_name=self.name, template=self._template,
- parameters=self.heat_parameters)['stack']['id']
+ stack = HeatStack(self.name)
+ stack.create(self._template, self.heat_parameters, block, timeout)
if not block:
- self.outputs = stack.outputs = {}
- end_time = time.time()
log.info("Creating stack '%s' DONE in %d secs",
- self.name, end_time - start_time)
+ self.name, time.time() - start_time)
return stack
- time_limit = start_time + timeout
- for status in iter(self.status, self.HEAT_CREATE_COMPLETE_STATUS):
- log.debug("Creating stack state: %s", status)
- if status == u'CREATE_FAILED':
- stack_status_reason = heat_client.stacks.get(self.uuid).stack_status_reason
- heat_client.stacks.delete(self.uuid)
- raise RuntimeError(stack_status_reason)
- if time.time() > time_limit:
- raise RuntimeError("Heat stack create timeout")
+ if stack.status != self.HEAT_STATUS_COMPLETE:
+ raise exceptions.HeatTemplateError(stack_name=self.name)
- time.sleep(self.HEAT_WAIT_LOOP_INTERVAL)
-
- end_time = time.time()
- outputs = heat_client.stacks.get(self.uuid).outputs
log.info("Creating stack '%s' DONE in %d secs",
- self.name, end_time - start_time)
-
- # keep outputs as unicode
- self.outputs = {output["output_key"]: output["output_value"] for output
- in outputs}
-
- stack.outputs = self.outputs
+ self.name, time.time() - start_time)
return stack
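With the shade-based rewrite, deployed-stack bookkeeping moves to the module-level _DEPLOYED_STACKS registry and per-stack state lives on the returned HeatStack, so a caller only needs the object handed back by create(). A hedged usage sketch; the stack name and the added network are invented for illustration:

    template = HeatTemplate('yardstick-demo')
    template.add_network('demo-net')                  # build up the template as before
    stack = template.create(block=True, timeout=600)  # raises HeatTemplateError unless COMPLETE
    print(stack.uuid, stack.status)                   # ID and status come straight from shade
    print(stack.outputs)                              # outputs are already a key -> value dict
    stack.delete(wait=True)                           # also removes it from _DEPLOYED_STACKS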
diff --git a/yardstick/resources/scripts/install/ovs_deploy.bash b/yardstick/resources/scripts/install/ovs_deploy.bash
index d94f30db1..beda9a5e0 100755
--- a/yardstick/resources/scripts/install/ovs_deploy.bash
+++ b/yardstick/resources/scripts/install/ovs_deploy.bash
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+set -e
+
INSTALL_OVS_BIN="/usr/src"
cd $INSTALL_OVS_BIN
@@ -22,20 +24,6 @@ if [[ $EUID -ne 0 ]]; then
exit 1;
fi
-prerequisite()
-{
- echo "Install required libraries to run collectd..."
- pkg=(git flex bison build-essential pkg-config automake autotools-dev libltdl-dev cmake qemu-kvm libvirt-bin bridge-utils numactl libnuma-dev libpcap-dev)
- for i in "${pkg[@]}"; do
- dpkg-query -W --showformat='${Status}\n' "${i}"|grep "install ok installed"
- if [ "$?" -eq "1" ]; then
- apt-get update
- apt-get -y install "${i}";
- fi
- done
- echo "Done"
-}
-
download_zip()
{
url=$1
@@ -53,6 +41,7 @@ download_zip()
dpdk_build()
{
+ echo "Build DPDK libraries"
pushd .
if [[ $DPDK_VERSION != "" ]]; then
export DPDK_DIR=$INSTALL_OVS_BIN/dpdk-stable-$DPDK_VERSION
@@ -62,13 +51,15 @@ dpdk_build()
DPDK_DOWNLOAD="http://fast.dpdk.org/rel/dpdk-$DPDK_VERSION.tar.xz"
download_zip "${DPDK_DOWNLOAD}" "DPDK"
cd dpdk-stable-"$DPDK_VERSION"
- make install -j T=$RTE_TARGET
+ make config T=$RTE_TARGET
+ make install -j $(nproc) T=$RTE_TARGET
fi
popd
}
ovs()
{
+ echo "Build and install OVS with DPDK"
pushd .
if [[ $OVS_VERSION != "" ]]; then
rm -rf openswitch-"$OVS_VERSION"
@@ -82,7 +73,7 @@ ovs()
else
./configure
fi
- make install -j
+ make install -j $(nproc)
fi
popd
}
diff --git a/yardstick/tests/fixture.py b/yardstick/tests/fixture.py
new file mode 100644
index 000000000..94d20eb34
--- /dev/null
+++ b/yardstick/tests/fixture.py
@@ -0,0 +1,47 @@
+# Copyright 2017 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+import six
+
+from yardstick.common import task_template
+
+
+class PluginParserFixture(fixtures.Fixture):
+ """PluginParser fixture.
+
+ This class is intended to be used as a fixture within unit tests and
+ therefore consumers must register it using useFixture() within their
+ unit test class.
+ """
+
+ def __init__(self, rendered_plugin):
+ super(PluginParserFixture, self).__init__()
+ self._rendered_plugin = rendered_plugin
+
+ def _setUp(self):
+ self.addCleanup(self._restore)
+ self._mock_tasktemplate_render = mock.patch.object(
+ task_template.TaskTemplate, 'render')
+ self.mock_tasktemplate_render = self._mock_tasktemplate_render.start()
+ self.mock_tasktemplate_render.return_value = self._rendered_plugin
+ self._mock_open = mock.patch.object(six.moves.builtins, 'open', create=True)
+ self.mock_open = self._mock_open.start()
+ self.mock_open.side_effect = mock.mock_open()
+
+ def _restore(self):
+ self._mock_tasktemplate_render.stop()
+ self._mock_open.stop()
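As the docstring notes, the fixture is meant to be consumed through useFixture() from a testtools/fixtures-based test case; the new test_plugin.py below does exactly that. A minimal usage sketch (the rendered YAML string is arbitrary):

    import testtools

    from yardstick.common import task_template
    from yardstick.tests import fixture


    class ExampleTestCase(testtools.TestCase):

        def setUp(self):
            super(ExampleTestCase, self).setUp()
            # TaskTemplate.render() and open() stay patched for the whole test.
            self.useFixture(fixture.PluginParserFixture("schema: 'yardstick:plugin:0.1'"))

        def test_render_is_patched(self):
            self.assertEqual("schema: 'yardstick:plugin:0.1'",
                             task_template.TaskTemplate.render('anything'))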
diff --git a/tests/unit/benchmark/__init__.py b/yardstick/tests/unit/benchmark/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/__init__.py
+++ b/yardstick/tests/unit/benchmark/__init__.py
diff --git a/tests/unit/benchmark/contexts/__init__.py b/yardstick/tests/unit/benchmark/contexts/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/contexts/__init__.py
+++ b/yardstick/tests/unit/benchmark/contexts/__init__.py
diff --git a/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml
index dbdd3700d..dbdd3700d 100644
--- a/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml
index 306915ca1..306915ca1 100644
--- a/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml
index 65449c91c..65449c91c 100644
--- a/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_sample.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_sample.yaml
index 8d50c3aea..8d50c3aea 100644
--- a/tests/unit/benchmark/contexts/nodes_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_sample.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_sample_new.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_sample_new.yaml
index a400bec03..a400bec03 100644
--- a/tests/unit/benchmark/contexts/nodes_sample_new.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_sample_new.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml
index 55ff2e778..55ff2e778 100644
--- a/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml
index b1da1ea9f..b1da1ea9f 100644
--- a/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml
diff --git a/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml b/yardstick/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml
index c02849a05..c02849a05 100644
--- a/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml
diff --git a/tests/unit/benchmark/contexts/standalone/__init__.py b/yardstick/tests/unit/benchmark/contexts/standalone/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/contexts/standalone/__init__.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/__init__.py
diff --git a/tests/unit/benchmark/contexts/standalone/nodes_duplicate_sample.yaml b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_duplicate_sample.yaml
index 2e501a6af..2e501a6af 100644
--- a/tests/unit/benchmark/contexts/standalone/nodes_duplicate_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_duplicate_sample.yaml
diff --git a/tests/unit/benchmark/contexts/standalone/nodes_ovs_dpdk_sample.yaml b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_ovs_dpdk_sample.yaml
index 0f51dbe63..0f51dbe63 100644
--- a/tests/unit/benchmark/contexts/standalone/nodes_ovs_dpdk_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_ovs_dpdk_sample.yaml
diff --git a/tests/unit/benchmark/contexts/standalone/nodes_sample.yaml b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_sample.yaml
index 8d50c3aea..8d50c3aea 100644
--- a/tests/unit/benchmark/contexts/standalone/nodes_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_sample.yaml
diff --git a/tests/unit/benchmark/contexts/standalone/nodes_sriov_sample.yaml b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_sriov_sample.yaml
index 1c43b8725..1c43b8725 100644
--- a/tests/unit/benchmark/contexts/standalone/nodes_sriov_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/nodes_sriov_sample.yaml
diff --git a/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
index 6090356b6..18ea3c4e6 100644
--- a/tests/unit/benchmark/contexts/standalone/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
@@ -18,8 +18,6 @@ import unittest
import mock
from xml.etree import ElementTree
-
-from yardstick.benchmark.contexts.standalone.model import Libvirt
from yardstick.benchmark.contexts.standalone import model
from yardstick.network_services import utils
@@ -57,6 +55,9 @@ class ModelLibvirtTestCase(unittest.TestCase):
def _cleanup(self):
self._mock_write_xml.stop()
+ # TODO: Remove mocking of yardstick.ssh.SSH (here and elsewhere)
+ # In this case, we are mocking a param to be passed into other methods
+ # It can be a generic Mock() with return values set for the right methods
def test_check_if_vm_exists_and_delete(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -193,6 +194,8 @@ class ModelLibvirtTestCase(unittest.TestCase):
status = model.Libvirt.build_vm_xml(ssh_mock, {}, cfg_file, 'vm_0', 0)
self.assertEqual(status[0], result[0])
+ # TODO: Edit this test to test state instead of output
+ # update_interrupts_hugepages_perf does not return anything
def test_update_interrupts_hugepages_perf(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -203,9 +206,9 @@ class ModelLibvirtTestCase(unittest.TestCase):
# None, this check is trivial.
#status = Libvirt.update_interrupts_hugepages_perf(ssh_mock)
#self.assertIsNone(status)
- Libvirt.update_interrupts_hugepages_perf(ssh_mock)
+ model.Libvirt.update_interrupts_hugepages_perf(ssh_mock)
- @mock.patch("yardstick.benchmark.contexts.standalone.model.CpuSysCores")
+ @mock.patch.object(model, 'CpuSysCores')
@mock.patch.object(model.Libvirt, 'update_interrupts_hugepages_perf')
def test_pin_vcpu_for_perf(self, *args):
# NOTE(ralonsoh): test mocked methods/variables.
@@ -214,7 +217,7 @@ class ModelLibvirtTestCase(unittest.TestCase):
ssh_mock.execute = \
mock.Mock(return_value=(0, "a", ""))
ssh.return_value = ssh_mock
- status = Libvirt.pin_vcpu_for_perf(ssh_mock, 4)
+ status = model.Libvirt.pin_vcpu_for_perf(ssh_mock, 4)
self.assertIsNotNone(status)
class StandaloneContextHelperTestCase(unittest.TestCase):
@@ -225,15 +228,15 @@ class StandaloneContextHelperTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'cidr': '152.16.100.10/24',
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'cidr': '152.16.100.10/24',
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'cidr': '152.16.40.10/24',
- 'gateway_ip': '152.16.100.20'}
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'cidr': '152.16.40.10/24',
+ 'gateway_ip': '152.16.100.20'}
}
def setUp(self):
@@ -280,7 +283,7 @@ class StandaloneContextHelperTestCase(unittest.TestCase):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, pattern, ""))
+ mock.Mock(return_value=(1, pattern, ""))
ssh.return_value = ssh_mock
# NOTE(ralonsoh): this test doesn't cover function execution. This test
# should also check mocked function calls.
@@ -333,7 +336,7 @@ class StandaloneContextHelperTestCase(unittest.TestCase):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, "", ""))
+ mock.Mock(return_value=(1, "", ""))
ssh.return_value = ssh_mock
# NOTE(ralonsoh): this test doesn't cover function execution. This test
# should also check mocked function calls.
@@ -348,19 +351,19 @@ class ServerTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.100.10/24',
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.100.10/24',
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.40.10/24',
- 'gateway_ip': '152.16.100.20'}
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.40.10/24',
+ 'gateway_ip': '152.16.100.20'}
}
def setUp(self):
@@ -392,25 +395,27 @@ class ServerTestCase(unittest.TestCase):
{}, self.NETWORKS, '1.1.1.1/24', 'vm_0', vnf, '00:00:00:00:00:01')
self.assertIsNotNone(status)
+
class OvsDeployTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.100.10/24',
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.100.10/24',
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.40.10/24',
- 'gateway_ip': '152.16.100.20'}
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.40.10/24',
+ 'gateway_ip': '152.16.100.20'}
}
+
@mock.patch('yardstick.ssh.SSH')
def setUp(self, mock_ssh):
self.ovs_deploy = model.OvsDeploy(mock_ssh, '/tmp/dpdk-devbind.py', {})
diff --git a/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index e39ecf4f2..02a85525a 100644
--- a/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.contexts.standalone.standaloneovs
-
-from __future__ import absolute_import
import os
-import unittest
+
import mock
+import unittest
from yardstick.benchmark.contexts.standalone import ovs_dpdk
@@ -39,28 +37,28 @@ class OvsDpdkContextTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'cidr': '152.16.100.10/24',
- 'interface': 'if0',
- 'mac': "00:00:00:00:00:01",
- 'vf_pci': {'vf_pci': 0},
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'cidr': '152.16.100.10/24',
+ 'interface': 'if0',
+ 'mac': "00:00:00:00:00:01",
+ 'vf_pci': {'vf_pci': 0},
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'cidr': '152.16.40.10/24',
- 'interface': 'if0',
- 'vf_pci': {'vf_pci': 0},
- 'mac': "00:00:00:00:00:01",
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'cidr': '152.16.40.10/24',
+ 'interface': 'if0',
+ 'vf_pci': {'vf_pci': 0},
+ 'mac': "00:00:00:00:00:01",
+ 'gateway_ip': '152.16.100.20'},
}
def setUp(self):
self.ovs_dpdk = ovs_dpdk.OvsDpdkContext()
- @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
+ @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
def test___init__(self, mock_helper, mock_server):
self.ovs_dpdk.helper = mock_helper
self.ovs_dpdk.vnf_node = mock_server
@@ -68,7 +66,8 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.assertTrue(self.ovs_dpdk.first_run)
def test_init(self):
- self.ovs_dpdk.helper.parse_pod_file = mock.Mock(return_value=[{}, {}, {}])
+ self.ovs_dpdk.helper.parse_pod_file = mock.Mock(
+ return_value=[{}, {}, {}])
self.assertIsNone(self.ovs_dpdk.init(self.ATTRS))
def test_setup_ovs(self):
@@ -108,19 +107,16 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.wait_for_vswitchd = 0
self.assertIsNone(self.ovs_dpdk.setup_ovs_bridge_add_flows())
- def test_cleanup_ovs_dpdk_env(self):
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(0, "a", ""))
- ssh.return_value = ssh_mock
- self.ovs_dpdk.connection = ssh_mock
- self.ovs_dpdk.networks = self.NETWORKS
- self.ovs_dpdk.ovs_properties = {
- 'version': {'ovs': '2.7.0'}
- }
- self.ovs_dpdk.wait_for_vswitchd = 0
- self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env())
+ @mock.patch("yardstick.ssh.SSH")
+ def test_cleanup_ovs_dpdk_env(self, mock_ssh):
+ mock_ssh.execute.return_value = 0, "a", ""
+ self.ovs_dpdk.connection = mock_ssh
+ self.ovs_dpdk.networks = self.NETWORKS
+ self.ovs_dpdk.ovs_properties = {
+ 'version': {'ovs': '2.7.0'}
+ }
+ self.ovs_dpdk.wait_for_vswitchd = 0
+ self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env())
@mock.patch('yardstick.benchmark.contexts.standalone.model.OvsDeploy')
def test_check_ovs_dpdk_env(self, mock_ovs):
@@ -132,7 +128,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.connection = ssh_mock
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.ovs_properties = {
- 'version': {'ovs': '2.7.0', 'dpdk': '16.11.1'}
+ 'version': {'ovs': '2.7.0', 'dpdk': '16.11.1'}
}
self.ovs_dpdk.wait_for_vswitchd = 0
self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
@@ -143,11 +139,12 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.wait_for_vswitchd = 0
self.cleanup_ovs_dpdk_env = mock.Mock()
mock_ovs.deploy = mock.Mock()
+ # NOTE(elfoley): Check for a specific Exception
self.assertRaises(Exception, self.ovs_dpdk.check_ovs_dpdk_env)
@mock.patch('yardstick.ssh.SSH')
- def test_deploy(self, ssh_mock):
- ssh_mock.execute.return_value = (0, "a", "")
+ def test_deploy(self, mock_ssh):
+ mock_ssh.execute.return_value = 0, "a", ""
self.ovs_dpdk.vm_deploy = False
self.assertIsNone(self.ovs_dpdk.deploy())
@@ -168,15 +165,15 @@ class OvsDpdkContextTestCase(unittest.TestCase):
@mock.patch('yardstick.benchmark.contexts.standalone.model.Libvirt')
@mock.patch('yardstick.ssh.SSH')
- def test_undeploy(self, ssh_mock, _):
- ssh_mock.execute.return_value = (0, "a", "")
+ def test_undeploy(self, mock_ssh, *args):
+ mock_ssh.execute.return_value = 0, "a", ""
self.ovs_dpdk.vm_deploy = False
self.assertIsNone(self.ovs_dpdk.undeploy())
self.ovs_dpdk.vm_deploy = True
+ self.ovs_dpdk.connection = mock_ssh
self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
- self.ovs_dpdk.connection = ssh_mock
self.ovs_dpdk.drivers = ['vm_0', 'vm_1']
self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
self.ovs_dpdk.networks = self.NETWORKS
@@ -208,7 +205,8 @@ class OvsDpdkContextTestCase(unittest.TestCase):
'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
}
- self.ovs_dpdk.helper.parse_pod_file = mock.Mock(return_value=[{}, {}, {}])
+ self.ovs_dpdk.helper.parse_pod_file = mock.Mock(
+ return_value=[{}, {}, {}])
self.ovs_dpdk.init(attrs)
attr_name = 'bar.foo'
@@ -260,6 +258,8 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ # TODO(elfoley): Split this test for networks that exist and networks that
+ # don't
def test__get_network(self):
network1 = {
'name': 'net_1',
@@ -277,6 +277,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
'b': network2,
}
+ # Tests for networks that do not exist
attr_name = {}
self.assertIsNone(self.ovs_dpdk._get_network(attr_name))
@@ -285,9 +286,11 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.assertIsNone(self.ovs_dpdk._get_network(None))
+ # TODO(elfoley): Split this test
attr_name = 'vld777'
self.assertIsNone(self.ovs_dpdk._get_network(attr_name))
+ # Tests for networks that exist
attr_name = {'vld_id': 'vld999'}
expected = {
"name": 'net_2',
@@ -319,8 +322,8 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.get_vf_datas = mock.Mock(return_value="")
self.assertIsNone(self.ovs_dpdk.configure_nics_for_ovs_dpdk())
- @mock.patch('yardstick.benchmark.contexts.standalone.model.Libvirt.add_ovs_interface')
- def test__enable_interfaces(self, _):
+ @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
+ def test__enable_interfaces(self, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
@@ -332,11 +335,12 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.drivers = []
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.get_vf_datas = mock.Mock(return_value="")
- self.assertIsNone(self.ovs_dpdk._enable_interfaces(0, ["private_0"], 'test'))
+ self.assertIsNone(self.ovs_dpdk._enable_interfaces(
+ 0, ["private_0"], 'test'))
- @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
- def test_setup_ovs_dpdk_context(self, _, mock_libvirt):
+ @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
+ def test_setup_ovs_dpdk_context(self, mock_libvirt, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
@@ -361,10 +365,11 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk.host_mgmt = {}
self.ovs_dpdk.flavor = {}
self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
- mock_libvirt.check_if_vm_exists_and_delete = mock.Mock(return_value="")
- mock_libvirt.build_vm_xml = mock.Mock(return_value=[6, "00:00:00:00:00:01"])
+ mock_libvirt.build_vm_xml.return_value = [6, "00:00:00:00:00:01"]
self.ovs_dpdk._enable_interfaces = mock.Mock(return_value="")
- mock_libvirt.virsh_create_vm = mock.Mock(return_value="")
- mock_libvirt.pin_vcpu_for_perf = mock.Mock(return_value="")
- self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(return_value={})
+ mock_libvirt.virsh_create_vm.return_value = ""
+ mock_libvirt.pin_vcpu_for_perf.return_value = ""
+ self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(
+ return_value={})
+
self.assertIsNotNone(self.ovs_dpdk.setup_ovs_dpdk_context())
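Two patterns recur in the test changes above: with mock.patch(...) blocks become @mock.patch decorators, and stacked decorators are reordered. The detail to keep straight is that decorators apply bottom-up, so the one closest to the test method supplies the first mock argument, and mocks the test never inspects can be absorbed with *args. A self-contained illustration (the patched targets are arbitrary stdlib names, not Yardstick code):

    import unittest

    import mock


    class DecoratorOrderTestCase(unittest.TestCase):

        @mock.patch('os.rename')        # outermost -> ends up in *args below
        @mock.patch('os.remove')
        @mock.patch('os.path.exists')   # innermost -> first mock argument
        def test_ordering(self, mock_exists, mock_remove, *args):
            self.assertIsNot(mock_exists, mock_remove)
            self.assertEqual(1, len(args))  # the unused os.rename mock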
diff --git a/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index 7f11a7d59..f323fcd3c 100644
--- a/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.contexts.standalone.standalonesriov
-
-from __future__ import absolute_import
import os
-import unittest
+
import mock
+import unittest
from yardstick import ssh
from yardstick.benchmark.contexts.standalone import sriov
@@ -40,33 +38,31 @@ class SriovContextTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'cidr': '152.16.100.10/24',
- 'interface': 'if0',
- 'mac': "00:00:00:00:00:01",
- 'vf_pci': {'vf_pci': 0},
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'cidr': '152.16.100.10/24',
+ 'interface': 'if0',
+ 'mac': "00:00:00:00:00:01",
+ 'vf_pci': {'vf_pci': 0},
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'cidr': '152.16.40.10/24',
- 'interface': 'if0',
- 'vf_pci': {'vf_pci': 0},
- 'mac': "00:00:00:00:00:01",
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'cidr': '152.16.40.10/24',
+ 'interface': 'if0',
+ 'vf_pci': {'vf_pci': 0},
+ 'mac': "00:00:00:00:00:01",
+ 'gateway_ip': '152.16.100.20'},
}
def setUp(self):
self.sriov = sriov.SriovContext()
- @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
+ @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
- def test___init__(self, mock_helper, mock_libvirt, mock_server):
- # pylint: disable=unused-argument
+ def test___init__(self, mock_helper, mock_server, *args):
# NOTE(ralonsoh): this test doesn't cover function execution.
- # The pylint exception should be removed.
self.sriov.helper = mock_helper
self.sriov.vnf_node = mock_server
self.assertIsNone(self.sriov.file_path)
@@ -77,10 +73,8 @@ class SriovContextTestCase(unittest.TestCase):
self.assertIsNone(self.sriov.init(self.ATTRS))
@mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
- def test_deploy(self, mock_ssh):
- # pylint: disable=unused-argument
+ def test_deploy(self, *args):
# NOTE(ralonsoh): this test doesn't cover function execution.
- # The pylint exception should be removed.
self.sriov.vm_deploy = False
self.assertIsNone(self.sriov.deploy())
@@ -92,11 +86,9 @@ class SriovContextTestCase(unittest.TestCase):
self.sriov.wait_for_vnfs_to_start = mock.Mock(return_value={})
self.assertIsNone(self.sriov.deploy())
- @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
- def test_undeploy(self, mock_libvirt, mock_ssh):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
+ def test_undeploy(self, mock_ssh, *args):
self.sriov.vm_deploy = False
self.assertIsNone(self.sriov.undeploy())
@@ -184,6 +176,8 @@ class SriovContextTestCase(unittest.TestCase):
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ # TODO(elfoley): Split this test
+ # There are at least two sets of inputs/outputs
def test__get_network(self):
network1 = {
'name': 'net_1',
@@ -243,24 +237,23 @@ class SriovContextTestCase(unittest.TestCase):
self.sriov._get_vf_data = mock.Mock(return_value="")
self.assertIsNone(self.sriov.configure_nics_for_sriov())
- @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
- def test__enable_interfaces(self, mock_libvirt, mock_ssh):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ @mock.patch.object(ssh, 'SSH')
+ def test__enable_interfaces(self, mock_ssh, *args):
+ mock_ssh.return_value = 0, "a", ""
+
self.sriov.vm_deploy = True
self.sriov.connection = mock_ssh
self.sriov.vm_names = ['vm_0', 'vm_1']
self.sriov.drivers = []
self.sriov.networks = self.NETWORKS
- self.sriov._get_vf_data = mock.Mock(return_value="")
- self.assertIsNone(self.sriov._enable_interfaces(0, 0, ["private_0"], 'test'))
+ self.sriov.get_vf_data = mock.Mock(return_value="")
+ self.assertIsNone(self.sriov._enable_interfaces(
+ 0, 0, ["private_0"], 'test'))
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
- def test_setup_sriov_context(self, mock_libvirt, mock_server):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ def test_setup_sriov_context(self, mock_libvirt, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
@@ -285,7 +278,8 @@ class SriovContextTestCase(unittest.TestCase):
self.sriov.host_mgmt = {}
self.sriov.flavor = {}
self.sriov.configure_nics_for_sriov = mock.Mock(return_value="")
- mock_libvirt.build_vm_xml = mock.Mock(return_value=[6, "00:00:00:00:00:01"])
+ mock_libvirt.build_vm_xml = mock.Mock(
+ return_value=[6, "00:00:00:00:00:01"])
self.sriov._enable_interfaces = mock.Mock(return_value="")
self.sriov.vnf_node.generate_vnf_instance = mock.Mock(return_value={})
self.assertIsNotNone(self.sriov.setup_sriov_context())
diff --git a/tests/unit/benchmark/contexts/standalone_duplicate_sample.yaml b/yardstick/tests/unit/benchmark/contexts/standalone_duplicate_sample.yaml
index e468d0465..e468d0465 100644
--- a/tests/unit/benchmark/contexts/standalone_duplicate_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/standalone_duplicate_sample.yaml
diff --git a/tests/unit/benchmark/contexts/standalone_sample.yaml b/yardstick/tests/unit/benchmark/contexts/standalone_sample.yaml
index 95e12d62f..95e12d62f 100644
--- a/tests/unit/benchmark/contexts/standalone_sample.yaml
+++ b/yardstick/tests/unit/benchmark/contexts/standalone_sample.yaml
diff --git a/tests/unit/benchmark/contexts/test_dummy.py b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
index 1a54035df..1a54035df 100644
--- a/tests/unit/benchmark/contexts/test_dummy.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index f2e725df2..4348bb052 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -9,22 +9,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.contexts.heat
-
-from __future__ import absolute_import
-
+from collections import OrderedDict
+from itertools import count
import logging
import os
-import unittest
import uuid
-from collections import OrderedDict
import mock
+import unittest
-from itertools import count
from yardstick.benchmark.contexts import heat
from yardstick.benchmark.contexts import model
+
LOG = logging.getLogger(__name__)
@@ -82,16 +79,16 @@ class HeatContextTestCase(unittest.TestCase):
pgs['pgrp1']['policy'])
mock_sg.assert_called_with('servergroup1', self.test_context,
sgs['servergroup1']['policy'])
- self.assertTrue(len(self.test_context.placement_groups) == 1)
- self.assertTrue(len(self.test_context.server_groups) == 1)
+ self.assertEqual(len(self.test_context.placement_groups), 1)
+ self.assertEqual(len(self.test_context.server_groups), 1)
mock_network.assert_called_with(
'bar', self.test_context, networks['bar'])
- self.assertTrue(len(self.test_context.networks) == 1)
+ self.assertEqual(len(self.test_context.networks), 1)
mock_server.assert_called_with('baz', self.test_context,
servers['baz'])
- self.assertTrue(len(self.test_context.servers) == 1)
+ self.assertEqual(len(self.test_context.servers), 1)
if os.path.exists(self.test_context.key_filename):
try:
@@ -119,15 +116,17 @@ class HeatContextTestCase(unittest.TestCase):
"2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b")
mock_template.add_security_group.assert_called_with("foo-secgroup")
# mock_template.add_network.assert_called_with("bar-fool-network", 'physnet1', None)
- mock_template.add_router.assert_called_with("bar-fool-network-router",
- netattrs["external_network"],
- "bar-fool-network-subnet")
- mock_template.add_router_interface.assert_called_with("bar-fool-network-router-if0",
- "bar-fool-network-router",
- "bar-fool-network-subnet")
+ mock_template.add_router.assert_called_with(
+ "bar-fool-network-router",
+ netattrs["external_network"],
+ "bar-fool-network-subnet")
+ mock_template.add_router_interface.assert_called_with(
+ "bar-fool-network-router-if0",
+ "bar-fool-network-router",
+ "bar-fool-network-subnet")
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
- def test_attrs_get(self, mock_template):
+ def test_attrs_get(self, *args):
image, flavor, user = expected_tuple = 'foo1', 'foo2', 'foo3'
self.assertNotEqual(self.test_context.image, image)
self.assertNotEqual(self.test_context.flavor, flavor)
@@ -139,7 +138,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertEqual(attr_tuple, expected_tuple)
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
- def test_attrs_set_negative(self, mock_template):
+ def test_attrs_set_negative(self, *args):
with self.assertRaises(AttributeError):
self.test_context.image = 'foo'
@@ -227,13 +226,13 @@ class HeatContextTestCase(unittest.TestCase):
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
@mock.patch('yardstick.benchmark.contexts.heat.os')
- def test_undeploy_key_filename(self, mock_template, mock_os):
+ def test_undeploy_key_filename(self, mock_os, mock_template):
self.test_context.stack = mock_template
mock_os.path.exists.return_value = True
self.assertIsNone(self.test_context.undeploy())
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_found_dict(self, mock_pkg_resources):
+ def test__get_server_found_dict(self, *args):
"""
Use HeatContext._get_server to get a server that matches
based on a dictionary input.
@@ -274,7 +273,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertEqual(result['private_ip'], '10.0.0.1')
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_found_dict_no_attrs(self, mock_pkg_resources):
+ def test__get_server_found_dict_no_attrs(self, *args):
"""
Use HeatContext._get_server to get a server that matches
based on a dictionary input.
@@ -313,7 +312,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertNotIn('ip', result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_found_not_dict(self, mock_pkg_resources):
+ def test__get_server_found_not_dict(self, *args):
"""
Use HeatContext._get_server to get a server that matches
based on a non-dictionary input
@@ -350,7 +349,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertNotIn('public_ip', result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_none_found_not_dict(self, mock_pkg_resources):
+ def test__get_server_none_found_not_dict(self, *args):
"""
Use HeatContext._get_server to not get a server due to
None value associated with the match to a non-dictionary
@@ -384,7 +383,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_not_found_dict(self, mock_pkg_resources):
+ def test__get_server_not_found_dict(self, *args):
"""
Use HeatContext._get_server to not get a server for lack
of a match to a dictionary input
@@ -420,7 +419,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_not_found_not_dict(self, mock_pkg_resources):
+ def test__get_server_not_found_not_dict(self, *args):
"""
Use HeatContext._get_server to not get a server for lack
of a match to a non-dictionary input
@@ -451,6 +450,7 @@ class HeatContextTestCase(unittest.TestCase):
result = self.test_context._get_server(attr_name)
self.assertIsNone(result)
+ # TODO: Split this into more granular tests
def test__get_network(self):
network1 = mock.MagicMock()
network1.name = 'net_1'
diff --git a/tests/unit/benchmark/contexts/test_kubernetes.py b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
index 3a926f85c..e149e0d18 100644
--- a/tests/unit/benchmark/contexts/test_kubernetes.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
@@ -9,14 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.contexts.kubernetes
-
-from __future__ import absolute_import
-import unittest
import mock
+import unittest
from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.contexts.kubernetes import KubernetesContext
+from yardstick.benchmark.contexts import kubernetes
context_cfg = {
@@ -43,33 +38,35 @@ prefix = 'yardstick.benchmark.contexts.kubernetes'
class KubernetesTestCase(unittest.TestCase):
+ def setUp(self):
+ self.k8s_context = kubernetes.KubernetesContext()
+ self.k8s_context.init(context_cfg)
+
def tearDown(self):
# clear kubernetes contexts from global list so we don't break other tests
Context.list = []
- @mock.patch('{}.KubernetesContext._delete_services'.format(prefix))
- @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
- @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
- @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_services')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_ssh_key')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_rcs')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_pods')
def test_undeploy(self,
mock_delete_pods,
mock_delete_rcs,
mock_delete_ssh,
mock_delete_services):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context.undeploy()
+ self.k8s_context.undeploy()
self.assertTrue(mock_delete_ssh.called)
self.assertTrue(mock_delete_rcs.called)
self.assertTrue(mock_delete_pods.called)
self.assertTrue(mock_delete_services.called)
- @mock.patch('{}.KubernetesContext._create_services'.format(prefix))
- @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
- @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
- @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
- @mock.patch('{}.KubernetesContext._set_ssh_key'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_services')
+ @mock.patch.object(kubernetes.KubernetesContext, '_wait_until_running')
+ @mock.patch.object(kubernetes.KubernetesTemplate, 'get_rc_pods')
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_rcs')
+ @mock.patch.object(kubernetes.KubernetesContext, '_set_ssh_key')
def test_deploy(self,
mock_set_ssh_key,
mock_create_rcs,
@@ -77,44 +74,37 @@ class KubernetesTestCase(unittest.TestCase):
mock_wait_until_running,
mock_create_services):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
with mock.patch("yardstick.benchmark.contexts.kubernetes.time"):
- k8s_context.deploy()
+ self.k8s_context.deploy()
self.assertTrue(mock_set_ssh_key.called)
self.assertTrue(mock_create_rcs.called)
self.assertTrue(mock_create_services.called)
self.assertTrue(mock_get_rc_pods.called)
self.assertTrue(mock_wait_until_running.called)
- @mock.patch('{}.paramiko'.format(prefix), **{"resource_filename.return_value": ""})
- @mock.patch('{}.pkg_resources'.format(prefix), **{"resource_filename.return_value": ""})
- @mock.patch('{}.utils'.format(prefix))
- @mock.patch('{}.open'.format(prefix), create=True)
- @mock.patch('{}.k8s_utils.delete_config_map'.format(prefix))
- @mock.patch('{}.k8s_utils.create_config_map'.format(prefix))
- def test_ssh_key(self, mock_create, mock_delete, mock_open, mock_utils, mock_resources,
- mock_paramiko):
-
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._set_ssh_key()
- k8s_context._delete_ssh_key()
+ @mock.patch.object(kubernetes, 'paramiko', **{"resource_filename.return_value": ""})
+ @mock.patch.object(kubernetes, 'pkg_resources', **{"resource_filename.return_value": ""})
+ @mock.patch.object(kubernetes, 'utils')
+ @mock.patch.object(kubernetes, 'open', create=True)
+ @mock.patch.object(kubernetes.k8s_utils, 'delete_config_map')
+ @mock.patch.object(kubernetes.k8s_utils, 'create_config_map')
+ def test_ssh_key(self, mock_create, mock_delete, *args):
+ self.k8s_context._set_ssh_key()
+ self.k8s_context._delete_ssh_key()
+
self.assertTrue(mock_create.called)
self.assertTrue(mock_delete.called)
- @mock.patch('{}.k8s_utils.read_pod_status'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'read_pod_status')
def test_wait_until_running(self, mock_read_pod_status):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context.template.pods = ['server']
+ self.k8s_context.template.pods = ['server']
mock_read_pod_status.return_value = 'Running'
- k8s_context._wait_until_running()
+ self.k8s_context._wait_until_running()
- @mock.patch('{}.k8s_utils.get_pod_by_name'.format(prefix))
- @mock.patch('{}.KubernetesContext._get_node_ip'.format(prefix))
- @mock.patch('{}.k8s_utils.get_service_by_name'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'get_pod_by_name')
+ @mock.patch.object(kubernetes.KubernetesContext, '_get_node_ip')
+ @mock.patch.object(kubernetes.k8s_utils, 'get_service_by_name')
def test_get_server(self,
mock_get_service_by_name,
mock_get_node_ip,
@@ -136,64 +126,45 @@ class KubernetesTestCase(unittest.TestCase):
def __init__(self):
self.status = Status()
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
-
mock_get_service_by_name.return_value = Services()
mock_get_pod_by_name.return_value = Pod()
mock_get_node_ip.return_value = '172.16.10.131'
- server = k8s_context._get_server('server')
- self.assertIsNotNone(server)
+ self.assertIsNotNone(self.k8s_context._get_server('server'))
- @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_rc')
def test_create_rcs(self, mock_create_rc):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._create_rcs()
+ self.k8s_context._create_rcs()
self.assertTrue(mock_create_rc.called)
- @mock.patch('{}.k8s_utils.create_replication_controller'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'create_replication_controller')
def test_create_rc(self, mock_create_replication_controller):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._create_rc({})
+ self.k8s_context._create_rc({})
self.assertTrue(mock_create_replication_controller.called)
- @mock.patch('{}.KubernetesContext._delete_rc'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_rc')
def test_delete_rcs(self, mock_delete_rc):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._delete_rcs()
+ self.k8s_context._delete_rcs()
self.assertTrue(mock_delete_rc.called)
- @mock.patch('{}.k8s_utils.delete_replication_controller'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'delete_replication_controller')
def test_delete_rc(self, mock_delete_replication_controller):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._delete_rc({})
+ self.k8s_context._delete_rc({})
self.assertTrue(mock_delete_replication_controller.called)
- @mock.patch('{}.k8s_utils.get_node_list'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'get_node_list')
def test_get_node_ip(self, mock_get_node_list):
-
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._get_node_ip()
+ self.k8s_context._get_node_ip()
self.assertTrue(mock_get_node_list.called)
@mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.create')
def test_create_services(self, mock_create):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._create_services()
+ self.k8s_context._create_services()
self.assertTrue(mock_create.called)
@mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.delete')
def test_delete_services(self, mock_delete):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._delete_services()
+ self.k8s_context._delete_services()
self.assertTrue(mock_delete.called)
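The kubernetes tests above replace dotted-string patch targets assembled with '{}.foo'.format(prefix) by mock.patch.object on the imported module, and build the context once in a shared setUp(). A self-contained sketch of the same pattern; Deployer is a stand-in class, not the real KubernetesContext:

    import unittest

    import mock


    class Deployer(object):
        """Stand-in for a context class such as KubernetesContext."""

        def _delete_pods(self):
            raise RuntimeError('should be patched out in the test')

        def undeploy(self):
            self._delete_pods()


    class PatchObjectExample(unittest.TestCase):

        def setUp(self):
            # Building the object once here mirrors the new shared setUp().
            self.context = Deployer()

        @mock.patch.object(Deployer, '_delete_pods')
        def test_undeploy(self, mock_delete_pods):
            # patch.object resolves the attribute on the imported class itself,
            # so no dotted-string path has to be assembled with format().
            self.context.undeploy()
            self.assertTrue(mock_delete_pods.called)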
diff --git a/tests/unit/benchmark/contexts/test_model.py b/yardstick/tests/unit/benchmark/contexts/test_model.py
index 53b035b82..28011d494 100644
--- a/tests/unit/benchmark/contexts/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_model.py
@@ -155,7 +155,7 @@ class NetworkTestCase(unittest.TestCase):
def test_find_external_network(self):
mock_network = mock.Mock()
- mock_network.router = mock.Mock()
+ mock_network.router = mock.Mock() #pylint ignore assignment-from-none
mock_network.router.external_gateway_info = 'ext_net'
model.Network.list = [mock_network]
diff --git a/tests/unit/benchmark/contexts/test_node.py b/yardstick/tests/unit/benchmark/contexts/test_node.py
index a2e2f7b9a..5329d30f4 100644
--- a/tests/unit/benchmark/contexts/test_node.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_node.py
@@ -21,6 +21,10 @@ from yardstick.common import constants as consts
from yardstick.benchmark.contexts import node
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
class NodeContextTestCase(unittest.TestCase):
PREFIX = 'yardstick.benchmark.contexts.node'
@@ -345,6 +349,7 @@ class NodeContextTestCase(unittest.TestCase):
result = self.test_context.get_context_from_server('my.vnf1')
self.assertIs(result, self.test_context)
+ # TODO: Split this into more granular tests
def test__get_network(self):
network1 = {
'name': 'net_1',
diff --git a/tests/unit/benchmark/core/__init__.py b/yardstick/tests/unit/benchmark/core/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/core/__init__.py
+++ b/yardstick/tests/unit/benchmark/core/__init__.py
diff --git a/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml b/yardstick/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
index 44c4a31ff..44c4a31ff 100644
--- a/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
+++ b/yardstick/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
diff --git a/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml b/yardstick/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
index ced13f19e..ced13f19e 100644
--- a/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
+++ b/yardstick/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
diff --git a/yardstick/tests/unit/benchmark/core/test_plugin.py b/yardstick/tests/unit/benchmark/core/test_plugin.py
new file mode 100644
index 000000000..0d14e4e86
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/core/test_plugin.py
@@ -0,0 +1,142 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import copy
+import os
+import pkg_resources
+
+import mock
+import testtools
+
+from yardstick import ssh
+from yardstick.benchmark.core import plugin
+from yardstick.tests import fixture
+
+
+class PluginTestCase(testtools.TestCase):
+
+ FILE = """
+schema: "yardstick:plugin:0.1"
+
+plugins:
+ name: sample
+
+deployment:
+ ip: 10.1.0.50
+ user: root
+ password: root
+"""
+
+ NAME = 'sample'
+ DEPLOYMENT = {'ip': '10.1.0.50', 'user': 'root', 'password': 'root'}
+
+ def setUp(self):
+ super(PluginTestCase, self).setUp()
+ self.plugin_parser = plugin.PluginParser(mock.Mock())
+ self.plugin = plugin.Plugin()
+ self.useFixture(fixture.PluginParserFixture(PluginTestCase.FILE))
+
+ self._mock_ssh_from_node = mock.patch.object(ssh.SSH, 'from_node')
+ self.mock_ssh_from_node = self._mock_ssh_from_node.start()
+ self.mock_ssh_obj = mock.Mock()
+ self.mock_ssh_from_node.return_value = self.mock_ssh_obj
+ self.mock_ssh_obj.wait = mock.Mock()
+ self.mock_ssh_obj._put_file_shell = mock.Mock()
+
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_ssh_from_node.stop()
+
+ def test_install(self):
+ args = mock.Mock()
+ args.input_file = [mock.Mock()]
+ with mock.patch.object(self.plugin, '_install_setup') as \
+ mock_install, \
+ mock.patch.object(self.plugin, '_run') as mock_run:
+ self.plugin.install(args)
+ mock_install.assert_called_once_with(PluginTestCase.NAME,
+ PluginTestCase.DEPLOYMENT)
+ mock_run.assert_called_once_with(PluginTestCase.NAME)
+
+ def test_remove(self):
+ args = mock.Mock()
+ args.input_file = [mock.Mock()]
+ with mock.patch.object(self.plugin, '_remove_setup') as \
+ mock_remove, \
+ mock.patch.object(self.plugin, '_run') as mock_run:
+ self.plugin.remove(args)
+ mock_remove.assert_called_once_with(PluginTestCase.NAME,
+ PluginTestCase.DEPLOYMENT)
+ mock_run.assert_called_once_with(PluginTestCase.NAME)
+
+ @mock.patch.object(pkg_resources, 'resource_filename',
+ return_value='script')
+ def test__install_setup(self, mock_resource_filename):
+ plugin_name = 'plugin_name'
+ self.plugin._install_setup(plugin_name, PluginTestCase.DEPLOYMENT)
+ mock_resource_filename.assert_called_once_with(
+ 'yardstick.resources', 'scripts/install/' + plugin_name + '.bash')
+ self.mock_ssh_from_node.assert_called_once_with(
+ PluginTestCase.DEPLOYMENT)
+ self.mock_ssh_obj.wait.assert_called_once_with(timeout=600)
+ self.mock_ssh_obj._put_file_shell.assert_called_once_with(
+ 'script', '~/{0}.sh'.format(plugin_name))
+
+ @mock.patch.object(pkg_resources, 'resource_filename',
+ return_value='script')
+ @mock.patch.object(os, 'environ', return_value='1.2.3.4')
+ def test__install_setup_with_ip_local(self, mock_os_environ,
+ mock_resource_filename):
+ plugin_name = 'plugin_name'
+ deployment = copy.deepcopy(PluginTestCase.DEPLOYMENT)
+ deployment['ip'] = 'local'
+ self.plugin._install_setup(plugin_name, deployment)
+ mock_os_environ.__getitem__.assert_called_once_with('JUMP_HOST_IP')
+ mock_resource_filename.assert_called_once_with(
+ 'yardstick.resources',
+ 'scripts/install/' + plugin_name + '.bash')
+ self.mock_ssh_from_node.assert_called_once_with(
+ deployment, overrides={'ip': os.environ["JUMP_HOST_IP"]})
+ self.mock_ssh_obj.wait.assert_called_once_with(timeout=600)
+ self.mock_ssh_obj._put_file_shell.assert_called_once_with(
+ 'script', '~/{0}.sh'.format(plugin_name))
+
+ @mock.patch.object(pkg_resources, 'resource_filename',
+ return_value='script')
+ def test__remove_setup(self, mock_resource_filename):
+ plugin_name = 'plugin_name'
+ self.plugin._remove_setup(plugin_name, PluginTestCase.DEPLOYMENT)
+ mock_resource_filename.assert_called_once_with(
+ 'yardstick.resources',
+ 'scripts/remove/' + plugin_name + '.bash')
+ self.mock_ssh_from_node.assert_called_once_with(
+ PluginTestCase.DEPLOYMENT)
+ self.mock_ssh_obj.wait.assert_called_once_with(timeout=600)
+ self.mock_ssh_obj._put_file_shell.assert_called_once_with(
+ 'script', '~/{0}.sh'.format(plugin_name))
+
+ @mock.patch.object(pkg_resources, 'resource_filename',
+ return_value='script')
+ @mock.patch.object(os, 'environ', return_value='1.2.3.4')
+ def test__remove_setup_with_ip_local(self, mock_os_environ,
+ mock_resource_filename):
+ plugin_name = 'plugin_name'
+ deployment = copy.deepcopy(PluginTestCase.DEPLOYMENT)
+ deployment['ip'] = 'local'
+ self.plugin._remove_setup(plugin_name, deployment)
+ mock_os_environ.__getitem__.assert_called_once_with('JUMP_HOST_IP')
+ mock_resource_filename.assert_called_once_with(
+ 'yardstick.resources',
+ 'scripts/remove/' + plugin_name + '.bash')
+ self.mock_ssh_from_node.assert_called_once_with(
+ deployment, overrides={'ip': os.environ["JUMP_HOST_IP"]})
+ self.mock_ssh_obj.wait.assert_called_once_with(timeout=600)
+ self.mock_ssh_obj._put_file_shell.assert_called_once_with(
+ 'script', '~/{0}.sh'.format(plugin_name))
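test_plugin.py above also shows the patcher lifecycle this refactoring moves towards: start a mock.patch.object in setUp() and register its stop with addCleanup() so the patch is undone however the test exits. A minimal sketch of that lifecycle, assuming yardstick.ssh is importable; it passes stop directly to addCleanup rather than via a helper method, which is equivalent:

    import unittest

    import mock

    from yardstick import ssh


    class PatchLifecycleExample(unittest.TestCase):

        def setUp(self):
            # Start the patch by hand so every test method sees the same mock...
            self._mock_from_node = mock.patch.object(ssh.SSH, 'from_node')
            self.mock_from_node = self._mock_from_node.start()
            # ...and guarantee it is stopped no matter how the test exits.
            self.addCleanup(self._mock_from_node.stop)

        def test_from_node_is_mocked(self):
            ssh.SSH.from_node({'ip': '10.1.0.50'})
            self.mock_from_node.assert_called_once_with({'ip': '10.1.0.50'})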
diff --git a/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py
index 69546928c..3d9a503b6 100644
--- a/tests/unit/benchmark/core/test_report.py
+++ b/yardstick/tests/unit/benchmark/core/test_report.py
@@ -55,10 +55,12 @@ class ReportTestCase(unittest.TestCase):
self.assertEqual(1, mock_tasks.call_count)
self.assertEqual(1, mock_keys.call_count)
+ # pylint: disable=deprecated-method
def test_invalid_yaml_name(self):
self.assertRaisesRegexp(ValueError, "yaml*", self.rep._validate,
'F@KE_NAME', FAKE_TASK_ID)
+ # pylint: disable=deprecated-method
def test_invalid_task_id(self):
self.assertRaisesRegexp(ValueError, "task*", self.rep._validate,
FAKE_YAML_NAME, DUMMY_TASK_ID)
@@ -68,5 +70,7 @@ class ReportTestCase(unittest.TestCase):
mock_query.return_value = []
self.rep.yaml_name = FAKE_YAML_NAME
self.rep.task_id = FAKE_TASK_ID
+ # pylint: disable=deprecated-method
self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_fieldkeys)
self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks)
+ # pylint: enable=deprecated-method
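The pylint markers above silence deprecated-method rather than moving off assertRaisesRegexp, presumably because the suite still runs under Python 2, where only that spelling exists. Under Python 3 the non-deprecated form is assertRaisesRegex; a small self-contained sketch (the _validate helper below is hypothetical, standing in for Report._validate):

    import unittest


    class RegexAssertionExample(unittest.TestCase):

        def _validate(self, yaml_name):
            # Hypothetical helper: reject names that are not *.yaml files.
            if not yaml_name.endswith('.yaml'):
                raise ValueError('invalid yaml name: %s' % yaml_name)

        def test_invalid_yaml_name(self):
            # Python 3 spelling; assertRaisesRegexp is the deprecated alias.
            self.assertRaisesRegex(ValueError, 'yaml', self._validate, 'F@KE_NAME')
            # Equivalent context-manager form:
            with self.assertRaisesRegex(ValueError, 'yaml'):
                self._validate('F@KE_NAME')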
diff --git a/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index bed0bb6d8..ee00d8826 100644
--- a/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
@@ -9,19 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.core.task
-
-from __future__ import print_function
-
-from __future__ import absolute_import
import os
-import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
+import mock
+import unittest
from yardstick.benchmark.core import task
from yardstick.common import constants as consts
@@ -29,19 +18,21 @@ from yardstick.common import constants as consts
class TaskTestCase(unittest.TestCase):
- @mock.patch('yardstick.benchmark.core.task.Context')
- def test_parse_nodes_host_target_same_context(self, mock_context):
- nodes = {
- "host": "node1.LF",
- "target": "node2.LF"
+ @mock.patch.object(task, 'Context')
+ def test_parse_nodes_with_context_same_context(self, mock_context):
+ scenario_cfg = {
+ "nodes": {
+ "host": "node1.LF",
+ "target": "node2.LF"
+ }
}
- scenario_cfg = {"nodes": nodes}
server_info = {
"ip": "10.20.0.3",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
}
mock_context.get_server.return_value = server_info
+
context_cfg = task.parse_nodes_with_context(scenario_cfg)
self.assertEqual(context_cfg["host"], server_info)
@@ -53,15 +44,22 @@ class TaskTestCase(unittest.TestCase):
t._set_dispatchers(output_config)
self.assertEqual(output_config, output_config)
- @mock.patch('yardstick.benchmark.core.task.DispatcherBase')
+ @mock.patch.object(task, 'DispatcherBase')
def test__do_output(self, mock_dispatcher):
t = task.Task()
output_config = {"DEFAULT": {"dispatcher": "file, http"}}
- mock_dispatcher.get = mock.MagicMock(return_value=[mock.MagicMock(),
- mock.MagicMock()])
+
+ dispatcher1 = mock.MagicMock()
+ dispatcher1.__dispatcher_type__ = 'file'
+
+ dispatcher2 = mock.MagicMock()
+ dispatcher2.__dispatcher_type__ = 'http'
+
+ mock_dispatcher.get = mock.MagicMock(return_value=[dispatcher1,
+ dispatcher2])
self.assertEqual(None, t._do_output(output_config, {}))
- @mock.patch('yardstick.benchmark.core.task.Context')
+ @mock.patch.object(task, 'Context')
def test_parse_networks_from_nodes(self, mock_context):
nodes = {
'node1': {
@@ -125,9 +123,9 @@ class TaskTestCase(unittest.TestCase):
self.assertEqual(mock_context.get_network.call_count, expected_get_network_calls)
self.assertDictEqual(networks, expected)
- @mock.patch('yardstick.benchmark.core.task.Context')
- @mock.patch('yardstick.benchmark.core.task.base_runner')
- def test_run(self, mock_base_runner, mock_ctx):
+ @mock.patch.object(task, 'Context')
+ @mock.patch.object(task, 'base_runner')
+ def test_run(self, mock_base_runner, *args):
scenario = {
'host': 'athena.demo',
'target': 'ares.demo',
@@ -148,8 +146,8 @@ class TaskTestCase(unittest.TestCase):
t._run([scenario], False, "yardstick.out")
self.assertTrue(runner.run.called)
- @mock.patch('yardstick.benchmark.core.task.os')
- def test_check_precondition(self, mock_os):
+ @mock.patch.object(os, 'environ')
+ def test_check_precondition(self, mock_os_environ):
cfg = {
'precondition': {
'installer_type': 'compass',
@@ -159,7 +157,7 @@ class TaskTestCase(unittest.TestCase):
}
t = task.TaskParser('/opt')
- mock_os.environ.get.side_effect = ['compass',
+ mock_os_environ.get.side_effect = ['compass',
'os-nosdn',
'huawei-pod1']
result = t._check_precondition(cfg)
@@ -168,82 +166,75 @@ class TaskTestCase(unittest.TestCase):
def test_parse_suite_no_constraint_no_args(self):
SAMPLE_SCENARIO_PATH = "no_constraint_no_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
- with mock.patch('yardstick.benchmark.core.task.os.environ',
+ with mock.patch.object(os, 'environ',
new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}):
task_files, task_args, task_args_fnames = t.parse_suite()
- print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
- task_args_fnames))
+
self.assertEqual(task_files[0], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml'))
self.assertEqual(task_files[1], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml'))
- self.assertEqual(task_args[0], None)
- self.assertEqual(task_args[1], None)
- self.assertEqual(task_args_fnames[0], None)
- self.assertEqual(task_args_fnames[1], None)
- @mock.patch('yardstick.benchmark.core.task.os.environ')
- def test_parse_suite_no_constraint_with_args(self, mock_environ):
+ self.assertIsNone(task_args[0])
+ self.assertIsNone(task_args[1])
+ self.assertIsNone(task_args_fnames[0])
+ self.assertIsNone(task_args_fnames[1])
+
+ def test_parse_suite_no_constraint_with_args(self):
SAMPLE_SCENARIO_PATH = "no_constraint_with_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
- with mock.patch('yardstick.benchmark.core.task.os.environ',
+ with mock.patch.object(os, 'environ',
new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}):
task_files, task_args, task_args_fnames = t.parse_suite()
- print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
- task_args_fnames))
+
self.assertEqual(task_files[0], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml'))
self.assertEqual(task_files[1], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml'))
- self.assertEqual(task_args[0], None)
+ self.assertIsNone(task_args[0])
self.assertEqual(task_args[1],
'{"host": "node1.LF","target": "node2.LF"}')
- self.assertEqual(task_args_fnames[0], None)
- self.assertEqual(task_args_fnames[1], None)
+ self.assertIsNone(task_args_fnames[0])
+ self.assertIsNone(task_args_fnames[1])
- @mock.patch('yardstick.benchmark.core.task.os.environ')
- def test_parse_suite_with_constraint_no_args(self, mock_environ):
+ def test_parse_suite_with_constraint_no_args(self):
SAMPLE_SCENARIO_PATH = "with_constraint_no_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
- with mock.patch('yardstick.benchmark.core.task.os.environ',
+ with mock.patch.object(os, 'environ',
new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}):
task_files, task_args, task_args_fnames = t.parse_suite()
- print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
- task_args_fnames))
self.assertEqual(task_files[0], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml'))
self.assertEqual(task_files[1], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml'))
- self.assertEqual(task_args[0], None)
- self.assertEqual(task_args[1], None)
- self.assertEqual(task_args_fnames[0], None)
- self.assertEqual(task_args_fnames[1], None)
+ self.assertIsNone(task_args[0])
+ self.assertIsNone(task_args[1])
+ self.assertIsNone(task_args_fnames[0])
+ self.assertIsNone(task_args_fnames[1])
- @mock.patch('yardstick.benchmark.core.task.os.environ')
- def test_parse_suite_with_constraint_with_args(self, mock_environ):
+ def test_parse_suite_with_constraint_with_args(self):
SAMPLE_SCENARIO_PATH = "with_constraint_with_args_scenario_sample.yaml"
t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
- with mock.patch('yardstick.benchmark.core.task.os.environ',
+ with mock.patch.object(os, 'environ',
new={'NODE_NAME': 'huawei-pod1', 'INSTALLER_TYPE': 'compass'}):
task_files, task_args, task_args_fnames = t.parse_suite()
- print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
- task_args_fnames))
+
self.assertEqual(task_files[0], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml'))
self.assertEqual(task_files[1], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml'))
- self.assertEqual(task_args[0], None)
+ self.assertIsNone(task_args[0])
self.assertEqual(task_args[1],
'{"host": "node1.LF","target": "node2.LF"}')
- self.assertEqual(task_args_fnames[0], None)
- self.assertEqual(task_args_fnames[1], None)
+ self.assertIsNone(task_args_fnames[0])
+ self.assertIsNone(task_args_fnames[1])
def test_parse_options(self):
options = {
'openstack': {
'EXTERNAL_NETWORK': '$network'
},
- 'ndoes': ['node1', '$node'],
+ 'nodes': ['node1', '$node'],
'host': '$host'
}
@@ -254,48 +245,50 @@ class TaskTestCase(unittest.TestCase):
'host': 'server.yardstick'
}
- idle_result = {
+ expected_result = {
'openstack': {
'EXTERNAL_NETWORK': 'ext-net'
},
- 'ndoes': ['node1', 'node2'],
+ 'nodes': ['node1', 'node2'],
'host': 'server.yardstick'
}
actual_result = t._parse_options(options)
- self.assertEqual(idle_result, actual_result)
+ self.assertEqual(expected_result, actual_result)
+
def test_change_server_name_host_str(self):
scenario = {'host': 'demo'}
suffix = '-8'
task.change_server_name(scenario, suffix)
- self.assertTrue(scenario['host'], 'demo-8')
+ self.assertEqual('demo-8', scenario['host'])
def test_change_server_name_host_dict(self):
scenario = {'host': {'name': 'demo'}}
suffix = '-8'
task.change_server_name(scenario, suffix)
- self.assertTrue(scenario['host']['name'], 'demo-8')
+ self.assertEqual('demo-8', scenario['host']['name'])
def test_change_server_name_target_str(self):
scenario = {'target': 'demo'}
suffix = '-8'
task.change_server_name(scenario, suffix)
- self.assertTrue(scenario['target'], 'demo-8')
+ self.assertEqual('demo-8', scenario['target'])
def test_change_server_name_target_dict(self):
scenario = {'target': {'name': 'demo'}}
suffix = '-8'
task.change_server_name(scenario, suffix)
- self.assertTrue(scenario['target']['name'], 'demo-8')
+ self.assertEqual('demo-8', scenario['target']['name'])
- @mock.patch('yardstick.benchmark.core.task.utils')
- @mock.patch('yardstick.benchmark.core.task.logging')
- def test_set_log(self, mock_logging, mock_utils):
+ @mock.patch('six.moves.builtins.open', side_effect=mock.mock_open())
+ @mock.patch.object(task, 'utils')
+ @mock.patch('logging.root')
+ def test_set_log(self, mock_logging_root, *args):
task_obj = task.Task()
task_obj.task_id = 'task_id'
task_obj._set_log()
- self.assertTrue(mock_logging.root.addHandler.called)
+ mock_logging_root.addHandler.assert_called()
def _get_file_abspath(self, filename):
curr_path = os.path.dirname(os.path.abspath(__file__))
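Several hunks above change environment handling from patching the dotted string 'yardstick.benchmark.core.task.os.environ' to patching os.environ itself via mock.patch.object(os, 'environ', new={...}), which swaps the whole mapping for the supplied dict. mock.patch.dict is a related alternative that overlays keys and restores the original environment on exit; a short sketch of both, for comparison:

    import os
    import unittest

    import mock


    class EnvironPatchExample(unittest.TestCase):

        def test_patch_object_replaces_the_mapping(self):
            # As in the hunks above: only the listed keys exist while patched.
            with mock.patch.object(os, 'environ',
                                   new={'NODE_NAME': 'huawei-pod1',
                                        'INSTALLER_TYPE': 'compass'}):
                self.assertEqual(os.environ['INSTALLER_TYPE'], 'compass')

        def test_patch_dict_overlays_and_restores(self):
            original = os.environ.get('INSTALLER_TYPE')
            with mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'compass'}):
                self.assertEqual(os.environ['INSTALLER_TYPE'], 'compass')
            self.assertEqual(os.environ.get('INSTALLER_TYPE'), original)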
diff --git a/tests/unit/benchmark/core/test_testcase.py b/yardstick/tests/unit/benchmark/core/test_testcase.py
index 1f5aad75e..1f5aad75e 100644
--- a/tests/unit/benchmark/core/test_testcase.py
+++ b/yardstick/tests/unit/benchmark/core/test_testcase.py
diff --git a/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml b/yardstick/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
index 168d4b01a..168d4b01a 100644
--- a/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
+++ b/yardstick/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
diff --git a/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml b/yardstick/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
index 299e5de56..299e5de56 100644
--- a/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
+++ b/yardstick/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
diff --git a/tests/unit/benchmark/runner/__init__.py b/yardstick/tests/unit/benchmark/runner/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/runner/__init__.py
+++ b/yardstick/tests/unit/benchmark/runner/__init__.py
diff --git a/tests/unit/benchmark/runner/test_base.py b/yardstick/tests/unit/benchmark/runner/test_base.py
index f47b88e95..59739c54f 100644
--- a/tests/unit/benchmark/runner/test_base.py
+++ b/yardstick/tests/unit/benchmark/runner/test_base.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
@@ -9,75 +7,82 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
+import time
+import mock
import unittest
-import time
+from subprocess import CalledProcessError
-from mock import mock
from yardstick.benchmark.runners import base
-from yardstick.benchmark.runners.iteration import IterationRunner
+from yardstick.benchmark.runners import iteration
class ActionTestCase(unittest.TestCase):
@mock.patch("yardstick.benchmark.runners.base.subprocess")
def test__execute_shell_command(self, mock_subprocess):
- mock_subprocess.check_output.side_effect = Exception()
+ mock_subprocess.check_output.side_effect = CalledProcessError(-1, '')
self.assertEqual(base._execute_shell_command("")[0], -1)
@mock.patch("yardstick.benchmark.runners.base.subprocess")
def test__single_action(self, mock_subprocess):
- mock_subprocess.check_output.side_effect = Exception()
+ mock_subprocess.check_output.side_effect = CalledProcessError(-1, '')
base._single_action(0, "echo", mock.MagicMock())
@mock.patch("yardstick.benchmark.runners.base.subprocess")
def test__periodic_action(self, mock_subprocess):
- mock_subprocess.check_output.side_effect = Exception()
+ mock_subprocess.check_output.side_effect = CalledProcessError(-1, '')
base._periodic_action(0, "echo", mock.MagicMock())
class RunnerTestCase(unittest.TestCase):
+ def setUp(self):
+ config = {
+ 'output_config': {
+ 'DEFAULT': {
+ 'dispatcher': 'file'
+ }
+ }
+ }
+ self.runner = iteration.IterationRunner(config)
+
@mock.patch("yardstick.benchmark.runners.iteration.multiprocessing")
- def test_get_output(self, mock_process):
- runner = IterationRunner({})
- runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
- runner.output_queue.put({'criteria': 'PASS'})
+ def test_get_output(self, *args):
+ self.runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
+ self.runner.output_queue.put({'criteria': 'PASS'})
idle_result = {
'case': 'opnfv_yardstick_tc002',
'criteria': 'PASS'
}
- for retries in range(1000):
+ for _ in range(1000):
time.sleep(0.01)
- if not runner.output_queue.empty():
+ if not self.runner.output_queue.empty():
break
- actual_result = runner.get_output()
+ actual_result = self.runner.get_output()
self.assertEqual(idle_result, actual_result)
@mock.patch("yardstick.benchmark.runners.iteration.multiprocessing")
- def test_get_result(self, mock_process):
- runner = IterationRunner({})
- runner.result_queue.put({'case': 'opnfv_yardstick_tc002'})
- runner.result_queue.put({'criteria': 'PASS'})
+ def test_get_result(self, *args):
+ self.runner.result_queue.put({'case': 'opnfv_yardstick_tc002'})
+ self.runner.result_queue.put({'criteria': 'PASS'})
idle_result = [
{'case': 'opnfv_yardstick_tc002'},
{'criteria': 'PASS'}
]
- for retries in range(1000):
+ for _ in range(1000):
time.sleep(0.01)
- if not runner.result_queue.empty():
+ if not self.runner.result_queue.empty():
break
- actual_result = runner.get_result()
+ actual_result = self.runner.get_result()
self.assertEqual(idle_result, actual_result)
def test__run_benchmark(self):
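The side effects above change from a bare Exception to subprocess.CalledProcessError, which is what subprocess.check_output actually raises on a non-zero exit status, so the mocked failure matches the real one. A minimal sketch of a helper in the style these tests exercise (run_cmd is hypothetical, standing in for base._execute_shell_command):

    import subprocess


    def run_cmd(command):
        """Return (exit_code, output); swallow non-zero exits like the helper under test."""
        try:
            output = subprocess.check_output(command, shell=True)
            return 0, output
        except subprocess.CalledProcessError:
            return -1, ''


    if __name__ == '__main__':
        assert run_cmd('true')[0] == 0    # exits 0, so check_output succeeds
        assert run_cmd('false')[0] == -1  # non-zero exit raises CalledProcessError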
diff --git a/tests/unit/benchmark/runner/test_search.py b/yardstick/tests/unit/benchmark/runner/test_search.py
index 8fab5a71f..1bc07448d 100644
--- a/tests/unit/benchmark/runner/test_search.py
+++ b/yardstick/tests/unit/benchmark/runner/test_search.py
@@ -11,14 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-from __future__ import absolute_import
+import time
-import unittest
import mock
+import unittest
-from tests.unit import STL_MOCKS
+from yardstick.tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -40,7 +39,8 @@ class TestSearchRunnerHelper(unittest.TestCase):
benchmark = cls()
method = getattr(benchmark, 'my_method')
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
with helper.get_benchmark_instance():
helper()
@@ -54,22 +54,25 @@ class TestSearchRunnerHelper(unittest.TestCase):
'runner': {},
}
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
with self.assertRaises(RuntimeError):
helper()
- @mock.patch('yardstick.benchmark.runners.search.time')
- def test_is_not_done(self, mock_time):
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_is_not_done(self, mock_time, *args):
cls = mock.MagicMock()
aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
- mock_time.time.side_effect = range(1000)
+ mock_time.side_effect = range(1000)
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
index = -1
for index in helper.is_not_done():
@@ -78,8 +81,8 @@ class TestSearchRunnerHelper(unittest.TestCase):
self.assertGreaterEqual(index, 10)
- @mock.patch('yardstick.benchmark.runners.search.time')
- def test_is_not_done_immediate_stop(self, mock_time):
+ @mock.patch.object(time, 'sleep')
+ def test_is_not_done_immediate_stop(self, *args):
cls = mock.MagicMock()
aborted = mock.MagicMock()
scenario_cfg = {
@@ -88,7 +91,8 @@ class TestSearchRunnerHelper(unittest.TestCase):
},
}
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
index = -1
for index in helper.is_not_done():
@@ -97,6 +101,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
self.assertEqual(index, -1)
+
class TestSearchRunner(unittest.TestCase):
def test__worker_run_once(self):
diff --git a/tests/unit/benchmark/scenarios/__init__.py b/yardstick/tests/unit/benchmark/scenarios/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/__init__.py
diff --git a/tests/unit/benchmark/scenarios/availability/__init__.py b/yardstick/tests/unit/benchmark/scenarios/availability/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/availability/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/__init__.py
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index cc179602e..f0921c0f6 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -20,20 +20,24 @@ from yardstick.benchmark.scenarios.availability.attacker import \
attacker_baremetal
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
class ExecuteShellTestCase(unittest.TestCase):
def test__fun_execute_shell_command_successful(self, mock_subprocess):
cmd = "env"
mock_subprocess.check_output.return_value = (0, 'unittest')
- exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ exitcode, _ = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.LOG')
def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log, mock_subprocess):
cmd = "env"
mock_subprocess.check_output.side_effect = RuntimeError
- exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ exitcode, _ = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
mock_log.error.assert_called_once()
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
index 612b5a662..612b5a662 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
index 0a8e8322a..0a8e8322a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index 92ae8aa88..9bc04ebf4 100644
--- a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
@@ -9,19 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.monitor.monitor_command
-
-from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import basemonitor
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
- '.BaseMonitor')
class MonitorMgrTestCase(unittest.TestCase):
def setUp(self):
@@ -52,19 +43,23 @@ class MonitorMgrTestCase(unittest.TestCase):
for mo in self.monitor_list:
mo._result = {"outage_time": 10}
- def test__MonitorMgr_setup_successful(self, mock_monitor):
+ @mock.patch.object(basemonitor, 'BaseMonitor')
+ def test__MonitorMgr_setup_successful(self, *args):
instance = basemonitor.MonitorMgr({"nova-api": 10})
instance.init_monitors(self.monitor_configs, None)
instance.start_monitors()
instance.wait_monitors()
- ret = instance.verify_SLA()
+ # TODO(elfoley): Check the return value
+ ret = instance.verify_SLA() # pylint: disable=unused-variable
- def test_MonitorMgr_getitem(self, mock_monitor):
+ @mock.patch.object(basemonitor, 'BaseMonitor')
+ def test_MonitorMgr_getitem(self, *args):
monitorMgr = basemonitor.MonitorMgr({"nova-api": 10})
monitorMgr.init_monitors(self.monitor_configs, None)
- def test_store_result(self, mock_monitor):
+ @mock.patch.object(basemonitor, 'BaseMonitor')
+ def test_store_result(self, *args):
expect = {'process_neutron-server_outage_time': 10,
'openstack-router-list_outage_time': 10}
result = {}
@@ -102,9 +97,7 @@ class BaseMonitorTestCase(unittest.TestCase):
ins.run()
ins.verify_SLA()
- @mock.patch(
- 'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
- '.multiprocessing')
+ @mock.patch.object(basemonitor, 'multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
ins.setup()
@@ -112,11 +105,12 @@ class BaseMonitorTestCase(unittest.TestCase):
ins.run()
ins.verify_SLA()
+ # TODO(elfoley): fix this test to not throw an error
def test__basemonitor_getmonitorcls_successfule(self):
cls = None
try:
cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
- except Exception:
+ except Exception: # pylint: disable=broad-except
pass
self.assertIsNone(cls)
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseoperation.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
index 03ec1492b..b7c9f62ff 100644
--- a/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2016 Huan Li and others
# lihuansse@tongji.edu.cn
@@ -9,19 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.operation.baseoperation
-
-from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.operation import baseoperation
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.operation.baseoperation'
- '.BaseOperation')
class OperationMgrTestCase(unittest.TestCase):
def setUp(self):
@@ -33,17 +24,19 @@ class OperationMgrTestCase(unittest.TestCase):
self.operation_configs = []
self.operation_configs.append(config)
- def test_all_successful(self, mock_operation):
+ @mock.patch.object(baseoperation, 'BaseOperation')
+ def test_all_successful(self, *args):
mgr_ins = baseoperation.OperationMgr()
mgr_ins.init_operations(self.operation_configs, None)
- operation_ins = mgr_ins["service-status"]
+ _ = mgr_ins["service-status"]
mgr_ins.rollback()
- def test_getitem_fail(self, mock_operation):
+ @mock.patch.object(baseoperation, 'BaseOperation')
+ def test_getitem_fail(self, *args):
mgr_ins = baseoperation.OperationMgr()
mgr_ins.init_operations(self.operation_configs, None)
with self.assertRaises(KeyError):
- operation_ins = mgr_ins["operation-not-exist"]
+ _ = mgr_ins["operation-not-exist"]
class TestOperation(baseoperation.BaseOperation):
@@ -66,22 +59,21 @@ class BaseOperationTestCase(unittest.TestCase):
'operation_type': 'general-operation',
'key': 'service-status'
}
+ self.base_ins = baseoperation.BaseOperation(self.config, None)
def test_all_successful(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
- base_ins.setup()
- base_ins.run()
- base_ins.rollback()
+ self.base_ins.setup()
+ self.base_ins.run()
+ self.base_ins.rollback()
def test_get_script_fullpath(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
- base_ins.get_script_fullpath("ha_tools/test.bash")
+ self.base_ins.get_script_fullpath("ha_tools/test.bash")
+ # TODO(elfoley): Fix test to check on expected outputs
+ # pylint: disable=unused-variable
def test_get_operation_cls_successful(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
- operation_ins = base_ins.get_operation_cls("test-operation")
+ operation_ins = self.base_ins.get_operation_cls("test-operation")
def test_get_operation_cls_fail(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
with self.assertRaises(RuntimeError):
- operation_ins = base_ins.get_operation_cls("operation-not-exist")
+ self.base_ins.get_operation_cls("operation-not-exist")
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
index 36ce900fb..ae74d241c 100644
--- a/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
@@ -9,10 +9,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.result_checker
-# .baseresultchecker
-
-from __future__ import absolute_import
import mock
import unittest
@@ -20,8 +16,6 @@ from yardstick.benchmark.scenarios.availability.result_checker import \
baseresultchecker
-@mock.patch('yardstick.benchmark.scenarios.availability.result_checker'
- '.baseresultchecker.BaseResultChecker')
class ResultCheckerMgrTestCase(unittest.TestCase):
def setUp(self):
@@ -33,21 +27,27 @@ class ResultCheckerMgrTestCase(unittest.TestCase):
self.checker_configs = []
self.checker_configs.append(config)
- def test_ResultCheckerMgr_setup_successful(self, mock_basechacer):
- mgr_ins = baseresultchecker.ResultCheckerMgr()
- mgr_ins.init_ResultChecker(self.checker_configs, None)
- mgr_ins.verify()
+ self.mgr_ins = baseresultchecker.ResultCheckerMgr()
+
+ self._mock_basechecker = mock.patch.object(baseresultchecker,
+ 'BaseResultChecker')
+ self.mock_basechecker = self._mock_basechecker.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_basechecker.stop()
+
+ def test_ResultCheckerMgr_setup_successful(self):
+ self.mgr_ins.verify()
- def test_getitem_succeessful(self, mock_basechacer):
- mgr_ins = baseresultchecker.ResultCheckerMgr()
- mgr_ins.init_ResultChecker(self.checker_configs, None)
- checker_ins = mgr_ins["process-checker"]
+ def test_getitem_succeessful(self):
+ self.mgr_ins.init_ResultChecker(self.checker_configs, None)
+ _ = self.mgr_ins["process-checker"]
- def test_getitem_fail(self, mock_basechacer):
- mgr_ins = baseresultchecker.ResultCheckerMgr()
- mgr_ins.init_ResultChecker(self.checker_configs, None)
+ def test_getitem_fail(self):
+ self.mgr_ins.init_ResultChecker(self.checker_configs, None)
with self.assertRaises(KeyError):
- checker_ins = mgr_ins["checker-not-exist"]
+ _ = self.mgr_ins["checker-not-exist"]
class BaseResultCheckerTestCase(unittest.TestCase):
@@ -66,22 +66,20 @@ class BaseResultCheckerTestCase(unittest.TestCase):
'checker_type': 'general-result-checker',
'key': 'process-checker'
}
+ self.ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
def test_baseresultchecker_setup_verify_successful(self):
- ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
- ins.setup()
- ins.verify()
+ self.ins.setup()
+ self.ins.verify()
def test_baseresultchecker_verfiy_pass(self):
- ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
- ins.setup()
- ins.actualResult = True
- ins.expectedResult = True
- ins.verify()
+ self.ins.setup()
+ self.ins.actualResult = True
+ self.ins.expectedResult = True
+ self.ins.verify()
def test_get_script_fullpath(self):
- ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
- path = ins.get_script_fullpath("test.bash")
+ self.ins.get_script_fullpath("test.bash")
def test_get_resultchecker_cls_successful(self):
baseresultchecker.BaseResultChecker.get_resultchecker_cls(
diff --git a/tests/unit/benchmark/scenarios/availability/test_director.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_director.py
index d01a60e2d..72ce7b0d5 100644
--- a/tests/unit/benchmark/scenarios/availability/test_director.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_director.py
@@ -18,6 +18,10 @@ import unittest
from yardstick.benchmark.scenarios.availability.director import Director
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.director.basemonitor')
@mock.patch('yardstick.benchmark.scenarios.availability.director.baseattacker')
@mock.patch(
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
index b84cef23c..1aebcc85b 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
@@ -9,36 +7,37 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.monitor.monitor_command
-
-from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_command
-@mock.patch('subprocess.check_output')
class ExecuteShellTestCase(unittest.TestCase):
- def test__fun_execute_shell_command_successful(self, mock_subprocess_check_output):
+ def setUp(self):
+ self._mock_subprocess = mock.patch.object(monitor_command, 'subprocess')
+ self.mock_subprocess = self._mock_subprocess.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_subprocess.stop()
+
+ def test__fun_execute_shell_command_successful(self):
cmd = "env"
- mock_subprocess_check_output.return_value = (0, 'unittest')
- exitcode, _ = monitor_command._execute_shell_command(cmd)
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, _t = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log,
- mock_subprocess_check_output):
+ @mock.patch.object(monitor_command, 'LOG')
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log):
cmd = "env"
- mock_subprocess_check_output.side_effect = RuntimeError
+ self.mock_subprocess.check_output.side_effect = RuntimeError
exitcode, _ = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
mock_log.error.assert_called_once()
-@mock.patch('subprocess.check_output')
class MonitorOpenstackCmdTestCase(unittest.TestCase):
def setUp(self):
@@ -54,35 +53,39 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
'monitor_time': 1,
'sla': {'max_outage_time': 5}
}
+ self._mock_subprocess = mock.patch.object(monitor_command, 'subprocess')
+ self.mock_subprocess = self._mock_subprocess.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_subprocess.stop()
- def test__monitor_command_monitor_func_successful(self, mock_subprocess_check_output):
+ def test__monitor_command_monitor_func_successful(self):
instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
- mock_subprocess_check_output.return_value = (0, 'unittest')
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
ret = instance.monitor_func()
self.assertTrue(ret)
instance._result = {"outage_time": 0}
instance.verify_SLA()
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
- def test__monitor_command_monitor_func_failure(self, mock_log, mock_subprocess_check_output):
- mock_subprocess_check_output.return_value = (1, 'unittest')
+ @mock.patch.object(monitor_command, 'LOG')
+ def test__monitor_command_monitor_func_failure(self, mock_log):
+ self.mock_subprocess.check_output.return_value = (1, 'unittest')
instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
- mock_subprocess_check_output.side_effect = RuntimeError
+ self.mock_subprocess.check_output.side_effect = RuntimeError
ret = instance.monitor_func()
self.assertFalse(ret)
mock_log.error.assert_called_once()
instance._result = {"outage_time": 10}
instance.verify_SLA()
- @mock.patch(
- 'yardstick.benchmark.scenarios.availability.monitor.monitor_command'
- '.ssh')
- def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess_check_output):
+ @mock.patch.object(monitor_command, 'ssh')
+ def test__monitor_command_ssh_monitor_successful(self, mock_ssh):
- mock_subprocess_check_output.return_value = (0, 'unittest')
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
self.config["host"] = "node1"
instance = monitor_command.MonitorOpenstackCmd(
self.config, self.context, {"nova-api": 10})
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
index c14f073ec..7022ea678 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
@@ -18,6 +18,10 @@ import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_general
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
'monitor_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
index b59ec6cf1..0d61d9b15 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
@@ -17,6 +17,11 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_multi
+
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
'monitor_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
@@ -42,7 +47,8 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
}
def test__monitor_multi_all_successful(self, mock_open, mock_ssh):
- ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
+ ins = monitor_multi.MultiMonitor(
+ self.monitor_cfg, self.context, {"nova-api": 10})
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
@@ -51,11 +57,11 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
ins.verify_SLA()
def test__monitor_multi_all_fail(self, mock_open, mock_ssh):
- ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
+ ins = monitor_multi.MultiMonitor(
+ self.monitor_cfg, self.context, {"nova-api": 10})
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins.start_monitor()
ins.wait_monitor()
ins.verify_SLA()
-
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
index 41ce5445e..41ce5445e 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
diff --git a/tests/unit/benchmark/scenarios/availability/test_operation_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_operation_general.py
index fb8ccb122..a965f7f64 100644
--- a/tests/unit/benchmark/scenarios/availability/test_operation_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_operation_general.py
@@ -19,6 +19,10 @@ from yardstick.benchmark.scenarios.availability.operation import \
operation_general
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.operation.'
'operation_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.operation.'
diff --git a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
index d036bb0da..234adcb6e 100644
--- a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
@@ -21,6 +21,10 @@ from yardstick.benchmark.scenarios.availability.result_checker import \
result_checker_general
+# pylint: disable=unused-argument
+# disabled for now: mock.patch decorators inject their mocks positionally, so
+# some test signatures accept arguments they do not use
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
'result_checker_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index 244a5e798..45840d569 100644
--- a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2016 Huan Li and others
# lihuansse@tongji.edu.cn
@@ -9,18 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.scenario_general
-
-from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.scenario_general import \
- ScenarioGeneral
-
+from yardstick.benchmark.scenarios.availability import scenario_general
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.scenario_general.Director')
class ScenarioGeneralTestCase(unittest.TestCase):
def setUp(self):
@@ -44,28 +35,33 @@ class ScenarioGeneralTestCase(unittest.TestCase):
'index': 2}]
}
}
+ self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
+
+ self._mock_director = mock.patch.object(scenario_general, 'Director')
+ self.mock_director = self._mock_director.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_director.stop()
- def test_scenario_general_all_successful(self, mock_director):
- ins = ScenarioGeneral(self.scenario_cfg, None)
- ins.setup()
- ins.run({})
- ins.teardown()
+ def test_scenario_general_all_successful(self):
+ self.instance.setup()
+ self.instance.run({})
+ self.instance.teardown()
- def test_scenario_general_exception(self, mock_director):
- ins = ScenarioGeneral(self.scenario_cfg, None)
+ def test_scenario_general_exception(self):
mock_obj = mock.Mock()
mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
- ins.director = mock_obj
- ins.director.data = {}
- ins.run({})
- ins.teardown()
+ self.instance.director = mock_obj
+ self.instance.director.data = {}
+ self.instance.run({})
+ self.instance.teardown()
- def test_scenario_general_case_fail(self, mock_director):
- ins = ScenarioGeneral(self.scenario_cfg, None)
+ def test_scenario_general_case_fail(self):
mock_obj = mock.Mock()
mock_obj.verify.return_value = False
- ins.director = mock_obj
- ins.director.data = {}
- ins.run({})
- ins.pass_flag = True
- ins.teardown()
+ self.instance.director = mock_obj
+ self.instance.director.data = {}
+ self.instance.run({})
+ self.instance.pass_flag = True
+ self.instance.teardown()
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 97d534894..6bb3ec63b 100644
--- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
@@ -9,9 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.serviceha
-
-from __future__ import absolute_import
import mock
import unittest
@@ -48,11 +43,11 @@ class ServicehaTestCase(unittest.TestCase):
sla = {"outage_time": 5}
self.args = {"options": options, "sla": sla}
- @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
- @mock.patch(
- 'yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
- def test__serviceha_setup_run_successful(self, _,
- mock_monitor):
+ # NOTE(elfoley): This should be split into test_setup and test_run
+ # NOTE(elfoley): This should explicitly test outcomes and states
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_setup_run_successful(self, mock_monitor, *args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
@@ -65,15 +60,15 @@ class ServicehaTestCase(unittest.TestCase):
p.setup()
self.assertTrue(p.setup_done)
-# def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
-# p = serviceha.ServiceHA(self.args, self.ctx)
+ # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
+ # p = serviceha.ServiceHA(self.args, self.ctx)
-# p.setup()
-# self.assertTrue(p.setup_done)
-#
-# result = {}
-# result["outage_time"] = 10
-# mock_monitor.Monitor().get_result.return_value = result
+ # p.setup()
+ # self.assertEqual(p.setup_done, True)
+
+ # result = {}
+ # result["outage_time"] = 10
+ # mock_monitor.Monitor().get_result.return_value = result
-# ret = {}
-# self.assertRaises(AssertionError, p.run, ret)
+ # ret = {}
+ # self.assertRaises(AssertionError, p.run, ret)
diff --git a/tests/unit/benchmark/scenarios/availability/test_util.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_util.py
index 548efe91b..548efe91b 100644
--- a/tests/unit/benchmark/scenarios/availability/test_util.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_util.py
diff --git a/tests/unit/benchmark/scenarios/compute/__init__.py b/yardstick/tests/unit/benchmark/scenarios/compute/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/compute/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/__init__.py
diff --git a/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt b/yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt
index e2c79a9b1..e2c79a9b1 100644
--- a/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/cachestat_sample_output.txt
diff --git a/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
index 723e64bcb..723e64bcb 100644
--- a/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output1.txt
diff --git a/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt
index c66520a27..c66520a27 100644
--- a/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/cpuload_sample_output2.txt
diff --git a/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt b/yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt
index 1793e2f10..1793e2f10 100644
--- a/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/memload_sample_output.txt
diff --git a/tests/unit/benchmark/scenarios/compute/test_cachestat.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py
index b0ddfc6b4..b0ddfc6b4 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cachestat.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cachestat.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
index 7b9a5ad4a..7b9a5ad4a 100644
--- a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_cpuload.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py
index 840ac7885..840ac7885 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cpuload.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cpuload.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index 51ffd2488..51ffd2488 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 65939c6ba..b3152d12c 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -21,6 +21,10 @@ from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import lmbench
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
class LmbenchTestCase(unittest.TestCase):
diff --git a/tests/unit/benchmark/scenarios/compute/test_memload.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py
index ebae9993d..ebae9993d 100644
--- a/tests/unit/benchmark/scenarios/compute/test_memload.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_memload.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_plugintest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py
index 680f6ad65..680f6ad65 100644
--- a/tests/unit/benchmark/scenarios/compute/test_plugintest.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_plugintest.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
index fb55b809f..26a26cdf7 100644
--- a/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
@@ -162,5 +162,6 @@ class QemuMigrateTestCase(unittest.TestCase):
def main():
unittest.main()
+
if __name__ == '__main__':
main()
diff --git a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index 4f71fbb36..4f71fbb36 100644
--- a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py
index 40423b9da..74612d7b6 100644
--- a/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu.py
@@ -17,7 +17,6 @@ import unittest
import mock
-from yardstick.common import utils
from yardstick.benchmark.scenarios.compute import spec_cpu
@@ -39,7 +38,6 @@ class SpecCPUTestCase(unittest.TestCase):
options = {
"SPECint_benchmark": "perlbench",
- "runspec_tune": "all",
"output_format": "all",
"runspec_iterations": "1",
"runspec_tune": "base",
@@ -63,7 +61,6 @@ class SpecCPUTestCase(unittest.TestCase):
args = {"options": options}
s = spec_cpu.SpecCPU(args, self.ctx)
- sample_output = ''
mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
s.run(self.result)
expected_result = {}
@@ -79,8 +76,10 @@ class SpecCPUTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, s.run, self.result)
+
def main():
unittest.main()
+
if __name__ == '__main__':
main()
diff --git a/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py
index c428e1fb8..c428e1fb8 100644
--- a/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_spec_cpu_for_vm.py
diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
index fec355b45..fec355b45 100644
--- a/tests/unit/benchmark/scenarios/compute/test_unixbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
diff --git a/tests/unit/benchmark/scenarios/dummy/__init__.py b/yardstick/tests/unit/benchmark/scenarios/dummy/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/dummy/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/dummy/__init__.py
diff --git a/tests/unit/benchmark/scenarios/dummy/test_dummy.py b/yardstick/tests/unit/benchmark/scenarios/dummy/test_dummy.py
index bc5131806..bc5131806 100644
--- a/tests/unit/benchmark/scenarios/dummy/test_dummy.py
+++ b/yardstick/tests/unit/benchmark/scenarios/dummy/test_dummy.py
diff --git a/tests/unit/benchmark/scenarios/lib/__init__.py b/yardstick/tests/unit/benchmark/scenarios/lib/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/lib/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/__init__.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_add_memory_load.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_add_memory_load.py
index bda07f723..bda07f723 100644
--- a/tests/unit/benchmark/scenarios/lib/test_add_memory_load.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_add_memory_load.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_attach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
index e69924072..25b911d5e 100644
--- a/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
@@ -17,14 +17,15 @@ class AttachVolumeTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.attach_server_volume')
def test_attach_volume(self, mock_attach_server_volume):
options = {
- 'volume_id': '123-456-000',
- 'server_id': '000-123-456'
+ 'volume_id': '123-456-000',
+ 'server_id': '000-123-456'
}
args = {"options": options}
obj = AttachVolume(args, {})
obj.run({})
self.assertTrue(mock_attach_server_volume.called)
+
def main():
unittest.main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_check_connectivity.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_connectivity.py
index 1fb2f89ca..7188c29d5 100644
--- a/tests/unit/benchmark/scenarios/lib/test_check_connectivity.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_connectivity.py
@@ -30,7 +30,7 @@ class CheckConnectivityTestCase(unittest.TestCase):
'target': {
'ipaddr': '172.16.0.138'
}
- }
+ }
@mock.patch('yardstick.benchmark.scenarios.lib.check_connectivity.ssh')
def test_check_connectivity(self, mock_ssh):
@@ -43,18 +43,18 @@ class CheckConnectivityTestCase(unittest.TestCase):
'ssh_port': '22',
'ssh_timeout': 600,
'ping_parameter': "-s 2048"
- },
+ },
'sla': {'status': 'True',
'action': 'assert'}
}
- result = {}
+ # TODO(elfoley): Properly check the outputs
+ result = {} # pylint: disable=unused-variable
obj = check_connectivity.CheckConnectivity(args, {})
obj.setup()
mock_ssh.SSH.execute.return_value = (0, '100', '')
-
@mock.patch('yardstick.benchmark.scenarios.lib.check_connectivity.ssh')
def test_check_connectivity_key(self, mock_ssh):
@@ -64,18 +64,20 @@ class CheckConnectivityTestCase(unittest.TestCase):
'ssh_port': '22',
'ssh_timeout': 600,
'ping_parameter': "-s 2048"
- },
+ },
'sla': {'status': 'True',
'action': 'assert'}
}
- result = {}
+ # TODO(elfoley): Properly check the outputs
+ result = {} # pylint: disable=unused-variable
obj = check_connectivity.CheckConnectivity(args, self.ctx)
obj.setup()
mock_ssh.SSH.execute.return_value = (0, '100', '')
+
def main():
unittest.main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_check_numa_info.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_numa_info.py
index 1dd461d41..f983f9c5b 100644
--- a/tests/unit/benchmark/scenarios/lib/test_check_numa_info.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_numa_info.py
@@ -6,17 +6,16 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import mock
+import unittest
from yardstick.benchmark.scenarios.lib.check_numa_info import CheckNumaInfo
class CheckNumaInfoTestCase(unittest.TestCase):
- @mock.patch(
- 'yardstick.benchmark.scenarios.lib.check_numa_info.CheckNumaInfo._check_vm2_status')
- def test_check_numa_info(self, mock_check_vm2):
+ @mock.patch.object(CheckNumaInfo, '_check_vm2_status')
+ def test_run(self, mock_check_vm2):
scenario_cfg = {'info1': {}, 'info2': {}}
obj = CheckNumaInfo(scenario_cfg, {})
obj.run({})
diff --git a/tests/unit/benchmark/scenarios/lib/test_check_value.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
index 21e83f830..5a40e7d8f 100644
--- a/tests/unit/benchmark/scenarios/lib/test_check_value.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_check_value.py
@@ -10,33 +10,29 @@ import unittest
from yardstick.benchmark.scenarios.lib.check_value import CheckValue
-
class CheckValueTestCase(unittest.TestCase):
+ def setUp(self):
+ self.result = {}
+
def test_check_value_eq(self):
scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 2}}
obj = CheckValue(scenario_cfg, {})
- try:
- obj.run({})
- except Exception as e:
- self.assertIsInstance(e, AssertionError)
+ self.assertRaises(AssertionError, obj.run, self.result)
+ self.assertEqual({}, self.result)
def test_check_value_eq_pass(self):
scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 1}}
obj = CheckValue(scenario_cfg, {})
- try:
- obj.run({})
- except Exception as e:
- self.assertIsInstance(e, AssertionError)
+
+ obj.run(self.result)
+ self.assertEqual({}, self.result)
def test_check_value_ne(self):
scenario_cfg = {'options': {'operator': 'ne', 'value1': 1, 'value2': 1}}
obj = CheckValue(scenario_cfg, {})
- try:
- obj.run({})
- except Exception as e:
- self.assertIsInstance(e, AssertionError)
-
+ self.assertRaises(AssertionError, obj.run, self.result)
+ self.assertEqual({}, self.result)
def main():
unittest.main()
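Editor's note (illustration only, not part of the patch): the hunk above replaces try/except blocks, which silently pass when no exception is raised, with assertRaises, which fails unless the expected exception actually occurs. A minimal sketch with a hypothetical check_values_equal() helper standing in for CheckValue.run():

    import unittest

    def check_values_equal(value1, value2):
        # Hypothetical stand-in for CheckValue.run(): raises on mismatch.
        assert value1 == value2, 'values differ'

    class CheckValueSketchTestCase(unittest.TestCase):

        def test_mismatch_raises(self):
            # Fails the test unless AssertionError is really raised.
            self.assertRaises(AssertionError, check_values_equal, 1, 2)

        def test_match_passes(self):
            # No exception expected when the values are equal.
            check_values_equal(1, 1)

    if __name__ == '__main__':
        unittest.main()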
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_flavor.py
index 036ae952d..036ae952d 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_flavor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_flavor.py
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py
new file mode 100644
index 000000000..a7286f5da
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py
@@ -0,0 +1,58 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+import mock
+
+from yardstick.benchmark.scenarios.lib import create_floating_ip
+import yardstick.common.openstack_utils as op_utils
+
+
+class CreateFloatingIpTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_get_network_id = mock.patch.object(
+ op_utils, 'get_network_id')
+ self.mock_get_network_id = self._mock_get_network_id.start()
+ self._mock_create_floating_ip = mock.patch.object(
+ op_utils, 'create_floating_ip')
+ self.mock_create_floating_ip = self._mock_create_floating_ip.start()
+ self._mock_get_neutron_client = mock.patch.object(
+ op_utils, 'get_neutron_client')
+ self.mock_get_neutron_client = self._mock_get_neutron_client.start()
+ self._mock_get_shade_client = mock.patch.object(
+ op_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_floating_ip, 'LOG')
+ self.mock_log = self._mock_log.start()
+
+ self._fip_obj = create_floating_ip.CreateFloatingIp(mock.ANY, mock.ANY)
+ self._fip_obj.scenario_cfg = {'output': 'key1\nkey2'}
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_network_id.stop()
+ self._mock_create_floating_ip.stop()
+ self._mock_get_neutron_client.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_create_floating_ip.return_value = {'fip_id': 'value1',
+ 'fip_addr': 'value2'}
+ output = self._fip_obj.run(mock.ANY)
+ self.assertDictEqual({'key1': 'value1', 'key2': 'value2'}, output)
+
+ def test_run_no_fip(self):
+ self.mock_create_floating_ip.return_value = None
+ output = self._fip_obj.run(mock.ANY)
+ self.assertIsNone(output)
+ self.mock_log.error.assert_called_once_with(
+ 'Creating floating ip failed!')
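Editor's note (illustration only, not part of the patch): new modules such as test_create_floating_ip.py above start their mocks in setUp() with mock.patch.object and register the stop via addCleanup(), so patches are undone even when a test body fails. A minimal, self-contained sketch of that pattern, using a hypothetical FakeClientFactory in place of openstack_utils:

    import unittest
    import mock

    class FakeClientFactory(object):
        """Hypothetical stand-in for a module such as openstack_utils."""
        @staticmethod
        def get_client():
            raise RuntimeError('should be mocked in tests')

    class PatchObjectPatternTestCase(unittest.TestCase):

        def setUp(self):
            # Start the patch in setUp() and keep a handle on the mock.
            self._mock_get_client = mock.patch.object(
                FakeClientFactory, 'get_client')
            self.mock_get_client = self._mock_get_client.start()
            # addCleanup() always runs, even if the test fails,
            # so the patch is reliably stopped.
            self.addCleanup(self._mock_get_client.stop)

        def test_client_is_mocked(self):
            self.mock_get_client.return_value = 'fake-client'
            self.assertEqual('fake-client', FakeClientFactory.get_client())

    if __name__ == '__main__':
        unittest.main()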
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_image.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
index c213ceba0..b26957979 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_image.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
@@ -9,28 +9,30 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.create_image import CreateImage
-
+from yardstick.benchmark.scenarios.lib import create_image
+from yardstick.common import openstack_utils
+# NOTE(elfoley): There should be more tests here.
class CreateImageTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.create_image')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
+ @mock.patch.object(openstack_utils, 'create_image')
+ @mock.patch.object(openstack_utils, 'get_glance_client')
def test_create_image(self, mock_get_glance_client, mock_create_image):
options = {
- 'image_name': 'yardstick_test_image_01',
- 'disk_format': 'qcow2',
- 'container_format': 'bare',
- 'min_disk': '1',
- 'min_ram': '512',
- 'protected': 'False',
- 'tags': '["yardstick automatic test image"]',
- 'file_path': '/home/opnfv/images/cirros-0.3.5-x86_64-disk.img'
+ 'image_name': 'yardstick_test_image_01',
+ 'disk_format': 'qcow2',
+ 'container_format': 'bare',
+ 'min_disk': '1',
+ 'min_ram': '512',
+ 'protected': 'False',
+ 'tags': '["yardstick automatic test image"]',
+ 'file_path': '/home/opnfv/images/cirros-0.3.5-x86_64-disk.img'
}
args = {"options": options}
- obj = CreateImage(args, {})
+ obj = create_image.CreateImage(args, {})
obj.run({})
- self.assertTrue(mock_create_image.called)
+ mock_create_image.assert_called_once()
+ mock_get_glance_client.assert_called_once()
def main():
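Editor's note (illustration only, not part of the patch): the hunk above swaps self.assertTrue(mock_create_image.called) for mock_create_image.assert_called_once(); the latter also fails when the mock is invoked more than once and gives a more descriptive failure message. A tiny sketch:

    import mock

    m = mock.Mock()
    m('some-arg')

    # Older style: only checks that the mock was called at least once.
    assert m.called

    # Style used by the patch: also fails if the mock was called twice.
    m.assert_called_once()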
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
index 4b9b72013..10e351b5e 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
@@ -6,26 +6,25 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
-import mock
-from yardstick.benchmark.scenarios.lib.create_keypair import CreateKeypair
+import mock
+import unittest
-PREFIX = "yardstick.benchmark.scenarios.lib.create_keypair"
+from yardstick.benchmark.scenarios.lib import create_keypair
class CreateKeypairTestCase(unittest.TestCase):
- @mock.patch('{}.paramiko'.format(PREFIX))
- @mock.patch('{}.op_utils'.format(PREFIX))
- def test_create_keypair(self, mock_op_utils, mock_paramiko):
+ @mock.patch.object(create_keypair, 'paramiko')
+ @mock.patch.object(create_keypair, 'op_utils')
+ def test_create_keypair(self, mock_op_utils, *args):
options = {
'key_name': 'yardstick_key',
'key_path': '/tmp/yardstick_key'
}
args = {"options": options}
- obj = CreateKeypair(args, {})
+ obj = create_keypair.CreateKeypair(args, {})
obj.run({})
- self.assertTrue(mock_op_utils.create_keypair.called)
+ mock_op_utils.create_keypair.assert_called_once()
def main():
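Editor's note (illustration only, not part of the patch): in the hunk above only the innermost @mock.patch.object mock is named (mock_op_utils) and the rest are absorbed by *args. Stacked patch decorators hand mocks to the test bottom-up, so unneeded ones can be soaked up this way. A self-contained sketch with a hypothetical Example class:

    import mock
    import unittest

    class Example(object):

        @staticmethod
        def first():
            return 'first'

        @staticmethod
        def second():
            return 'second'

    class PatchOrderTestCase(unittest.TestCase):

        @mock.patch.object(Example, 'second')  # outermost -> later argument
        @mock.patch.object(Example, 'first')   # innermost -> first argument
        def test_order(self, mock_first, *args):
            # The decorator closest to the function supplies mock_first;
            # the outer decorator's mock lands in *args and is ignored.
            Example.first()
            mock_first.assert_called_once()

    if __name__ == '__main__':
        unittest.main()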
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_network.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_network.py
index 8e7d8b5a1..e0382851f 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_network.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_network.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_network import CreateNetwork
@@ -19,10 +18,10 @@ class CreateNetworkTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.create_neutron_net')
def test_create_network(self, mock_get_neutron_client, mock_create_neutron_net):
options = {
- 'openstack_paras': {
- 'name': 'yardstick_net',
- 'admin_state_up': 'True'
- }
+ 'openstack_paras': {
+ 'name': 'yardstick_net',
+ 'admin_state_up': 'True'
+ }
}
args = {"options": options}
obj = CreateNetwork(args, {})
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_port.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_port.py
index 3b2aa2247..0f15058da 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_port.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_port.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_port import CreatePort
@@ -18,9 +17,9 @@ class CreatePortTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.get_neutron_client')
def test_create_port(self, mock_get_neutron_client):
options = {
- 'openstack_paras': {
- 'name': 'yardstick_port'
- }
+ 'openstack_paras': {
+ 'name': 'yardstick_port'
+ }
}
args = {"options": options}
obj = CreatePort(args, {})
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_router.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_router.py
index b956a3634..8f3914b83 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_router.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_router.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_router import CreateRouter
@@ -19,10 +18,10 @@ class CreateRouterTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.create_neutron_router')
def test_create_router(self, mock_get_neutron_client, mock_create_neutron_router):
options = {
- 'openstack_paras': {
- 'admin_state_up': 'True',
- 'name': 'yardstick_router'
- }
+ 'openstack_paras': {
+ 'admin_state_up': 'True',
+ 'name': 'yardstick_router'
+ }
}
args = {"options": options}
obj = CreateRouter(args, {})
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
index b962f7f0e..c1c137cda 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup
@@ -19,10 +18,10 @@ class CreateSecGroupTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.create_security_group_full')
def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full):
options = {
- 'openstack_paras': {
- 'sg_name': 'yardstick_sec_group',
- 'description': 'security group for yardstick manual VM'
- }
+ 'openstack_paras': {
+ 'sg_name': 'yardstick_sec_group',
+ 'description': 'security group for yardstick manual VM'
+ }
}
args = {"options": options}
obj = CreateSecgroup(args, {})
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
index 7c4193132..74003b995 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
@@ -21,10 +21,10 @@ class CreateServerTestCase(unittest.TestCase):
def test_create_server(self, mock_get_nova_client, mock_get_neutron_client,
mock_get_glance_client, mock_create_instance_and_wait_for_active):
scenario_cfg = {
- 'options' : {
- 'openstack_paras': 'example'
- },
- 'output': 'server'
+ 'options': {
+ 'openstack_paras': 'example'
+ },
+ 'output': 'server'
}
obj = CreateServer(scenario_cfg, {})
obj.run({})
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_subnet.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_subnet.py
index 0154755c4..b7f29dfe4 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_subnet.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_subnet.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_subnet import CreateSubnet
@@ -19,12 +18,12 @@ class CreateSubnetTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.create_neutron_subnet')
def test_create_subnet(self, mock_get_neutron_client, mock_create_neutron_subnet):
options = {
- 'openstack_paras': {
- 'network_id': '123-123-123',
- 'name': 'yardstick_subnet',
- 'cidr': '10.10.10.0/24',
- 'ip_version': '4'
- }
+ 'openstack_paras': {
+ 'network_id': '123-123-123',
+ 'name': 'yardstick_subnet',
+ 'cidr': '10.10.10.0/24',
+ 'ip_version': '4'
+ }
}
args = {"options": options}
obj = CreateSubnet(args, {})
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
index ef2c0ccaf..ca055db2f 100644
--- a/tests/unit/benchmark/scenarios/lib/test_create_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
@@ -6,10 +6,10 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import mock
+import unittest
-import yardstick.benchmark.scenarios.lib.create_volume
+from yardstick.benchmark.scenarios.lib import create_volume
class CreateVolumeTestCase(unittest.TestCase):
@@ -32,10 +32,9 @@ class CreateVolumeTestCase(unittest.TestCase):
}
}
- self.scenario = (
- yardstick.benchmark.scenarios.lib.create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={}))
+ self.scenario = create_volume.CreateVolume(
+ scenario_cfg=self.scenario_cfg,
+ context_cfg={})
def _stop_mock(self):
self._mock_cinder_client.stop()
@@ -50,10 +49,9 @@ class CreateVolumeTestCase(unittest.TestCase):
expected_im_name = self.scenario_cfg["options"]["image"]
expected_im_id = None
- scenario = (
- yardstick.benchmark.scenarios.lib.create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={}))
+ scenario = create_volume.CreateVolume(
+ scenario_cfg=self.scenario_cfg,
+ context_cfg={})
self.assertEqual(expected_vol_name, scenario.volume_name)
self.assertEqual(expected_vol_size, scenario.volume_size)
@@ -75,13 +73,32 @@ class CreateVolumeTestCase(unittest.TestCase):
mock_image_id.assert_called_once()
mock_create_volume.assert_called_once()
- @mock.patch.object(
- yardstick.benchmark.scenarios.lib.create_volume.CreateVolume, 'setup')
+ @mock.patch.object(create_volume.CreateVolume, 'setup')
def test_run_no_setup(self, scenario_setup):
self.scenario.setup_done = False
self.scenario.run()
scenario_setup.assert_called_once()
+ @mock.patch('yardstick.common.openstack_utils.create_volume')
+ @mock.patch('yardstick.common.openstack_utils.get_image_id')
+ @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
+ @mock.patch('yardstick.common.openstack_utils.get_glance_client')
+ def test_create_volume(self, mock_get_glance_client,
+ mock_get_cinder_client, mock_image_id,
+ mock_create_volume):
+ options = {
+ 'volume_name': 'yardstick_test_volume_01',
+ 'size': '256',
+ 'image': 'cirros-0.3.5'
+ }
+ args = {"options": options}
+ scenario = create_volume.CreateVolume(args, {})
+ scenario.run()
+ self.assertTrue(mock_create_volume.called)
+ self.assertTrue(mock_image_id.called)
+ self.assertTrue(mock_get_glance_client.called)
+ self.assertTrue(mock_get_cinder_client.called)
+
def main():
unittest.main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_flavor.py
index 4a91b8939..4a91b8939 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_flavor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_flavor.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
index 7592c8070..df2321292 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_floating_ip import DeleteFloatingIp
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_image.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
index 2bbf14d16..9edc2ff1d 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_image.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
@@ -19,7 +19,7 @@ class DeleteImageTestCase(unittest.TestCase):
@mock.patch('yardstick.common.openstack_utils.get_glance_client')
def test_delete_image(self, mock_get_glance_client, mock_image_id, mock_delete_image):
options = {
- 'image_name': 'yardstick_test_image_01'
+ 'image_name': 'yardstick_test_image_01'
}
args = {"options": options}
obj = DeleteImage(args, {})
@@ -28,6 +28,7 @@ class DeleteImageTestCase(unittest.TestCase):
self.assertTrue(mock_image_id.called)
self.assertTrue(mock_get_glance_client.called)
+
def main():
unittest.main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
index 9663fe9fb..73894a903 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py
new file mode 100644
index 000000000..aef99ee94
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from oslo_utils import uuidutils
+import unittest
+import mock
+
+import yardstick.common.openstack_utils as op_utils
+from yardstick.benchmark.scenarios.lib import delete_network
+
+
+class DeleteNetworkTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_delete_neutron_net = mock.patch.object(
+ op_utils, 'delete_neutron_net')
+ self.mock_delete_neutron_net = self._mock_delete_neutron_net.start()
+ self._mock_get_shade_client = mock.patch.object(
+ op_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_network, 'LOG')
+ self.mock_log = self._mock_log.start()
+ _uuid = uuidutils.generate_uuid()
+ self.args = {'options': {'network_id': _uuid}}
+ self._del_obj = delete_network.DeleteNetwork(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_neutron_net.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_neutron_net.return_value = True
+ self.assertTrue(self._del_obj.run({}))
+ self.mock_log.info.assert_called_once_with(
+ "Delete network successful!")
+
+ def test_run_fail(self):
+ self.mock_delete_neutron_net.return_value = False
+ self.assertFalse(self._del_obj.run({}))
+ self.mock_log.error.assert_called_once_with("Delete network failed!")
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_port.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_port.py
index 77b9c7009..de3179b2d 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_port.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_port.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_port import DeletePort
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_router.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router.py
index ab1ad5d35..73cb81278 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_router.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_router import DeleteRouter
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_router_gateway.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_gateway.py
index 1150dccda..3cfc4ed21 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_router_gateway.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_gateway.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_router_gateway import DeleteRouterGateway
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py
index 2cc9c9f37..67aff1091 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_router_interface import DeleteRouterInterface
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
index 622ead5ac..622ead5ac 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
index a11d0121b..9438b077a 100644
--- a/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_volume import DeleteVolume
diff --git a/tests/unit/benchmark/scenarios/lib/test_detach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
index 0cffcba15..87af63a55 100644
--- a/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
@@ -8,7 +8,6 @@
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.detach_volume import DetachVolume
diff --git a/tests/unit/benchmark/scenarios/lib/test_get_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
index bf12e0a32..bf12e0a32 100644
--- a/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_get_migrate_target_host.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_migrate_target_host.py
index f046c92ea..f046c92ea 100644
--- a/tests/unit/benchmark/scenarios/lib/test_get_migrate_target_host.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_migrate_target_host.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_get_numa_info.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_numa_info.py
index 680692fdc..50d5238d7 100644
--- a/tests/unit/benchmark/scenarios/lib/test_get_numa_info.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_numa_info.py
@@ -11,6 +11,11 @@ import mock
from yardstick.benchmark.scenarios.lib.get_numa_info import GetNumaInfo
+
+# pylint: disable=unused-argument
+# disable this for now; stacked @mock.patch decorators inject mocks in reverse order, so the argument lists are easy to get wrong
+
+
BASE = 'yardstick.benchmark.scenarios.lib.get_numa_info'
diff --git a/tests/unit/benchmark/scenarios/lib/test_get_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
index aebbf5416..aebbf5416 100644
--- a/tests/unit/benchmark/scenarios/lib/test_get_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
diff --git a/tests/unit/benchmark/scenarios/lib/test_get_server_ip.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server_ip.py
index 3d20d5439..3d20d5439 100644
--- a/tests/unit/benchmark/scenarios/lib/test_get_server_ip.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server_ip.py
diff --git a/tests/unit/benchmark/scenarios/networking/__init__.py b/yardstick/tests/unit/benchmark/scenarios/networking/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/networking/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/__init__.py
diff --git a/tests/unit/benchmark/scenarios/networking/imix_voice.yaml b/yardstick/tests/unit/benchmark/scenarios/networking/imix_voice.yaml
index b8f8e5358..b8f8e5358 100644
--- a/tests/unit/benchmark/scenarios/networking/imix_voice.yaml
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/imix_voice.yaml
diff --git a/tests/unit/benchmark/scenarios/networking/iperf3_sample_output.json b/yardstick/tests/unit/benchmark/scenarios/networking/iperf3_sample_output.json
index b56009ba1..b56009ba1 100644
--- a/tests/unit/benchmark/scenarios/networking/iperf3_sample_output.json
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/iperf3_sample_output.json
diff --git a/tests/unit/benchmark/scenarios/networking/iperf3_sample_output_udp.json b/yardstick/tests/unit/benchmark/scenarios/networking/iperf3_sample_output_udp.json
index 8173c8f64..8173c8f64 100644
--- a/tests/unit/benchmark/scenarios/networking/iperf3_sample_output_udp.json
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/iperf3_sample_output_udp.json
diff --git a/tests/unit/benchmark/scenarios/networking/ipv4_1flow_Packets_vpe.yaml b/yardstick/tests/unit/benchmark/scenarios/networking/ipv4_1flow_Packets_vpe.yaml
index f3046f463..f3046f463 100644
--- a/tests/unit/benchmark/scenarios/networking/ipv4_1flow_Packets_vpe.yaml
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/ipv4_1flow_Packets_vpe.yaml
diff --git a/tests/unit/benchmark/scenarios/networking/ipv4_throughput_vpe.yaml b/yardstick/tests/unit/benchmark/scenarios/networking/ipv4_throughput_vpe.yaml
index 2123e4705..2123e4705 100644
--- a/tests/unit/benchmark/scenarios/networking/ipv4_throughput_vpe.yaml
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/ipv4_throughput_vpe.yaml
diff --git a/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json b/yardstick/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json
index bba76cfa5..bba76cfa5 100755
--- a/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json
diff --git a/tests/unit/benchmark/scenarios/networking/netutilization_sample_output1.txt b/yardstick/tests/unit/benchmark/scenarios/networking/netutilization_sample_output1.txt
index f90457cb3..f90457cb3 100644
--- a/tests/unit/benchmark/scenarios/networking/netutilization_sample_output1.txt
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/netutilization_sample_output1.txt
diff --git a/tests/unit/benchmark/scenarios/networking/netutilization_sample_output2.txt b/yardstick/tests/unit/benchmark/scenarios/networking/netutilization_sample_output2.txt
index 417613ec1..417613ec1 100644
--- a/tests/unit/benchmark/scenarios/networking/netutilization_sample_output2.txt
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/netutilization_sample_output2.txt
diff --git a/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 4d3745230..4d3745230 100644
--- a/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
index d82a00931..d82a00931 100755
--- a/tests/unit/benchmark/scenarios/networking/test_netperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf_node.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
index 8be9bb94d..8be9bb94d 100755
--- a/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_netutilization.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netutilization.py
index 1227e056e..1227e056e 100644
--- a/tests/unit/benchmark/scenarios/networking/test_netutilization.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netutilization.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py
index 3e7a3c5ee..3e7a3c5ee 100644
--- a/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_nstat.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_nstat.py
index 4b58e06c1..7dd5351b1 100644
--- a/tests/unit/benchmark/scenarios/networking/test_nstat.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_nstat.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
@@ -9,13 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.nstat.Nstat
-
-from __future__ import absolute_import
-
-import unittest
-
import mock
+import unittest
from yardstick.benchmark.scenarios.networking import nstat
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
index 06353249a..06353249a 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
index d2be6f576..d2be6f576 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping6.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 005b53177..acd9027d3 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
@@ -9,13 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.pktgen.Pktgen
-
-from __future__ import absolute_import
-
+import mock
import unittest
-import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
@@ -127,9 +121,7 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
@@ -154,9 +146,7 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
@@ -181,9 +171,7 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110}'
@@ -314,13 +302,8 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 4
- p._get_available_queue_number = mock_result2
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
p.queue_number = p._enable_ovs_multiqueue()
self.assertEqual(p.queue_number, 4)
@@ -335,13 +318,8 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 1
- p._get_available_queue_number = mock_result2
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=1)
p.queue_number = p._enable_ovs_multiqueue()
self.assertEqual(p.queue_number, 1)
@@ -356,13 +334,8 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 4
- p._get_available_queue_number = mock_result2
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
self.assertRaises(RuntimeError, p._enable_ovs_multiqueue)
@@ -536,21 +509,10 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "virtio_net"
- p._get_vnic_driver_name = mock_result2
-
- mock_result3 = mock.Mock()
- mock_result3.return_value = 1
- p._get_usable_queue_number = mock_result3
-
- mock_result4 = mock.Mock()
- mock_result4.return_value = 4
- p._get_available_queue_number = mock_result4
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
p.multiqueue_setup()
@@ -566,21 +528,10 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "virtio_net"
- p._get_vnic_driver_name = mock_result2
-
- mock_result3 = mock.Mock()
- mock_result3.return_value = 1
- p._get_usable_queue_number = mock_result3
-
- mock_result4 = mock.Mock()
- mock_result4.return_value = 1
- p._get_available_queue_number = mock_result4
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=1)
p.multiqueue_setup()
@@ -596,13 +547,8 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "ixgbevf"
- p._get_vnic_driver_name = mock_result2
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
p.multiqueue_setup()
@@ -618,13 +564,8 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "ixgbevf"
- p._get_vnic_driver_name = mock_result2
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
p.multiqueue_setup()
@@ -677,28 +618,12 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_result = mock.Mock()
- mock_result.return_value = "virtio_net"
- p._get_vnic_driver_name = mock_result
-
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 4
- p._get_available_queue_number = mock_result2
-
- mock_result3 = mock.Mock()
- mock_result3.return_value = 4
- p._enable_ovs_multiqueue = mock_result3
-
- mock_result4 = mock.Mock()
- p._setup_irqmapping_ovs = mock_result4
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
+ p._enable_ovs_multiqueue = mock.Mock(return_value=4)
+ p._setup_irqmapping_ovs = mock.Mock()
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149300, "flows": 110, "ppm": 0}'
@@ -726,20 +651,10 @@ class PktgenTestCase(unittest.TestCase):
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_result1 = mock.Mock()
- mock_result1.return_value = "ixgbevf"
- p._get_vnic_driver_name = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 2
- p._get_sriov_queue_number = mock_result2
-
- mock_result3 = mock.Mock()
- p._setup_irqmapping_sriov = mock_result3
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ p._get_sriov_queue_number = mock.Mock(return_value=2)
+ p._setup_irqmapping_sriov = mock.Mock()
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149300, "flows": 110, "ppm": 0}'
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index c9eec4b94..99399abdc 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 ZTE and others.
#
@@ -9,12 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.pktgen.Pktgen
-
-from __future__ import absolute_import
-import unittest
-
import mock
+import unittest
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
index c2e35af75..1b12bd507 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Nokia and others.
#
@@ -6,7 +7,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-#!/usr/bin/env python
# Unittest for yardstick.benchmark.scenarios.networking.pktgen.PktgenDPDK
@@ -19,6 +19,10 @@ import mock
from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+# pylint: disable=unused-argument
+# disable this for now; stacked @mock.patch decorators inject mocks in reverse order, so the argument lists are easy to get wrong
+
+
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk_throughput.ssh')
class PktgenDPDKTestCase(unittest.TestCase):
diff --git a/tests/unit/benchmark/scenarios/networking/test_sfc.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_sfc.py
index 78c0352dd..78c0352dd 100644
--- a/tests/unit/benchmark/scenarios/networking/test_sfc.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_sfc.py
diff --git a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 016608a21..fb55b5ea0 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -26,7 +26,7 @@ import mock
from copy import deepcopy
-from tests.unit import STL_MOCKS
+from yardstick.tests.unit import STL_MOCKS
from yardstick.benchmark.scenarios.networking.vnf_generic import \
SshManager, NetworkServiceTestCase, IncorrectConfig, \
open_relative_file
@@ -35,6 +35,10 @@ from yardstick.network_services.vnf_generic.vnf.base import \
GenericTrafficGen, GenericVNF
+# pylint: disable=unused-argument
+# disable this for now; stacked @mock.patch decorators inject mocks in reverse order, so the argument lists are easy to get wrong
+
+
COMPLETE_TREX_VNFD = {
'vnfd:vnfd-catalog': {
'vnfd': [
@@ -354,7 +358,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
ssh_mock.execute = \
mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
ssh.from_node.return_value = ssh_mock
- for node, node_dict in self.context_cfg["nodes"].items():
+ for _, node_dict in self.context_cfg["nodes"].items():
with SshManager(node_dict) as conn:
self.assertIsNotNone(conn)
@@ -365,7 +369,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.scenario_cfg["traffic_options"]["flow"] = \
self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
result = '152.16.100.2-152.16.100.254'
- self.assertEqual(result, self.s._get_ip_flow_range('152.16.100.2-152.16.100.254'))
+ self.assertEqual(result, self.s._get_ip_flow_range(
+ '152.16.100.2-152.16.100.254'))
def test__get_ip_flow_range(self):
self.scenario_cfg["traffic_options"]["flow"] = \
@@ -397,21 +402,19 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.scenario_cfg["options"] = {}
self.scenario_cfg['options'] = {
'flow': {
- 'src_ip': [
- {
- 'tg__1': 'xe0',
- },
- ],
- 'dst_ip': [
- {
- 'tg__1': 'xe1',
- },
- ],
- 'public_ip': ['1.1.1.1'],
+ 'src_ip': [
+ {
+ 'tg__1': 'xe0',
+ },
+ ],
+ 'dst_ip': [
+ {
+ 'tg__1': 'xe1',
+ },
+ ],
+ 'public_ip': ['1.1.1.1'],
},
}
- result = {'flow': {'dst_ip0': '152.16.40.2-152.16.40.254',
- 'src_ip0': '152.16.100.2-152.16.100.254'}}
self.assertEqual({'flow': {}}, self.s._get_traffic_flow())
@@ -461,8 +464,10 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.map_topology_to_infrastructure()
nodes = self.context_cfg["nodes"]
- self.assertEqual("../../vnf_descriptors/tg_rfc2544_tpl.yaml", nodes['tg__1']['VNF model'])
- self.assertEqual("../../vnf_descriptors/vpe_vnf.yaml", nodes['vnf__1']['VNF model'])
+ self.assertEqual(
+ "../../vnf_descriptors/tg_rfc2544_tpl.yaml", nodes['tg__1']['VNF model'])
+ self.assertEqual("../../vnf_descriptors/vpe_vnf.yaml",
+ nodes['vnf__1']['VNF model'])
def test_map_topology_to_infrastructure_insufficient_nodes(self):
del self.context_cfg['nodes']['vnf__1']
@@ -499,7 +504,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
del interface['local_mac']
with mock.patch(
- "yardstick.benchmark.scenarios.networking.vnf_generic.LOG") as mock_log:
+ "yardstick.benchmark.scenarios.networking.vnf_generic.LOG"):
with self.assertRaises(IncorrectConfig) as raised:
self.s._resolve_topology()
@@ -514,7 +519,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.topology["vld"][0]['vnfd-connection-point-ref'][0])
with mock.patch(
- "yardstick.benchmark.scenarios.networking.vnf_generic.LOG") as mock_log:
+ "yardstick.benchmark.scenarios.networking.vnf_generic.LOG"):
with self.assertRaises(IncorrectConfig) as raised:
self.s._resolve_topology()
@@ -525,7 +530,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.topology["vld"][0]['vnfd-connection-point-ref'][:1]
with mock.patch(
- "yardstick.benchmark.scenarios.networking.vnf_generic.LOG") as mock_log:
+ "yardstick.benchmark.scenarios.networking.vnf_generic.LOG"):
with self.assertRaises(IncorrectConfig) as raised:
self.s._resolve_topology()
@@ -590,7 +595,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
tgen.name = "tgen__1"
vnf = mock.Mock(autospec=GenericVNF)
vnf.runs_traffic = False
- vnf.instantiate.side_effect = RuntimeError("error during instantiate")
+ vnf.instantiate.side_effect = RuntimeError(
+ "error during instantiate")
vnf.terminate = mock.Mock(return_value=True)
self.s.vnfs = [tgen, vnf]
self.s.traffic_profile = mock.Mock()
@@ -616,7 +622,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
def test___get_traffic_imix_exception(self):
with mock.patch.dict(self.scenario_cfg["traffic_options"], {'imix': ''}):
- self.assertEqual({'imix': {'64B': 100}}, self.s._get_traffic_imix())
+ self.assertEqual({'imix': {'64B': 100}},
+ self.s._get_traffic_imix())
def test__fill_traffic_profile(self):
with mock.patch.dict("sys.modules", STL_MOCKS):
@@ -641,7 +648,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
def test_teardown_exception(self):
vnf = mock.Mock(autospec=GenericVNF)
- vnf.terminate = mock.Mock(side_effect=RuntimeError("error duing terminate"))
+ vnf.terminate = mock.Mock(
+ side_effect=RuntimeError("error during terminate"))
vnf.name = str(vnf)
self.s.vnfs = [vnf]
self.s.traffic_profile = mock.Mock()
@@ -737,6 +745,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
NetworkServiceTestCase._probe_missing_values(netdevs, network)
assert network['vpci'] == '0000:00:19.0'
+ # TODO: Split this into several tests, for different IOError sub-types
def test_open_relative_path(self):
mock_open = mock.mock_open()
mock_open_result = mock_open()
@@ -747,7 +756,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
# test
with mock.patch(module_name, mock_open, create=True):
- self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+ self.assertEqual(open_relative_file(
+ 'foo', 'bar'), mock_open_result)
mock_open_call_count += 1 # one more call expected
self.assertEqual(mock_open.call_count, mock_open_call_count)
@@ -760,7 +770,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
raise IOError(errno.ENOENT, 'not found')
mock_open.side_effect = open_effect
- self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+ self.assertEqual(open_relative_file(
+ 'foo', 'bar'), mock_open_result)
mock_open_call_count += 2 # two more calls expected
self.assertEqual(mock_open.call_count, mock_open_call_count)
diff --git a/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index be8ac55d0..be8ac55d0 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
new file mode 100644
index 000000000..1923960e9
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -0,0 +1,221 @@
+# Copyright 2017 Nokia
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import time
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+
+
+class VsperfDPDKTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ "host": {
+ "ip": "10.229.47.137",
+ "user": "ubuntu",
+ "password": "ubuntu",
+ },
+ }
+ self.args = {
+ 'task_id': "1234-5678",
+ 'options': {
+ 'testname': 'pvp_tput',
+ 'traffic_type': 'rfc2544_throughput',
+ 'frame_size': '64',
+ 'test_params': 'TRAFFICGEN_DURATION=30;',
+ 'trafficgen_port1': 'ens4',
+ 'trafficgen_port2': 'ens5',
+ 'conf_file': 'vsperf-yardstick.conf',
+ 'setup_script': 'setup_yardstick.sh',
+ 'moongen_helper_file': '~/moongen.py',
+ 'moongen_host_ip': '10.5.201.151',
+ 'moongen_port1_mac': '8c:dc:d4:ae:7c:5c',
+ 'moongen_port2_mac': '8c:dc:d4:ae:7c:5d',
+ 'trafficgen_port1_nw': 'test2',
+ 'trafficgen_port2_nw': 'test3',
+ },
+ 'sla': {
+ 'metrics': 'throughput_rx_fps',
+ 'throughput_rx_fps': 500000,
+ 'action': 'monitor',
+ }
+ }
+
+ self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
+
+ self._mock_ssh = mock.patch(
+ 'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
+ self.mock_ssh = self._mock_ssh.start()
+ self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
+ self.mock_subprocess_call = self._mock_subprocess_call.start()
+
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_ssh.stop()
+ self._mock_subprocess_call.stop()
+
+ def test_setup(self):
+ # setup() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ def test_teardown(self):
+ # setup() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ self.scenario.teardown()
+ self.assertFalse(self.scenario.setup_done)
+
+ def test_is_dpdk_setup_no(self):
+ # setup() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ # is_dpdk_setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
+
+ result = self.scenario._is_dpdk_setup()
+ self.assertFalse(result)
+
+ def test_is_dpdk_setup_yes(self):
+ # setup() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ # is_dpdk_setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ result = self.scenario._is_dpdk_setup()
+ self.assertTrue(result)
+
+ @mock.patch.object(time, 'sleep')
+ def test_dpdk_setup_first(self, *args):
+ # setup() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ # is_dpdk_setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
+
+ self.scenario.dpdk_setup()
+ self.assertFalse(self.scenario._is_dpdk_setup())
+ self.assertTrue(self.scenario.dpdk_setup_done)
+
+ @mock.patch.object(time, 'sleep')
+ def test_dpdk_setup_next(self, *args):
+ # setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ self.scenario.dpdk_setup()
+ self.assertTrue(self.scenario._is_dpdk_setup())
+ self.assertTrue(self.scenario.dpdk_setup_done)
+
+ @mock.patch.object(time, 'sleep')
+ def test_dpdk_setup_runtime_error(self, *args):
+
+ # setup specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ self.assertTrue(self.scenario.setup_done)
+
+ self.assertRaises(RuntimeError, self.scenario.dpdk_setup)
+
+ @mock.patch.object(subprocess, 'check_output')
+ @mock.patch('time.sleep')
+ def test_run_ok(self, *args):
+ # setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ # run() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+ result = {}
+ self.scenario.run(result)
+
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+
+ def test_run_failed_vsperf_execution(self):
+ # setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, self.scenario.run, result)
+
+ def test_run_failed_csv_report(self):
+ # setup() specific mocks
+ self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+ self.mock_subprocess_call().execute.return_value = None
+
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ # run() specific mocks
+ self.mock_subprocess_call().execute.return_value = None
+ self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ result = {}
+ self.assertRaises(RuntimeError, self.scenario.run, result)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/tg_trex_tpl.yaml b/yardstick/tests/unit/benchmark/scenarios/networking/tg_trex_tpl.yaml
index b1641836b..b1641836b 100644
--- a/tests/unit/benchmark/scenarios/networking/tg_trex_tpl.yaml
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/tg_trex_tpl.yaml
diff --git a/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml b/yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml
index 1ac6c1f89..1ac6c1f89 100644
--- a/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/vpe_vnf_topology.yaml
diff --git a/tests/unit/benchmark/scenarios/parser/__init__.py b/yardstick/tests/unit/benchmark/scenarios/parser/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/parser/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/parser/__init__.py
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/yardstick/tests/unit/benchmark/scenarios/parser/test_parser.py
index ee2bbc07d..ee2bbc07d 100644
--- a/tests/unit/benchmark/scenarios/parser/test_parser.py
+++ b/yardstick/tests/unit/benchmark/scenarios/parser/test_parser.py
diff --git a/tests/unit/benchmark/scenarios/storage/__init__.py b/yardstick/tests/unit/benchmark/scenarios/storage/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/benchmark/scenarios/storage/__init__.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/__init__.py
diff --git a/tests/unit/benchmark/scenarios/storage/fio_read_sample_output.json b/yardstick/tests/unit/benchmark/scenarios/storage/fio_read_sample_output.json
index e9f642aba..e9f642aba 100644
--- a/tests/unit/benchmark/scenarios/storage/fio_read_sample_output.json
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/fio_read_sample_output.json
diff --git a/tests/unit/benchmark/scenarios/storage/fio_rw_sample_output.json b/yardstick/tests/unit/benchmark/scenarios/storage/fio_rw_sample_output.json
index 4c7501818..4c7501818 100644
--- a/tests/unit/benchmark/scenarios/storage/fio_rw_sample_output.json
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/fio_rw_sample_output.json
diff --git a/tests/unit/benchmark/scenarios/storage/fio_write_sample_output.json b/yardstick/tests/unit/benchmark/scenarios/storage/fio_write_sample_output.json
index 7c760e8bc..7c760e8bc 100644
--- a/tests/unit/benchmark/scenarios/storage/fio_write_sample_output.json
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/fio_write_sample_output.json
diff --git a/tests/unit/benchmark/scenarios/storage/test_bonnie.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_bonnie.py
index b3524e9a7..b98dceae7 100644
--- a/tests/unit/benchmark/scenarios/storage/test_bonnie.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_bonnie.py
@@ -17,7 +17,6 @@ import unittest
import mock
-from yardstick.common import utils
from yardstick.benchmark.scenarios.storage import bonnie
@@ -67,8 +66,10 @@ class BonnieTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, b.run, self.result)
+
def main():
unittest.main()
+
if __name__ == '__main__':
main()
diff --git a/tests/unit/benchmark/scenarios/storage/test_fio.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
index 0cffea224..0cffea224 100644
--- a/tests/unit/benchmark/scenarios/storage/test_fio.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
diff --git a/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py
index 095674f72..095674f72 100644
--- a/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py
diff --git a/tests/unit/benchmark/scenarios/storage/test_storperf.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 7b16bb37d..52786d7cb 100644
--- a/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -21,8 +21,12 @@ from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import storperf
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
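+# (reminder: stacked mock.patch decorators are applied bottom-up, so the mock
+# from the decorator closest to the test method arrives as the first
+# positional argument after self)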
+
+
def mocked_requests_config_post(*args, **kwargs):
- class MockResponseConfigPost:
+ class MockResponseConfigPost(object):
def __init__(self, json_data, status_code):
self.content = json_data
@@ -35,7 +39,7 @@ def mocked_requests_config_post(*args, **kwargs):
def mocked_requests_config_get(*args, **kwargs):
- class MockResponseConfigGet:
+ class MockResponseConfigGet(object):
def __init__(self, json_data, status_code):
self.content = json_data
@@ -48,7 +52,7 @@ def mocked_requests_config_get(*args, **kwargs):
def mocked_requests_job_get(*args, **kwargs):
- class MockResponseJobGet:
+ class MockResponseJobGet(object):
def __init__(self, json_data, status_code):
self.content = json_data
@@ -61,7 +65,7 @@ def mocked_requests_job_get(*args, **kwargs):
def mocked_requests_job_post(*args, **kwargs):
- class MockResponseJobPost:
+ class MockResponseJobPost(object):
def __init__(self, json_data, status_code):
self.content = json_data
@@ -72,7 +76,7 @@ def mocked_requests_job_post(*args, **kwargs):
def mocked_requests_job_delete(*args, **kwargs):
- class MockResponseJobDelete:
+ class MockResponseJobDelete(object):
def __init__(self, json_data, status_code):
self.content = json_data
@@ -82,7 +86,7 @@ def mocked_requests_job_delete(*args, **kwargs):
def mocked_requests_delete(*args, **kwargs):
- class MockResponseDelete:
+ class MockResponseDelete(object):
def __init__(self, json_data, status_code):
self.json_data = json_data
@@ -92,7 +96,7 @@ def mocked_requests_delete(*args, **kwargs):
def mocked_requests_delete_failed(*args, **kwargs):
- class MockResponseDeleteFailed:
+ class MockResponseDeleteFailed(object):
def __init__(self, json_data, status_code):
self.json_data = json_data
diff --git a/yardstick/tests/unit/benchmark/scenarios/test_base.py b/yardstick/tests/unit/benchmark/scenarios/test_base.py
new file mode 100644
index 000000000..a95e6bc86
--- /dev/null
+++ b/yardstick/tests/unit/benchmark/scenarios/test_base.py
@@ -0,0 +1,106 @@
+# Copyright 2017: Intel Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from yardstick.benchmark.scenarios import base
+
+
+class ScenarioTestCase(unittest.TestCase):
+
+ def test_get_scenario_type(self):
+ scenario_type = 'dummy scenario'
+
+ class DummyScenario(base.Scenario):
+ __scenario_type__ = scenario_type
+
+ self.assertEqual(scenario_type, DummyScenario.get_scenario_type())
+
+ def test_get_scenario_type_not_defined(self):
+ class DummyScenario(base.Scenario):
+ pass
+
+ self.assertEqual(str(None), DummyScenario.get_scenario_type())
+
+ def test_get_description(self):
+ docstring = """First line
+ Second line
+ Third line
+ """
+
+ class DummyScenario(base.Scenario):
+ __doc__ = docstring
+
+ self.assertEqual(docstring.splitlines()[0],
+ DummyScenario.get_description())
+
+ def test_get_description_empty(self):
+ class DummyScenario(base.Scenario):
+ pass
+
+ self.assertEqual(str(None), DummyScenario.get_description())
+
+ def test_get_types(self):
+ scenario_names = set(
+ scenario.__scenario_type__ for scenario in
+ base.Scenario.get_types() if hasattr(scenario,
+ '__scenario_type__'))
+ existing_scenario_class_names = {
+ 'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser'}
+ self.assertTrue(existing_scenario_class_names.issubset(scenario_names))
+
+ def test_get_cls_existing_scenario(self):
+ scenario_name = 'NSPerf'
+ scenario = base.Scenario.get_cls(scenario_name)
+ self.assertEqual(scenario_name, scenario.__scenario_type__)
+
+ def test_get_cls_non_existing_scenario(self):
+ wrong_scenario_name = 'Non-existing-scenario'
+ with self.assertRaises(RuntimeError) as exc:
+ base.Scenario.get_cls(wrong_scenario_name)
+ self.assertEqual('No such scenario type %s' % wrong_scenario_name,
+ str(exc.exception))
+
+ def test_get_existing_scenario(self):
+ scenario_name = 'NSPerf'
+ scenario_module = ('yardstick.benchmark.scenarios.networking.'
+ 'vnf_generic.NetworkServiceTestCase')
+ self.assertEqual(scenario_module, base.Scenario.get(scenario_name))
+
+ def test_get_non_existing_scenario(self):
+ wrong_scenario_name = 'Non-existing-scenario'
+ with self.assertRaises(RuntimeError) as exc:
+ base.Scenario.get(wrong_scenario_name)
+ self.assertEqual('No such scenario type %s' % wrong_scenario_name,
+ str(exc.exception))
+
+
+class IterScenarioClassesTestCase(unittest.TestCase):
+
+ def test_no_scenario_type_defined(self):
+ some_existing_scenario_class_names = [
+ 'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser']
+ scenario_types = [scenario.__scenario_type__ for scenario
+ in base._iter_scenario_classes()]
+ for class_name in some_existing_scenario_class_names:
+ self.assertIn(class_name, scenario_types)
+
+ def test_scenario_type_defined(self):
+ some_existing_scenario_class_names = [
+ 'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser']
+ for class_name in some_existing_scenario_class_names:
+ scenario_class = next(base._iter_scenario_classes(
+ scenario_type=class_name))
+ self.assertEqual(class_name, scenario_class.__scenario_type__)
diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py
index bf468489e..8a2f5f95b 100644
--- a/yardstick/tests/unit/common/test_openstack_utils.py
+++ b/yardstick/tests/unit/common/test_openstack_utils.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
@@ -9,12 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.common.openstack_utils
-
-from __future__ import absolute_import
+from oslo_utils import uuidutils
import unittest
import mock
+from shade import exc
from yardstick.common import openstack_utils
@@ -38,9 +35,51 @@ class GetHeatApiVersionTestCase(unittest.TestCase):
self.assertEqual(api_version, expected_result)
-def main():
- unittest.main()
+class GetNetworkIdTestCase(unittest.TestCase):
+
+ def test_get_network_id(self):
+ _uuid = uuidutils.generate_uuid()
+ mock_shade_client = mock.Mock()
+ mock_shade_client.list_networks = mock.Mock()
+ mock_shade_client.list_networks.return_value = [{'id': _uuid}]
+
+ output = openstack_utils.get_network_id(mock_shade_client,
+ 'network_name')
+ self.assertEqual(_uuid, output)
+
+ def test_get_network_id_no_network(self):
+ mock_shade_client = mock.Mock()
+ mock_shade_client.list_networks = mock.Mock()
+ mock_shade_client.list_networks.return_value = None
+
+ output = openstack_utils.get_network_id(mock_shade_client,
+ 'network_name')
+ self.assertEqual(None, output)
+
+
+class DeleteNeutronNetTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.delete_network = mock.Mock()
+
+ def test_delete_neutron_net(self):
+ self.mock_shade_client.delete_network.return_value = True
+ output = openstack_utils.delete_neutron_net(self.mock_shade_client,
+ 'network_id')
+ self.assertTrue(output)
+
+ def test_delete_neutron_net_fail(self):
+ self.mock_shade_client.delete_network.return_value = False
+ output = openstack_utils.delete_neutron_net(self.mock_shade_client,
+ 'network_id')
+ self.assertFalse(output)
+
-if __name__ == '__main__':
- main()
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_neutron_net_exception(self, mock_logger):
+ self.mock_shade_client.delete_network.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_neutron_net(self.mock_shade_client,
+ 'network_id')
+ self.assertFalse(output)
+ mock_logger.error.assert_called_once()
diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py
index faf70cdbc..e0a353812 100644
--- a/yardstick/tests/unit/orchestrator/test_heat.py
+++ b/yardstick/tests/unit/orchestrator/test_heat.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2017 Intel Corporation
#
@@ -9,62 +7,99 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.orchestrator.heat
-from contextlib import contextmanager
-from itertools import count
-from tempfile import NamedTemporaryFile
-import time
-import uuid
+import tempfile
import mock
+from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
+import shade
import unittest
from yardstick.benchmark.contexts import node
+from yardstick.common import exceptions
from yardstick.orchestrator import heat
-TARGET_MODULE = 'yardstick.orchestrator.heat'
-
-
-def mock_patch_target_module(inner_import):
- return mock.patch('.'.join([TARGET_MODULE, inner_import]))
-
-
-@contextmanager
-def timer():
- start = time.time()
- data = {'start': start}
- try:
- yield data
- finally:
- data['end'] = end = time.time()
- data['delta'] = end - start
-
-
-def index_value_iter(index, index_value, base_value=None):
- for current_index in count():
- if current_index == index:
- yield index_value
- else:
- yield base_value
-
+class FakeStack(object):
-def get_error_message(error):
- try:
- # py2
- return error.message
- except AttributeError:
- # py3
- return next((arg for arg in error.args if isinstance(arg, str)), None)
+ def __init__(self, outputs=None, status=None, id=None):
+ self.outputs = outputs
+ self.status = status
+ self.id = id
-class HeatContextTestCase(unittest.TestCase):
+class HeatStackTestCase(unittest.TestCase):
- def test_get_short_key_uuid(self):
- u = uuid.uuid4()
- k = heat.get_short_key_uuid(u)
- self.assertEqual(heat.HEAT_KEY_UUID_LENGTH, len(k))
- self.assertIn(k, str(u))
+ def setUp(self):
+ self.stack_name = 'STACK NAME'
+ with mock.patch.object(shade, 'openstack_cloud'):
+ self.heatstack = heat.HeatStack(self.stack_name)
+ self._mock_stack_create = mock.patch.object(self.heatstack._cloud,
+ 'create_stack')
+ self.mock_stack_create = self._mock_stack_create.start()
+ self._mock_stack_delete = mock.patch.object(self.heatstack._cloud,
+ 'delete_stack')
+ self.mock_stack_delete = self._mock_stack_delete.start()
+
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_stack_create.stop()
+ self._mock_stack_delete.stop()
+ heat._DEPLOYED_STACKS = {}
+
+ def test_create(self):
+ template = {'tkey': 'tval'}
+ heat_parameters = {'pkey': 'pval'}
+ outputs = [{'output_key': 'okey', 'output_value': 'oval'}]
+ id = uuidutils.generate_uuid()
+ self.mock_stack_create.return_value = FakeStack(
+ outputs=outputs, status=mock.Mock(), id=id)
+ mock_tfile = mock.Mock()
+ with mock.patch.object(tempfile._TemporaryFileWrapper, '__enter__',
+ return_value=mock_tfile):
+ self.heatstack.create(template, heat_parameters, True, 100)
+ mock_tfile.write.assert_called_once_with(jsonutils.dump_as_bytes(template))
+ mock_tfile.close.assert_called_once()
+
+ self.mock_stack_create.assert_called_once_with(
+ self.stack_name, template_file=mock_tfile.name, wait=True,
+ timeout=100, pkey='pval')
+ self.assertEqual({'okey': 'oval'}, self.heatstack.outputs)
+ self.assertEqual(heat._DEPLOYED_STACKS[id], self.heatstack._stack)
+
+ def test_stacks_exist(self):
+ self.assertEqual(0, self.heatstack.stacks_exist())
+ heat._DEPLOYED_STACKS['id'] = 'stack'
+ self.assertEqual(1, self.heatstack.stacks_exist())
+
+ def test_delete_not_uuid(self):
+ self.assertIsNone(self.heatstack.delete())
+
+ def test_delete_existing_uuid(self):
+ id = uuidutils.generate_uuid()
+ self.heatstack._stack = FakeStack(
+ outputs=mock.Mock(), status=mock.Mock(), id=id)
+ heat._DEPLOYED_STACKS[id] = self.heatstack._stack
+ delete_return = mock.Mock()
+ self.mock_stack_delete.return_value = delete_return
+
+ ret = self.heatstack.delete(wait=True)
+ self.assertEqual(delete_return, ret)
+ self.assertFalse(heat._DEPLOYED_STACKS)
+ self.mock_stack_delete.assert_called_once_with(id, wait=True)
+
+ def test_delete_bug_in_shade(self):
+ id = uuidutils.generate_uuid()
+ self.heatstack._stack = FakeStack(
+ outputs=mock.Mock(), status=mock.Mock(), id=id)
+ heat._DEPLOYED_STACKS[id] = self.heatstack._stack
+ self.mock_stack_delete.side_effect = TypeError()
+
+ ret = self.heatstack.delete(wait=True)
+ self.assertTrue(ret)
+ self.assertFalse(heat._DEPLOYED_STACKS)
+ self.mock_stack_delete.assert_called_once_with(id, wait=True)
class HeatTemplateTestCase(unittest.TestCase):
@@ -75,63 +110,53 @@ class HeatTemplateTestCase(unittest.TestCase):
def test_add_tenant_network(self):
self.template.add_network('some-network')
- self.assertEqual(
- self.template.resources['some-network']['type'],
- 'OS::Neutron::Net')
+ self.assertEqual('OS::Neutron::Net',
+ self.template.resources['some-network']['type'])
def test_add_provider_network(self):
self.template.add_network('some-network', 'physnet2', 'sriov')
- self.assertEqual(
- self.template.resources['some-network']['type'],
- 'OS::Neutron::ProviderNet')
- self.assertEqual(
- self.template.resources['some-network']['properties']['physical_network'],
- 'physnet2')
+ self.assertEqual(self.template.resources['some-network']['type'],
+ 'OS::Neutron::ProviderNet')
+ self.assertEqual(self.template.resources['some-network'][
+ 'properties']['physical_network'], 'physnet2')
def test_add_subnet(self):
netattrs = {'cidr': '10.0.0.0/24',
- 'provider': None, 'external_network': 'ext_net'}
- self.template.add_subnet(
- 'some-subnet', "some-network", netattrs['cidr'])
+ 'provider': None,
+ 'external_network': 'ext_net'}
+ self.template.add_subnet('some-subnet', "some-network",
+ netattrs['cidr'])
- self.assertEqual(
- self.template.resources['some-subnet']['type'],
- 'OS::Neutron::Subnet')
- self.assertEqual(
- self.template.resources['some-subnet']['properties']['cidr'],
- '10.0.0.0/24')
+ self.assertEqual(self.template.resources['some-subnet']['type'],
+ 'OS::Neutron::Subnet')
+ self.assertEqual(self.template.resources['some-subnet']['properties'][
+ 'cidr'], '10.0.0.0/24')
def test_add_router(self):
self.template.add_router('some-router', 'ext-net', 'some-subnet')
- self.assertEqual(
- self.template.resources['some-router']['type'],
- 'OS::Neutron::Router')
- self.assertIn(
- 'some-subnet',
- self.template.resources['some-router']['depends_on'])
+ self.assertEqual(self.template.resources['some-router']['type'],
+ 'OS::Neutron::Router')
+ self.assertIn('some-subnet',
+ self.template.resources['some-router']['depends_on'])
def test_add_router_interface(self):
- self.template.add_router_interface(
- 'some-router-if', 'some-router', 'some-subnet')
+ self.template.add_router_interface('some-router-if', 'some-router',
+ 'some-subnet')
- self.assertEqual(
- self.template.resources['some-router-if']['type'],
- 'OS::Neutron::RouterInterface')
- self.assertIn(
- 'some-subnet',
- self.template.resources['some-router-if']['depends_on'])
+ self.assertEqual(self.template.resources['some-router-if']['type'],
+ 'OS::Neutron::RouterInterface')
+ self.assertIn('some-subnet',
+ self.template.resources['some-router-if']['depends_on'])
def test_add_servergroup(self):
self.template.add_servergroup('some-server-group', 'anti-affinity')
- self.assertEqual(
- self.template.resources['some-server-group']['type'],
- 'OS::Nova::ServerGroup')
- self.assertEqual(
- self.template.resources['some-server-group']['properties']['policies'],
- ['anti-affinity'])
+ self.assertEqual(self.template.resources['some-server-group']['type'],
+ 'OS::Nova::ServerGroup')
+ self.assertEqual(self.template.resources['some-server-group'][
+ 'properties']['policies'], ['anti-affinity'])
def test__add_resources_to_template_raw(self):
test_context = node.NodeContext()
@@ -142,16 +167,13 @@ class HeatTemplateTestCase(unittest.TestCase):
test_context.keypair_name = "foo-key"
test_context.secgroup_name = "foo-secgroup"
test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
- heat_object = heat.HeatObject()
- heat_stack = heat.HeatStack("tmpStack")
- self.assertTrue(heat_stack.stacks_exist())
-
- test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t')
+ test_context.tmpfile = tempfile.NamedTemporaryFile(
+ delete=True, mode='w+t')
test_context.tmpfile.write("heat_template_version: 2015-04-30")
test_context.tmpfile.flush()
test_context.tmpfile.seek(0)
- heat_template = heat.HeatTemplate(heat_object)
+ heat_template = heat.HeatTemplate('template name')
heat_template.resources = {}
heat_template.add_network("network1")
@@ -163,324 +185,86 @@ class HeatTemplateTestCase(unittest.TestCase):
heat_template.add_router("router1", "gw1", "subnet1")
heat_template.add_router_interface("router_if1", "router1", "subnet1")
heat_template.add_port("port1", "network1", "subnet1", "normal")
- heat_template.add_port(
- "port2",
- "network2",
- "subnet2",
- "normal",
- sec_group_id="sec_group1",
- provider="not-sriov")
- heat_template.add_port(
- "port3",
- "network2",
- "subnet2",
- "normal",
- sec_group_id="sec_group1",
- provider="sriov")
- heat_template.add_floating_ip(
- "floating_ip1", "network1", "port1", "router_if1")
- heat_template.add_floating_ip(
- "floating_ip2", "network2", "port2", "router_if2", "foo-secgroup")
- heat_template.add_floating_ip_association(
- "floating_ip1_association", "floating_ip1", "port1")
+ heat_template.add_port("port2", "network2", "subnet2", "normal",
+ sec_group_id="sec_group1", provider="not-sriov")
+ heat_template.add_port("port3", "network2", "subnet2", "normal",
+ sec_group_id="sec_group1", provider="sriov")
+ heat_template.add_floating_ip("floating_ip1", "network1", "port1",
+ "router_if1")
+ heat_template.add_floating_ip("floating_ip2", "network2", "port2",
+ "router_if2", "foo-secgroup")
+ heat_template.add_floating_ip_association("floating_ip1_association",
+ "floating_ip1", "port1")
heat_template.add_servergroup("server_grp2", "affinity")
heat_template.add_servergroup("server_grp3", "anti-affinity")
heat_template.add_security_group("security_group")
+ heat_template.add_server(name="server1", image="image1",
+ flavor="flavor1", flavors=[])
+ heat_template.add_server_group(name="servergroup",
+ policies=["policy1", "policy2"])
+ heat_template.add_server_group(name="servergroup",
+ policies="policy1")
heat_template.add_server(
- name="server1", image="image1", flavor="flavor1", flavors=[])
- heat_template.add_server_group(
- name="servergroup", policies=["policy1", "policy2"])
- heat_template.add_server_group(name="servergroup", policies="policy1")
- heat_template.add_server(
- name="server2",
- image="image1",
- flavor="flavor1",
- flavors=[],
- ports=[
- "port1",
- "port2"],
- networks=[
- "network1",
- "network2"],
- scheduler_hints="hints1",
- user="user1",
- key_name="foo-key",
- user_data="user",
- metadata={
- "cat": 1,
- "doc": 2},
- additional_properties={
- "prop1": 1,
- "prop2": 2})
+ name="server2", image="image1", flavor="flavor1", flavors=[],
+ ports=["port1", "port2"], networks=["network1", "network2"],
+ scheduler_hints="hints1", user="user1", key_name="foo-key",
+ user_data="user", metadata={"cat": 1, "doc": 2},
+ additional_properties={"prop1": 1, "prop2": 2})
heat_template.add_server(
- name="server2",
- image="image1",
- flavor="flavor1",
- flavors=[
- "flavor1",
- "flavor2"],
- ports=[
- "port1",
- "port2"],
- networks=[
- "network1",
- "network2"],
- scheduler_hints="hints1",
- user="user1",
- key_name="foo-key",
- user_data="user",
- metadata={
- "cat": 1,
- "doc": 2},
- additional_properties={
- "prop1": 1,
- "prop2": 2})
+ name="server2", image="image1", flavor="flavor1",
+ flavors=["flavor1", "flavor2"], ports=["port1", "port2"],
+ networks=["network1", "network2"], scheduler_hints="hints1",
+ user="user1", key_name="foo-key", user_data="user",
+ metadata={"cat": 1, "doc": 2},
+ additional_properties={"prop1": 1, "prop2": 2})
heat_template.add_server(
- name="server2",
- image="image1",
- flavor="flavor1",
- flavors=[
- "flavor3",
- "flavor4"],
- ports=[
- "port1",
- "port2"],
- networks=[
- "network1",
- "network2"],
- scheduler_hints="hints1",
- user="user1",
- key_name="foo-key",
- user_data="user",
- metadata={
- "cat": 1,
- "doc": 2},
- additional_properties={
- "prop1": 1,
- "prop2": 2})
- heat_template.add_flavor(
- name="flavor1",
- vcpus=1,
- ram=2048,
- disk=1,
- extra_specs={
- "cat": 1,
- "dog": 2})
+ name="server2", image="image1", flavor="flavor1",
+ flavors=["flavor3", "flavor4"], ports=["port1", "port2"],
+ networks=["network1", "network2"], scheduler_hints="hints1",
+ user="user1", key_name="foo-key", user_data="user",
+ metadata={"cat": 1, "doc": 2},
+ additional_properties={"prop1": 1, "prop2": 2})
+ heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,
+ extra_specs={"cat": 1, "dog": 2})
heat_template.add_flavor(name=None, vcpus=1, ram=2048)
heat_template.add_server(
- name="server1",
- image="image1",
- flavor="flavor1",
- flavors=[],
- ports=[
- "port1",
- "port2"],
- networks=[
- "network1",
- "network2"],
- scheduler_hints="hints1",
- user="user1",
- key_name="foo-key",
- user_data="user",
- metadata={
- "cat": 1,
- "doc": 2},
- additional_properties={
- "prop1": 1,
- "prop2": 2})
+ name="server1", image="image1", flavor="flavor1", flavors=[],
+ ports=["port1", "port2"], networks=["network1", "network2"],
+ scheduler_hints="hints1", user="user1", key_name="foo-key",
+ user_data="user", metadata={"cat": 1, "doc": 2},
+ additional_properties={"prop1": 1, "prop2": 2})
heat_template.add_network("network1")
heat_template.add_flavor("test")
- self.assertEqual(
- heat_template.resources['test']['type'], 'OS::Nova::Flavor')
-
- @mock_patch_target_module('op_utils')
- @mock_patch_target_module('heatclient')
- def test_create_negative(self, mock_heat_client_class, mock_op_utils):
- self.template.HEAT_WAIT_LOOP_INTERVAL = 0
- mock_heat_client = mock_heat_client_class() # get the constructed mock
-
- # populate attributes of the constructed mock
- mock_heat_client.stacks.get().stack_status_reason = 'the reason'
-
- expected_status_calls = 0
- expected_constructor_calls = 1 # above, to get the instance
- expected_create_calls = 0
- expected_op_utils_usage = 0
-
- with mock.patch.object(self.template, 'status', return_value=None) as mock_status:
- # block with timeout hit
- timeout = 0
- with self.assertRaises(RuntimeError) as raised, timer():
- self.template.create(block=True, timeout=timeout)
-
- # ensure op_utils was used
- expected_op_utils_usage += 1
- self.assertEqual(
- mock_op_utils.get_session.call_count, expected_op_utils_usage)
- self.assertEqual(
- mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
- self.assertEqual(
- mock_op_utils.get_heat_api_version.call_count,
- expected_op_utils_usage)
-
- # ensure the constructor and instance were used
- self.assertEqual(mock_heat_client_class.call_count,
- expected_constructor_calls)
- self.assertEqual(
- mock_heat_client.stacks.create.call_count,
- expected_create_calls)
-
- # ensure that the status was used
- self.assertGreater(mock_status.call_count, expected_status_calls)
- expected_status_calls = mock_status.call_count # synchronize the value
-
- # ensure the expected exception was raised
- error_message = get_error_message(raised.exception)
- self.assertIn('timeout', error_message)
- self.assertNotIn('the reason', error_message)
-
- # block with create failed
- timeout = 10
- mock_status.side_effect = iter([None, None, u'CREATE_FAILED'])
- with self.assertRaises(RuntimeError) as raised, timer():
- self.template.create(block=True, timeout=timeout)
-
- # ensure the existing heat_client was used and op_utils was used
- # again
- self.assertEqual(
- mock_op_utils.get_session.call_count, expected_op_utils_usage)
- self.assertEqual(
- mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
- self.assertEqual(
- mock_op_utils.get_heat_api_version.call_count,
- expected_op_utils_usage)
-
- # ensure the constructor was not used but the instance was used
- self.assertEqual(mock_heat_client_class.call_count,
- expected_constructor_calls)
- self.assertEqual(
- mock_heat_client.stacks.create.call_count,
- expected_create_calls)
-
- # ensure that the status was used three times
- expected_status_calls += 3
- self.assertEqual(mock_status.call_count, expected_status_calls)
-
- # NOTE(elfoley): This needs to be split into multiple tests.
- # The lines where the template is reset should serve as a guide for where
- # to split.
- @mock_patch_target_module('op_utils')
- @mock_patch_target_module('heatclient')
- def test_create(self, mock_heat_client_class, mock_op_utils):
- self.template.HEAT_WAIT_LOOP_INTERVAL = 0.2
- mock_heat_client = mock_heat_client_class()
-
- # populate attributes of the constructed mock
- mock_heat_client.stacks.get().outputs = [
- {'output_key': 'key1', 'output_value': 'value1'},
- {'output_key': 'key2', 'output_value': 'value2'},
- {'output_key': 'key3', 'output_value': 'value3'},
- ]
- expected_outputs = { # pylint: disable=unused-variable
- 'key1': 'value1',
- 'key2': 'value2',
- 'key3': 'value3',
- }
-
- expected_status_calls = 0
- expected_constructor_calls = 1 # above, to get the instance
- expected_create_calls = 0
- expected_op_utils_usage = 0
-
- with mock.patch.object(self.template, 'status') as mock_status:
- self.template.name = 'no block test'
- mock_status.return_value = None
-
- # no block
- self.assertIsInstance(self.template.create(
- block=False, timeout=2), heat.HeatStack)
-
- # ensure op_utils was used
- expected_op_utils_usage += 1
- self.assertEqual(
- mock_op_utils.get_session.call_count, expected_op_utils_usage)
- self.assertEqual(
- mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
- self.assertEqual(
- mock_op_utils.get_heat_api_version.call_count,
- expected_op_utils_usage)
-
- # ensure the constructor and instance were used
- self.assertEqual(mock_heat_client_class.call_count,
- expected_constructor_calls)
- self.assertEqual(
- mock_heat_client.stacks.create.call_count,
- expected_create_calls)
-
- # ensure that the status was not used
- self.assertEqual(mock_status.call_count, expected_status_calls)
-
- # ensure no outputs because this requires blocking
- self.assertEqual(self.template.outputs, {})
-
- # block with immediate complete
- self.template.name = 'block, immediate complete test'
-
- mock_status.return_value = self.template.HEAT_CREATE_COMPLETE_STATUS
- self.assertIsInstance(self.template.create(
- block=True, timeout=2), heat.HeatStack)
-
- # ensure existing instance was re-used and op_utils was not used
- self.assertEqual(mock_heat_client_class.call_count,
- expected_constructor_calls)
- self.assertEqual(
- mock_heat_client.stacks.create.call_count,
- expected_create_calls)
-
- # ensure status was checked once
- expected_status_calls += 1
- self.assertEqual(mock_status.call_count, expected_status_calls)
-
- # reset template outputs
- self.template.outputs = None
-
- # block with delayed complete
- self.template.name = 'block, delayed complete test'
-
- success_index = 2
- mock_status.side_effect = index_value_iter(
- success_index, self.template.HEAT_CREATE_COMPLETE_STATUS)
- self.assertIsInstance(self.template.create(
- block=True, timeout=2), heat.HeatStack)
-
- # ensure existing instance was re-used and op_utils was not used
- self.assertEqual(mock_heat_client_class.call_count,
- expected_constructor_calls)
- self.assertEqual(
- mock_heat_client.stacks.create.call_count,
- expected_create_calls)
-
- # ensure status was checked three more times
- expected_status_calls += 1 + success_index
- self.assertEqual(mock_status.call_count, expected_status_calls)
-
-
-class HeatStackTestCase(unittest.TestCase):
-
- def test_delete_calls__delete_multiple_times(self):
- stack = heat.HeatStack('test')
- stack.uuid = 1
- with mock.patch.object(stack, "_delete") as delete_mock:
- stack.delete()
- # call once and then call again if uuid is not none
- self.assertGreater(delete_mock.call_count, 1)
-
- def test_delete_all_calls_delete(self):
- # we must patch the object before we create an instance
- # so we can override delete() in all the instances
- with mock.patch.object(heat.HeatStack, "delete") as delete_mock:
- stack = heat.HeatStack('test')
- stack.uuid = 1
- stack.delete_all()
- self.assertGreater(delete_mock.call_count, 0)
+ self.assertEqual(heat_template.resources['test']['type'],
+ 'OS::Nova::Flavor')
+
+ def test_create_not_block(self):
+ heat_stack = mock.Mock()
+ with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+ ret = self.template.create(block=False)
+ heat_stack.create.assert_called_once_with(
+ self.template._template, self.template.heat_parameters, False,
+ 3600)
+ self.assertEqual(heat_stack, ret)
+
+ def test_create_block(self):
+ heat_stack = mock.Mock()
+ heat_stack.status = self.template.HEAT_STATUS_COMPLETE
+ with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+ ret = self.template.create(block=True)
+ heat_stack.create.assert_called_once_with(
+ self.template._template, self.template.heat_parameters, True,
+ 3600)
+ self.assertEqual(heat_stack, ret)
+
+ def test_create_block_status_no_complete(self):
+ heat_stack = mock.Mock()
+ heat_stack.status = 'other status'
+ with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+ self.assertRaises(exceptions.HeatTemplateError,
+ self.template.create, block=True)
+ heat_stack.create.assert_called_once_with(
+ self.template._template, self.template.heat_parameters, True,
+ 3600)