-rw-r--r--  .coveragerc  1
-rw-r--r--  .gitignore  3
-rw-r--r--  README.rst  2
-rw-r--r--  ci/docker/yardstick-ci/Dockerfile  2
-rw-r--r--  docs/configguide/yardstick_testcases/01-introduction.rst  38
-rw-r--r--  docs/configguide/yardstick_testcases/02-methodology.rst  181
-rw-r--r--  docs/configguide/yardstick_testcases/03-list-of-tcs.rst  72
-rw-r--r--  docs/configguide/yardstick_testcases/04-vtc-overview.rst  114
-rwxr-xr-x  docs/configguide/yardstick_testcases/Yardstick_task_templates.rst  155
-rw-r--r--  docs/configguide/yardstick_testcases/glossary.rst  33
-rw-r--r--  docs/configguide/yardstick_testcases/index.rst  12
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc001.rst (renamed from docs/yardstick/opnfv_yardstick_tc001.rst)  42
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc002.rst (renamed from docs/yardstick/opnfv_yardstick_tc002.rst)  35
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst  72
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst  139
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst  157
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst  85
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst  84
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst  77
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc012.rst (renamed from docs/yardstick/opnfv_yardstick_tc012.rst)  61
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst  69
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst  136
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst  152
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst  67
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst  99
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst  99
-rw-r--r--  docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst  60
-rw-r--r--  docs/configguide/yardstick_testcases/testcase_description_v2_template.rst  64
-rw-r--r--  docs/templates/testcase_description_v2_template.rst  43
-rw-r--r--  docs/userguide/yardstick_framework/03-installation.rst (renamed from docs/user_guides/framework/03-installation.rst)  3
-rw-r--r--  docs/userguide/yardstick_framework/index.rst (renamed from docs/user_guides/framework/index.rst)  0
-rw-r--r--  docs/vTC/README.rst  87
-rw-r--r--  docs/vTC/abbreviations.rst  5
-rw-r--r--  docs/yardstick/index.rst  21
-rw-r--r--  docs/yardstick/opnfv_yardstick_tc019.rst  129
-rw-r--r--  etc/yardstick/nodes/compass_sclab_physical/pod.yaml  42
-rw-r--r--  etc/yardstick/yardstick.conf.sample  5
-rwxr-xr-x  run_tests.sh  2
-rw-r--r--  samples/cyclictest-node-context.yaml  50
-rw-r--r--  samples/dummy-no-context.yaml  14
-rw-r--r--  samples/dummy.yaml  17
-rwxr-xr-x  samples/ha-baremetal.yaml  45
-rwxr-xr-x  samples/ha-service.yaml  42
-rw-r--r--  samples/lmbench.yaml  22
-rw-r--r--  samples/parser.yaml  21
-rw-r--r--  samples/ping-serial.yaml  11
-rw-r--r--  samples/ping6.yaml  28
-rw-r--r--  samples/pktgen.yaml  22
-rwxr-xr-x  samples/serviceha.yaml  29
-rw-r--r--  samples/tosca.yaml  149
-rw-r--r--  samples/unixbench.yaml  35
-rw-r--r--  samples/yang.yaml  687
-rwxr-xr-x  setup.py  10
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml  48
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml  26
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml  32
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml  58
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml  53
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml  38
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml  32
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml  38
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml  31
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml  28
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml  27
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml  85
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml  85
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml  22
-rw-r--r--  tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml  4
-rw-r--r--  tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml  18
-rw-r--r--  tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml  4
-rw-r--r--  tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml  18
-rw-r--r--  tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml  18
-rw-r--r--  tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml  18
-rw-r--r--  tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml  18
-rw-r--r--  tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml  18
-rw-r--r--  tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml  4
-rw-r--r--  tests/opnfv/test_suites/opnfv_vTC_daily.yaml  16
-rw-r--r--  tests/opnfv/test_suites/opnfv_vTC_weekly.yaml  16
-rw-r--r--  tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml  18
-rw-r--r--  tests/unit/benchmark/contexts/test_dummy.py  31
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py  77
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_process.py  51
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_basemonitor.py  84
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor.py  83
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_command.py  79
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_process.py  56
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_serviceha.py  158
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_cyclictest.py  159
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_unixbench.py  169
-rw-r--r--  tests/unit/benchmark/scenarios/dummy/__init__.py  0
-rw-r--r--  tests/unit/benchmark/scenarios/dummy/test_dummy.py  33
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_ping6.py  99
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py  48
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py  51
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py  48
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py  51
-rw-r--r--  tests/unit/benchmark/scenarios/parser/__init__.py  0
-rw-r--r--  tests/unit/benchmark/scenarios/parser/test_parser.py  58
-rw-r--r--  tests/unit/dispatcher/__init__.py  0
-rw-r--r--  tests/unit/dispatcher/test_influxdb.py  124
-rw-r--r--  tests/unit/dispatcher/test_influxdb_line_protocol.py  55
-rwxr-xr-x  tools/ubuntu-server-cloudimg-modify.sh  7
-rw-r--r--  yardstick/__init__.py  7
-rw-r--r--  yardstick/benchmark/contexts/dummy.py  38
-rw-r--r--  yardstick/benchmark/contexts/heat.py  23
-rw-r--r--  yardstick/benchmark/contexts/node.py  11
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py  93
-rw-r--r-- [-rwxr-xr-x]  yardstick/benchmark/runners/iteration.py  101
-rw-r--r--  yardstick/benchmark/runners/sequence.py  2
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/attacker/__init__.py  0
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py  129
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py  67
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py  47
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker_conf.yaml  13
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash  27
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash  20
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash  18
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash  18
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml  12
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash  21
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/monitor.py  114
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/monitor/__init__.py  0
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py  140
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py  108
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py  81
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py  166
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py  105
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py  8
-rw-r--r--  yardstick/benchmark/scenarios/compute/unixbench.py  156
-rw-r--r--  yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash  46
-rw-r--r--  yardstick/benchmark/scenarios/dummy/__init__.py  0
-rw-r--r--  yardstick/benchmark/scenarios/dummy/dummy.py  36
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6.py  119
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6_benchmark.bash  20
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6_metadata.txt  82
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6_setup.bash  84
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6_teardown.bash  58
-rw-r--r--  yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py  85
-rw-r--r--  yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py  92
-rw-r--r--  yardstick/benchmark/scenarios/networking/vtc_throughput.py  85
-rw-r--r--  yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py  91
-rw-r--r--  yardstick/benchmark/scenarios/parser/__init__.py  0
-rw-r--r--  yardstick/benchmark/scenarios/parser/parser.py  80
-rwxr-xr-x  yardstick/benchmark/scenarios/parser/parser.sh  51
-rwxr-xr-x  yardstick/benchmark/scenarios/parser/parser_setup.sh  16
-rwxr-xr-x  yardstick/benchmark/scenarios/parser/parser_teardown.sh  13
-rwxr-xr-x  yardstick/cmd/commands/task.py  18
-rw-r--r--  yardstick/dispatcher/influxdb.py  149
-rw-r--r--  yardstick/dispatcher/influxdb_line_protocol.py  114
-rw-r--r--  yardstick/ssh.py  7
-rw-r--r--  yardstick/vTC/__init__.py  0
-rw-r--r--  yardstick/vTC/apexlake/.gitignore  2
-rw-r--r--  yardstick/vTC/apexlake/MANIFEST.in  7
-rw-r--r--  yardstick/vTC/apexlake/README.md  116
-rw-r--r--  yardstick/vTC/apexlake/__init__.py  17
-rw-r--r--  yardstick/vTC/apexlake/apexlake.conf  69
-rwxr-xr-x  yardstick/vTC/apexlake/bin/run_tests.sh  1
-rw-r--r--  yardstick/vTC/apexlake/docs/source/api.rst  5
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/__init__.py  17
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/api.py  148
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py  281
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/__init__.py  17
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/benchmark_base_class.py  3
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_benchmark.py  12
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_noisy_neighbors_benchmark.py  4
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/multi_tenancy_throughput_benchmark.py  4
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/rfc2544_throughput_benchmark.py  10
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/benchmarks/test_benchmark.py  2
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/common.py  600
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/constants/conf_file_sections.py  26
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/deployment_unit.py  119
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/heat_manager.py  7
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/heat_template_generation.py  11
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/libraries/packet_checker/test_sniff.c  2
-rw-r--r--  yardstick/vTC/apexlake/experimental_framework/packet_generators/dpdk_packet_generator.py  5
-rw-r--r--  yardstick/vTC/apexlake/heat_templates/stress_workload.yaml  112
-rw-r--r--  yardstick/vTC/apexlake/heat_templates/vTC.yaml  167
-rw-r--r--  yardstick/vTC/apexlake/setup.py  38
-rw-r--r--  yardstick/vTC/apexlake/tests/api_test.py  143
-rw-r--r--  yardstick/vTC/apexlake/tests/benchmarking_unit_test.py  477
-rw-r--r--  yardstick/vTC/apexlake/tests/common_test.py  648
-rw-r--r--  yardstick/vTC/apexlake/tests/conf_file_sections_test.py  30
-rw-r--r--  yardstick/vTC/apexlake/tests/data/common/conf.cfg  43
-rw-r--r--  yardstick/vTC/apexlake/tests/data/common/file_replacement.txt  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait.tmp  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/benchmark_1.csv  3
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/metadata.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_experiments/results_benchmark_1.csv  5
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait.tmp  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml  199
-rw-r--r--  yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml.json  1
-rw-r--r--  yardstick/vTC/apexlake/tests/deployment_unit_test.py  273
-rw-r--r--  yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py  2
-rw-r--r--  yardstick/vTC/apexlake/tests/generates_template_test.py  39
-rw-r--r--  yardstick/vTC/apexlake/tests/heat_manager_test.py  11
-rw-r--r--  yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py  10
-rw-r--r--  yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py  22
-rw-r--r--  yardstick/vTC/apexlake/tests/multi_tenancy_throughput_benchmark_test.py  3
-rw-r--r--  yardstick/vTC/apexlake/tests/rfc2544_throughput_benchmark_test.py  19
-rw-r--r--  yardstick/vTC/apexlake/tests/tree_node_test.py  97
210 files changed, 13017 insertions, 905 deletions
diff --git a/.coveragerc b/.coveragerc
index 07ca20984..d2e3447da 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -5,3 +5,4 @@ source = yardstick
[report]
ignore_errors = True
precision = 3
+omit = yardstick/vTC/*
diff --git a/.gitignore b/.gitignore
index 162687f8d..6f462f55a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,5 +26,6 @@ test.log
.testrepository/
cover/
.*.sw?
-/output/
+/docs_build/
+/docs_output/
/releng/
diff --git a/README.rst b/README.rst
index 8cb360040..582622264 100644
--- a/README.rst
+++ b/README.rst
@@ -80,7 +80,7 @@ Example setup known to work for development and test:
- Cloud: Mirantis OpenStack 6.0 deployed using Virtualbox
Install dependencies:
-$ sudo apt-get install python-virtualenv python-dev libffi-dev libssl-dev
+$ sudo apt-get install python-virtualenv python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev
$ sudo easy_install -U setuptools
Create a virtual environment:
diff --git a/ci/docker/yardstick-ci/Dockerfile b/ci/docker/yardstick-ci/Dockerfile
index 229b91227..a1cf9160f 100644
--- a/ci/docker/yardstick-ci/Dockerfile
+++ b/ci/docker/yardstick-ci/Dockerfile
@@ -28,6 +28,8 @@ RUN apt-get update && apt-get install -y \
libssl-dev \
python \
python-dev \
+ libxml2-dev \
+ libxslt1-dev \
python-setuptools && \
easy_install -U setuptools
diff --git a/docs/configguide/yardstick_testcases/01-introduction.rst b/docs/configguide/yardstick_testcases/01-introduction.rst
new file mode 100644
index 000000000..6cca2875e
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/01-introduction.rst
@@ -0,0 +1,38 @@
+============
+Introduction
+============
+
+**Welcome to Yardstick's documentation!**
+
+.. _Yardstick: https://wiki.opnfv.org/yardstick
+
+Yardstick_ is an OPNFV Project.
+
+The project's goal is to verify infrastructure compliance, from the perspective
+of a :term:`VNF`.
+
+The Project's scope is the development of a test framework, *Yardstick*, test
+cases and test stimuli to enable :term:`NFVI` verification.
+The Project also includes a sample :term:`VNF`, the :term:`VTC` and its
+experimental framework, *ApexLake*!
+
+The chapter :doc:`02-methodology` describes the methodology implemented by the
+Yardstick Project for :term:`NFVI` verification. The chapter
+:doc:`03-list-of-tcs` includes a list of available Yardstick test cases.
+
+Yardstick is used for verifying the OPNFV infrastructure and some of the OPNFV
+features, listed in :doc:`03-list-of-tcs`.
+
+The *Yardstick* framework is deployed in several OPNFV community labs. It is
+installer, infrastructure and application independent.
+
+.. _Pharos: https://wiki.opnfv.org/pharos
+
+.. seealso:: Pharos_ for information on OPNFV community labs.
+
+Contact Yardstick
+=================
+
+Feedback? `Contact us`_
+
+.. _Contact us: mailto:opnfv-users@lists.opnfv.org
diff --git a/docs/configguide/yardstick_testcases/02-methodology.rst b/docs/configguide/yardstick_testcases/02-methodology.rst
new file mode 100644
index 000000000..5097c566b
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/02-methodology.rst
@@ -0,0 +1,181 @@
+===========
+Methodology
+===========
+
+Abstract
+========
+
+This chapter describes the methodology implemented by the Yardstick project for
+verifying the NFV Infrastructure from the perspective of a VNF.
+
+ETSI-NFV
+========
+
+.. _NFV-TST001: https://docbox.etsi.org/ISG/NFV/Open/Drafts/TST001_-_Pre-deployment_Validation/
+
+The document ETSI GS NFV-TST001_, "Pre-deployment Testing; Report on Validation
+of NFV Environments and Services", recommends methods for pre-deployment
+testing of the functional components of an NFV environment.
+
+The Yardstick project implements the methodology described in chapter 6, "Pre-
+deployment validation of NFV infrastructure".
+
+The methodology consists of decomposing the typical VNF work-load performance
+metrics into a number of characteristics/performance vectors, each of which
+can be represented by distinct test-cases.
+
+The methodology includes five steps:
+
+* *Step1:* Define Infrastructure - the HW, SW and corresponding configuration
+ target for validation; the OPNFV infrastructure, in OPNFV community labs.
+
+* *Step2:* Identify VNF type - the application for which the infrastructure is
+ to be validated, and its requirements on the underlying infrastructure.
+
+* *Step3:* Select test cases - select, from the list of available Yardstick
+ test cases, the ones relevant to the workload that represents the
+ application for which the infrastructure is to be validated.
+
+* *Step4:* Execute tests - define the duration and number of iterations for
+ the selected test cases; test runs are automated via OPNFV Jenkins Jobs.
+
+* *Step5:* Collect results - using the common API for result collection.
+
+Metrics
+=======
+
+The metrics, as defined by ETSI GS NFV-TST001, are shown in
+:ref:`Table1 <table2_1>`, :ref:`Table2 <table2_2>` and
+:ref:`Table3 <table2_3>`.
+
+In the OPNFV Brahmaputra release, generic test cases covering aspects of the listed
+metrics are available; further OPNFV releases will provide extended testing of
+these metrics.
+The mapping between the available Yardstick test cases and the ETSI
+definitions in :ref:`Table1 <table2_1>`, :ref:`Table2 <table2_2>` and
+:ref:`Table3 <table2_3>` is shown in :ref:`Table4 <table2_4>`.
+Note that the Yardstick test cases are examples: the test duration and number
+of iterations are configurable, as are the System Under Test (SUT) and the
+attributes (or, in Yardstick nomenclature, the scenario options).
+
+.. _table2_1:
+
+**Table 1 - Performance/Speed Metrics**
+
++---------+-------------------------------------------------------------------+
+| Category| Performance/Speed |
+| | |
++---------+-------------------------------------------------------------------+
+| Compute | * Latency for random memory access |
+| | * Latency for cache read/write operations |
+| | * Processing speed (instructions per second) |
+| | * Throughput for random memory access (bytes per second) |
+| | |
++---------+-------------------------------------------------------------------+
+| Network | * Throughput per NFVI node (frames/byte per second) |
+| | * Throughput provided to a VM (frames/byte per second) |
+| | * Latency per traffic flow |
+| | * Latency between VMs |
+| | * Latency between NFVI nodes |
+| | * Packet delay variation (jitter) between VMs |
+| | * Packet delay variation (jitter) between NFVI nodes |
+| | |
++---------+-------------------------------------------------------------------+
+| Storage | * Sequential read/write IOPS |
+| | * Random read/write IOPS |
+| | * Latency for storage read/write operations |
+| | * Throughput for storage read/write operations |
+| | |
++---------+-------------------------------------------------------------------+
+
+.. _table2_2:
+
+**Table 2 - Capacity/Scale Metrics**
+
++---------+-------------------------------------------------------------------+
+| Category| Capacity/Scale |
+| | |
++---------+-------------------------------------------------------------------+
+| Compute | * Number of cores and threads; available memory size |
+| | * Cache size |
+| | * Processor utilization (max, average, standard deviation) |
+| | * Memory utilization (max, average, standard deviation) |
+| | * Cache utilization (max, average, standard deviation) |
+| | |
++---------+-------------------------------------------------------------------+
+| Network | * Number of connections |
+| | * Number of frames sent/received |
+| | * Maximum throughput between VMs (frames/byte per second) |
+| | * Maximum throughput between NFVI nodes (frames/byte per second) |
+| | * Network utilization (max, average, standard deviation) |
+| | * Number of traffic flows |
+| | |
++---------+-------------------------------------------------------------------+
+| Storage | * Storage/Disk size |
+| | * Capacity allocation (block-based, object-based) |
+| | * Block size |
+| | * Maximum sequential read/write IOPS |
+| | * Maximum random read/write IOPS |
+| | * Disk utilization (max, average, standard deviation) |
+| | |
++---------+-------------------------------------------------------------------+
+
+.. _table2_3:
+
+**Table 3 - Availability/Reliability Metrics**
+
++---------+-------------------------------------------------------------------+
+| Category| Availability/Reliability |
+| | |
++---------+-------------------------------------------------------------------+
+| Compute | * Processor availability (Error free processing time) |
+| | * Memory availability (Error free memory time) |
+| | * Processor mean-time-to-failure |
+| | * Memory mean-time-to-failure |
+| | * Number of processing faults per second |
+| | |
++---------+-------------------------------------------------------------------+
+| Network | * NIC availability (Error free connection time) |
+| | * Link availability (Error free transmission time) |
+| | * NIC mean-time-to-failure |
+| | * Network timeout duration due to link failure |
+| | * Frame loss rate |
+| | |
++---------+-------------------------------------------------------------------+
+| Storage | * Disk availability (Error free disk access time) |
+| | * Disk mean-time-to-failure |
+| | * Number of failed storage read/write operations per second |
+| | |
++---------+-------------------------------------------------------------------+
+
+.. _table2_4:
+
+**Table 4 - Yardstick Generic Test Cases**
+
++---------+-------------------+----------------+------------------------------+
+| Category| Performance/Speed | Capacity/Scale | Availability/Reliability |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+| Compute | TC003 | TC003 | TC013 [1]_ |
+| | TC004 | TC004 | TC015 [1]_ |
+| | TC014 | TC010 | |
+| | TC024 | TC012 | |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+| Network | TC002 | TC001 | TC016 [1]_ |
+| | TC011 | TC008 | TC018 [1]_ |
+| | | TC009 | |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+| Storage | TC005 | TC005 | TC017 [1]_ |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+
+.. note:: The description in this OPNFV document is intended as a reference for
+ users to understand the scope of the Yardstick Project and the
+ deliverables of the Yardstick framework. For a complete description of
+ the methodology, refer to the ETSI document.
+
+.. rubric:: Footnotes
+.. [1] To be included in future deliveries.
diff --git a/docs/configguide/yardstick_testcases/03-list-of-tcs.rst b/docs/configguide/yardstick_testcases/03-list-of-tcs.rst
new file mode 100644
index 000000000..f72d80b75
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/03-list-of-tcs.rst
@@ -0,0 +1,72 @@
+====================
+Yardstick Test Cases
+====================
+
+Abstract
+========
+
+This chapter lists available Yardstick test cases.
+Yardstick test cases are divided into two main categories:
+
+* *Generic NFVI Test Cases* - Test Cases developed to realize the methodology
+  described in :doc:`02-methodology`
+
+* *OPNFV Feature Test Cases* - Test Cases developed to verify one or more
+  aspects of a feature delivered by an OPNFV Project, including the test cases
+  developed for the :term:`VTC`.
+
+Generic NFVI Test Case Descriptions
+===================================
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc001.rst
+ opnfv_yardstick_tc002.rst
+ opnfv_yardstick_tc005.rst
+ opnfv_yardstick_tc008.rst
+ opnfv_yardstick_tc009.rst
+ opnfv_yardstick_tc010.rst
+ opnfv_yardstick_tc012.rst
+ opnfv_yardstick_tc014.rst
+ opnfv_yardstick_tc037.rst
+ opnfv_yardstick_tc038.rst
+
+OPNFV Feature Test Cases
+========================
+
+IPv6
+----
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc027.rst
+
+Parser
+------
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc040.rst
+
+virtual Traffic Classifier
+--------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc006.rst
+ opnfv_yardstick_tc007.rst
+ opnfv_yardstick_tc020.rst
+ opnfv_yardstick_tc021.rst
+
+Templates
+=========
+
+.. toctree::
+ :maxdepth: 1
+
+ testcase_description_v2_template
+ Yardstick_task_templates
diff --git a/docs/configguide/yardstick_testcases/04-vtc-overview.rst b/docs/configguide/yardstick_testcases/04-vtc-overview.rst
new file mode 100644
index 000000000..95159a9bc
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/04-vtc-overview.rst
@@ -0,0 +1,114 @@
+==========================
+Virtual Traffic Classifier
+==========================
+
+Abstract
+========
+
+.. _TNOVA: http://www.t-nova.eu/
+.. _TNOVAresults: http://www.t-nova.eu/results/
+.. _Yardstick: https://wiki.opnfv.org/yardstick
+
+This chapter provides an overview of the virtual Traffic Classifier, a
+contribution to OPNFV Yardstick_ from the EU Project TNOVA_.
+Additional documentation is available in TNOVAresults_.
+
+Overview
+========
+
+The virtual Traffic Classifier :term:`VNF`, the :term:`VTC`, comprises a
+:term:`VNFC`. The :term:`VNFC` contains both the Traffic Inspection module and
+the Traffic Forwarding module needed to run the VNF. The exploitation of
+:term:`DPI` methods for traffic classification is built around two basic
+assumptions:
+
+* third parties unaffiliated with either source or recipient are able to
+  inspect each IP packet’s payload
+
+* the classifier knows the relevant syntax of each application’s packet
+  payloads (protocol signatures, data patterns, etc.).
+
+The proposed :term:`DPI` based approach uses only an indicative, small number
+of the initial packets from each flow to identify the content, rather than
+inspecting every packet.
+
+In this respect it follows the :term:`PBFS`. This method uses a table to track
+each session based on the 5-tuple (src address, dest address, src port, dest
+port, transport protocol) that is maintained for each flow.
+
+Concepts
+========
+
+* *Traffic Inspection*: The process of packet analysis and application
+  identification of network traffic that passes through the :term:`VTC`.
+
+* *Traffic Forwarding*: The process of packet forwarding from an incoming
+  network interface to a pre-defined outgoing network interface.
+
+* *Traffic Rule Application*: The process of packet tagging, based on a
+  predefined set of rules. Packet tagging may include e.g. :term:`ToS` field
+  modification.
+
+Architecture
+============
+
+The Traffic Inspection module is the most computationally intensive component
+of the :term:`VNF`. It implements filtering and packet matching algorithms in
+order to support the enhanced traffic forwarding capability of the :term:`VNF`.
+The component supports a flow table (exploiting hashing algorithms for fast
+indexing of flows) and an inspection engine for traffic classification.
+
+The implementation used for these experiments exploits the nDPI library.
+The packet capturing mechanism is implemented using libpcap. When the
+:term:`DPI` engine identifies a new flow, the flow register is updated with the
+appropriate information and transmitted to the Traffic Forwarding module,
+which then applies any required policy updates.
+
+The Traffic Forwarding module is responsible for routing and packet forwarding.
+It accepts incoming network traffic, consults the flow table for classification
+information for each incoming flow and then applies pre-defined policies
+marking e.g. :term:`ToS`/:term:`DSCP` multimedia traffic for :term:`QoS`
+enablement on the forwarded traffic.
+It is assumed that the traffic is forwarded using the default policy until it
+is identified and new policies are enforced.
+
+The expected response delay is considered to be negligible, as only a small
+number of packets are required to identify each flow.
+
+Graphical Overview
+==================
+
+.. code-block:: console
+
+ +----------------------------+
+ | |
+ | Virtual Traffic Classifier |
+ | |
+ | Analysing/Forwarding |
+ | ------------> |
+ | ethA ethB |
+ | |
+ +----------------------------+
+ | ^
+ | |
+ v |
+ +----------------------------+
+ | |
+ | Virtual Switch |
+ | |
+ +----------------------------+
+
+Install
+=======
+
+Run the build.sh script with root privileges.
+
+Run
+===
+
+::
+
+   sudo ./pfbridge -a eth1 -b eth2
+
+Development Environment
+=======================
+
+Ubuntu 14.04
diff --git a/docs/configguide/yardstick_testcases/Yardstick_task_templates.rst b/docs/configguide/yardstick_testcases/Yardstick_task_templates.rst
new file mode 100755
index 000000000..d2c2b7ec9
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/Yardstick_task_templates.rst
@@ -0,0 +1,155 @@
+Task Template Syntax
+====================
+
+Basic template syntax
+---------------------
+A nice feature of the input task format used in Yardstick is that it supports
+the template syntax based on Jinja2.
+This turns out to be extremely useful when, say, you have a fixed structure of
+your task but you want to parameterize this task in some way.
+For example, imagine your input task file (task.yaml) runs a set of Ping
+scenarios:
+
+::
+
+ # Sample benchmark task config file
+ # measure network latency using ping
+ schema: "yardstick:task:0.1"
+
+ scenarios:
+ -
+ type: Ping
+ options:
+ packetsize: 200
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+
+ sla:
+ max_rtt: 10
+ action: monitor
+
+ context:
+ ...
+
+Let's say you want to run the same set of scenarios with the same runner/
+context/sla, but you want to try another packetsize to compare the performance.
+The most elegant solution is then to turn the packetsize name into a template
+variable:
+
+::
+
+ # Sample benchmark task config file
+ # measure network latency using ping
+
+ schema: "yardstick:task:0.1"
+ scenarios:
+ -
+ type: Ping
+ options:
+ packetsize: {{packetsize}}
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+
+ sla:
+ max_rtt: 10
+ action: monitor
+
+ context:
+ ...
+
+and then pass the argument value for {{packetsize}} when starting a task with
+this configuration file.
+Yardstick provides you with different ways to do that:
+
+1. Pass the argument values directly in the command-line interface (with
+   either a JSON or YAML dictionary):
+
+::
+
+ yardstick task start samples/ping-template.yaml \
+ --task-args '{"packetsize":"200"}'
+
+2. Refer to a file that specifies the argument values (JSON/YAML):
+
+::
+
+ yardstick task start samples/ping-template.yaml --task-args-file args.yaml
+
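+A minimal args.yaml for this example only needs to define the template
+variables used in the task file; the content below is illustrative:
+
+::
+
+  # args.yaml - argument values for samples/ping-template.yaml
+  packetsize: "200"
+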
+Using the default values
+------------------------
+Note that the Jinja2 template syntax allows you to set the default values for
+your parameters.
+With default values set, your task file will work even if you don't
+parameterize it explicitly while starting a task.
+The default values should be set using the {% set ... %} clause (task.yaml).
+For example:
+
+::
+
+ # Sample benchmark task config file
+ # measure network latency using ping
+ schema: "yardstick:task:0.1"
+ {% set packetsize = packetsize or "100" %}
+ scenarios:
+ -
+ type: Ping
+ options:
+ packetsize: {{packetsize}}
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+ ...
+
+If you don't pass the value for {{packetsize}} while starting a task, the
+default one will be used.
+
+Advanced templates
+------------------
+
+Yardstick makes it possible to use all the power of Jinja2 template syntax,
+including the mechanism of built-in functions.
+As an example, let us make up a task file that will do a block storage
+performance test.
+The input task file (fio-template.yaml) below uses the Jinja2 for-endfor
+construct to accomplish that:
+
+::
+
+ #Test block sizes of 4KB, 8KB, 64KB, 1MB
+ #Test 5 workloads: read, write, randwrite, randread, rw
+ schema: "yardstick:task:0.1"
+
+ scenarios:
+ {% for bs in ['4k', '8k', '64k', '1024k' ] %}
+ {% for rw in ['read', 'write', 'randwrite', 'randread', 'rw' ] %}
+ -
+ type: Fio
+ options:
+ filename: /home/ec2-user/data.raw
+ bs: {{bs}}
+ rw: {{rw}}
+ ramp_time: 10
+ host: fio.demo
+ runner:
+ type: Duration
+ duration: 60
+ interval: 60
+
+ {% endfor %}
+ {% endfor %}
+ context:
+ ...
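+
+The template above expands to 20 Fio scenarios (4 block sizes x 5 workload
+types). It is started like any other templated task, for example:
+
+::
+
+  yardstick task start samples/fio-template.yaml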
diff --git a/docs/configguide/yardstick_testcases/glossary.rst b/docs/configguide/yardstick_testcases/glossary.rst
new file mode 100644
index 000000000..8ce9a6ba0
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/glossary.rst
@@ -0,0 +1,33 @@
+==================
+Yardstick Glossary
+==================
+
+.. glossary::
+ :sorted:
+
+ DPI
+ Deep Packet Inspection
+
+ DSCP
+ Differentiated Services Code Point
+
+ PBFS
+ Packet Based per Flow State
+
+ QoS
+ Quality of Service
+
+ VNF
+ Virtual Network Function
+
+ VNFC
+ Virtual Network Function Component
+
+ NFVI
+ Network Function Virtualization Infrastructure
+
+ ToS
+ Type of Service
+
+ VTC
+ Virtual Traffic Classifier
diff --git a/docs/configguide/yardstick_testcases/index.rst b/docs/configguide/yardstick_testcases/index.rst
new file mode 100644
index 000000000..55d4ea3e1
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/index.rst
@@ -0,0 +1,12 @@
+==================
+Yardstick Overview
+==================
+
+.. toctree::
+ :maxdepth: 2
+
+ 01-introduction
+ 02-methodology
+ 04-vtc-overview
+ 03-list-of-tcs
+ glossary
diff --git a/docs/yardstick/opnfv_yardstick_tc001.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc001.rst
index 16c9d2c60..810bad489 100644
--- a/docs/yardstick/opnfv_yardstick_tc001.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc001.rst
@@ -1,12 +1,18 @@
*************************************
Yardstick Test Case Description TC001
*************************************
+
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
+-----------------------------------------------------------------------------+
|Network Performance |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC001_NW PERF |
+| | |
+--------------+--------------------------------------------------------------+
|metric | Number of flows and throughput |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | To evaluate the IaaS network performance with regards to |
| | flows and throughput, such as if and how different amounts |
@@ -19,6 +25,7 @@ Yardstick Test Case Description TC001
| | graphs and similar shall be stored for comparison reasons and|
| | product evolution understanding between different OPNFV |
| | versions and/or configurations. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc001.yaml |
| | |
@@ -28,6 +35,7 @@ Yardstick Test Case Description TC001
| | twice. The client and server are distributed on different HW.|
| | For SLA max_ppm is set to 1000. The amount of configured |
| | ports map to between 110 up to 1001000 flows, respectively. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | pktgen |
| | |
@@ -36,30 +44,36 @@ Yardstick Test Case Description TC001
| | image. |
| | As an example see the /yardstick/tools/ directory for how |
| | to generate a Linux image with pktgen included.) |
+| | |
+--------------+--------------------------------------------------------------+
-|references |https://www.kernel.org/doc/Documentation/networking/pktgen.txt|
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
-| |ETSI-NFV-TST001 |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different packet sizes, amount |
| | of flows and test duration. Default values exist. |
| | |
-| |SLA (optional): |
-| | max_ppm: The number of packets per million packets sent |
-| | that are acceptable to lose, i.e. not received. |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received. |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with pktgen included in it. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The hosts are installed, as | Logs are stored |
-| | | server and client. pktgen is | |
-| | | invoked and logs are produced | |
-| | | and stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/yardstick/opnfv_yardstick_tc002.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc002.rst
index bc795bf38..56350f5bb 100644
--- a/docs/yardstick/opnfv_yardstick_tc002.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc002.rst
@@ -2,12 +2,17 @@
Yardstick Test Case Description TC002
*************************************
+.. _cirros-image: https://download.cirros-cloud.net
+
+-----------------------------------------------------------------------------+
|Network Latency |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC002_NW LATENCY |
+| | |
+--------------+--------------------------------------------------------------+
|metric | RTT, Round Trip Time |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | To do a basic verification that network latency is within |
| | acceptable boundaries when packets travel between hosts |
@@ -16,11 +21,13 @@ Yardstick Test Case Description TC002
| | graphs and similar shall be stored for comparison reasons and|
| | product evolution understanding between different OPNFV |
| | versions and/or configurations. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc002.yaml |
| | |
| | Packet size 100 bytes. Total test duration 600 seconds. |
| | One ping each 10 seconds. SLA RTT is set to maximum 10 ms. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | ping |
| | |
@@ -28,11 +35,13 @@ Yardstick Test Case Description TC002
| | doesn't need to be installed. It is also part of the |
| | Yardstick Docker image. |
| | (For example also a Cirros image can be downloaded from |
-| | https://download.cirros-cloud.net, it includes ping) |
+| | cirros-image_, it includes ping) |
+| | |
+--------------+--------------------------------------------------------------+
|references | Ping man page |
| | |
| | ETSI-NFV-TST001 |
+| | |
+--------------+--------------------------------------------------------------+
|applicability | Test case can be configured with different packet sizes, |
| | burst sizes, ping intervals and test duration. |
@@ -46,20 +55,24 @@ Yardstick Test Case Description TC002
| | than this. Some may suffer bad also close to this RTT, while |
| | others may not suffer at all. It is a compromise that may |
| | have to be tuned for different configuration purposes. |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with ping included in it. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The hosts are installed, as | Logs are stored |
-| | | server and client. Ping is | |
-| | | invoked and logs are produced | |
-| | | and stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. Ping is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test should not PASS if any RTT is above the optional SLA |
| | value, or if there is a test case execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
-
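+
+For illustration, each measurement configured above roughly corresponds to a
+standard ping invocation such as the one below; the actual command is
+generated by the Yardstick ping scenario:
+
+::
+
+  ping -c 1 -s 100 <target IP>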
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst
new file mode 100644
index 000000000..8b7474696
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst
@@ -0,0 +1,72 @@
+*************************************
+Yardstick Test Case Description TC005
+*************************************
+
+.. _fio: http://www.bluestop.org/fio/HOWTO.txt
+
++-----------------------------------------------------------------------------+
+|Storage Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC005_Storage Performance |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | IOPS, throughput and latency |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS storage performance with regards to |
+| | IOPS, throughput and latency. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons |
+| | and product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc005.yaml |
+| | |
+| | IO types: read, write, randwrite, randread, rw |
+| | IO block size: 4KB, 64KB, 1024KB, where each |
+| | runs for 30 seconds (10 for ramp time, 20 for runtime). |
+| | |
+| | For SLA minimum read/write iops is set to 100, minimum |
+| | read/write throughput is set to 400 KB/s, and maximum |
+| | read/write latency is set to 20000 usec. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | fio |
+| | |
+| | (fio is not always part of a Linux distribution, hence it |
+| | needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with fio included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | fio_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different read/write types, IO |
+| | block size, IO depth, ramp time (runtime required for stable |
+| | results) and test duration. Default values exist. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with fio included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. fio is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
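+
+For illustration, one of the runs configured above roughly corresponds to an
+fio invocation such as the one below (file name and size are placeholders;
+the actual command line is assembled by the Yardstick Fio scenario):
+
+::
+
+  fio --name=tc005 --filename=/tmp/yardstick.raw --size=1G --bs=4k \
+      --rw=read --ramp_time=10 --runtime=20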
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst
new file mode 100644
index 000000000..b68315078
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst
@@ -0,0 +1,139 @@
+*************************************
+Yardstick Test Case Description TC006
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC006_Virtual Traffic Classifier Data Plane |
+| | Throughput Benchmarking Test. |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Throughput |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To measure the throughput supported by the virtual Traffic |
+| | Classifier according to the RFC2544 methodology for a |
+| | user-defined set of vTC deployment configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc006.yaml |
+| | |
+| | packet_size: size of the packets to be used during the |
+| | throughput calculation. |
+| | Allowed values: [64, 128, 256, 512, 1024, 1280, 1518] |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | Default value: None |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input |
+| | Default value: None |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: Neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDK Pktgen: DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | RFC 2544: rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | - If SR-IOV vNICs are used, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The vTC is deployed, according to the user-defined |
+| | configuration |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is correctly deployed and configured as necessary |
+| | The initialization script has been correctly executed and |
+| | vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Test case is executed with the selected parameters: |
+| | - vTC flavor |
+| | - vNIC type |
+| | - packet size |
+| | The traffic is sent to the vTC using the maximum available |
+| | traffic rate for 60 seconds. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The vTC instance forwards all the packets back to the packet |
+| | generator for 60 seconds, as specified by RFC 2544. |
+| | |
+| | Steps 3 and 4 are executed multiple times, with different |
+| | rates, in order to find the maximum supported traffic rate |
+| | according to the current definition of throughput in RFC |
+| | 2544. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The result of the test is a number between 0 and 100 which |
+| | represents the throughput in terms of percentage of the |
+| | available pktgen NIC bandwidth. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst
new file mode 100644
index 000000000..a7a4776d5
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst
@@ -0,0 +1,157 @@
+*************************************
+Yardstick Test Case Description TC007
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC007_Virtual Traffic Classifier Data Plane |
+| | Throughput Benchmarking Test in Presence of Noisy |
+| | neighbours |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Throughput |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To measure the throughput supported by the virtual Traffic |
+| | Classifier according to the RFC2544 methodology for a |
+| | user-defined set of vTC deployment configurations in the |
+| | presence of noisy neighbours. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc007.yaml |
+| | |
+| | packet_size: size of the packets to be used during the |
+| | throughput calculation. |
+| | Allowed values: [64, 128, 256, 512, 1024, 1280, 1518] |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input |
+| | |
+| | num_of_neighbours: Number of noisy neighbours (VMs) to be |
+| | instantiated during the experiment. |
+| | Allowed values: range (1, 10) |
+| | |
+| | amount_of_ram: RAM to be used by each neighbor. |
+| | Allowed values: ['250M', '1G', '2G', '3G', '4G', '5G', |
+| | '6G', '7G', '8G', '9G', '10G'] |
+| | Default value: 256M |
+| | |
+| | number_of_cores: Number of cores to be used by each noisy |
+| | neighbour (VM). |
+| | Allowed values: range (1, 10) |
+| | Default value: 1 |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: Neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The noisy neighbours are deployed as required by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is deployed, according to the configuration required |
+| | by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | The vTC is correctly deployed and configured as necessary. |
+| | The initialization script has been correctly executed and |
+| | the vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | Test case is executed with the parameters specified by the |
+| | user: |
+| | - vTC flavor |
+| | - vNIC type |
+| | - packet size |
+| | The traffic is sent to the vTC using the maximum available |
+| | traffic rate. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 5 | The vTC instance forwards all the packets back to the |
+| | packet generator for 60 seconds, as specified by RFC 2544. |
+| | |
+| | Steps 4 and 5 are executed several times, with different |
+| | traffic rates, in order to find the maximum supported |
+| | traffic rate, according to the definition of throughput |
+| | given in RFC 2544. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The result of the test is a number between 0 and 100 which |
+| | represents the throughput in terms of percentage of the |
+| | available pktgen NIC bandwidth. |
+| | |
++--------------+--------------------------------------------------------------+
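+
+The deployment parameters above map onto the scenario options in
+opnfv_yardstick_tc007.yaml. The following is a minimal, hypothetical
+sketch of such a configuration; the scenario type name and exact key
+spellings are assumptions, not verbatim file content::
+
+  # hypothetical excerpt -- key names are illustrative only
+  scenarios:
+  -
+    type: vtc_throughput_noisy   # assumed scenario type name
+    options:
+      packet_size: 1280          # one of [64 ... 1518]
+      vnic_type: direct          # SR-IOV port configuration
+      vtc_flavor: m1.medium
+      num_of_neighbours: 2       # noisy neighbour VMs
+      amount_of_ram: 1G          # RAM per neighbour
+      number_of_cores: 1         # cores per neighbour
+      vlan_sender: 1000          # VLAN Network 1 tag
+      vlan_receiver: 1001        # VLAN Network 2 tag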
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst
new file mode 100644
index 000000000..e176e633a
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst
@@ -0,0 +1,85 @@
+*************************************
+Yardstick Test Case Description TC008
+*************************************
+
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
++-----------------------------------------------------------------------------+
+|Packet Loss Extended Test |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC008_NW PERF, Packet loss Extended Test |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Number of flows, packet size and throughput |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS network performance with regards to |
+| | flows and throughput, such as if and how different amounts |
+| | of packet sizes and flows matter for the throughput between |
+| | VMs on different compute blades. Typically e.g. the |
+| | performance of a vSwitch |
+| | depends on the number of flows running through it. Also |
+| | performance of other equipment or entities can depend |
+| | on the number of flows or the packet sizes used. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc008.yaml |
+| | |
+| | Packet size: 64, 128, 256, 512, 1024, 1280 and 1518 bytes. |
+| | |
+| | Number of ports: 1, 10, 50, 100, 500 and 1000. The amounts |
+| | of configured ports map to between 2 and 1001000 flows, |
+| | respectively. Each packet_size/port_amount combination is run|
+| | ten times, for 20 seconds each. Then the next |
+| | packet_size/port_amount combination is run, and so on. |
+| | |
+| | The client and server are distributed on different HW. |
+| | |
+| | For SLA max_ppm is set to 1000. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | pktgen |
+| | |
+| | (Pktgen is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Docker |
+| | image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different packet sizes, amount |
+| | of flows and test duration. Default values exist. |
+| | |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with pktgen included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
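+
+As an illustration, the packet_size/port_amount sweep described above
+could be expressed in opnfv_yardstick_tc008.yaml roughly as follows.
+This is a hedged sketch only; the key names and runner type follow the
+general shape of a Yardstick task file and are assumptions::
+
+  # hypothetical excerpt -- not verbatim file content
+  scenarios:
+  -
+    type: Pktgen
+    options:
+      packetsize: 64        # repeated for 128 ... 1518 bytes
+      number_of_ports: 10   # repeated for 1, 50, 100, 500, 1000
+      duration: 20          # seconds per combination
+    runner:
+      type: Iteration
+      iterations: 10        # each combination is run ten times
+    sla:
+      max_ppm: 1000         # tolerated lost packets per million sent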
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst
new file mode 100644
index 000000000..e4002a884
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst
@@ -0,0 +1,84 @@
+*************************************
+Yardstick Test Case Description TC009
+*************************************
+
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
++-----------------------------------------------------------------------------+
+|Packet Loss |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC009_NW PERF, Packet loss |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Number of flows, packets lost and throughput |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS network performance with regards to |
+| | flows and throughput, such as if and how different amounts |
+| | of flows matter for the throughput between VMs on different |
+| | compute blades. |
+| | Typically e.g. the performance of a vSwitch |
+| | depends on the number of flows running through it. Also |
+| | performance of other equipment or entities can depend |
+| | on the number of flows or the packet sizes used. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc009.yaml |
+| | |
+| | Packet size: 64 bytes |
+| | |
+| | Number of ports: 1, 10, 50, 100, 500 and 1000. The amounts |
+| | of configured ports map to between 2 and 1001000 flows, |
+| | respectively. Each port amount is run ten times, for 20 |
+| | seconds each. Then the next port_amount is run, and so on. |
+| | |
+| | The client and server are distributed on different HW. |
+| | |
+| | For SLA max_ppm is set to 1000. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | pktgen |
+| | |
+| | (Pktgen is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Docker |
+| | image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different packet sizes, amount |
+| | of flows and test duration. Default values exist. |
+| | |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with pktgen included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
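+
+TC009 uses the same Pktgen scenario shape as TC008; only the sweep
+differs, with the packet size pinned at 64 bytes. A hypothetical
+fragment (key names are assumptions, not verbatim file content)::
+
+  # hypothetical excerpt -- only the options that differ from TC008
+  options:
+    packetsize: 64        # fixed for all runs
+    number_of_ports: 10   # repeated for 1, 50, 100, 500, 1000
+    duration: 20          # seconds per port amount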
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst
new file mode 100644
index 000000000..ebb74ea30
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst
@@ -0,0 +1,77 @@
+*************************************
+Yardstick Test Case Description TC010
+*************************************
+
+.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
+
++-----------------------------------------------------------------------------+
+|Memory Latency |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC010_Memory Latency |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Latency in nanoseconds |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | Measure the memory read latency for varying memory sizes and |
+| | strides. The whole memory hierarchy is measured, including |
+| | all levels of cache. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | File: opnfv_yardstick_tc010.yaml |
+| | |
+| | * SLA (max_latency): 30 nanoseconds |
+| | * Stride: 128 bytes |
+| | * Stop size: 64 megabytes |
+| | * Iterations: 10 - test is run 10 times iteratively. |
+| | * Interval: 1 - there is 1 second delay between each |
+| | iteration. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Lmbench |
+| | |
+| | Lmbench is a suite of operating system microbenchmarks. This |
+| | test uses lat_mem_rd tool from that suite. |
+| | Lmbench is not always part of a Linux distribution, hence it |
+| | needs to be installed in the test image. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | man-pages_ |
+| | |
+| | McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
+| | for Performance Analysis." USENIX annual technical |
+| | conference. 1996. |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different: |
+| | |
+| | * strides; |
+| | * stop_size; |
+| | * iterations and intervals. |
+| | |
+| | There are default values for each above-mentioned option. |
+| | |
+| | SLA (optional): max_latency: The maximum memory latency |
+| | that is accepted. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with Lmbench included in the image. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The host is installed as client. Lmbench's lat_mem_rd tool |
+| | is invoked and logs are produced and stored. |
+| | |
+| | Result: logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Test fails if the measured memory latency is above the SLA |
+| | value or if there is a test case execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
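+
+A minimal, hypothetical sketch of the configuration listed above, in
+the shape of a Yardstick scenario definition (the scenario type and
+option names are assumptions, not verbatim file content)::
+
+  # hypothetical excerpt -- key names are illustrative only
+  scenarios:
+  -
+    type: Lmbench
+    options:
+      test_type: "latency"  # assumed switch selecting lat_mem_rd
+      stride: 128           # bytes
+      stop_size: 64         # megabytes
+    runner:
+      type: Iteration
+      iterations: 10
+      interval: 1           # seconds between iterations
+    sla:
+      max_latency: 30       # nanoseconds
+      action: monitor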
diff --git a/docs/yardstick/opnfv_yardstick_tc012.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc012.rst
index b5768c0c5..e7889c14e 100644
--- a/docs/yardstick/opnfv_yardstick_tc012.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc012.rst
@@ -2,21 +2,26 @@
Yardstick Test Case Description TC012
*************************************
+.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html
+
+-----------------------------------------------------------------------------+
|Memory Bandwidth |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC012_Memory Bandwidth |
+| | |
+--------------+--------------------------------------------------------------+
|metric | Megabyte per second (MBps) |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | Measure the rate at which data can be read from and written |
| | to the memory (this includes all levels of memory). |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc012.yaml |
| | |
-| | * SLA (optional): 15000 (MBps) |
-| | min_bw: The minimum amount of memory bandwidth that is |
-| | accepted. |
+| | * SLA (optional): 15000 (MBps) min_bw: The minimum amount of |
+| | memory bandwidth that is accepted. |
| | * Size: 10 240 kB - test allocates twice that size (20 480kB)|
| | zeros it and then measures the time it takes to copy from |
| | one side to another. |
@@ -27,42 +32,50 @@ Yardstick Test Case Description TC012
| | * Iterations: 10 - test is run 10 times iteratively. |
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | Lmbench |
| | |
| | Lmbench is a suite of operating system microbenchmarks. This |
| | test uses bw_mem tool from that suite. |
| | Lmbench is not always part of a Linux distribution, hence it |
-| | needs to be installed in the test image |
-| | (See :ref:`guest-image` for how to generate a Linux image |
-| | for Glance with Lmbench included). |
+| | needs to be installed in the test image. |
+| | |
+--------------+--------------------------------------------------------------+
-|references | * http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html |
+|references | man-pages_ |
+| | |
+| | McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
+| | for Performance Analysis." USENIX annual technical |
+| | conference. 1996. |
| | |
-| | * McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools|
-| | for Performance Analysis." |
-| | * USENIX annual technical conference. 1996. |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different |
-| | * memory sizes; |
-| | * memory operations (such as rd, wr, rdwr, cp, frd, fwr, |
-| | fcp, bzero, bcopy); |
-| | * number of warmup iterations; |
-| | * iterations and intervals. |
+|applicability | Test can be configured with different: |
+| | |
+| | * memory sizes; |
+| | * memory operations (such as rd, wr, rdwr, cp, frd, fwr, |
+| | fcp, bzero, bcopy); |
+| | * number of warmup iterations; |
+| | * iterations and intervals. |
| | |
| | There are default values for each above-mentioned option. |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with Lmbench included in the image. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The host is installed as client. | Logs are stored |
-| | | Lmbench's bw_mem tool is invoked | |
-| | | and logs are produced and stored.| |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The host is installed as client. Lmbench's bw_mem tool is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory bandwidth is below the SLA |
| | value or if there is a test case execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
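+
+For orientation, the options above could appear in
+opnfv_yardstick_tc012.yaml roughly as follows. This is a hedged
+sketch; the option names are assumptions, not verbatim file content::
+
+  # hypothetical excerpt -- key names are illustrative only
+  scenarios:
+  -
+    type: Lmbench
+    options:
+      test_type: "bandwidth"  # assumed switch selecting bw_mem
+      size: 10240             # kB; twice this is allocated and zeroed
+      benchmark: "rdwr"       # one of rd, wr, rdwr, cp, frd, fwr, ...
+      warmup: 0               # warmup iterations
+    runner:
+      type: Iteration
+      iterations: 10
+      interval: 1             # seconds between iterations
+    sla:
+      min_bandwidth: 15000    # MBps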
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst
new file mode 100644
index 000000000..68d36ecd2
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst
@@ -0,0 +1,69 @@
+*************************************
+Yardstick Test Case Description TC014
+*************************************
+
+.. _unixbench: https://github.com/kdlucas/byte-unixbench/blob/master/UnixBench
+
++-----------------------------------------------------------------------------+
+|Processing speed |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC014_Processing speed |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | score of single cpu running, score of parallel running |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS processing speed with regards to the |
+| | single CPU running and parallel running scores. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons |
+| | and product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc014.yaml |
+| | |
+| | run_mode: Run unixbench in quiet mode or verbose mode |
+| | test_type: dhry2reg, whetstone and so on |
+| | |
+| | Both the single_score and parallel_score SLAs can be set |
+| | by the user; the default is N/A. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | unixbench |
+| | |
+| | (unixbench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with unixbench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | unixbench_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different test types, dhry2reg, |
+| | whetstone and so on. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with unixbench included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The host is installed as a client. unixbench is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
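+
+A hypothetical sketch of opnfv_yardstick_tc014.yaml based on the
+options above (the scenario type and key spellings are assumptions,
+not verbatim file content)::
+
+  # hypothetical excerpt -- key names are illustrative only
+  scenarios:
+  -
+    type: UnixBench
+    options:
+      run_mode: "verbose"     # or "quiet"
+      test_type: "dhry2reg"   # or whetstone, and so on
+    sla:
+      single_score: "100"     # optional; default N/A
+      parallel_score: "500"   # optional; default N/A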
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst
new file mode 100644
index 000000000..9a5130f71
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst
@@ -0,0 +1,136 @@
+*************************************
+Yardstick Test Case Description TC020
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC020_Virtual Traffic Classifier |
+| | Instantiation Test |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Failure |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To verify that a newly instantiated vTC is 'alive' and |
+| | functional and its instantiation is correctly supported by |
+| | the infrastructure. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc020.yaml |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | Default value: None |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input. |
+| | Default value: None |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: Neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | Installation and configuration of smcroute is required |
+| | before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The vTC is deployed, according to the configuration provided |
+| | by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is correctly deployed and configured as necessary. |
+| | The initialization script has been correctly executed and |
+| | the vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Test case is executed with the parameters specified by the |
+| | user: |
+| | - vTC flavor |
+| | - vNIC type |
+| | Constant rate traffic is sent to the vTC for 10 seconds. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The vTC instance tags all the packets and sends them back to |
+| | the packet generator for 10 seconds. |
+| | |
+| | The framework checks that the packet generator receives |
+| | back all the packets with the correct tag from the vTC. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The vTC is deemed to be successfully instantiated if all |
+| | packets are sent back with the right tag as requested, |
+| | else it is deemed DoA (Dead on arrival). |
+| | |
++--------------+--------------------------------------------------------------+
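+
+The parameters above translate into a scenario definition along the
+following lines. This is a hedged sketch; the scenario type name and
+the example values are assumptions, while the option names follow the
+list above::
+
+  # hypothetical excerpt -- values are examples only
+  scenarios:
+  -
+    type: vtc_instantiation_validation   # assumed scenario type name
+    options:
+      vnic_type: normal          # default OvS port configuration
+      vtc_flavor: m1.small
+      vlan_sender: 1000          # VLAN Network 1 tag
+      vlan_receiver: 1001        # VLAN Network 2 tag
+      default_net_name: private  # example Neutron network name
+      default_subnet_name: private_subnet
+      vlan_net_1_name: vlan_net_1
+      vlan_subnet_1_name: vlan_subnet_1
+      vlan_net_2_name: vlan_net_2
+      vlan_subnet_2_name: vlan_subnet_2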
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst
new file mode 100644
index 000000000..a493ddfc0
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst
@@ -0,0 +1,152 @@
+*************************************
+Yardstick Test Case Description TC021
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC021_Virtual Traffic Classifier |
+| | Instantiation Test in Presence of Noisy Neighbours |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Failure |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To verify that a newly instantiated vTC is 'alive' and |
+| | functional and its instantiation is correctly supported by |
+| | the infrastructure in the presence of noisy neighbours. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc021.yaml |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | Default value: None |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input. |
+| | Default value: None |
+| | |
+| | num_of_neighbours: Number of noisy neighbours (VMs) to be |
+| | instantiated during the experiment. |
+| | Allowed values: range (1, 10) |
+| | |
+| | amount_of_ram: RAM to be used by each neighbor. |
+| | Allowed values: ['250M', '1G', '2G', '3G', '4G', '5G', |
+| | '6G', '7G', '8G', '9G', '10G'] |
+| | Default value: 256M |
+| | |
+| | number_of_cores: Number of cores allocated to each noisy |
+| | neighbour (VM) during the experiment. |
+| | Allowed values: range (1, 10) |
+| | Default value: 1 |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: Neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | Installation and configuration of smcroute is required |
+| | before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The noisy neighbours are deployed as required by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is deployed, according to the configuration provided |
+| | by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | The vTC is correctly deployed and configured as necessary. |
+| | The initialization script has been correctly executed and |
+| | the vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | Test case is executed with the selected parameters: |
+| | - vTC flavor |
+| | - vNIC type |
+| | Constant rate traffic is sent to the vTC for 10 seconds. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 5 | The vTC instance tags all the packets and sends them back to |
+| | the packet generator for 10 seconds. |
+| | |
+| | The framework checks if the packet generator receives back |
+| | all the packets with the correct tag from the vTC. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The vTC is deemed to be successfully instantiated if all |
+| | packets are sent back with the right tag as requested, |
+| | else it is deemed DoA (Dead on arrival). |
+| | |
++--------------+--------------------------------------------------------------+
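+
+TC021 extends the TC020 scenario with the noisy neighbour knobs. A
+hypothetical fragment showing only the additional options (key names
+are assumptions, values are examples)::
+
+  # hypothetical excerpt -- options added on top of the TC020 sketch
+  options:
+    num_of_neighbours: 2   # noisy neighbour VMs
+    amount_of_ram: 1G      # RAM per neighbour
+    number_of_cores: 1     # cores per neighbour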
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst
new file mode 100644
index 000000000..56c8227df
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst
@@ -0,0 +1,67 @@
+*************************************
+Yardstick Test Case Description TC027
+*************************************
+
+.. _ipv6: https://wiki.opnfv.org/ipv6_opnfv_project
+
++-----------------------------------------------------------------------------+
+|IPv6 connectivity between nodes on the tenant network |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC027_IPv6 connectivity |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | RTT, Round Trip Time |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To do a basic verification that IPv6 connectivity is within |
+| | acceptable boundaries when IPv6 packets travel between hosts |
+| | located on the same or different compute blades. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc027.yaml |
+| | |
+| | Packet size 56 bytes. |
+| | SLA RTT is set to maximum 10 ms. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | ping6 |
+| | |
+| | Ping6 is normally part of a Linux distribution, hence it |
+| | doesn't need to be installed. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | ipv6_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | The test case can be configured with different run steps: |
+| | setup, benchmark and teardown can be run independently. |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably lower RTT is expected. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with ping6 included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. Ping6 is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Test should not PASS if any RTT is above the optional SLA |
+| | value, or if there is a test case execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
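+
+A minimal, hypothetical sketch of opnfv_yardstick_tc027.yaml; the
+scenario type and key names are assumptions, not verbatim file
+content::
+
+  # hypothetical excerpt -- key names are illustrative only
+  scenarios:
+  -
+    type: Ping6
+    options:
+      packetsize: 56       # bytes
+    runner:
+      type: Iteration
+      iterations: 1
+    sla:
+      max_rtt: 10          # milliseconds
+      action: monitor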
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst
new file mode 100644
index 000000000..5c91f6bf1
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst
@@ -0,0 +1,99 @@
+*************************************
+Yardstick Test Case Description TC037
+*************************************
+
+.. _cirros: https://download.cirros-cloud.net
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
++-----------------------------------------------------------------------------+
+|Latency, CPU Load, Throughput, Packet Loss |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC037_Latency,CPU Load,Throughput,Packet Loss|
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Number of flows, latency, throughput, CPU load, packet loss |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS network performance with regards to |
+| | flows and throughput, such as if and how different amounts |
+| | of flows matter for the throughput between hosts on different|
+| | compute blades. Typically e.g. the performance of a vSwitch |
+| | depends on the number of flows running through it. Also |
+| | performance of other equipment or entities can depend |
+| | on the number of flows or the packet sizes used. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc037.yaml |
+| | |
+| | Packet size: 64 bytes |
+| | Number of ports: 1, 10, 50, 100, 300, 500, 750 and 1000. |
+| | The amounts of configured ports map to between 2 and |
+| | 1001000 flows, respectively. Each port amount is run two |
+| | times, for 20 seconds each. Then the next port_amount is |
+| | run, and so on. |
+| | During the test CPU load on both client and server, and the |
+| | network latency between the client and server are measured. |
+| | The client and server are distributed on different HW. |
+| | For SLA max_ppm is set to 1000. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | pktgen |
+| | |
+| | (Pktgen is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen included.) |
+| | |
+| | ping |
+| | |
+| | Ping is normally part of any Linux distribution, hence it |
+| | doesn't need to be installed. It is also part of the |
+| | Yardstick Glance image. |
+| | (For example also a cirros_ image can be downloaded, it |
+| | includes ping) |
+| | |
+| | mpstat |
+| | |
+| | (Mpstat is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Ping and Mpstat man pages |
+| | |
+| | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different packet sizes, amount |
+| | of flows and test duration. Default values exist. |
+| | |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with pktgen included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
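+
+A hedged sketch of how the port sweep could be driven in
+opnfv_yardstick_tc037.yaml, using an arithmetic-style runner over the
+port amount. The runner configuration and key names are assumptions,
+not verbatim file content::
+
+  # hypothetical excerpt -- not verbatim file content
+  scenarios:
+  -
+    type: Pktgen
+    options:
+      packetsize: 64
+      number_of_ports: 1    # stepped towards 1000 by the runner
+      duration: 20          # seconds per run
+    runner:
+      type: Arithmetic
+      name: number_of_ports # assumed: the option being swept
+      stop: 1000
+      step: 10
+    sla:
+      max_ppm: 1000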
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst
new file mode 100644
index 000000000..93c2cf3d8
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst
@@ -0,0 +1,99 @@
+*************************************
+Yardstick Test Case Description TC038
+*************************************
+
+.. _cirros: https://download.cirros-cloud.net
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
++-----------------------------------------------------------------------------+
+|Latency, CPU Load, Throughput, Packet Loss (Extended measurements) |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC038_Latency,CPU Load,Throughput,Packet Loss|
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Number of flows, latency, throughput, CPU load, packet loss |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS network performance with regards to |
+| | flows and throughput, such as if and how different amounts |
+| | of flows matter for the throughput between hosts on different|
+| | compute blades. Typically e.g. the performance of a vSwitch |
+| | depends on the number of flows running through it. Also |
+| | performance of other equipment or entities can depend |
+| | on the number of flows or the packet sizes used. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc038.yaml |
+| | |
+| | Packet size: 64 bytes |
+| | Number of ports: 1, 10, 50, 100, 300, 500, 750 and 1000. |
+| | The amounts of configured ports map to between 2 and |
+| | 1001000 flows, respectively. Each port amount is run ten |
+| | times, for 20 seconds each. Then the next port_amount is |
+| | run, and so on. |
+| | During the test CPU load on both client and server, and the |
+| | network latency between the client and server are measured. |
+| | The client and server are distributed on different HW. |
+| | For SLA max_ppm is set to 1000. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | pktgen |
+| | |
+| | (Pktgen is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen included.) |
+| | |
+| | ping |
+| | |
+| | Ping is normally part of any Linux distribution, hence it |
+| | doesn't need to be installed. It is also part of the |
+| | Yardstick Glance image. |
+| | (For example also a cirros_ image can be downloaded, it |
+| | includes ping) |
+| | |
+| | mpstat |
+| | |
+| | (Mpstat is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Ping and Mpstat man pages |
+| | |
+| | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different packet sizes, amount |
+| | of flows and test duration. Default values exist. |
+| | |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with pktgen included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
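+
+TC038 reuses the TC037 scenario; the only substantive difference is
+that each port amount is run ten times instead of two. Expressed as a
+hypothetical fragment (the repetition knob shown here is an assumed
+key name, not verbatim file content)::
+
+  # hypothetical excerpt -- only the repetition count differs
+  runner:
+    type: Arithmetic
+    name: number_of_ports
+    iterations: 10   # assumed knob: ten runs per port amount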
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst
new file mode 100644
index 000000000..044ccf193
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst
@@ -0,0 +1,60 @@
+*************************************
+Yardstick Test Case Description TC040
+*************************************
+
+.. _Parser: https://wiki.opnfv.org/parser
+
++-----------------------------------------------------------------------------+
+|Verify Parser Yang-to-Tosca |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC040 Verify Parser Yang-to-Tosca |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | 1. The tosca file converted from the yang file by Parser |
+| | 2. Whether the output matches the expected outcome |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To verify the function of Yang-to-Tosca in Parser. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc040.yaml |
+| | |
+| | yangfile: the path of the yang file to be converted |
+| | toscafile: the path of the tosca file holding the expected |
+| | outcome. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Parser |
+| | |
+| | (Parser is not part of a Linux distribution, hence it |
+| | needs to be installed. As an example see the |
+| | /yardstick/benchmark/scenarios/parser/parser_setup.sh |
+| | script for how to install it manually. It is installed |
+| | and uninstalled automatically when the test case is run |
+| | by Yardstick.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Parser_ |
+| | |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | The test can be configured with different yangfile and |
+| | toscafile paths to fit the real environment and verify |
+| | Parser. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | No POD specific requirements have been identified. |
+|conditions | The test can be run without a VM. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | Parser is installed without a VM. The Yang-to-Tosca module |
+| | is run to convert the yang file into a tosca file, and the |
+| | output is validated against the expected outcome. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if the output differs from the expected outcome |
+| | or if there is a test case execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
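+
+A minimal, hypothetical sketch of opnfv_yardstick_tc040.yaml; the
+scenario type is taken from the test tool name and the file paths are
+placeholders, not verbatim file content::
+
+  # hypothetical excerpt -- paths are placeholders
+  scenarios:
+  -
+    type: Parser
+    options:
+      yangfile: /tmp/yangfile.yaml     # yang file to convert
+      toscafile: /tmp/toscafile.yaml   # expected tosca outcome
+    runner:
+      type: Iteration
+      iterations: 1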
diff --git a/docs/configguide/yardstick_testcases/testcase_description_v2_template.rst b/docs/configguide/yardstick_testcases/testcase_description_v2_template.rst
new file mode 100644
index 000000000..1b8754b05
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/testcase_description_v2_template.rst
@@ -0,0 +1,64 @@
+.. Template to be used for test case descriptions in Yardstick Project.
+ Write one .rst per test case.
+ Upload the .rst for the test case in /docs/source/yardstick directory.
+ Review in Gerrit.
+
+*************************************
+Yardstick Test Case Description TCXXX
+*************************************
+
++-----------------------------------------------------------------------------+
+|test case slogan e.g. Network Latency |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | e.g. OPNFV_YARDSTICK_TC001_NW Latency |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | what will be measured, e.g. latency |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | describe what is the purpose of the test case |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | what .yaml file to use, state SLA if applicable, state |
+| | test duration, list and describe the scenario options used in|
+| | this TC and also list the options using default values. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | e.g. ping |
+| | |
++--------------+--------------------------------------------------------------+
+|references | e.g. RFCxxx, ETSI-NFVyyy |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | describe variations of the test case which can be |
+| | performed, e.g. run the test for different packet sizes |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | describe configuration in the tool(s) used to perform |
+|conditions | the measurements (e.g. fio, pktgen), POD-specific |
+| | configuration required to enable running the test |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | use this to describe tests that require several steps e.g. |
+| | collect logs. |
+| | |
+| | Result: what happens in this step e.g. logs collected |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | remove interface |
+| | |
+| | Result: interface down. |
+| | |
++--------------+--------------------------------------------------------------+
+|step N | what is done in step N |
+| | |
+| | Result: what happens |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | expected behavior, or SLA, pass/fail criteria |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/templates/testcase_description_v2_template.rst b/docs/templates/testcase_description_v2_template.rst
index da90f561e..1b8754b05 100644
--- a/docs/templates/testcase_description_v2_template.rst
+++ b/docs/templates/testcase_description_v2_template.rst
@@ -9,37 +9,56 @@ Yardstick Test Case Description TCXXX
+-----------------------------------------------------------------------------+
|test case slogan e.g. Network Latency |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | e.g. OPNFV_YARDSTICK_TC001_NW Latency |
+| | |
+--------------+--------------------------------------------------------------+
|metric | what will be measured, e.g. latency |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | describe what is the purpose of the test case |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | what .yaml file to use, state SLA if applicable, state |
| | test duration, list and describe the scenario options used in|
| | this TC and also list the options using default values. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | e.g. ping |
+| | |
+--------------+--------------------------------------------------------------+
|references | e.g. RFCxxx, ETSI-NFVyyy |
+| | |
+--------------+--------------------------------------------------------------+
|applicability | describe variations of the test case which can be |
| | performend, e.g. run the test for different packet sizes |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | describe configuration in the tool(s) used to perform |
|conditions | the measurements (e.g. fio, pktgen), POD-specific |
| | configuration required to enable running the test |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | use this to describe tests that | what happens in |
-| | | require several steps e.g. | this step |
-| | | step 1 collect logs | e.g. logs collected|
-| +------+----------------------------------+--------------------+
-| | 2 | remove interface | interface down |
-| +------+----------------------------------+--------------------+
-| | N | what is done in step N | what happens |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | use this to describe tests that require several steps, e.g. |
+| | collect logs. |
+| | |
+| | Result: what happens in this step e.g. logs collected |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | remove interface |
+| | |
+| | Result: interface down. |
+| | |
++--------------+--------------------------------------------------------------+
+|step N | what is done in step N |
+| | |
+| | Result: what happens |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | expected behavior, or SLA, pass/fail criteria |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/user_guides/framework/03-installation.rst b/docs/userguide/yardstick_framework/03-installation.rst
index d2cae36b8..31f8a922e 100644
--- a/docs/user_guides/framework/03-installation.rst
+++ b/docs/userguide/yardstick_framework/03-installation.rst
@@ -92,7 +92,8 @@ via the OpenStack Dashboard.
Example command:
::
- glance image-create --name yardstick-trusty-server --is-public true \
+ glance --os-image-api-version 1 image-create \
+ --name yardstick-trusty-server --is-public true \
--disk-format qcow2 --container-format bare \
--file /tmp/workspace/yardstick/yardstick-trusty-server.img
diff --git a/docs/user_guides/framework/index.rst b/docs/userguide/yardstick_framework/index.rst
index f982c30ff..f982c30ff 100644
--- a/docs/user_guides/framework/index.rst
+++ b/docs/userguide/yardstick_framework/index.rst
diff --git a/docs/vTC/README.rst b/docs/vTC/README.rst
deleted file mode 100644
index ae6fefa59..000000000
--- a/docs/vTC/README.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-==========================
-Virtual Traffic Classifier
-==========================
-
-Overview
-========
-
-The virtual Traffic Classifier VNF [1], comprises in the current version of
-1 VNFC [2]. The VNFC contains both the Traffic Inspection module, and the
-Traffic forwarding module, needed to run the VNF. The exploitation of DPI
-methods for traffic classification is built around two basic assumptions:
-
-(i) third parties unaffiliated with either source or recipient are able to
-inspect each IP packet’s payload and
-(ii) the classifier knows the relevant syntax of each application’s packet
-payloads (protocol signatures, data patterns, etc.).
-
-The proposed DPI based approach will only use an indicative, small number of
-the initial packets from each flow in order to identify the content and not
-inspect each packet.
-
-In this respect it follows the Packet Based per Flow State (PBFS).
-This method uses a table to track each session based on the 5-tuples
-(src address,dest address,src port,dest port,transport protocol)
-that is maintained for each flow.
-
-Concepts
-========
-
-Traffic Inspection: The process of packet analysis and application
-identification of network traffic that passes through the vTC.
-
-Traffic Forwarding: The process of packet forwarding from an incoming
-network interface to a pre-defined outgoing network interface.
-
-Traffic Rule Application: The process of packet tagging, based on a
-predefined set of rules. Packet tagging may include e.g. ToS field
-modification.
-
-Architecture
-============
-
-The Traffic Inspection module is the most computationally intensive component
-of the VNF. It implements filtering and packet matching algorithms in order to
-support the enhanced traffic forwarding capability of the VNF. The component
-supports a flow table (exploiting hashing algorithms for fast indexing of
-flows) and an inspection engine for traffic classification.
-
-The implementation used for these experiments exploits the nDPI library.
-The packet capturing mechanism is implemented using libpcap. When the DPI
-engine identifies a new flow, the flow register is updated with the
-appropriate information and transmitted across the Traffic Forwarding module,
-which then applies any required policy updates.
-
-The Traffic Forwarding moudle is responsible for routing and packet forwarding.
-It accepts incoming network traffic, consults the flow table for classification
-information for each incoming flow and then applies pre-defined policies
-marking e.g. type of Service/Differentiated Services Code Point (TOS/DSCP)
-multimedia traffic for QoS enablement on the forwarded traffic.
-It is assumed that the traffic is forwarded using the default policy until it
-is identified and new policies are enforced.
-
-The expected response delay is considered to be negligible,as only a small
-number of packets are required to identify each flow.
-
-Graphical Overview
-==================
-
-Install
-=======
-
-run the build.sh with root privileges
-
-Run
-===
-
-sudo ./pfbridge -a eth1 -b eth2
-
-Custom Image
-============
-
-TBD
-
-Development Environment
-=======================
-
-Ubuntu 14.04 >= VM
diff --git a/docs/vTC/abbreviations.rst b/docs/vTC/abbreviations.rst
deleted file mode 100644
index a713ee66b..000000000
--- a/docs/vTC/abbreviations.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Abbreviations for the virtual Traffic Classifier
-================================================
-
-[1] VNF - Virtual Network Function
-[2] VNFC - Virtual Network Function Component
diff --git a/docs/yardstick/index.rst b/docs/yardstick/index.rst
deleted file mode 100644
index b14670bdd..000000000
--- a/docs/yardstick/index.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-======================
-Yardstick Config Guide
-======================
-
-Test Case Descriptions
-======================
-
-.. toctree::
- :maxdepth: 1
-
- opnfv_yardstick_tc001.rst
- opnfv_yardstick_tc002.rst
-
-Templates
-=========
-
-.. toctree::
- :maxdepth: 1
-
- ../templates/Yardstick_task_templates
- ../templates/testcase_description_v2_template
diff --git a/docs/yardstick/opnfv_yardstick_tc019.rst b/docs/yardstick/opnfv_yardstick_tc019.rst
new file mode 100644
index 000000000..482260b48
--- /dev/null
+++ b/docs/yardstick/opnfv_yardstick_tc019.rst
@@ -0,0 +1,129 @@
+*************************************
+Yardstick Test Case Description TC019
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node Openstack Service High Availability |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC019_HA: Control node Openstack service down|
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | This test case will verify the high availability of the |
+| | service provided by OpenStack (like nova-api, neutron- |
+| | server) on a control node. |
+| | |
++--------------+--------------------------------------------------------------+
+|test method | This test case kills the processes of a specific OpenStack |
+| | service on a selected control node, then checks whether the |
+| | related OpenStack command requests succeed and the killed |
+| | processes are recovered. |
+| | |
++--------------+--------------------------------------------------------------+
+|attackers | In this test case, an attacker called "kill-process" is |
+| | needed. This attacker includes three parameters: |
+| | 1) fault_type: which is used for finding the attacker's |
+| | scripts. It should always be set to "kill-process" in this |
+| | test case. |
+| | 2) process_name: which is the process name of the specified |
+| | OpenStack service. If multiple processes with the same name |
+| | are running on the host, all of them are killed by this |
+| | attacker. |
+| | 3) host: which is the name of a control node being attacked. |
+| | |
+| | e.g. |
+| | -fault_type: "kill-process" |
+| | -process_name: "nova-api" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|monitors | In this test case, two kinds of monitor are needed: |
+| | 1. the "openstack-cmd" monitor constantly request a specific |
+| | Openstack command, which needs two parameters: |
+| | 1) monitor_type: which is used for finding the monitor class |
+| | and related scritps. It should be always set to |
+| | "openstack-cmd" for this monitor. |
+| | 2) command_name: which is the command name used for request |
+| | |
+| | 2. the "process" monitor check whether a process is running |
+| | on a specific node, which needs three parameters: |
+| | 1) monitor_type: which used for finding the monitor class and|
+| | related scritps. It should be always set to "process" |
+| | for this monitor. |
+| | 2) process_name: which is the process name for monitor |
+| | 3) host: which is the name of the node runing the process |
+| | |
+| | e.g. |
+| | monitor1: |
+| | -monitor_type: "openstack-cmd" |
+| | -command_name: "nova image-list" |
+| | monitor2: |
+| | -monitor_type: "process" |
+| | -process_name: "nova-api" |
+| | -host: node1 |
+| | |
++--------------+--------------------------------------------------------------+
+|metrics | In this test case, there are two metrics: |
+| | 1) service_outage_time: which indicates the maximum outage |
+| | time (seconds) of the specified OpenStack command request. |
+| | 2) process_recover_time: which indicates the maximum time |
+| | (seconds) from the process being killed to its recovery |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Developed by the project. Please see folder: |
+| | "yardstick/benchmark/scenarios/availability/ha_tools" |
+| | |
++--------------+--------------------------------------------------------------+
+|references | ETSI NFV REL001 |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files: |
+| | 1) test case file: opnfv_yardstick_tc019.yaml |
+| | -Attackers: see above "attackers" description |
+| | -waiting_time: which is the time (seconds) from the process |
+| | being killed until the monitors are stopped |
+| | -Monitors: see above "monitors" description |
+| | -SLA: see above "metrics" description |
+| | |
+| | 2) POD file: pod.yaml |
+| | The POD configuration should be recorded in pod.yaml first; |
+| | the "host" item in this test case uses the node name from |
+| | the pod.yaml file. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | start monitors: |
+| | each monitor will run in an independent process |
+| | |
+| | Result: The monitor info will be collected. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | run the attacker: connect to the host through SSH, and then |
+| | execute the kill-process script with the param value set by |
+| | "process_name" |
+| | |
+| | Result: Process will be killed. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | stop monitors after a period of time specified by |
+| | "waiting_time" |
+| | |
+| | Result: The monitor info will be aggregated. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | verify the SLA |
+| | |
+| | Result: The test case is passed or not. |
+| | |
++--------------+--------------------------------------------------------------+
+|post-action | This is the action taken when the test case exits. It checks|
+| | the status of the specified process on the host, and restarts|
+| | the process if it is not running, for the next test cases. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/etc/yardstick/nodes/compass_sclab_physical/pod.yaml b/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
new file mode 100644
index 000000000..e062988c4
--- /dev/null
+++ b/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
@@ -0,0 +1,42 @@
+---
+# sample config file about the POD information, including the
+# name/IP/user/credentials of Bare Metal and Controllers/Computes
+#
+# The options of this config file include:
+# name: the name of this node
+# role: node's role, supported roles: Master/Controller/Compute/BareMetal
+# ip: the node's IP address
+# user: the username for login
+# password / key_filename: the password, or the path of the private
+#  key file, used for login
+
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: 10.1.0.50
+ user: root
+ password: root
+-
+ name: node2
+ role: Controller
+ ip: 10.1.0.51
+ user: root
+ password: root
+-
+ name: node3
+ role: Controller
+ ip: 10.1.0.52
+ user: root
+ password: root
+-
+ name: node4
+ role: Compute
+ ip: 10.1.0.53
+ user: root
+ password: root
+-
+ name: node5
+ role: Compute
+ ip: 10.1.0.54
+ user: root
+ password: root
diff --git a/etc/yardstick/yardstick.conf.sample b/etc/yardstick/yardstick.conf.sample
index 82326dd1b..63462c573 100644
--- a/etc/yardstick/yardstick.conf.sample
+++ b/etc/yardstick/yardstick.conf.sample
@@ -11,3 +11,9 @@
# file_path = /tmp/yardstick.out
# max_bytes = 0
# backup_count = 0
+
+[dispatcher_influxdb]
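+# settings used when task results are dispatched to an InfluxDB instance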
+# timeout = 5
+# target = http://127.0.0.1:8086
+# db_name = yardstick
diff --git a/run_tests.sh b/run_tests.sh
index d1567af9c..e093a20d3 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -74,6 +74,9 @@ run_functional_test() {
fi
}
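+# make the vTC apexlake modules importable for the test runs below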
+export PYTHONPATH='yardstick/vTC/apexlake'
+
run_flake8
run_tests
run_coverage
diff --git a/samples/cyclictest-node-context.yaml b/samples/cyclictest-node-context.yaml
new file mode 100644
index 000000000..d74d1e5e3
--- /dev/null
+++ b/samples/cyclictest-node-context.yaml
@@ -0,0 +1,50 @@
+---
+# Sample benchmark task config file
+# Measure system latency at high resolution using Cyclictest
+#
+# This sample is equivalent to running the command below on the test VM
+# and reporting the latency info back to Yardstick.
+#
+# sudo bash cyclictest -a 1 -i 1000 -p 99 -l 1000 -t 1 -h 90 -m -n -q
+#
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Cyclictest
+ options:
+ affinity: 1
+ interval: 1000
+ priority: 99
+ loops: 1000
+ threads: 1
+ histogram: 90
+ host: kvm.LF
+ runner:
+ type: Duration
+ duration: 1
+ interval: 1
+ sla:
+ max_min_latency: 50
+ max_avg_latency: 100
+ max_max_latency: 1000
+ action: monitor
+ setup_options:
+ rpm_dir: "/opt/rpm"
+ script_dir: "/opt/scripts"
+ image_dir: "/opt/image"
+ host_setup_seqs:
+ - "host-setup0.sh"
+ - "reboot"
+ - "host-setup1.sh"
+ - "host-run-qemu.sh"
+ guest_setup_seqs:
+ - "guest-setup0.sh"
+ - "reboot"
+ - "guest-setup1.sh"
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/pod.yaml
diff --git a/samples/dummy-no-context.yaml b/samples/dummy-no-context.yaml
new file mode 100644
index 000000000..5ab97a3a8
--- /dev/null
+++ b/samples/dummy-no-context.yaml
@@ -0,0 +1,15 @@
+---
+# Sample benchmark task config file
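+# No context section is defined: the Dummy scenario needs no deployed environment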
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Dummy
+
+ runner:
+ type: Duration
+ duration: 5
+ interval: 1
+
diff --git a/samples/dummy.yaml b/samples/dummy.yaml
new file mode 100644
index 000000000..e41ab9fba
--- /dev/null
+++ b/samples/dummy.yaml
@@ -0,0 +1,17 @@
+---
+# Sample benchmark task config file
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Dummy
+
+ runner:
+ type: Duration
+ duration: 5
+ interval: 1
+
+context:
+ type: Dummy
+
diff --git a/samples/ha-baremetal.yaml b/samples/ha-baremetal.yaml
new file mode 100755
index 000000000..9f9baf50c
--- /dev/null
+++ b/samples/ha-baremetal.yaml
@@ -0,0 +1,46 @@
+---
+# Sample test case for ha
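+# (monitor_time and the SLA outage times below are expressed in seconds)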
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "bare-metal-down"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "openstack-cmd"
+ command_name: "heat stack-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "openstack-cmd"
+ command_name: "neutron router-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/samples/ha-service.yaml b/samples/ha-service.yaml
new file mode 100755
index 000000000..e624f531e
--- /dev/null
+++ b/samples/ha-service.yaml
@@ -0,0 +1,42 @@
+---
+# Sample test case for ha
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "nova-api"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "nova-api"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/samples/lmbench.yaml b/samples/lmbench.yaml
index 2b8e99084..4ef53c30b 100644
--- a/samples/lmbench.yaml
+++ b/samples/lmbench.yaml
@@ -9,16 +9,18 @@ scenarios:
type: Lmbench
options:
test_type: "latency"
- stride: 64
- stop_size: 32
+ stop_size: 32.0
host: demeter.demo
runner:
type: Arithmetic
- name: stride
- stop: 128
- step: 64
+ iterators:
+ -
+ name: stride
+ start: 64
+ stop: 128
+ step: 64
sla:
max_latency: 35
@@ -27,16 +29,18 @@ scenarios:
type: Lmbench
options:
test_type: "bandwidth"
- size: 500
benchmark: "wr"
host: demeter.demo
runner:
type: Arithmetic
- name: size
- stop: 2000
- step: 500
+ iterators:
+ -
+ name: size
+ start: 500
+ stop: 2000
+ step: 500
sla:
min_bandwidth: 10000
diff --git a/samples/parser.yaml b/samples/parser.yaml
new file mode 100644
index 000000000..32d9abed1
--- /dev/null
+++ b/samples/parser.yaml
@@ -0,0 +1,21 @@
+---
+# Sample task config file
+# running Parser Yang-to-Tosca module as a tool
+# validating output against expected outcome
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Parser
+ options:
+ yangfile: /root/yardstick/samples/yang.yaml
+ toscafile: /root/yardstick/samples/tosca.yaml
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ type: Dummy
diff --git a/samples/ping-serial.yaml b/samples/ping-serial.yaml
index 37ea715a2..9c492e481 100644
--- a/samples/ping-serial.yaml
+++ b/samples/ping-serial.yaml
@@ -20,16 +20,17 @@ scenarios:
action: monitor
-
type: Ping
- options:
- packetsize: 100
host: apollo.demo
target: artemis.demo
runner:
type: Arithmetic
- name: packetsize
interval: 0.2
- stop: 6000
- step: 100
+ iterators:
+ -
+ name: packetsize
+ start: 100
+ stop: 6000
+ step: 100
context:
name: demo
diff --git a/samples/ping6.yaml b/samples/ping6.yaml
new file mode 100644
index 000000000..22b8bb9cc
--- /dev/null
+++ b/samples/ping6.yaml
@@ -0,0 +1,29 @@
+---
+# Sample test case for ipv6
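+# (run_step selects which of the scenario's setup/run/teardown steps are executed)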
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Ping6
+ options:
+ packetsize: 200
+ host: node1.IPV6
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+ run_step: 'setup,run,teardown'
+ sla:
+ max_rtt: 10
+ action: monitor
+
+
+context:
+ type: Node
+ name: IPV6
+ file: /root/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
+
+
diff --git a/samples/pktgen.yaml b/samples/pktgen.yaml
index 11d62795e..d621cb730 100644
--- a/samples/pktgen.yaml
+++ b/samples/pktgen.yaml
@@ -1,6 +1,8 @@
---
# Sample benchmark task config file
# measure network throughput using pktgen
+# with two stepping parameters, one stepping
+# in the positive and the other in the negative direction
schema: "yardstick:task:0.1"
@@ -8,8 +10,6 @@ scenarios:
-
type: Pktgen
options:
- packetsize: 60
- number_of_ports: 10
duration: 20
host: demeter.demo
@@ -17,10 +17,20 @@ scenarios:
runner:
type: Arithmetic
- name: number_of_ports
- # run twice with values 10 and 20
- stop: 20
- step: 10
+ interval: 2
+ iter_type: nested_for_loops
+ # run with packetsize/number_of_ports: 60,20; 60,10; ... 70,10
+ iterators:
+ -
+ name: packetsize
+ start: 60
+ stop: 70
+ step: 5
+ -
+ name: number_of_ports
+ start: 20
+ stop: 10
+ step: -10
sla:
max_ppm: 1000
diff --git a/samples/serviceha.yaml b/samples/serviceha.yaml
index 424732189..e624f531e 100755
--- a/samples/serviceha.yaml
+++ b/samples/serviceha.yaml
@@ -7,15 +7,30 @@ scenarios:
-
type: ServiceHA
options:
- component: "nova-api"
- fault_type: "stop-service"
- fault_time: 5
-
- host: node1.LF
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "nova-api"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "nova-api"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
runner:
type: Duration
- duration: 6
+ duration: 1
sla:
outage_time: 5
action: monitor
@@ -25,5 +40,3 @@ context:
type: Node
name: LF
file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
-
-
diff --git a/samples/tosca.yaml b/samples/tosca.yaml
new file mode 100644
index 000000000..4472f7ef8
--- /dev/null
+++ b/samples/tosca.yaml
@@ -0,0 +1,149 @@
+tosca_definitions_version:tosca_simple_yaml_1_0_0
+description:
+ This module defines a VNF Deployment Unit.
+import:
+
+
+metadata:
+ ID:clearwater
+ Vendor:HP
+dsl_definitions:
+ compute_props_host_ellis:&compute_props_host_ellis
+ num_cpu:4
+ mem_size:4096
+ compute_props_host_bono:&compute_props_host_bono
+ num_cpu:3
+ mem_size:2048
+node_types:
+ tosca.nodes.compute.ellis:
+ derived_from:tosca.nodes.compute
+
+ tosca.nodes.compute.bono:
+ derived_from:tosca.nodes.compute
+
+topology_template:
+ # a description of the topology template
+ description:>
+ Vdus used in a vnfd
+ inputs:
+ storage_size:
+ type:scalar-unit.size
+ default:2048
+ description:The required storage resource
+ storage_location:
+ type:string
+ description:>
+ Block storage mount point (filesystem path).
+ node_templates:
+ ellis:
+ type:tosca.nodes.Compute
+ capabilities:
+ os:
+ properties:
+ architecture:
+ type:
+ distribution:
+ version:
+ host:
+ properties:*compute_props_host_ellis
+ scalable:
+ properties:
+ min_instances:1
+ default_instances:1
+ requirements:
+ - local_storage:
+ node:ellis_BlockStorage
+ relationship:
+ type:AttachesTo
+ properties:
+ location:{ get_input:storage_location }
+ interfaces:
+ Standard:
+ start:
+ implementation:start.sh
+ delete:
+ implementation:stop.sh
+ stop:
+ implementation:shutdown.sh
+ ellis_BlockStorage:
+ type:tosca.nodes.BlockStorage
+ properties:
+ size:{ get_input:storage_size }
+ bono:
+ type:tosca.nodes.Compute
+ capabilities:
+ os:
+ properties:
+ architecture:
+ type:
+ distribution:
+ version:
+ host:
+ properties:*compute_props_host_bono
+ scalable:
+ properties:
+ min_instances:3
+ default_instances:3
+ requirements:
+ - local_storage:
+ node:bono_BlockStorage
+ relationship:
+ type:AttachesTo
+ properties:
+ location:{ get_input:storage_location }
+ interfaces:
+ Standard:
+ start:
+ implementation:start.sh
+ delete:
+ implementation:stop.sh
+ stop:
+ implementation:shutdown.sh
+ bono_BlockStorage:
+ type:tosca.nodes.BlockStorage
+ properties:
+ size:{ get_input:storage_size }
+ clearwater_network1:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ ellis_port1:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:ellis
+ - link:
+ node:clearwater_network1
+ clearwater_network2:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ ellis_port2:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:ellis
+ - link:
+ node:clearwater_network2
+ clearwater_network1:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ bono_port1:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:bono
+ - link:
+ node:clearwater_network1
+ clearwater_network2:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ bono_port2:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:bono
+ - link:
+ node:clearwater_network2 \ No newline at end of file
diff --git a/samples/unixbench.yaml b/samples/unixbench.yaml
new file mode 100644
index 000000000..9af032f23
--- /dev/null
+++ b/samples/unixbench.yaml
@@ -0,0 +1,35 @@
+---
+# Sample benchmark task config file
+# measure CPU performance
+# There is one sample scenario for Dhrystone
+# Dhrystone (MIPS) - higher results are better, i.e. better integer performance.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: UnixBench
+ options:
+ run_mode: 'verbose'
+ test_type: 'dhry2reg'
+ host: Chang'e.demo
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ name: demo
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ servers:
+ Chang'e:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
+
diff --git a/samples/yang.yaml b/samples/yang.yaml
new file mode 100644
index 000000000..86b7b2f31
--- /dev/null
+++ b/samples/yang.yaml
@@ -0,0 +1,687 @@
+ module clearwater {
+
+ namespace "http://localhost/ietf-inet-types.yang";
+
+ prefix "yang";
+
+ organization "HP";
+
+ contact "TBD";
+
+ description "This module defines a VNF Deployment Unit.";
+ revision "2014-05-18" {
+
+ description
+
+ "Initial version";
+
+ reference
+
+ "RFC XXXX";
+
+ }
+ container clearwater {
+
+ description
+
+ "Vdus used in a vnfd";
+
+ list ellis {
+ key id;
+ leaf id{
+ type string;
+ description "key ID for vdu1";
+ }
+ description
+
+ "Vdu key";
+
+
+
+ leaf instance-num {
+
+ type uint16{
+ range 1..6;}
+ default 1;
+
+
+ description
+
+ "Number of instances of the said VDU which shall be
+ instantiated";
+
+ }
+
+ leaf vm-image {
+
+ type string;
+ reference "uri";
+
+
+ description
+
+ "Reference to a VM image";
+
+ }
+ container resource {
+ description
+
+ "The required resource";
+
+ container cpu {
+
+ description
+
+ "The required computation resource";
+
+
+
+ leaf vCPU_num {
+
+ type uint32{
+ range 4;
+ }
+
+ description
+
+ "The number of virtual CPU units";
+
+ }
+
+
+
+ list vCPU-spec {
+
+ key "name";
+
+
+
+ description
+
+ "Processor characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of vCPU-spec";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of vCPU-spec";
+
+ }
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of vCPU-spec";
+
+ }
+
+ }
+
+ }
+
+ container memory {
+
+ description
+
+ "The required memory resource";
+
+ leaf memory_size {
+
+ type uint32{
+ range 4096;
+ }
+
+ description
+
+ "Memory size, unit:MBytes";
+
+ }
+
+ list memory-spec {
+
+ key name;
+
+
+
+ description
+
+ "Memory characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of memory-spec";
+
+ }
+
+
+
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of memory-spec";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of memory-spec";
+
+ }
+
+ }
+
+ }
+
+
+
+ container disk {
+
+ description
+
+ "The required storage resource";
+
+
+
+ leaf disk-size {
+
+ type uint32{
+ range 2048;
+ }
+ description
+
+ "Virtual storage size, unit:MBytes";
+ }
+
+
+
+ list disk-KQI {
+
+ key name;
+
+
+
+ description
+
+ "Storage characteristics in the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of disk-KQI";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of disk-KQI";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of disk-KQI";
+
+ }
+
+ }
+
+ }
+
+
+
+ container vnic {
+
+ description
+
+ "Virtual network interface card (vnic) resource";
+
+
+
+ leaf vnic-num {
+
+ type uint32{
+ range 2;
+ }
+
+ description
+
+ "The total number of virtual vnic";
+
+ }
+ }
+
+ }
+
+
+
+ container workflow-script {
+
+ description
+
+ "VDU workflow script";
+
+
+
+ leaf init {
+
+ type string;
+ default "start.sh";
+
+
+ description
+
+ "VDU initialization script";
+ }
+
+
+
+ leaf terminate {
+
+ type string;
+ default "stop.sh";
+
+
+ description
+
+ "VDU termination script";
+ }
+
+ leaf graceful-shutdown {
+
+ type string;
+ default "shutdown.sh";
+
+
+ description
+
+ "VDU graceful shutdown script";
+
+ }
+
+ }
+
+ }
+ list bono {
+ key id;
+ leaf id{
+ type string;
+ description "key ID for vdu2";
+ }
+ description
+
+ "Vdu key";
+
+
+
+ leaf instance-num {
+
+ type uint16;
+ default 3;
+
+
+ description
+
+ "Number of instances of the said VDU which shall be
+ instantiated";
+
+ }
+
+
+
+ leaf vm-image {
+
+ type string;
+ reference "URI";
+
+
+ description
+
+ "Reference to a VM image";
+
+ }
+
+
+
+ container resource {
+ description
+
+ "The required resource";
+
+
+
+ container cpu {
+
+ description
+
+ "The required computation resource";
+
+
+
+ leaf vCPU_num {
+
+ type uint32{
+ range 3;
+ }
+
+ description
+
+ "The number of virtual CPU units";
+
+ }
+
+
+
+ list vCPU-spec {
+
+ key "name";
+
+
+
+ description
+
+ "Processor characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of vCPU-spec";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of vCPU-spec";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of vCPU-spec";
+
+ }
+
+ }
+
+ }
+
+
+
+ container memory {
+
+ description
+
+ "The required memory resource";
+
+
+
+ leaf memory_size {
+
+ type uint32{
+ range 2048;
+ }
+
+ description
+
+ "Memory size, unit:MBytes";
+
+ }
+
+ list memory-spec {
+
+ key name;
+
+ description
+
+ "Memory characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of memory-spec";
+
+ }
+
+
+
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of memory-spec";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of memory-spec";
+
+ }
+
+ }
+
+ }
+
+
+
+ container disk {
+
+ description
+
+ "The required storage resource";
+
+
+
+ leaf disk-size {
+
+ type uint32{
+ range 3000;
+ }
+
+ description
+
+ "Virtual storage size, unit:MBytes";
+
+ }
+
+
+
+ list disk-KQI {
+
+ key name;
+
+
+
+ description
+
+ "Storage characteristics in the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of disk-KQI";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of disk-KQI";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of disk-KQI";
+
+ }
+
+ }
+
+ }
+
+
+
+ container vnic {
+
+ description
+
+ "Virtual network interface card (vnic) resource";
+
+
+
+ leaf vnic-num {
+
+ type uint32{
+ range 2;
+ }
+
+ description
+
+ "The total number of virtual vnic";
+
+ }
+ }
+
+ }
+
+
+
+ container workflow-script {
+
+ description
+
+ "VDU workflow script";
+
+
+
+ leaf init {
+
+ type string;
+ default "start.sh";
+
+
+ description
+
+ "VDU initialization script";
+
+ }
+
+
+
+ leaf terminate {
+
+ type string;
+ default "stop.sh";
+
+
+ description
+
+ "VDU termination script";
+
+ }
+
+ leaf graceful-shutdown {
+
+ type string;
+ default "shutdown.sh";
+
+
+ description
+
+ "VDU graceful shutdown script";
+
+ }
+
+ }
+
+ }
+
+ }
+
+ }
+
diff --git a/setup.py b/setup.py
index 495291cef..eb55e9283 100755
--- a/setup.py
+++ b/setup.py
@@ -8,9 +8,14 @@ setup(
include_package_data=True,
package_data={
'yardstick': [
- 'benchmark/scenarios/availability/ha_tools/*.bash',
+ 'benchmark/scenarios/availability/attacker/*.yaml',
+ 'benchmark/scenarios/availability/attacker/scripts/*.bash',
+ 'benchmark/scenarios/availability/monitor/*.yaml',
+ 'benchmark/scenarios/availability/monitor/script_tools/*.bash',
'benchmark/scenarios/compute/*.bash',
'benchmark/scenarios/networking/*.bash',
+ 'benchmark/scenarios/networking/*.txt',
+ 'benchmark/scenarios/parser/*.sh',
'benchmark/scenarios/storage/*.bash',
'resources/files/*'
]
@@ -20,6 +25,7 @@ setup(
"coverage>=3.6",
"flake8",
"Jinja2>=2.6",
+ "lxml",
"PyYAML>=3.10",
"pbr<2.0,>=1.3",
"python-glanceclient>=0.12.0",
@@ -30,9 +36,11 @@ setup(
"mock>=1.0.1", # remove with python3
"paramiko",
"netifaces",
+ "scp",
"six",
"testrepository>=0.0.18",
"testtools>=1.4.0"
+ "nose"
],
extras_require={
'plot': ["matplotlib>=1.4.2"]
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
new file mode 100644
index 000000000..f89a3099e
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
@@ -0,0 +1,49 @@
+---
+# Yardstick TC005 config file
+# Measure Storage IOPS, throughput and latency using fio
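+# The nested Jinja2 loops below expand to 5 I/O patterns x 3 block sizes = 15 scenarios.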
+
+schema: "yardstick:task:0.1"
+scenarios:
+{% for rw in ['read', 'write', 'randwrite', 'randread', 'rw'] %}
+ {% for bs in ['4k', '64k', '1024k'] %}
+-
+ type: Fio
+ options:
+ filename: /home/ec2-user/data.raw
+ bs: {{bs}}
+ rw: {{rw}}
+ ramp_time: 10
+ duration: 20
+
+ host: fio.yardstick-TC005
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+ sla:
+ read_bw: 400
+ read_iops: 100
+ read_lat: 20000
+ write_bw: 400
+ write_iops: 100
+ write_lat: 20000
+ action: monitor
+ {% endfor %}
+{% endfor %}
+
+context:
+ name: yardstick-TC005
+ image: yardstick-trusty-server
+ flavor: m1.small
+ user: ec2-user
+
+ servers:
+ fio:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
new file mode 100644
index 000000000..3d4091293
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
@@ -0,0 +1,28 @@
+---
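+# Yardstick TC006 config file
+# Measure throughput of the virtual Traffic Classifier (vTC)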
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_throughput
+
+ options:
+ packet_size: 1280
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml
new file mode 100644
index 000000000..30d59f797
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml
@@ -0,0 +1,32 @@
+---
+# Sample benchmark task config file
+# vTC
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_throughput_noisy
+
+ options:
+ packet_size: 1280
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+ num_of_neighbours: 2
+ amount_of_ram: 1G
+ number_of_cores: 2
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
new file mode 100644
index 000000000..385e530ce
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
@@ -0,0 +1,59 @@
+---
+# Yardstick TC008 config file
+# Measure network throughput and packet loss using Pktgen.
+# Different amounts of flows, from 2 up to 1001000, in combination
+# with different packet sizes, are run in each test.
+# Each combination of packet size and flow amount is run 10 times.
+# First 10 times with the smallest packet size, starting with the
+# least amount of ports/flows, then the next amount of ports with the
+# same packet size, and so on. The test sequence continues with the
+# next packet size, using the same ports/flows sequence as before.
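+# In total the nested Jinja2 loops below generate 7 x 6 = 42 scenario combinations.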
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+{% for pkt_size in [64, 128, 256, 512, 1024, 1280, 1518] %}
+ {% for num_ports in [1, 10, 50, 100, 500, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: {{pkt_size}}
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC008
+ target: poseidon.yardstick-TC008
+
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+ {% endfor %}
+{% endfor %}
+
+context:
+ name: yardstick-TC008
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml
new file mode 100644
index 000000000..4d46c0336
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc009.yaml
@@ -0,0 +1,54 @@
+---
+# Yardstick TC009 config file
+# Measure network throughput and packet loss using pktgen.
+# Different amounts of flows are tested, from 2 up to 1001000.
+# All tests are run 10 times each. First 10 times with the least
+# amount of ports, then 10 times with the next amount of ports,
+# and so on until all port amounts have been run.
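+# The Jinja2 loop below generates one scenario per port amount, 6 in total.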
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+{% for num_ports in [1, 10, 50, 100, 500, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC009
+ target: poseidon.yardstick-TC009
+
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC009
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml
new file mode 100644
index 000000000..42327f05f
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc010.yaml
@@ -0,0 +1,38 @@
+---
+# Yardstick TC010 config file
+# measure memory read latency using lmbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Lmbench
+ options:
+ test_type: "latency"
+ stride: 128
+ stop_size: 64.0
+
+ host: demeter.yardstick-TC010
+
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 1
+
+ sla:
+ max_latency: 30
+ action: monitor
+
+context:
+ name: yardstick-TC010
+ image: yardstick-trusty-server
+ flavor: m1.small
+ user: ec2-user
+
+ servers:
+ demeter:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
new file mode 100644
index 000000000..f1b995371
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
@@ -0,0 +1,32 @@
+---
+# Yardstick TC014 config file
+# Measure Processing speed using unixbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: UnixBench
+ options:
+ run_mode: 'verbose'
+ test_type: 'dhry2reg'
+ host: Chang'e.yardstick-TC014
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ name: yardstick-TC014
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ servers:
+ Chang'e:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24' \ No newline at end of file
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
new file mode 100644
index 000000000..181d7cd73
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc019.yaml
@@ -0,0 +1,39 @@
+---
+# Sample test case for the HA of a controller node OpenStack service
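+# (see docs/yardstick/opnfv_yardstick_tc019.rst for the attacker/monitor parameters)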
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "nova-api"
+ host: node1
+
+ wait_time: 10
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ - monitor_type: "process"
+ process_name: "nova-api"
+ host: node1
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml
new file mode 100644
index 000000000..8d9edfe7b
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml
@@ -0,0 +1,31 @@
+---
+# Sample benchmark task config file
+# vTC
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_instantiation_validation
+
+ options:
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+# dummy context, will not be used by vTC
+context:
+ type: Node
+ name: LF
+ file: /etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml
new file mode 100644
index 000000000..c62ce2a32
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml
@@ -0,0 +1,30 @@
+---
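+# Yardstick TC021 config file
+# Validate vTC instantiation under noisy-neighbour conditions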
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_instantiation_validation_noisy
+
+ options:
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+ num_of_neighbours: 2
+ amount_of_ram: 1G
+ number_of_cores: 2
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
new file mode 100644
index 000000000..9b5e86509
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
@@ -0,0 +1,27 @@
+---
+# Yardstick TC027 config file
+# Measure IPV6 network latency using ping6
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Ping6
+ host: node1.IPV6
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+ run_step: 'setup,run,teardown'
+ sla:
+ max_rtt: 10
+ action: monitor
+
+
+context:
+ type: Node
+ name: IPV6
+ file: /root/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
+
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
new file mode 100644
index 000000000..a73dfee0a
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
@@ -0,0 +1,86 @@
+---
+# Yardstick TC037 config file
+# Measure network throughput and packet loss using pktgen.
+# Different amounts of flows are tested, from 2 up to 1001000.
+# All tests are run 2 times each. First 2 times with the least
+# amount of ports, then 2 times with the next amount of ports,
+# and so on until all port amounts have been run.
+#
+# During the measurements system load and network latency are
+# recorded/measured using mpstat and ping, respectively.
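+# (the CPUload and Ping scenarios below run in the background for the whole task)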
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: demeter.yardstick-TC037
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: poseidon.yardstick-TC037
+-
+ type: Ping
+ run_in_background: true
+
+ options:
+ packetsize: 100
+
+ host: demeter.yardstick-TC037
+ target: poseidon.yardstick-TC037
+
+ sla:
+ max_rtt: 10
+ action: monitor
+{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC037
+ target: poseidon.yardstick-TC037
+
+ runner:
+ type: Iteration
+ iterations: 2
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC037
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
new file mode 100644
index 000000000..59608e312
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
@@ -0,0 +1,86 @@
+---
+# Yardstick TC038 config file
+# Measure network throughput and packet loss using pktgen.
+# Different amounts of flows are tested, from 2 up to 1001000.
+# All tests are run 10 times each. First 10 times with the least
+# amount of ports, then 10 times with the next amount of ports,
+# and so on until all port amounts have been run.
+#
+# During the measurements system load and network latency are
+# recorded/measured using mpstat and ping, respectively.
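+# (the CPUload and Ping scenarios below run in the background for the whole task)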
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: demeter.yardstick-TC038
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: poseidon.yardstick-TC038
+-
+ type: Ping
+ run_in_background: true
+
+ options:
+ packetsize: 100
+
+ host: demeter.yardstick-TC038
+ target: poseidon.yardstick-TC038
+
+ sla:
+ max_rtt: 10
+ action: monitor
+{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC038
+ target: poseidon.yardstick-TC038
+
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC038
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml
new file mode 100644
index 000000000..0a6dee656
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml
@@ -0,0 +1,22 @@
+---
+# Yardstick TC040 config file
+# Running Parser Yang-to-Tosca module as a tool, validating output against expected outcome
+
+schema: "yardstick:task:0.1"
+
+
+scenarios:
+-
+ type: Parser
+ options:
+ yangfile: /home/opnfv/repos/yardstick/samples/yang.yaml
+ toscafile: /home/opnfv/repos/yardstick/samples/tosca.yaml
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ type: Dummy
+
diff --git a/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml b/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml
index 8279d2378..04bac491f 100644
--- a/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml
new file mode 100644
index 000000000..c3e68150d
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml
@@ -0,0 +1,18 @@
+---
+# ERICSSON POD2 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_ericsson_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml b/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml
index e883f560f..ee13e6d9d 100644
--- a/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml
new file mode 100644
index 000000000..1bb241ed8
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD2 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml
new file mode 100644
index 000000000..2ffacb1d0
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD5 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml
new file mode 100644
index 000000000..792bba2b0
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD6 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml
new file mode 100644
index 000000000..f10a854d2
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD8 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml b/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml
new file mode 100644
index 000000000..baade6987
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml
@@ -0,0 +1,18 @@
+---
+# LF POD 1 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_lf_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml b/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml
index 4dece13f2..57c95cf69 100644
--- a/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_vTC_daily.yaml b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml
new file mode 100644
index 000000000..37738b423
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml
@@ -0,0 +1,16 @@
+---
+# ERICSSON POD1 VTC daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_vTC_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc006.yaml
+-
+ file_name: opnfv_yardstick_tc007.yaml
+-
+ file_name: opnfv_yardstick_tc020.yaml
+-
+ file_name: opnfv_yardstick_tc021.yaml
diff --git a/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml
new file mode 100644
index 000000000..216648d6f
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml
@@ -0,0 +1,16 @@
+---
+# ERICSSON POD1 VTC weekly task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_vTC_weekly"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc006.yaml
+-
+ file_name: opnfv_yardstick_tc007.yaml
+-
+ file_name: opnfv_yardstick_tc020.yaml
+-
+ file_name: opnfv_yardstick_tc021.yaml
diff --git a/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml b/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml
new file mode 100644
index 000000000..8016b46b2
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml
@@ -0,0 +1,18 @@
+---
+# ZTE POD 1 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_zte_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/unit/benchmark/contexts/test_dummy.py b/tests/unit/benchmark/contexts/test_dummy.py
new file mode 100644
index 000000000..5214e6630
--- /dev/null
+++ b/tests/unit/benchmark/contexts/test_dummy.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.contexts.dummy
+
+import unittest
+
+from yardstick.benchmark.contexts import dummy
+
+
+class DummyContextTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.test_context = dummy.DummyContext()
+
+ def test__get_server(self):
+ self.test_context.init(None)
+ self.test_context.deploy()
+
+ result = self.test_context._get_server(None)
+ self.assertEqual(result, None)
+
+ self.test_context.undeploy()
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
new file mode 100644
index 000000000..340f94cb0
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+ def test__fun_execute_shell_command_successful(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ self.assertEqual(exitcode, 0)
+
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ self.assertEqual(exitcode, -1)
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
+class AttackerBaremetalTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.attacker_cfg = {
+ 'fault_type': 'bear-metal-down',
+ 'host': 'node1',
+ }
+
+ def test__attacker_baremetal_all_successful(self, mock_ssh):
+
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.inject_fault()
+ ins.recover()
+
+ def test__attacker_baremetal_check_failure(self, mock_ssh):
+
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "error check", '')
+ ins.setup()
+
+ def test__attacker_baremetal_recover_successful(self, mock_ssh):
+
+ self.attacker_cfg["jump_host"] = 'node1'
+ self.context["node1"]["pwd"] = "123456"
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.recover()
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
new file mode 100644
index 000000000..eb0cce70d
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_process
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_process.ssh')
+class AttackerServiceTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.attacker_cfg = {
+ 'fault_type': 'kill-process',
+ 'process_name': 'nova-api',
+ 'host': 'node1',
+ }
+
+ def test__attacker_service_all_successful(self, mock_ssh):
+
+ cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
+ ins = cls(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.inject_fault()
+ ins.recover()
+
+ def test__attacker_service_check_failure(self, mock_ssh):
+
+ cls = baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
+ ins = cls(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "error check", '')
+ ins.setup()
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
new file mode 100644
index 000000000..13295273b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.basemonitor
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.BaseMonitor')
+class MonitorMgrTestCase(unittest.TestCase):
+
+ def setUp(self):
+ config = {
+ 'monitor_type': 'openstack-api',
+ }
+
+ self.monitor_configs = []
+ self.monitor_configs.append(config)
+
+ def test__MonitorMgr_setup_successful(self, mock_monitor):
+ instance = basemonitor.MonitorMgr()
+ instance.init_monitors(self.monitor_configs, None)
+ instance.start_monitors()
+ instance.wait_monitors()
+
+ ret = instance.verify_SLA()
+
+class BaseMonitorTestCase(unittest.TestCase):
+
+ class MonitorSimple(basemonitor.BaseMonitor):
+ __monitor_type__ = "MonitorForTest"
+ def setup(self):
+ self.monitor_result = False
+
+ def monitor_func(self):
+ return self.monitor_result
+
+ def setUp(self):
+ self.monitor_cfg = {
+ 'monitor_type': 'MonitorForTest',
+ 'command_name': 'nova image-list',
+ 'monitor_time': 0.01,
+ 'sla': {'max_outage_time': 5}
+ }
+
+ def test__basemonitor_start_wait_successful(self):
+ ins = basemonitor.BaseMonitor(self.monitor_cfg, None)
+ ins.start_monitor()
+ ins.wait_monitor()
+
+
+ def test__basemonitor_all_successful(self):
+ ins = self.MonitorSimple(self.monitor_cfg, None)
+ ins.setup()
+ ins.run()
+ ins.verify_SLA()
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.multiprocessing')
+ def test__basemonitor_func_false(self, mock_multiprocess):
+ ins = self.MonitorSimple(self.monitor_cfg, None)
+ ins.setup()
+ mock_multiprocess.Event().is_set.return_value = False
+ ins.run()
+ ins.verify_SLA()
+
+ def test__basemonitor_getmonitorcls_successful(self):
+ cls = None
+ try:
+ cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
+ except Exception:
+ pass
+ self.assertIsNone(cls)
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor.py b/tests/unit/benchmark/scenarios/availability/test_monitor.py
deleted file mode 100644
index 793871ca3..000000000
--- a/tests/unit/benchmark/scenarios/availability/test_monitor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Unittest for yardstick.benchmark.scenarios.availability.monitor
-
-import mock
-import unittest
-
-from yardstick.benchmark.scenarios.availability import monitor
-
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.subprocess')
-class MonitorTestCase(unittest.TestCase):
-
- def test__fun_execute_shell_command_successful(self, mock_subprocess):
- cmd = "env"
- mock_subprocess.check_output.return_value = (0, 'unittest')
- exitcode, output = monitor._execute_shell_command(cmd)
- self.assertEqual(exitcode, 0)
-
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
- cmd = "env"
- mock_subprocess.check_output.side_effect = RuntimeError
- exitcode, output = monitor._execute_shell_command(cmd)
- self.assertEqual(exitcode, -1)
-
- def test__fun_monitor_process_successful(self, mock_subprocess):
- config = {
- 'monitor_cmd':'env',
- 'duration':0
- }
- mock_queue = mock.Mock()
- mock_event = mock.Mock()
-
- mock_subprocess.check_output.return_value = (0, 'unittest')
- monitor._monitor_process(config, mock_queue, mock_event)
-
- def test__fun_monitor_process_fail_cmd_execute_error(self, mock_subprocess):
- config = {
- 'monitor_cmd':'env',
- 'duration':0
- }
- mock_queue = mock.Mock()
- mock_event = mock.Mock()
-
- mock_subprocess.check_output.side_effect = RuntimeError
- monitor._monitor_process(config, mock_queue, mock_event)
-
- def test__fun_monitor_process_fail_no_monitor_cmd(self, mock_subprocess):
- config = {
- 'duration':0
- }
- mock_queue = mock.Mock()
- mock_event = mock.Mock()
-
- mock_subprocess.check_output.return_value = (-1, 'unittest')
- monitor._monitor_process(config, mock_queue, mock_event)
-
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.multiprocessing')
- def test_monitor_all_successful(self, mock_multip, mock_subprocess):
- config = {
- 'monitor_cmd':'env',
- 'duration':0
- }
- p = monitor.Monitor()
- p.setup(config)
- mock_multip.Queue().get.return_value = 'started'
- p.start()
-
- result = "monitor unitest"
- mock_multip.Queue().get.return_value = result
- p.stop()
-
- ret = p.get_result()
-
- self.assertEqual(result, ret)
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
new file mode 100644
index 000000000..c8cda7dc7
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_command
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+ def test__fun_execute_shell_command_successful(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, output = monitor_command._execute_shell_command(cmd)
+ self.assertEqual(exitcode, 0)
+
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, output = monitor_command._execute_shell_command(cmd)
+ self.assertEqual(exitcode, -1)
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class MonitorOpenstackCmdTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.config = {
+ 'monitor_type': 'openstack-api',
+ 'command_name': 'nova image-list',
+ 'monitor_time': 1,
+ 'sla': {'max_outage_time': 5}
+ }
+
+
+ def test__monitor_command_monitor_func_successful(self, mock_subprocess):
+
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance.setup()
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ ret = instance.monitor_func()
+ self.assertEqual(ret, True)
+ instance._result = {"outage_time": 0}
+ instance.verify_SLA()
+
+ def test__monitor_command_monitor_func_failure(self, mock_subprocess):
+ mock_subprocess.check_output.return_value = (1, 'unittest')
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance.setup()
+ mock_subprocess.check_output.side_effect = RuntimeError
+ ret = instance.monitor_func()
+ self.assertEqual(ret, False)
+ instance._result = {"outage_time": 10}
+ instance.verify_SLA()
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh')
+ def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess):
+
+ self.config["host"] = "node1"
+ instance = monitor_command.MonitorOpenstackCmd(self.config, self.context)
+ instance.setup()
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
new file mode 100644
index 000000000..dda104b4e
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_process
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_process
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh')
+class MonitorProcessTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.monitor_cfg = {
+ 'monitor_type': 'process',
+ 'process_name': 'nova-api',
+ 'host': "node1",
+ 'monitor_time': 1,
+ 'sla': {'max_recover_time': 5}
+ }
+
+ def test__monitor_process_all_successful(self, mock_ssh):
+
+ ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ ins.monitor_func()
+ ins._result = {"outage_time": 0}
+ ins.verify_SLA()
+
+ def test__monitor_process_down_failure(self, mock_ssh):
+
+ ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ins.setup()
+ ins.monitor_func()
+ ins._result = {"outage_time": 10}
+ ins.verify_SLA()
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 861bacdc9..6e58b6e7a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -16,138 +16,58 @@ import unittest
from yardstick.benchmark.scenarios.availability import serviceha
-@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
class ServicehaTestCase(unittest.TestCase):
def setUp(self):
- self.args = {
- 'options':{
- 'component':'nova-api',
- 'fault_type':'stop-service',
- 'fault_time':0
- },
- 'sla':{
- 'outage_time':'2'
- }
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
}
- self.ctx = {
- 'host': {
- 'ip': '10.20.0.3',
- 'user': 'cirros',
- 'key_filename': 'mykey.key'
- }
+ self.ctx = {"nodes": {"node1": host}}
+ attacker_cfg = {
+ "fault_type": "kill-process",
+ "process_name": "nova-api",
+ "host": "node1"
}
+ attacker_cfgs = []
+ attacker_cfgs.append(attacker_cfg)
+ monitor_cfg = {
+ "monitor_cmd": "nova image-list",
+ "monitor_time": 0.1
+ }
+ monitor_cfgs = []
+ monitor_cfgs.append(monitor_cfg)
- def test__serviceha_setup_successful(self, mock_ssh):
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
- p.setup()
-
- self.assertEqual(p.setup_done, True)
-
- def test__serviceha_setup_fail_service(self, mock_ssh):
-
- self.args['options']['component'] = 'error'
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
- p.setup()
-
- self.assertEqual(p.setup_done, False)
-
- def test__serviceha_setup_fail_fault_type(self, mock_ssh):
-
- self.args['options']['fault_type'] = 'error'
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
- p.setup()
-
- self.assertEqual(p.setup_done, False)
-
- def test__serviceha_setup_fail_check(self, mock_ssh):
-
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'error', '')
- p.setup()
-
- self.assertEqual(p.setup_done, False)
-
- def test__serviceha_setup_fail_script(self, mock_ssh):
+ options = {
+ "attackers": attacker_cfgs,
+ "monitors": monitor_cfgs
+ }
+ sla = {"outage_time": 5}
+ self.args = {"options": options, "sla": sla}
+ def test__serviceha_setup_run_successful(self, mock_attacker, mock_monitor):
p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (-1, 'false', '')
-
- self.assertRaises(RuntimeError, p.setup)
- self.assertEqual(p.setup_done, False)
-
- @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
- def test__serviceha_run_successful(self, mock_monitor, mock_ssh):
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
p.setup()
-
- monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0}
- mock_monitor.Monitor().get_result.return_value = monitor_result
-
- p.connection = mock_ssh.SSH()
- mock_ssh.SSH().execute.return_value = (0, 'success', '')
-
- result = {}
- p.run(result)
- self.assertEqual(result,{ 'outage_time': 0})
-
- def test__serviceha_run_fail_nosetup(self, mock_ssh):
- p = serviceha.ServiceHA(self.args, self.ctx)
- p.run(None)
-
- @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
- def test__serviceha_run_fail_script(self, mock_monitor, mock_ssh):
+ self.assertEqual(p.setup_done, True)
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
+ ret = {}
+ p.run(ret)
+ p.teardown()
+"""
+ def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
- p.setup()
-
- monitor_result = {'total_time': 5, 'outage_time': 0, 'total_count': 16, 'outage_count': 0}
- mock_monitor.Monitor().get_result.return_value = monitor_result
- p.connection = mock_ssh.SSH()
- mock_ssh.SSH().execute.return_value = (-1, 'error', '')
-
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
-
- @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
- def test__serviceha_run_fail_sla(self, mock_monitor, mock_ssh):
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
p.setup()
-
- monitor_result = {'total_time': 10, 'outage_time': 5, 'total_count': 16, 'outage_count': 0}
- mock_monitor.Monitor().get_result.return_value = monitor_result
-
- p.connection = mock_ssh.SSH()
- mock_ssh.SSH().execute.return_value = (0, 'success', '')
+ self.assertEqual(p.setup_done, True)
result = {}
- self.assertRaises(AssertionError, p.run, result)
-
- def test__serviceha_teardown_successful(self, mock_ssh):
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
- p.setup()
- p.need_teardown = True
-
- mock_ssh.SSH().execute.return_value = (0, 'success', '')
- p.teardown()
-
- self.assertEqual(p.need_teardown, False)
-
- def test__serviceha_teardown_fail_script(self, mock_ssh):
- p = serviceha.ServiceHA(self.args, self.ctx)
- mock_ssh.SSH().execute.return_value = (0, 'running', '')
- p.setup()
- p.need_teardown = True
-
- mock_ssh.SSH().execute.return_value = (-1, 'false', '')
-
- self.assertRaises(RuntimeError, p.teardown)
+ result["outage_time"] = 10
+ mock_monitor.Monitor().get_result.return_value = result
+ ret = {}
+ self.assertRaises(AssertionError, p.run, ret)
+"""
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index a87b39142..807429025 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -22,41 +22,65 @@ from yardstick.benchmark.scenarios.compute import cyclictest
class CyclictestTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.scenario_cfg = {
+ "host": "kvm.LF",
+ "setup_options": {
+ "rpm_dir": "/opt/rpm",
+ "host_setup_seqs": [
+ "host-setup0.sh",
+ "host-setup1.sh",
+ "host-run-qemu.sh"
+ ],
+ "script_dir": "/opt/scripts",
+ "image_dir": "/opt/image",
+ "guest_setup_seqs": [
+ "guest-setup0.sh",
+ "guest-setup1.sh"
+ ]
+ },
+ "sla": {
+ "action": "monitor",
+ "max_min_latency": 50,
+ "max_avg_latency": 100,
+ "max_max_latency": 1000
+ },
+ "options": {
+ "priority": 99,
+ "threads": 1,
+ "loops": 1000,
+ "affinity": 1,
+ "interval": 1000,
+ "histogram": 90
+ }
+ }
+ self.context_cfg = {
"host": {
- "ip": "192.168.50.28",
- "user": "root",
- "key_filename": "mykey.key"
+ "ip": "10.229.43.154",
+ "key_filename": "/yardstick/resources/files/yardstick_key",
+ "role": "BareMetal",
+ "name": "kvm.LF",
+ "user": "root"
}
}
def test_cyclictest_successful_setup(self, mock_ssh):
- c = cyclictest.Cyclictest({}, self.ctx)
- c.setup()
-
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
- self.assertIsNotNone(c.client)
+
+ c.setup()
+ self.assertIsNotNone(c.guest)
+ self.assertIsNotNone(c.host)
self.assertEqual(c.setup_done, True)
def test_cyclictest_successful_no_sla(self, mock_ssh):
-
- options = {
- "affinity": 2,
- "interval": 100,
- "priority": 88,
- "loops": 10000,
- "threads": 2,
- "histogram": 80
- }
- args = {
- "options": options,
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.pop("sla", None)
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
-
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -65,29 +89,19 @@ class CyclictestTestCase(unittest.TestCase):
self.assertEqual(result, expected_result)
def test_cyclictest_successful_sla(self, mock_ssh):
-
- options = {
- "affinity": 2,
- "interval": 100,
- "priority": 88,
- "loops": 10000,
- "threads": 2,
- "histogram": 80
- }
- sla = {
- "max_min_latency": 100,
- "max_avg_latency": 500,
- "max_max_latency": 1000,
- }
- args = {
- "options": options,
- "sla": sla
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {
+ "action": "monitor",
+ "max_min_latency": 100,
+ "max_avg_latency": 500,
+ "max_max_latency": 1000
+ }
+ })
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
-
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -97,14 +111,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
- args = {
- "options": {},
- "sla": {"max_min_latency": 10}
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_min_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -112,14 +125,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
- args = {
- "options": {},
- "sla": {"max_avg_latency": 10}
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_avg_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -127,14 +139,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
- args = {
- "options": {},
- "sla": {"max_max_latency": 10}
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -142,27 +153,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
- options = {
- "affinity": 2,
- "interval": 100,
- "priority": 88,
- "loops": 10000,
- "threads": 2,
- "histogram": 80
- }
- sla = {
- "max_min_latency": 100,
- "max_avg_latency": 500,
- "max_max_latency": 1000,
- }
- args = {
- "options": options,
- "sla": sla
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, c.run, result)
diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
new file mode 100644
index 000000000..0935bcad2
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.compute import unixbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
+class UnixbenchTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ "host": {
+ "ip": "192.168.50.28",
+ "user": "root",
+ "key_filename": "mykey.key"
+ }
+ }
+
+ def test_unixbench_successful_setup(self, mock_ssh):
+
+ u = unixbench.Unixbench({}, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ u.setup()
+
+ self.assertIsNotNone(u.client)
+ self.assertEqual(u.setup_done, True)
+
+ def test_unixbench_successful_no_sla(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'verbose'
+ }
+ args = {
+ "options": options,
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH()
+
+ sample_output = '{"Score":"4425.4"}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+ u.run(result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_unixbench_successful_in_quiet_mode(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'quiet',
+ "copies":1
+ }
+ args = {
+ "options": options,
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH()
+
+ sample_output = '{"Score":"4425.4"}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+ u.run(result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+
+ def test_unixbench_successful_sla(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'verbose'
+ }
+ sla = {
+ "single_score": '100',
+ "parallel_score": '500'
+ }
+ args = {
+ "options": options,
+ "sla": sla
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH()
+
+ sample_output = '{"signle_score":"2251.7","parallel_score":"4395.9"}'
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+ u.run(result)
+ expected_result = json.loads(sample_output)
+ self.assertEqual(result, expected_result)
+
+ def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh):
+
+ args = {
+ "options": {},
+ "sla": {"single_score": "500"}
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH()
+ sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
+
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, u.run, result)
+
+ def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
+
+ args = {
+ "options": {},
+ "sla": {"parallel_score": "4000"}
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH()
+ sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
+
+ mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+ self.assertRaises(AssertionError, u.run, result)
+
+ def test_unixbench_unsuccessful_script_error(self, mock_ssh):
+
+ options = {
+ "test_type": 'dhry2reg',
+ "run_mode": 'verbose'
+ }
+ sla = {
+ "single_score": '100',
+ "parallel_score": '500'
+ }
+ args = {
+ "options": options,
+ "sla": sla
+ }
+ u = unixbench.Unixbench(args, self.ctx)
+ result = {}
+
+ u.server = mock_ssh.SSH()
+
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, u.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/dummy/__init__.py b/tests/unit/benchmark/scenarios/dummy/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/dummy/__init__.py
diff --git a/tests/unit/benchmark/scenarios/dummy/test_dummy.py b/tests/unit/benchmark/scenarios/dummy/test_dummy.py
new file mode 100644
index 000000000..1f9b729a9
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/dummy/test_dummy.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.dummy.dummy
+
+import unittest
+
+from yardstick.benchmark.scenarios.dummy import dummy
+
+
+class DummyTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.test_context = dummy.Dummy(None, None)
+
+ self.assertIsNone(self.test_context.scenario_cfg)
+ self.assertIsNone(self.test_context.context_cfg)
+ self.assertEqual(self.test_context.setup_done, False)
+
+ def test_run(self):
+ result = {}
+ self.test_context.run(result)
+
+ self.assertEqual(result["hello"], "yardstick")
+ self.assertEqual(self.test_context.setup_done, True)
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/tests/unit/benchmark/scenarios/networking/test_ping6.py
new file mode 100644
index 000000000..662b85c30
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.ping6.Ping6
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import ping6
+
+
+class Ping6TestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key",
+ 'password': "root"
+ },
+ }
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping6_successful_setup(self, mock_ssh):
+
+ p = ping6.Ping6({}, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, '0', '')
+ p.setup()
+
+ self.assertEqual(p.setup_done, True)
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_successful_no_sla(self, mock_ssh):
+
+ result = {}
+
+ p = ping6.Ping6({}, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, '100', '')
+ p.run(result)
+ self.assertEqual(result, {'rtt': 100.0})
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_successful_sla(self, mock_ssh):
+
+ args = {
+ 'sla': {'max_rtt': 150}
+ }
+ result = {}
+
+ p = ping6.Ping6(args, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, '100', '')
+ p.run(result)
+ self.assertEqual(result, {'rtt': 100.0})
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_unsuccessful_sla(self, mock_ssh):
+
+ args = {
+ 'options': {'packetsize': 200},
+ 'sla': {'max_rtt': 50}
+ }
+ result = {}
+
+ p = ping6.Ping6(args, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, '100', '')
+ self.assertRaises(AssertionError, p.run, result)
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_unsuccessful_script_error(self, mock_ssh):
+
+ args = {
+ 'options': {'packetsize': 200},
+ 'sla': {'max_rtt': 50}
+ }
+ result = {}
+
+ p = ping6.Ping6(args, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, p.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
new file mode 100644
index 000000000..418dd39e6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation
+
+
+class VtcInstantiationValidationTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+
+ self.vt = vtc_instantiation_validation.VtcInstantiationValidation(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
new file mode 100644
index 000000000..e0a46241c
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation_noisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation_noisy
+
+
+class VtcInstantiationValidationNoisyTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+ scenario['options']['num_of_neighbours'] = '1'
+ scenario['options']['amount_of_ram'] = '1G'
+ scenario['options']['number_of_cores'] = '1'
+
+ self.vt = vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
new file mode 100644
index 000000000..ecdf555d2
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput
+
+
+class VtcThroughputTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+
+ self.vt = vtc_throughput.VtcThroughput(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
new file mode 100644
index 000000000..98957b1de
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput_noisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput_noisy
+
+
+class VtcThroughputNoisyTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+ scenario['options']['num_of_neighbours'] = '1'
+ scenario['options']['amount_of_ram'] = '1G'
+ scenario['options']['number_of_cores'] = '1'
+
+ self.vt = vtc_throughput_noisy.VtcThroughputNoisy(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/parser/__init__.py b/tests/unit/benchmark/scenarios/parser/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/__init__.py
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/tests/unit/benchmark/scenarios/parser/test_parser.py
new file mode 100644
index 000000000..d11a6d5c8
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/test_parser.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.parser.Parser
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.parser import parser
+
+@mock.patch('yardstick.benchmark.scenarios.parser.parser.subprocess')
+class ParserTestCase(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def test_parser_successful_setup(self, mock_subprocess):
+
+ p = parser.Parser({}, {})
+ mock_subprocess.call.return_value = 0
+ p.setup()
+ self.assertEqual(p.setup_done, True)
+
+ def test_parser_successful(self, mock_subprocess):
+ args = {
+ 'options': {'yangfile':'/root/yardstick/samples/yang.yaml',
+ 'toscafile':'/root/yardstick/samples/tosca.yaml'},
+ }
+ p = parser.Parser(args, {})
+ result = {}
+ mock_subprocess.call.return_value = 0
+ sample_output = '{"yangtotosca": "success"}'
+
+ p.run(result)
+ expected_result = json.loads(sample_output)
+
+ def test_parser_teardown_successful(self, mock_subprocess):
+
+ p = parser.Parser({}, {})
+ mock_subprocess.call.return_value = 0
+ p.teardown()
+ self.assertEqual(p.teardown_done, True)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/dispatcher/__init__.py b/tests/unit/dispatcher/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/dispatcher/__init__.py
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
new file mode 100644
index 000000000..5553c86a9
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.dispatcher.influxdb
+
+import mock
+import unittest
+
+from yardstick.dispatcher.influxdb import InfluxdbDispatcher
+
+class InfluxdbDispatcherTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.data1 = {
+ "runner_id": 8921,
+ "context_cfg": {
+ "host": {
+ "ip": "10.229.43.154",
+ "key_filename": "/root/yardstick/yardstick/resources/files/yardstick_key",
+ "name": "kvm.LF",
+ "user": "root"
+ },
+ "target": {
+ "ipaddr": "10.229.44.134"
+ }
+ },
+ "scenario_cfg": {
+ "runner": {
+ "interval": 1,
+ "object": "yardstick.benchmark.scenarios.networking.ping.Ping",
+ "output_filename": "/tmp/yardstick.out",
+ "runner_id": 8921,
+ "duration": 10,
+ "type": "Duration"
+ },
+ "host": "kvm.LF",
+ "type": "Ping",
+ "target": "10.229.44.134",
+ "sla": {
+ "action": "monitor",
+ "max_rtt": 10
+ },
+ "tc": "ping",
+ "task_id": "ea958583-c91e-461a-af14-2a7f9d7f79e7"
+ }
+ }
+ self.data2 = {
+ "benchmark": {
+ "timestamp": "1451478117.883505",
+ "errors": "",
+ "data": {
+ "rtt": 0.613
+ },
+ "sequence": 1
+ },
+ "runner_id": 8921
+ }
+ self.data3 = {
+ "benchmark": {
+ "data": {
+ "mpstat": {
+ "cpu0": {
+ "%sys": "0.00",
+ "%idle": "99.00"
+ },
+ "loadavg": [
+ "1.09",
+ "0.29"
+ ]
+ },
+ "rtt": "1.03"
+ }
+ }
+ }
+
+ def test_record_result_data_no_target(self):
+ influxdb = InfluxdbDispatcher(None)
+ influxdb.target = ''
+ self.assertEqual(influxdb.record_result_data(self.data1), -1)
+
+ def test_record_result_data_no_case_name(self):
+ influxdb = InfluxdbDispatcher(None)
+ self.assertEqual(influxdb.record_result_data(self.data2), -1)
+
+ @mock.patch('yardstick.dispatcher.influxdb.requests')
+ def test_record_result_data(self, mock_requests):
+ type(mock_requests.post.return_value).status_code = 204
+ influxdb = InfluxdbDispatcher(None)
+ self.assertEqual(influxdb.record_result_data(self.data1), 0)
+ self.assertEqual(influxdb.record_result_data(self.data2), 0)
+ self.assertEqual(influxdb.flush_result_data(), 0)
+
+ def test__dict_key_flatten(self):
+ line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
+ influxdb = InfluxdbDispatcher(None)
+ flattened_data = influxdb._dict_key_flatten(self.data3['benchmark']['data'])
+ result = ",".join([k+"="+v for k, v in flattened_data.items()])
+ self.assertEqual(result, line)
+
+ def test__get_nano_timestamp(self):
+ influxdb = InfluxdbDispatcher(None)
+ results = {'benchmark': {'timestamp': '1451461248.925574'}}
+ self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+ @mock.patch('yardstick.dispatcher.influxdb.time')
+ def test__get_nano_timestamp_except(self, mock_time):
+ results = {}
+ influxdb = InfluxdbDispatcher(None)
+ mock_time.time.return_value = 1451461248.925574
+ self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
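
The two helpers pinned down above are small enough to restate. A sketch of the behavior the tests assert (names mirror the tests, not necessarily the shipped implementation): nested dicts flatten into dotted keys, list elements get an index suffix, and timestamps scale from seconds to integer nanoseconds. The trailing ...574144 in the expected timestamp is float64 rounding of 1451461248.925574, which is why an exact string can be asserted.

    import time

    def dict_key_flatten(data, parent=""):
        # {"mpstat": {"cpu0": {"%sys": "0.00"}}} -> {"mpstat.cpu0.%sys": "0.00"}
        # {"mpstat": {"loadavg": ["1.09", "0.29"]}} -> {"mpstat.loadavg0": "1.09", ...}
        flat = {}
        for key, value in data.items():
            name = parent + key
            if isinstance(value, dict):
                flat.update(dict_key_flatten(value, name + "."))
            elif isinstance(value, list):
                for i, item in enumerate(value):
                    flat[name + str(i)] = item
            else:
                flat[name] = value
        return flat

    def get_nano_timestamp(results):
        # Fall back to time.time() when the timestamp is missing, as
        # test__get_nano_timestamp_except exercises with a mocked clock.
        try:
            timestamp = results["benchmark"]["timestamp"]
        except KeyError:
            timestamp = str(time.time())
        return str(int(float(timestamp) * 1000000000))
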
diff --git a/tests/unit/dispatcher/test_influxdb_line_protocol.py b/tests/unit/dispatcher/test_influxdb_line_protocol.py
new file mode 100644
index 000000000..cb05bf4d2
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb_line_protocol.py
@@ -0,0 +1,55 @@
+# Unittest for yardstick.dispatcher.influxdb_line_protocol
+
+# yardstick comment: this file is a modified copy of
+# influxdb-python/influxdb/tests/test_line_protocol.py
+
+import unittest
+from yardstick.dispatcher.influxdb_line_protocol import make_lines
+
+
+class TestLineProtocol(unittest.TestCase):
+
+ def test_make_lines(self):
+ data = {
+ "tags": {
+ "empty_tag": "",
+ "none_tag": None,
+ "integer_tag": 2,
+ "string_tag": "hello"
+ },
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "string_val": "hello!",
+ "int_val": 1,
+ "float_val": 1.1,
+ "none_field": None,
+ "bool_val": True,
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ make_lines(data),
+ 'test,integer_tag=2,string_tag=hello '
+ 'bool_val=True,float_val=1.1,int_val=1i,string_val="hello!"\n'
+ )
+
+ def test_string_val_newline(self):
+ data = {
+ "points": [
+ {
+ "measurement": "m1",
+ "fields": {
+ "multi_line": "line1\nline1\nline3"
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ make_lines(data),
+ 'm1 multi_line="line1\\nline1\\nline3"\n'
+ )
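
The expected strings encode the InfluxDB line-protocol conventions the dispatcher relies on: tags sorted by key, empty or None tags dropped, None fields dropped, integers suffixed with i, and strings quoted with newlines escaped. A field-value sketch of those rules (illustrative, Python 2 like the rest of the tree; dropping None is assumed to happen in the caller):

    def format_field_value(value):
        if isinstance(value, bool):
            return str(value)            # check bool first: True is also an int
        if isinstance(value, (int, long)):
            return str(value) + "i"      # 1 -> "1i", per the first assertion
        if isinstance(value, basestring):
            return '"%s"' % value.replace("\n", "\\n")  # newline test above
        return str(value)                # floats: 1.1 -> "1.1"
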
diff --git a/tools/ubuntu-server-cloudimg-modify.sh b/tools/ubuntu-server-cloudimg-modify.sh
index 58fcd9277..11e6051cd 100755
--- a/tools/ubuntu-server-cloudimg-modify.sh
+++ b/tools/ubuntu-server-cloudimg-modify.sh
@@ -42,14 +42,21 @@ EOF
apt-get update
apt-get install -y \
fio \
+ git \
+ gcc \
iperf3 \
linux-tools-common \
linux-tools-generic \
lmbench \
+ make \
netperf \
+ patch \
+ perl \
rt-tests \
stress \
sysstat
+git clone https://github.com/kdlucas/byte-unixbench.git /opt/tempT
+make --directory /opt/tempT/UnixBench/
# restore symlink
ln -sf /run/resolvconf/resolv.conf /etc/resolv.conf
diff --git a/yardstick/__init__.py b/yardstick/__init__.py
index 0c25416bd..7114f8008 100644
--- a/yardstick/__init__.py
+++ b/yardstick/__init__.py
@@ -16,3 +16,10 @@ logging.basicConfig(
'%(levelname)s %(message)s', # noqa
datefmt='%m/%d/%y %H:%M:%S')
logging.getLogger(__name__).setLevel(logging.INFO)
+
+# Hack to be able to run apexlake unit tests
+# without having to install apexlake.
+import sys
+import os
+import yardstick.vTC.apexlake as apexlake
+sys.path.append(os.path.dirname(apexlake.__file__))
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
new file mode 100644
index 000000000..6901b2617
--- /dev/null
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+
+from yardstick.benchmark.contexts.base import Context
+
+
+LOG = logging.getLogger(__name__)
+
+
+class DummyContext(Context):
+ '''Class that handles dummy info'''
+
+ __context_type__ = "Dummy"
+
+ def __init__(self):
+ super(DummyContext, self).__init__()
+
+ def init(self, attrs):
+ pass
+
+ def deploy(self):
+ '''don't need to deploy'''
+ pass
+
+ def undeploy(self):
+ '''don't need to undeploy'''
+ pass
+
+ def _get_server(self, attr_name):
+ return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 7bd430bc5..8c514d250 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -129,8 +129,27 @@ class HeatContext(Context):
scheduler_hints = {}
for pg in server.placement_groups:
update_scheduler_hints(scheduler_hints, added_servers, pg)
- server.add_to_template(template, self.networks, scheduler_hints)
- added_servers.append(server.stack_name)
+ # workaround for an OpenStack Nova bug; see JIRA: YARDSTICK-200
+ # for details
+ if len(availability_servers) == 2:
+ if len(scheduler_hints["different_host"]) == 0:
+ scheduler_hints.pop("different_host", None)
+ server.add_to_template(template,
+ self.networks,
+ scheduler_hints)
+ added_servers.append(server.stack_name)
+ else:
+ scheduler_hints["different_host"] = \
+ scheduler_hints["different_host"][0]
+ server.add_to_template(template,
+ self.networks,
+ scheduler_hints)
+ added_servers.append(server.stack_name)
+ else:
+ server.add_to_template(template,
+ self.networks,
+ scheduler_hints)
+ added_servers.append(server.stack_name)
# create list of servers with affinity policy
affinity_servers = []
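
The workaround branch only fires when the anti-affinity group holds exactly two servers: an empty different_host hint is removed outright, and a one-element list is flattened to a scalar before the server is added to the template. Extracted as a sketch (illustrative, not the HeatContext code itself):

    def adjust_different_host(scheduler_hints):
        # Nova mishandles a list-valued different_host hint in this case
        # (JIRA: YARDSTICK-200), so pass a scalar or drop the hint.
        hosts = scheduler_hints.get("different_host", [])
        if not hosts:
            scheduler_hints.pop("different_host", None)
        else:
            scheduler_hints["different_host"] = hosts[0]
        return scheduler_hints
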
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index 04c8e7ca3..54ee076f4 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -83,12 +83,5 @@ class NodeContext(Context):
sys.exit(-1)
node = nodes[0]
-
- server = {
- "name": attr_name,
- "ip": node["ip"],
- "user": node["user"],
- "key_filename": node["key_filename"]
- }
-
- return server
+ node["name"] = attr_name
+ return node
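
Returning the full pod record instead of a rebuilt subset means every attribute from pod.yaml now reaches the scenario; that is what lets the baremetal HA tests above find their ipmi_* keys. An illustrative record (not a real pod definition):

    node = {
        "name": "node1",              # added by _get_server() from attr_name
        "ip": "10.20.0.5",
        "user": "root",
        "key_filename": "/root/.ssh/id_rsa",
        "ipmi_ip": "10.20.0.5",       # previously dropped by the old subset
        "ipmi_user": "root",
    }
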
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 4eab6643e..7e516ea1e 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -7,8 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-'''A runner that every run arithmetically steps a specified input value to
-the scenario. This just means a step value is added to the previous value.
+'''A runner that arithmetically steps specified input value(s) to the
+scenario on every run. This just means the step value(s) are added to the
+previous value(s). Several named input values can be combined and iterated,
+either as nested for loops or by combining the i:th index of each "input
+value list" until the end of the shortest list is reached (ideally all
+lists should be defined with the same number of values when using that
+iter_type).
'''
import os
@@ -16,6 +20,7 @@ import multiprocessing
import logging
import traceback
import time
+import itertools
from yardstick.benchmark.runners import base
@@ -30,16 +35,15 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
runner_cfg = scenario_cfg['runner']
interval = runner_cfg.get("interval", 1)
- arg_name = runner_cfg.get('name')
- stop = runner_cfg.get('stop')
- step = runner_cfg.get('step')
- options = scenario_cfg['options']
- start = options.get(arg_name, 0)
+ if 'options' in scenario_cfg:
+ options = scenario_cfg['options']
+ else: # options must be instantiated if not present in the yaml
+ options = {}
+ scenario_cfg['options'] = options
runner_cfg['runner_id'] = os.getpid()
- LOG.info("worker START, step(%s, %d, %d, %d), class %s",
- arg_name, start, stop, step, cls)
+ LOG.info("worker START, class %s", cls)
benchmark = cls(scenario_cfg, context_cfg)
benchmark.setup()
@@ -52,18 +56,39 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
- margin = 1 if step > 0 else -1
- for value in range(start, stop+margin, step):
+ # Include the stop value in the range and support stepping backwards
+ margin = lambda start, stop: -1 if start > stop else 1
+
+ param_iters = \
+ [xrange(d['start'], d['stop'] + margin(d['start'], d['stop']),
+ d['step']) for d in runner_cfg['iterators']]
+ param_names = [d['name'] for d in runner_cfg['iterators']]
+
+ iter_type = runner_cfg.get("iter_type", "nested_for_loops")
+
+ if iter_type == 'nested_for_loops':
+ # Create a complete combination set of all parameter lists
+ loop_iter = itertools.product(*param_iters)
+ elif iter_type == 'tuple_loops':
+ # Combine the i:th index of each respective parameter list
+ loop_iter = itertools.izip(*param_iters)
+ else:
+ LOG.warning("iter_type unrecognized: %s", iter_type)
+ raise RuntimeError("unrecognized iter_type: %s" % iter_type)
+
+ # Populate options and run the requested method for each value combination
+ for comb_values in loop_iter:
if aborted.is_set():
break
- options[arg_name] = value
-
LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
{"runner": runner_cfg["runner_id"], "sequence": sequence})
+ for i, value in enumerate(comb_values):
+ options[param_names[i]] = value
+
data = {}
errors = ""
@@ -99,7 +124,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sequence += 1
- if errors:
+ if (errors and sla_action is None):
break
benchmark.teardown()
@@ -107,29 +132,39 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
class ArithmeticRunner(base.Runner):
- '''Run a scenario arithmetically stepping an input value
+ '''Run a scenario arithmetically stepping input value(s)
Parameters
interval - time to wait between each scenario invocation
type: int
unit: seconds
default: 1 sec
- name - name of scenario option that will be increased for each invocation
+ iter_type - iteration type over the input parameter(s): nested_for_loops
+ or tuple_loops
type: string
unit: na
- default: none
- start - value to use in first invocation of scenario
- type: int
- unit: na
- default: none
- step - value added to start value in next invocation of scenario
- type: int
- unit: na
- default: none
- stop - value indicating end of invocation
- type: int
- unit: na
- default: none
+ default: nested_for_loops
+ -
+ name - name of scenario option that will be increased for each invocation
+ type: string
+ unit: na
+ default: na
+ start - value to use in first invocation of scenario
+ type: int
+ unit: na
+ default: none
+ stop - value indicating end of invocation. Can be set to same
+ value as start for one single value.
+ type: int
+ unit: na
+ default: none
+ step - value added to start value in next invocation of scenario.
+ Must not be set to zero. Can be set negative if start > stop
+ type: int
+ unit: na
+ default: none
+ -
+ name - further iterator entries follow the same pattern
'''
__execution_type__ = 'Arithmetic'
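Note: a sketch of the two iteration modes over an illustrative iterators list, using the same margin/xrange construction as the worker process above (Python 2, like the runner):

    # Sketch (Python 2) of nested_for_loops vs tuple_loops over an
    # illustrative "iterators" list from a runner configuration.
    import itertools

    iterators = [{'name': 'packet_size', 'start': 64, 'stop': 128, 'step': 64},
                 {'name': 'rate', 'start': 100, 'stop': 50, 'step': -50}]

    margin = lambda start, stop: -1 if start > stop else 1
    param_iters = [xrange(d['start'], d['stop'] + margin(d['start'], d['stop']),
                          d['step']) for d in iterators]

    print(list(itertools.product(*param_iters)))
    # nested_for_loops: [(64, 100), (64, 50), (128, 100), (128, 50)]
    print(list(itertools.izip(*param_iters)))
    # tuple_loops: [(64, 100), (128, 50)]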
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index e38ed3749..c24957b1a 100755..100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -30,12 +30,15 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
interval = runner_cfg.get("interval", 1)
iterations = runner_cfg.get("iterations", 1)
+ run_step = runner_cfg.get("run_step", "setup,run,teardown")
LOG.info("worker START, iterations %d times, class %s", iterations, cls)
runner_cfg['runner_id'] = os.getpid()
benchmark = cls(scenario_cfg, context_cfg)
- benchmark.setup()
+ if "setup" in run_step:
+ benchmark.setup()
+
method = getattr(benchmark, method_name)
queue.put({'runner_id': runner_cfg['runner_id'],
@@ -45,53 +48,55 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
-
- while True:
-
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
-
- data = {}
- errors = ""
-
- try:
- method(data)
- except AssertionError as assertion:
- # SLA validation failed in scenario, determine what to do now
- if sla_action == "assert":
- raise
- elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
- errors = assertion.args
- except Exception as e:
- errors = traceback.format_exc()
- LOG.exception(e)
-
- time.sleep(interval)
-
- benchmark_output = {
- 'timestamp': time.time(),
- 'sequence': sequence,
- 'data': data,
- 'errors': errors
- }
-
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
-
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
-
- sequence += 1
-
- if (errors and sla_action is None) or \
- (sequence > iterations or aborted.is_set()):
- LOG.info("worker END")
- break
-
- benchmark.teardown()
+ if "run" in run_step:
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ method(data)
+ except AssertionError as assertion:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s" % assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ time.sleep(interval)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ record = {'runner_id': runner_cfg['runner_id'],
+ 'benchmark': benchmark_output}
+
+ queue.put(record)
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or \
+ (sequence > iterations or aborted.is_set()):
+ LOG.info("worker END")
+ break
+ if "teardown" in run_step:
+ benchmark.teardown()
class IterationRunner(base.Runner):
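Note: run_step makes the three phases independently selectable, so a task can, for example, only prepare a target ("setup") and later run against it repeatedly ("run"). A sketch of the gating, with the per-iteration loop elided:

    # Sketch of the run_step gating; the iteration loop itself is elided.
    def run_phases(benchmark, runner_cfg):
        run_step = runner_cfg.get("run_step", "setup,run,teardown")
        if "setup" in run_step:
            benchmark.setup()
        if "run" in run_step:
            pass   # per-iteration loop from the hunk above
        if "teardown" in run_step:
            benchmark.teardown()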
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index 47708fc5e..b5fae37ad 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -96,7 +96,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sequence += 1
- if errors or aborted.is_set():
+ if (errors and sla_action is None) or aborted.is_set():
break
benchmark.teardown()
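Note: the sequence runner now matches the iteration runner's semantics: errors abort the run only when no SLA action is configured, so sla_action "monitor" records failures and keeps going. As a one-line predicate:

    # Sketch: when the sequence loop should break.
    def should_break(errors, sla_action, aborted):
        return bool(errors and sla_action is None) or aborted.is_set()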
diff --git a/yardstick/benchmark/scenarios/availability/attacker/__init__.py b/yardstick/benchmark/scenarios/availability/attacker/__init__.py
new file mode 100755
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
new file mode 100644
index 000000000..b35869d07
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -0,0 +1,129 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import traceback
+import subprocess
+import yardstick.ssh as ssh
+from baseattacker import BaseAttacker
+
+LOG = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command, stdin=None):
+ '''execute shell script with error handling'''
+ exitcode = 0
+ output = []
+ try:
+ output = subprocess.check_output(command, stdin=stdin, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ LOG.error("exec command '%s' error:\n " % command)
+ LOG.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+class BaremetalAttacker(BaseAttacker):
+
+ __attacker_type__ = 'bare-metal-down'
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+ self.host_ip = ip
+
+ self.ipmi_ip = host.get("ipmi_ip", None)
+ self.ipmi_user = host.get("ipmi_user", "root")
+ self.ipmi_pwd = host.get("ipmi_pwd", None)
+
+ self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
+ self.check_script = self.get_script_fullpath(
+ self.fault_cfg['check_script'])
+ self.recovery_script = self.get_script_fullpath(
+ self.fault_cfg['recovery_script'])
+
+ if self.check():
+ self.setup_done = True
+
+ def check(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0} -W 10".format(self.host_ip),
+ stdin=open(self.check_script, "r"))
+
+ LOG.debug("check ret: %s out:%s err:%s" %
+ (exit_status, stdout, stderr))
+ if not stdout or "running" not in stdout:
+ LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+ return False
+
+ return True
+
+ def inject_fault(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "shutdown -h now")
+ LOG.debug("inject fault ret: %s out:%s err:%s" %
+ (exit_status, stdout, stderr))
+ if not exit_status:
+ LOG.info("inject fault success")
+
+ def recover(self):
+ jump_host_name = self._config.get("jump_host", None)
+ self.jump_connection = None
+ if jump_host_name is not None:
+ host = self._context.get(jump_host_name, None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ pwd = host.get("pwd", None)
+
+ LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+ self.jump_connection = ssh.SSH(user, ip, password=pwd)
+ self.jump_connection.wait(timeout=600)
+ LOG.debug("ssh jump host success!")
+
+ if self.jump_connection is not None:
+ exit_status, stdout, stderr = self.jump_connection.execute(
+ "/bin/bash -s {0} {1} {2} {3}".format(
+ self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
+ stdin=open(self.recovery_script, "r"))
+ else:
+ exit_status, stdout = _execute_shell_command(
+ "/bin/bash -s {0} {1} {2} {3}".format(
+ self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
+ stdin=open(self.recovery_script, "r"))
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ attacker_cfg = {
+ 'fault_type': 'bare-metal-down',
+ 'host': 'node1',
+ }
+ ins = BaremetalAttacker(attacker_cfg, context)
+ ins.setup()
+ ins.inject_fault()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
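Note: an illustrative attacker_cfg exercising the jump-host path in recover(); the node names are assumptions, not taken from the sample test cases:

    # Illustrative bare-metal-down config; node2 reaches node1's IPMI
    # interface once node1 itself has been shut down.
    attacker_cfg = {
        'fault_type': 'bare-metal-down',
        'host': 'node1',        # target, powered off via "shutdown -h now"
        'jump_host': 'node2',   # runs ha_tools/ipmi_power.bash to power it on
    }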
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
new file mode 100644
index 000000000..5118ad628
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -0,0 +1,67 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from baseattacker import BaseAttacker
+import yardstick.ssh as ssh
+
+LOG = logging.getLogger(__name__)
+
+
+class ProcessAttacker(BaseAttacker):
+
+ __attacker_type__ = 'kill-process'
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.service_name = self._config['process_name']
+ self.fault_cfg = BaseAttacker.attacker_cfgs.get('kill-process')
+
+ self.check_script = self.get_script_fullpath(
+ self.fault_cfg['check_script'])
+ self.inject_script = self.get_script_fullpath(
+ self.fault_cfg['inject_script'])
+ self.recovery_script = self.get_script_fullpath(
+ self.fault_cfg['recovery_script'])
+
+ if self.check():
+ self.setup_done = True
+
+ def check(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0}".format(self.service_name),
+ stdin=open(self.check_script, "r"))
+
+ if stdout and "running" in stdout:
+ LOG.info("check the envrioment success!")
+ return True
+ else:
+ LOG.error(
+ "the host envrioment is error, stdout:%s, stderr:%s" %
+ (stdout, stderr))
+ return False
+
+ def inject_fault(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0}".format(self.service_name),
+ stdin=open(self.inject_script, "r"))
+
+ def recover(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0} ".format(self.service_name),
+ stdin=open(self.recovery_script, "r"))
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
new file mode 100644
index 000000000..a1c6999e5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -0,0 +1,47 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import yaml
+import logging
+import os
+
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+attacker_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "attacker_conf.yaml")
+
+
+class BaseAttacker(object):
+
+ attacker_cfgs = {}
+
+ def __init__(self, config, context):
+ if not BaseAttacker.attacker_cfgs:
+ with open(attacker_conf_path) as stream:
+ BaseAttacker.attacker_cfgs = yaml.load(stream)
+
+ self._config = config
+ self._context = context
+ self.setup_done = False
+
+ @staticmethod
+ def get_attacker_cls(attacker_cfg):
+ '''return attacker instance of specified type'''
+ attacker_type = attacker_cfg['fault_type']
+ for attacker_cls in utils.itersubclasses(BaseAttacker):
+ if attacker_type == attacker_cls.__attacker_type__:
+ return attacker_cls
+ raise RuntimeError("No such runner_type %s" % attacker_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(attacker_conf_path)
+ return os.path.join(base_path, path)
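Note: attacker types are resolved purely by subclass introspection, so importing an attacker module is what registers it. A sketch of the lookup, importing the modules added in this patch:

    # Sketch: importing the module registers the subclass; the lookup then
    # matches on __attacker_type__.
    from yardstick.benchmark.scenarios.availability.attacker import baseattacker
    from yardstick.benchmark.scenarios.availability.attacker import \
        attacker_process  # noqa: registers ProcessAttacker

    cls = baseattacker.BaseAttacker.get_attacker_cls(
        {'fault_type': 'kill-process'})
    print(cls.__attacker_type__)   # kill-process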
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
new file mode 100644
index 000000000..3f6c2aa8f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -0,0 +1,13 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+kill-process:
+ check_script: ha_tools/check_process_python.bash
+ inject_script: ha_tools/fault_process_kill.bash
+ recovery_script: ha_tools/start_service.bash
+
+bare-metal-down:
+ check_script: ha_tools/check_host_ping.bash
+ recovery_script: ha_tools/ipmi_power.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash
new file mode 100755
index 000000000..0f160e2a8
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check whether the host is running
+
+set -e
+
+host_ip=$1
+shift
+options="$@"
+
+if ping -c 1 $options $host_ip | grep -q ttl; then
+ echo "running"
+else
+ exit 1
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash
new file mode 100755
index 000000000..83d7e36c1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of an OpenStack command
+
+set -e
+
+cmd=$1
+
+source /root/openrc
+
+exec $cmd
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash
new file mode 100755
index 000000000..88baed7d9
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check whether a python process with the given name is running
+
+set -e
+
+process_name=$1
+
+ps aux | grep -e ".*python.*${process_name}.*" | grep -v grep | wc -l
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
new file mode 100755
index 000000000..d0e2f1683
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop process by process name
+
+set -e
+
+process_name=$1
+
+killall -9 $process_name
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml b/yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml
deleted file mode 100644
index 67e56eb4f..000000000
--- a/yardstick/benchmark/scenarios/availability/ha_tools/ha_conf.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# sample config file for ha test
-#
-schema: "yardstick:task:0.1"
-
-nova-api:
--
- type: stop-service
- inject_script: ha_tools/stop_service.bash
- recovery_script: ha_tools/start_service.bash
- check_script: ha_tools/check_service.bash
- monitor_cmd: nova image-list
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash b/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash
new file mode 100755
index 000000000..ea621facd
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Power a host on or off through its IPMI interface
+
+set -e
+
+ipmi_ip=$1
+ipmi_user=$2
+ipmi_pwd=$3
+
+action=$4
+ipmitool -I lanplus -H $ipmi_ip -U $ipmi_user -P $ipmi_pwd power $action
diff --git a/yardstick/benchmark/scenarios/availability/monitor.py b/yardstick/benchmark/scenarios/availability/monitor.py
deleted file mode 100755
index 3193d3304..000000000
--- a/yardstick/benchmark/scenarios/availability/monitor.py
+++ /dev/null
@@ -1,114 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import logging
-import multiprocessing
-import subprocess
-import traceback
-import time
-
-LOG = logging.getLogger(__name__)
-
-
-def _execute_shell_command(command):
- '''execute shell script with error handling'''
- exitcode = 0
- output = []
- try:
- output = subprocess.check_output(command, shell=True)
- except Exception:
- exitcode = -1
- output = traceback.format_exc()
- LOG.error("exec command '%s' error:\n " % command)
- LOG.error(traceback.format_exc())
-
- return exitcode, output
-
-
-def _monitor_process(config, queue, event):
-
- total_time = 0
- outage_time = 0
- total_count = 0
- outage_count = 0
- first_outage = 0
- last_outage = 0
-
- wait_time = config.get("duration", 0)
- cmd = config.get("monitor_cmd", None)
- if cmd is None:
- LOG.error("There are no monitor cmd!")
- return
-
- queue.put("started")
-
- begin_time = time.time()
- while True:
-
- total_count = total_count + 1
-
- one_check_begin_time = time.time()
- exit_status, stdout = _execute_shell_command(cmd)
- one_check_end_time = time.time()
-
- LOG.info("the exit_status:%s stdout:%s" % (exit_status, stdout))
- if exit_status:
- outage_count = outage_count + 1
-
- outage_time = outage_time + (
- one_check_end_time - one_check_begin_time)
-
- if not first_outage:
- first_outage = one_check_begin_time
-
- last_outage = one_check_end_time
-
- if event.is_set():
- LOG.debug("the monitor process stop")
- break
-
- if wait_time > 0:
- time.sleep(wait_time)
-
- end_time = time.time()
- total_time = end_time - begin_time
-
- queue.put({"total_time": total_time,
- "outage_time": last_outage-first_outage,
- "total_count": total_count,
- "outage_count": outage_count})
-
-
-class Monitor:
-
- def __init__(self):
- self._result = []
- self._monitor_process = []
-
- def setup(self, config):
- self._config = config
-
- def start(self):
- self._queue = multiprocessing.Queue()
- self._event = multiprocessing.Event()
- self._monitor_process = multiprocessing.Process(
- target=_monitor_process, name="Monitor",
- args=(self._config, self._queue, self._event))
-
- self._monitor_process.start()
- ret = self._queue.get()
- if ret == "started":
- LOG.debug("monitor process started!")
-
- def stop(self):
- self._event.set()
- self._result = self._queue.get()
- LOG.debug("stop the monitor process. the result:%s" % self._result)
-
- def get_result(self):
- return self._result
diff --git a/yardstick/benchmark/scenarios/availability/monitor/__init__.py b/yardstick/benchmark/scenarios/availability/monitor/__init__.py
new file mode 100755
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
new file mode 100644
index 000000000..983c3a3ac
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import multiprocessing
+import time
+import os
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+monitor_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "monitor_conf.yaml")
+
+
+class MonitorMgr(object):
+ """docstring for MonitorMgr"""
+ def __init__(self):
+ self._monitor_list = []
+
+ def init_monitors(self, monitor_cfgs, context):
+ LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+
+ for monitor_cfg in monitor_cfgs:
+ monitor_type = monitor_cfg["monitor_type"]
+ monitor_cls = BaseMonitor.get_monitor_cls(monitor_type)
+ monitor_ins = monitor_cls(monitor_cfg, context)
+
+ self._monitor_list.append(monitor_ins)
+
+ def start_monitors(self):
+ for _monitor_instance in self._monitor_list:
+ _monitor_instance.start_monitor()
+
+ def wait_monitors(self):
+ for monitor in self._monitor_list:
+ monitor.wait_monitor()
+
+ def verify_SLA(self):
+ sla_pass = True
+ for monitor in self._monitor_list:
+ sla_pass = sla_pass & monitor.verify_SLA()
+ return sla_pass
+
+
+class BaseMonitor(multiprocessing.Process):
+ """docstring for BaseMonitor"""
+
+ def __init__(self, config, context):
+ multiprocessing.Process.__init__(self)
+ self._config = config
+ self._context = context
+ self._queue = multiprocessing.Queue()
+ self._event = multiprocessing.Event()
+ self.setup_done = False
+
+ @staticmethod
+ def get_monitor_cls(monitor_type):
+ '''return monitor class of specified type'''
+
+ for monitor in utils.itersubclasses(BaseMonitor):
+ if monitor_type == monitor.__monitor_type__:
+ return monitor
+ raise RuntimeError("No such monitor_type %s" % monitor_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(monitor_conf_path)
+ return os.path.join(base_path, path)
+
+ def run(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+
+ self.setup()
+ monitor_time = self._config.get("monitor_time", 0)
+
+ total_time = 0
+ outage_time = 0
+ total_count = 0
+ outage_count = 0
+ first_outage = 0
+ last_outage = 0
+
+ begin_time = time.time()
+ while True:
+ total_count = total_count + 1
+
+ one_check_begin_time = time.time()
+ exit_status = self.monitor_func()
+ one_check_end_time = time.time()
+
+ if exit_status is False:
+ outage_count = outage_count + 1
+
+ outage_time = outage_time + (
+ one_check_end_time - one_check_begin_time)
+
+ if not first_outage:
+ first_outage = one_check_begin_time
+
+ last_outage = one_check_end_time
+
+ if self._event.is_set():
+ LOG.debug("the monitor process stop")
+ break
+
+ if one_check_end_time - begin_time > monitor_time:
+ LOG.debug("the monitor max_time finished and exit!")
+ break
+
+ end_time = time.time()
+ total_time = end_time - begin_time
+
+ self._queue.put({"total_time": total_time,
+ "outage_time": last_outage-first_outage,
+ "total_count": total_count,
+ "outage_count": outage_count})
+
+ def start_monitor(self):
+ self.start()
+
+ def wait_monitor(self):
+ self.join()
+ self._result = self._queue.get()
+ LOG.debug("the monitor result:%s" % self._result)
+
+ def setup(self): # pragma: no cover
+ pass
+
+ def monitor_func(self): # pragma: no cover
+ pass
+
+ def verify_SLA(self):
+ pass
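Note: new monitor types plug in the same way as attackers: subclass BaseMonitor, set __monitor_type__, and implement setup/monitor_func/verify_SLA. A minimal illustrative subclass (class and type name are assumptions; the file would sit alongside basemonitor.py):

    # Minimal illustrative monitor: monitor_func returning False counts as
    # an outage in BaseMonitor.run(); verify_SLA is consulted by MonitorMgr.
    import basemonitor

    class MonitorNoop(basemonitor.BaseMonitor):
        __monitor_type__ = "noop"

        def setup(self):
            self.setup_done = True

        def monitor_func(self):
            return True   # never report an outage

        def verify_SLA(self):
            return True   # nothing to verify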
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
new file mode 100644
index 000000000..c285024e1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -0,0 +1,108 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import subprocess
+import traceback
+import yardstick.ssh as ssh
+import basemonitor as basemonitor
+
+LOG = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command):
+ '''execute shell script with error handling'''
+ exitcode = 0
+ output = []
+ try:
+ output = subprocess.check_output(command, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ LOG.error("exec command '%s' error:\n " % command)
+ LOG.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+class MonitorOpenstackCmd(basemonitor.BaseMonitor):
+ """docstring for MonitorApi"""
+
+ __monitor_type__ = "openstack-cmd"
+
+ def setup(self):
+ self.connection = None
+ node_name = self._config.get("host", None)
+ if node_name:
+ host = self._context[node_name]
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.check_script = self.get_script_fullpath(
+ "ha_tools/check_openstack_cmd.bash")
+
+ self.cmd = self._config["command_name"]
+
+ def monitor_func(self):
+ exit_status = 0
+ if self.connection:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/bash -s '{0}'".format(self.cmd),
+ stdin=open(self.check_script, "r"))
+
+ LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
+ (exit_status, stdout, stderr))
+ else:
+ exit_status, stdout = _execute_shell_command(self.cmd)
+ if exit_status:
+ return False
+ return True
+
+ def verify_SLA(self):
+ outage_time = self._result.get('outage_time', None)
+ LOG.debug("the _result:%s" % self._result)
+ max_outage_time = self._config["sla"]["max_outage_time"]
+ if outage_time > max_outage_time:
+ LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ return False
+ else:
+ LOG.info("the sla is passed")
+ return True
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ip": "192.168.235.22",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ monitor_configs = []
+ config = {
+ 'monitor_type': 'openstack-cmd',
+ 'command_name': 'nova image-list',
+ 'monitor_time': 1,
+ 'host': 'node1',
+ 'sla': {'max_outage_time': 5}
+ }
+ monitor_configs.append(config)
+
+ p = basemonitor.MonitorMgr()
+ p.init_monitors(monitor_configs, context)
+ p.start_monitors()
+ p.wait_monitors()
+ p.verify_SLA()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
new file mode 100644
index 000000000..53a6d8e4d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -0,0 +1,81 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import yardstick.ssh as ssh
+
+import basemonitor as basemonitor
+
+LOG = logging.getLogger(__name__)
+
+
+class MonitorProcess(basemonitor.BaseMonitor):
+ """docstring for MonitorApi"""
+
+ __monitor_type__ = "process"
+
+ def setup(self):
+ host = self._context[self._config["host"]]
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+ self.check_script = self.get_script_fullpath(
+ "ha_tools/check_process_python.bash")
+ self.process_name = self._config["process_name"]
+
+ def monitor_func(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0}".format(self.process_name),
+ stdin=open(self.check_script, "r"))
+ if not stdout or int(stdout) <= 0:
+ LOG.info("the process (%s) is not running!" % self.process_name)
+ return False
+
+ return True
+
+ def verify_SLA(self):
+ LOG.debug("the _result:%s" % self._result)
+ outage_time = self._result.get('outage_time', None)
+ max_outage_time = self._config["sla"]["max_recover_time"]
+ if outage_time > max_outage_time:
+ LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ return False
+ else:
+ return True
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ monitor_configs = []
+ config = {
+ 'monitor_type': 'process',
+ 'process_name': 'nova-api',
+ 'host': "node1",
+ 'monitor_time': 1,
+ 'sla': {'max_recover_time': 5}
+ }
+ monitor_configs.append(config)
+
+ p = basemonitor.MonitorMgr()
+ p.init_monitors(monitor_configs, context)
+ p.start_monitors()
+ p.wait_monitors()
+ p.verify_SLA()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 3e03e1da5..aee94ee09 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -6,13 +6,10 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import pkg_resources
import logging
-import time
-import yaml
-import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base
-from yardstick.benchmark.scenarios.availability import monitor
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
LOG = logging.getLogger(__name__)
@@ -22,86 +19,34 @@ class ServiceHA(base.Scenario):
"""
__scenario_type__ = "ServiceHA"
- HA_CONF = "ha_tools/ha_conf.yaml"
-
def __init__(self, scenario_cfg, context_cfg):
+ LOG.debug(
+ "scenario_cfg:%s context_cfg:%s" %
+ (scenario_cfg, context_cfg))
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.service_name = scenario_cfg["options"]["component"]
- self.fault_type = scenario_cfg["options"]["fault_type"]
- self.fault_time = scenario_cfg["options"].get("fault_time", 0)
- self.fault_cfg = None
self.setup_done = False
- self.need_teardown = False
def setup(self):
'''scenario setup'''
- self.ha_conf_file = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability",
- ServiceHA.HA_CONF)
- ha_cfg = []
- with open(self.ha_conf_file) as stream:
- ha_cfg = yaml.load(stream)
- LOG.debug("ha_cfg content:%s" % ha_cfg)
-
- # check the ha_conf contains the service defined in test cases yaml
- service_cfg = ha_cfg.get(self.service_name, None)
- if not service_cfg:
- LOG.error(
- "The component %s can not be supported!" % self.service_name)
+ nodes = self.context_cfg.get("nodes", None)
+ if nodes is None:
+ LOG.error("the nodes info is none")
return
- for fault in service_cfg:
- if fault["type"] == self.fault_type:
- self.fault_cfg = fault
- break
- if not self.fault_cfg:
- LOG.error(
- "The fualt_type %s can not be supproted!" % self.fault_type)
- return
- LOG.debug("the fault_cfg :%s" % self.fault_cfg)
-
- self.fault_script = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability",
- self.fault_cfg["inject_script"])
- self.recovery_script = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability",
- self.fault_cfg["recovery_script"])
- self.check_script = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability",
- self.fault_cfg["check_script"])
-
- host = self.context_cfg.get("host", None)
- ip = host.get("ip", None)
- user = host.get("user", "root")
- key_filename = host.get("key_filename", "~/.ssh/id_rsa")
- LOG.info("The host: %s the service: %s" % (ip, self.service_name))
- LOG.debug("The params, host:%s fault_cfg:%s" % (host, self.fault_cfg))
+ self.attackers = []
+ attacker_cfgs = self.scenario_cfg["options"]["attackers"]
+ for attacker_cfg in attacker_cfgs:
+ attacker_cls = baseattacker.BaseAttacker.get_attacker_cls(
+ attacker_cfg)
+ attacker_ins = attacker_cls(attacker_cfg, nodes)
+ attacker_ins.setup()
+ self.attackers.append(attacker_ins)
- LOG.debug(
- "ssh connection ip:%s, user:%s, key_file:%s",
- ip, user, key_filename)
- self.connection = ssh.SSH(user, ip, key_filename=key_filename)
- self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
-
- # check the host envrioment
- exit_status, stdout, stderr = self.connection.execute(
- "/bin/sh -s {0}".format(self.service_name),
- stdin=open(self.check_script, "r"))
- LOG.info(
- "the exit_status:%s stdout:%s stderr:%s" %
- (exit_status, stdout, stderr))
- if exit_status:
- raise RuntimeError(stderr)
-
- if stdout and "running" in stdout:
- LOG.info("check the envrioment success!")
- else:
- LOG.error(
- "the host envrioment is error, stdout:%s, stderr:%s" %
- (stdout, stderr))
- return
+ monitor_cfgs = self.scenario_cfg["options"]["monitors"]
+
+ self.monitorMgr = basemonitor.MonitorMgr()
+ self.monitorMgr.init_monitors(monitor_cfgs, nodes)
self.setup_done = True
@@ -111,69 +56,51 @@ class ServiceHA(base.Scenario):
LOG.error("The setup not finished!")
return
- monitorInstance = monitor.Monitor()
- monitorInstance.setup(self.fault_cfg)
- monitorInstance.start()
+ self.monitorMgr.start_monitors()
LOG.info("monitor start!")
- LOG.info("Inject fault!")
- exit_status, stdout, stderr = self.connection.execute(
- "/bin/sh -s {0}".format(self.service_name),
- stdin=open(self.fault_script, "r"))
-
- if exit_status != 0:
- monitorInstance.stop()
- raise RuntimeError(stderr)
+ for attacker in self.attackers:
+ attacker.inject_fault()
- self.need_teardown = True
- time.sleep(self.fault_time)
-
- monitorInstance.stop()
+ self.monitorMgr.wait_monitors()
LOG.info("monitor stop!")
- ret = monitorInstance.get_result()
- LOG.info("The monitor result:%s" % ret)
- outage_time = ret.get("outage_time")
- result["outage_time"] = outage_time
- LOG.info("the result:%s" % result)
-
- if "sla" in self.scenario_cfg:
- sla_outage_time = int(self.scenario_cfg["sla"]["outage_time"])
- assert outage_time <= sla_outage_time, "outage_time %f > sla:outage_time(%f)" % \
- (outage_time, sla_outage_time)
+ sla_pass = self.monitorMgr.verify_SLA()
+ assert sla_pass is True, "the test case does not pass the SLA"
return
def teardown(self):
'''scenario teardown'''
- LOG.info("recory the everiment!")
-
- if self.need_teardown:
- exit_status, stdout, stderr = self.connection.execute(
- "/bin/sh -s {0} ".format(self.service_name),
- stdin=open(self.recovery_script, "r"))
+ for attacker in self.attackers:
+ attacker.recover()
- if exit_status:
- raise RuntimeError(stderr)
- else:
- self.need_teardown = False
-"""
-def _test():
+def _test(): # pragma: no cover
'''internal test function'''
host = {
"ip": "10.20.0.5",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
}
- ctx = {"host": host}
-
- logger = logging.getLogger("yardstick")
- logger.setLevel(logging.DEBUG)
+ ctx = {"nodes": {"node1": host}}
+ attacker_cfg = {
+ "fault_type": "kill-process",
+ "process_name": "nova-api",
+ "host": "node1"
+ }
+ attacker_cfgs = []
+ attacker_cfgs.append(attacker_cfg)
+ monitor_cfg = {
+ "monitor_cmd": "nova image-list"
+ }
+ monitor_cfgs = []
+ monitor_cfgs.append(monitor_cfg)
options = {
- "component": "nova-api",
- "fault_type": "stop-service"
+ "attackers": attacker_cfgs,
+ "wait_time": 10,
+ "monitors": monitor_cfgs
}
sla = {"outage_time": 5}
args = {"options": options, "sla": sla}
@@ -188,6 +115,5 @@ def _test():
terstInstance.teardown()
-if __name__ == '__main__':
+if __name__ == '__main__': # pragma: no cover
_test()
-"""
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index e8fc63cf7..478b0a1a2 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -9,6 +9,9 @@
import pkg_resources
import logging
import json
+import re
+import time
+import os
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base
@@ -53,30 +56,104 @@ class Cyclictest(base.Scenario):
__scenario_type__ = "Cyclictest"
TARGET_SCRIPT = "cyclictest_benchmark.bash"
+ WORKSPACE = "/root/workspace/"
+ REBOOT_CMD_PATTERN = r";\s*reboot\b"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.setup_done = False
- def setup(self):
- '''scenario setup'''
- self.target_script = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.compute",
- Cyclictest.TARGET_SCRIPT)
+ def _put_files(self, client):
+ setup_options = self.scenario_cfg["setup_options"]
+ rpm_dir = setup_options["rpm_dir"]
+ script_dir = setup_options["script_dir"]
+ image_dir = setup_options["image_dir"]
+ LOG.debug("Send RPMs from %s to workspace %s" %
+ (rpm_dir, self.WORKSPACE))
+ client.put(rpm_dir, self.WORKSPACE, recursive=True)
+ LOG.debug("Send scripts from %s to workspace %s" %
+ (script_dir, self.WORKSPACE))
+ client.put(script_dir, self.WORKSPACE, recursive=True)
+ LOG.debug("Send guest image from %s to workspace %s" %
+ (image_dir, self.WORKSPACE))
+ client.put(image_dir, self.WORKSPACE, recursive=True)
+
+ def _connect_host(self):
+ host = self.context_cfg["host"]
+ user = host.get("user", "root")
+ ip = host.get("ip", None)
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ LOG.debug("user:%s, host:%s", user, ip)
+ self.host = ssh.SSH(user, ip, key_filename=key_filename)
+ self.host.wait(timeout=600)
+
+ def _connect_guest(self):
host = self.context_cfg["host"]
user = host.get("user", "root")
ip = host.get("ip", None)
key_filename = host.get("key_filename", "~/.ssh/id_rsa")
LOG.debug("user:%s, host:%s", user, ip)
- print "key_filename:" + key_filename
- self.client = ssh.SSH(user, ip, key_filename=key_filename)
- self.client.wait(timeout=600)
+ self.guest = ssh.SSH(user, ip, port=5555, key_filename=key_filename)
+ self.guest.wait(timeout=600)
+
+ def _run_setup_cmd(self, client, cmd):
+ LOG.debug("Run cmd: %s" % cmd)
+ status, stdout, stderr = client.execute(cmd)
+ if status:
+ if re.search(self.REBOOT_CMD_PATTERN, cmd):
+ LOG.debug("Error on reboot")
+ else:
+ raise RuntimeError(stderr)
+
+ def _run_host_setup_scripts(self, scripts):
+ setup_options = self.scenario_cfg["setup_options"]
+ script_dir = os.path.basename(setup_options["script_dir"])
+
+ for script in scripts:
+ cmd = "cd %s/%s; export PATH=./:$PATH; %s" %\
+ (self.WORKSPACE, script_dir, script)
+ self._run_setup_cmd(self.host, cmd)
+
+ if re.search(self.REBOOT_CMD_PATTERN, cmd):
+ time.sleep(3)
+ self._connect_host()
+
+ def _run_guest_setup_scripts(self, scripts):
+ setup_options = self.scenario_cfg["setup_options"]
+ script_dir = os.path.basename(setup_options["script_dir"])
+
+ for script in scripts:
+ cmd = "cd %s/%s; export PATH=./:$PATH; %s" %\
+ (self.WORKSPACE, script_dir, script)
+ self._run_setup_cmd(self.guest, cmd)
+
+ if re.search(self.REBOOT_CMD_PATTERN, cmd):
+ time.sleep(3)
+ self._connect_guest()
+
+ def setup(self):
+ '''scenario setup'''
+ setup_options = self.scenario_cfg["setup_options"]
+ host_setup_seqs = setup_options["host_setup_seqs"]
+ guest_setup_seqs = setup_options["guest_setup_seqs"]
+
+ self._connect_host()
+ self._put_files(self.host)
+ self._run_host_setup_scripts(host_setup_seqs)
+
+ self._connect_guest()
+ self._put_files(self.guest)
+ self._run_guest_setup_scripts(guest_setup_seqs)
# copy script to host
- self.client.run("cat > ~/cyclictest_benchmark.sh",
- stdin=open(self.target_script, "rb"))
+ self.target_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.compute",
+ Cyclictest.TARGET_SCRIPT)
+ self.guest.run("cat > ~/cyclictest_benchmark.sh",
+ stdin=open(self.target_script, "rb"))
self.setup_done = True
@@ -98,9 +175,9 @@ class Cyclictest(base.Scenario):
cmd_args = "-a %s -i %s -p %s -l %s -t %s -h %s %s" \
% (affinity, interval, priority, loops,
threads, histogram, default_args)
- cmd = "sudo bash cyclictest_benchmark.sh %s" % (cmd_args)
+ cmd = "bash cyclictest_benchmark.sh %s" % (cmd_args)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, stdout, stderr = self.guest.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -121,7 +198,7 @@ class Cyclictest(base.Scenario):
assert sla_error == "", sla_error
-def _test():
+def _test(): # pragma: no cover
'''internal test function'''
key_filename = pkg_resources.resource_filename("yardstick.resources",
"files/yardstick_key")
@@ -159,5 +236,5 @@ def _test():
cyclictest.run(result)
print result
-if __name__ == '__main__':
+if __name__ == '__main__': # pragma: no cover
_test()
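Note: an illustrative setup_options block for the reworked Cyclictest setup flow; directory paths and script names are assumptions. Any script whose composed command matches REBOOT_CMD_PATTERN ("; reboot") makes the scenario sleep and reconnect instead of treating the dropped connection as an error:

    # Illustrative setup_options; files are pushed to /root/workspace/ first.
    setup_options = {
        'rpm_dir': '/opt/rpm',
        'script_dir': '/opt/scripts',
        'image_dir': '/opt/image',
        'host_setup_seqs': ['host-setup0.sh', 'reboot', 'host-setup1.sh'],
        'guest_setup_seqs': ['guest-setup0.sh', 'reboot', 'guest-setup1.sh'],
    }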
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index b9adf5079..e15fe7eb4 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -31,9 +31,9 @@ class Lmbench(base.Scenario):
unit: bytes
default: 128
stop_size - maximum array size to test (minimum value is 0.000512)
- type: int
+ type: float
unit: megabytes
- default: 16
+ default: 16.0
Results are accurate to the ~2-5 nanosecond range.
@@ -98,8 +98,8 @@ class Lmbench(base.Scenario):
if test_type == 'latency':
stride = options.get('stride', 128)
- stop_size = options.get('stop_size', 16)
- cmd = "sudo bash lmbench_latency.sh %d %d" % (stop_size, stride)
+ stop_size = options.get('stop_size', 16.0)
+ cmd = "sudo bash lmbench_latency.sh %f %d" % (stop_size, stride)
elif test_type == 'bandwidth':
size = options.get('size', 128)
benchmark = options.get('benchmark', 'rd')
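Note: the int-to-float change is what makes the documented minimum stop_size of 0.000512 MB usable; with "%d" that value was silently truncated to zero:

    print("sudo bash lmbench_latency.sh %d 128" % 0.000512)  # ... 0 128 (broken)
    print("sudo bash lmbench_latency.sh %f 128" % 0.000512)  # ... 0.000512 128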
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
new file mode 100644
index 000000000..e6318b92e
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/unixbench.py
@@ -0,0 +1,156 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Unixbench(base.Scenario):
+ """Execute Unixbench cpu benchmark in a host
+ The Run script takes a number of options which you can use to customise a
+ test, and you can specify the names of the tests to run. The full usage
+ is:
+
+ Run [ -q | -v ] [-i <n> ] [-c <n> [-c <n> ...]] [test ...]
+
+ -i <count> Run <count> iterations for each test -- slower tests
+ use <count> / 3, but at least 1. Defaults to 10 (3 for
+ slow tests).
+ -c <n> Run <n> copies of each test in parallel.
+
+ Parameters for setting unixbench
+ run_mode - Run in quiet mode or verbose mode
+ type: string
+ unit: None
+ default: None
+ test_type - The available tests are organised into categories;
+ type: string
+ unit: None
+ default: None
+ iterations - Run <count> iterations for each test -- slower tests
+ use <count> / 3, but at least 1. Defaults to 10 (3 for slow tests).
+ type: int
+ unit: None
+ default: None
+ copies - Run <n> copies of each test in parallel.
+ type: int
+ unit: None
+ default: None
+
+ more info https://github.com/kdlucas/byte-unixbench/blob/master/UnixBench
+ """
+ __scenario_type__ = "UnixBench"
+
+ TARGET_SCRIPT = "unixbench_benchmark.bash"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+ self.target_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.compute",
+ Unixbench.TARGET_SCRIPT)
+
+ host = self.context_cfg["host"]
+ user = host.get("user", "ubuntu")
+ ip = host.get("ip", None)
+ key_filename = host.get('key_filename', "~/.ssh/id_rsa")
+
+ LOG.info("user:%s, host:%s", user, ip)
+ self.client = ssh.SSH(user, ip, key_filename=key_filename)
+ self.client.wait(timeout=600)
+
+ # copy scripts to host
+ self.client.run("cat > ~/unixbench_benchmark.sh",
+ stdin=open(self.target_script, 'rb'))
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+
+ if not self.setup_done:
+ self.setup()
+
+ options = self.scenario_cfg["options"]
+
+ run_mode = options.get("run_mode", None)
+ LOG.debug("Executing run_mode: %s", run_mode)
+ cmd_args = ""
+ if run_mode == "quiet":
+ cmd_args = "-q"
+ elif run_mode == "verbose":
+ cmd_args = "-v"
+
+ option_pair_list = [("iterations", "-i"),
+ ("copies", "-c")]
+ for option_pair in option_pair_list:
+ if option_pair[0] in options:
+ cmd_args += " %s %s " % (option_pair[1],
+ options[option_pair[0]])
+
+ test_type = options.get("test_type", None)
+ if test_type is not None:
+ cmd_args += " %s " % (test_type)
+
+ cmd = "sudo bash unixbench_benchmark.sh %s" % (cmd_args)
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ result.update(json.loads(stdout))
+
+ if "sla" in self.scenario_cfg:
+ sla_error = ""
+ for t, score in result.items():
+ if t not in self.scenario_cfg['sla']:
+ continue
+ sla_score = float(self.scenario_cfg['sla'][t])
+ score = float(score)
+ if score < sla_score:
+ sla_error += "%s score %f < sla:%s_score(%f); " % \
+ (t, score, t, sla_score)
+ assert sla_error == "", sla_error
+
+
+def _test(): # pragma: no cover
+ """internal test function"""
+ key_filename = pkg_resources.resource_filename('yardstick.resources',
+ 'files/yardstick_key')
+ ctx = {
+ 'host': {
+ 'ip': '10.229.47.137',
+ 'user': 'root',
+ 'key_filename': key_filename
+ }
+ }
+
+ options = {
+ 'test_type': 'dhrystone',
+ 'run_mode': 'verbose'
+ }
+
+ args = {'options': options}
+ result = {}
+
+ p = Unixbench(args, ctx)
+ p.run(result)
+ print result
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
new file mode 100644
index 000000000..5a5dbc394
--- /dev/null
+++ b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+OPTIONS="$@"
+OUTPUT_FILE=/tmp/unixbench-out.log
+
+# run unixbench test
+run_unixbench()
+{
+ cd /opt/tempT/UnixBench/
+ ./Run $OPTIONS > $OUTPUT_FILE
+}
+
+# write the result to stdout in json format
+output_json()
+{
+ single_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | head -1 )
+ parallel_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | tail -1 )
+ echo -e "{ \
+ \"single_score\":\"$single_score\", \
+ \"parallel_score\":\"$parallel_score\" \
+ }"
+}
+
+# main entry
+main()
+{
+ # run the test
+ run_unixbench
+
+ # output result
+ output_json
+}
+
+main
diff --git a/yardstick/benchmark/scenarios/dummy/__init__.py b/yardstick/benchmark/scenarios/dummy/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/dummy/__init__.py
diff --git a/yardstick/benchmark/scenarios/dummy/dummy.py b/yardstick/benchmark/scenarios/dummy/dummy.py
new file mode 100644
index 000000000..de6742c40
--- /dev/null
+++ b/yardstick/benchmark/scenarios/dummy/dummy.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Dummy(base.Scenario):
+ """Execute Dummy echo
+ """
+ __scenario_type__ = "Dummy"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+ if not self.setup_done:
+ self.setup()
+
+ result["hello"] = "yardstick"
+ LOG.info("Dummy echo hello yardstick!")
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
new file mode 100644
index 000000000..629f62be5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -0,0 +1,117 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import pkg_resources
+import logging
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Ping6(base.Scenario): # pragma: no cover
+ """Execute ping6 between two hosts
+
+ read link below for more ipv6 info description:
+ http://wiki.opnfv.org/ipv6_opnfv_project
+ """
+ __scenario_type__ = "Ping6"
+
+ TARGET_SCRIPT = 'ping6_benchmark.bash'
+ SETUP_SCRIPT = 'ping6_setup.bash'
+ TEARDOWN_SCRIPT = 'ping6_teardown.bash'
+ METADATA_SCRIPT = 'ping6_metadata.txt'
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+ self.run_done = False
+
+ def _ssh_host(self):
+ # ssh host1
+ host = self.context_cfg['host']
+ host_user = host.get('user', 'ubuntu')
+ host_ip = host.get('ip', None)
+ host_pwd = host.get('password', 'root')
+ LOG.info("user:%s, host:%s", host_user, host_ip)
+ self.client = ssh.SSH(host_user, host_ip, password=host_pwd)
+ self.client.wait(timeout=600)
+
+ def setup(self):
+ '''scenario setup'''
+ self.setup_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.SETUP_SCRIPT)
+
+ self.ping6_metadata_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.METADATA_SCRIPT)
+ # ssh host1
+ self._ssh_host()
+ # run script to setup ipv6
+ self.client.run("cat > ~/setup.sh",
+ stdin=open(self.setup_script, "rb"))
+ self.client.run("cat > ~/metadata.txt",
+ stdin=open(self.ping6_metadata_script, "rb"))
+ cmd = "sudo bash setup.sh"
+ status, stdout, stderr = self.client.execute(cmd)
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+ # ssh vm1
+ self.ping6_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.TARGET_SCRIPT)
+
+ if not self.setup_done:
+ self._ssh_host()
+
+ self.client.run("cat > ~/ping6.sh",
+ stdin=open(self.ping6_script, "rb"))
+ cmd = "sudo bash ping6.sh"
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ print stdout
+ if status:
+ raise RuntimeError(stderr)
+
+ if stdout:
+ result["rtt"] = float(stdout)
+
+ if "sla" in self.scenario_cfg:
+ sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
+ assert result["rtt"] <= sla_max_rtt, "rtt %f > sla:max_rtt(%f); " % \
+ (result["rtt"], sla_max_rtt)
+ else:
+ LOG.error("ping6 timeout")
+ self.run_done = True
+
+ def teardown(self):
+ """teardown the benchmark"""
+
+ if not self.run_done:
+ self._ssh_host()
+
+ self.teardown_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.TEARDOWN_SCRIPT)
+ self.client.run("cat > ~/teardown.sh",
+ stdin=open(self.teardown_script, "rb"))
+ cmd = "sudo bash teardown.sh"
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ if not stdout:
+ LOG.error("ping6 teardown failed")
diff --git a/yardstick/benchmark/scenarios/networking/ping6_benchmark.bash b/yardstick/benchmark/scenarios/networking/ping6_benchmark.bash
new file mode 100644
index 000000000..6df354a1b
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_benchmark.bash
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run a single ping6 command towards an ipv6 router
+
+set -e
+
+# TODO find host
+sudo ip netns exec qdhcp-$(neutron net-list | grep -w ipv4-int-network1 | awk '{print $2}') bash
+# TODO find VM ip
+ssh -i ~/vRouterKey fedora@20.0.0.4
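+# The ping6 reply line looks like (illustrative):
+#   "64 bytes from 2001:db8:0:1::1: icmp_seq=1 ttl=64 time=0.308 ms"
+# With -F [=\ ] the time value is field $10, so only "0.308" is printed.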
+ping6 -c 1 2001:db8:0:1::1 | grep ttl | awk -F [=\ ] '{printf $10}'
diff --git a/yardstick/benchmark/scenarios/networking/ping6_metadata.txt b/yardstick/benchmark/scenarios/networking/ping6_metadata.txt
new file mode 100644
index 000000000..5dc08d30f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_metadata.txt
@@ -0,0 +1,82 @@
+#cloud-config
+bootcmd:
+ - /usr/sbin/ifdown eth0
+ - /usr/sbin/ifup eth0
+ - /usr/sbin/ifdown eth1
+ - ip link set dev eth0 mtu 1300
+ - ip link set dev eth1 mtu 1300
+ - /usr/sbin/ifup eth1
+ - ip link set dev eth0 mtu 1300
+ - ip link set dev eth1 mtu 1300
+ - setenforce 0
+ - /sbin/sysctl -w net.ipv6.conf.all.forwarding=1
+ - /sbin/sysctl -w net.ipv6.conf.eth0.accept_ra=2
+ - /sbin/sysctl -w net.ipv6.conf.eth0.accept_ra_defrtr=1
+ - /sbin/sysctl -w net.ipv6.conf.eth0.router_solicitations=1
+packages:
+ - radvd
+runcmd:
+ - /usr/sbin/ifdown eth1
+ - /usr/sbin/ifup eth1
+ - ip link set dev eth0 mtu 1300
+ - ip link set dev eth1 mtu 1300
+ - /usr/bin/systemctl disable NetworkManager
+ - /usr/bin/systemctl start radvd
+ - echo 'complete' >> /tmp/cloud-config.log
+write_files:
+ - content: |
+ TYPE="Ethernet"
+ BOOTPROTO="dhcp"
+ DEFROUTE="yes"
+ PEERDNS="yes"
+ PEERROUTES="yes"
+ IPV4_FAILURE_FATAL="no"
+ IPV6INIT="yes"
+ IPV6_AUTOCONF="yes"
+ IPV6_DEFROUTE="yes"
+ IPV6_PEERROUTES="yes"
+ IPV6_PEERDNS="yes"
+ IPV6_FAILURE_FATAL="no"
+ NAME="eth0"
+ DEVICE="eth0"
+ ONBOOT="yes"
+ path: /etc/sysconfig/network-scripts/ifcfg-eth0
+ permissions: '0755'
+ owner: root:root
+ - content: |
+ TYPE="Ethernet"
+ BOOTPROTO=static
+ IPV6INIT=yes
+ IPV6ADDR="2001:db8:0:2::1/64"
+ NAME=eth1
+ DEVICE=eth1
+ ONBOOT=yes
+ NM_CONTROLLED=no
+ path: /etc/sysconfig/network-scripts/ifcfg-eth1
+ permissions: '0755'
+ owner: root:root
+ - content: |
+ interface eth1
+ {
+ AdvSendAdvert on;
+ MinRtrAdvInterval 3;
+ MaxRtrAdvInterval 10;
+ AdvHomeAgentFlag off;
+ AdvManagedFlag on;
+ AdvOtherConfigFlag on;
+ prefix 2001:db8:0:2::/64
+ {
+ AdvOnLink on;
+ ### On link tells the host that the default router is on the same "link" as it is
+ AdvAutonomous on;
+ AdvRouterAddr off;
+ };
+ };
+ path: /etc/radvd.conf
+ permissions: '0644'
+ owner: root:root
+ - content: |
+ IPV6FORWARDING=yes
+ path: /etc/sysconfig/network
+ permissions: '0644'
+ owner: root:root \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/networking/ping6_setup.bash b/yardstick/benchmark/scenarios/networking/ping6_setup.bash
new file mode 100644
index 000000000..2a54da2ba
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_setup.bash
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# download and create image
+source /opt/admin-openrc.sh
+wget https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-22-20150521.x86_64.qcow2
+glance image-create --name 'Fedora22' --disk-format qcow2 \
+--container-format bare --file ./Fedora-Cloud-Base-22-20150521.x86_64.qcow2
+
+# create external network
+neutron net-create net04_ext --router:external --provider:physical_network physnet \
+--provider:network_type vlan --provider:segmentation_id 1411
+neutron subnet-create net04_ext 10.145.140.0/24 --name net04_ext__subnet \
+--allocation-pool start=10.145.140.13,end=10.145.140.20 --disable-dhcp --gateway 10.145.140.1
+
+# create router
+neutron router-create ipv4-router
+neutron router-create ipv6-router
+
+
+# create ipv4 and ipv6 internal networks
+neutron net-create --port_security_enabled=False ipv4-int-network1
+neutron net-create --port_security_enabled=False ipv6-int-network2
+
+# Create IPv4 subnet and associate it to ipv4-router
+neutron subnet-create --name ipv4-int-subnet1 \
+--dns-nameserver 8.8.8.8 ipv4-int-network1 20.0.0.0/24
+neutron router-interface-add ipv4-router ipv4-int-subnet1
+
+# Associate the net04_ext to the Neutron routers
+neutron router-gateway-set ipv6-router net04_ext
+neutron router-gateway-set ipv4-router net04_ext
+
+# Create two subnets, one IPv4 subnet ipv4-int-subnet2 and
+# one IPv6 subnet ipv6-int-subnet2 in ipv6-int-network2, and associate both subnets to ipv6-router
+neutron subnet-create --name ipv4-int-subnet2 --dns-nameserver 8.8.8.8 ipv6-int-network2 10.0.0.0/24
+neutron subnet-create --name ipv6-int-subnet2 \
+ --ip-version 6 --ipv6-ra-mode slaac --ipv6-address-mode slaac ipv6-int-network2 2001:db8:0:1::/64
+
+
+neutron router-interface-add ipv6-router ipv4-int-subnet2
+neutron router-interface-add ipv6-router ipv6-int-subnet2
+
+
+# create key
+nova keypair-add vRouterKey > ~/vRouterKey
+
+# Create ports for vRouter
+neutron port-create --name eth0-vRouter --mac-address fa:16:3e:11:11:11 ipv6-int-network2
+neutron port-create --name eth1-vRouter --mac-address fa:16:3e:22:22:22 ipv4-int-network1
+
+# Create ports for VM1 and VM2.
+neutron port-create --name eth0-VM1 --mac-address fa:16:3e:33:33:33 ipv4-int-network1
+neutron port-create --name eth0-VM2 --mac-address fa:16:3e:44:44:44 ipv4-int-network1
+
+# Update ipv6-router with routing information to subnet 2001:db8:0:2::/64
+neutron router-update ipv6-router \
+ --routes type=dict list=true destination=2001:db8:0:2::/64,nexthop=2001:db8:0:1:f816:3eff:fe11:1111
+
+# vRouter boot
+nova boot --image Fedora22 --flavor m1.small \
+--user-data ./metadata.txt \
+--nic port-id=$(neutron port-list | grep -w eth0-vRouter | awk '{print $2}') \
+--nic port-id=$(neutron port-list | grep -w eth1-vRouter | awk '{print $2}') \
+--key-name vRouterKey vRouter
+
+# VM create
+nova boot --image Fedora22 --flavor m1.small \
+--nic port-id=$(neutron port-list | grep -w eth0-VM1 | awk '{print $2}') \
+--key-name vRouterKey VM1
+
+nova boot --image Fedora22 --flavor m1.small \
+--nic port-id=$(neutron port-list | grep -w eth0-VM2 | awk '{print $2}') \
+--key-name vRouterKey VM2
+
+nova list
diff --git a/yardstick/benchmark/scenarios/networking/ping6_teardown.bash b/yardstick/benchmark/scenarios/networking/ping6_teardown.bash
new file mode 100644
index 000000000..7ab145523
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_teardown.bash
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# delete VM
+nova delete VM1
+nova delete VM2
+nova delete vRouter
+#clear routes
+neutron router-update ipv6-router --routes action=clear
+
+#VM1,VM2 port delete
+neutron port-delete eth0-VM1
+neutron port-delete eth0-VM2
+
+#vRouter port delete
+neutron port-delete eth0-vRouter
+neutron port-delete eth1-vRouter
+
+#delete key
+nova keypair-delete vRouterKey
+
+#delete ipv6 router interface
+neutron router-interface-delete ipv6-router ipv6-int-subnet2
+neutron router-interface-delete ipv6-router ipv4-int-subnet2
+
+#delete subnet
+neutron subnet-delete ipv6-int-subnet2
+neutron subnet-delete ipv4-int-subnet2
+
+#clear gateway
+neutron router-gateway-clear ipv4-router net04_ext
+neutron router-gateway-clear ipv6-router net04_ext
+
+#delete ipv4 router interface
+neutron router-interface-delete ipv4-router ipv4-int-subnet1
+neutron subnet-delete ipv4-int-subnet1
+
+#delete network
+neutron net-delete ipv6-int-network2
+neutron net-delete ipv4-int-network1
+
+# delete router
+neutron router-delete ipv4-router
+neutron router-delete ipv6-router
+
+# delete ext net
+neutron subnet-delete net04_ext__subnet
+neutron net-delete net04_ext
+
+# delete glance image
+glance --os-image-api-version 1 image-delete Fedora22 \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py
new file mode 100644
index 000000000..509fa847b
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py
@@ -0,0 +1,85 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import experimental_framework.api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcInstantiationValidation(base.Scenario):
+ """Execute the Instantiation Validation TC on the vTC
+ """
+ __scenario_type__ = "vtc_instantiation_validation"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
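+
+ # The options dict comes from the task file; illustrative values
+ # (names as read by this scenario, values as in apexlake.conf):
+ # default_net_name: monitoring
+ # default_subnet_name: monitoring_subnet
+ # vnic_type: normal
+ # vtc_flavor: m1.small
+ # vlan_sender: 1000
+ # vlan_receiver: 1001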
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'instantiation_validation_benchmark.' \
+ 'InstantiationValidationBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['throughput'] = '1'
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e.message))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py
new file mode 100644
index 000000000..4834a5fc7
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py
@@ -0,0 +1,92 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import experimental_framework.api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcInstantiationValidationNoisy(base.Scenario):
+ """Execute the Instantiation Validation TC on the vTC with noisy neighbors
+ """
+ __scenario_type__ = "vtc_instantiation_validation_noisy"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'instantiation_validation_noisy_neighbors_' \
+ 'benchmark.' \
+ 'InstantiationValidationNoisyNeighborsBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['throughput'] = '1'
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+ test_case['params']['num_of_neighbours'] = \
+ str(self.options['num_of_neighbours'])
+ test_case['params']['amount_of_ram'] = \
+ str(self.options['amount_of_ram'])
+ test_case['params']['number_of_cores'] = \
+ str(self.options['number_of_cores'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e.message))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/networking/vtc_throughput.py b/yardstick/benchmark/scenarios/networking/vtc_throughput.py
new file mode 100644
index 000000000..fe7a88470
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_throughput.py
@@ -0,0 +1,85 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+from experimental_framework import api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcThroughput(base.Scenario):
+ """Execute the RFC 2544 throughput TC on the vTC
+ """
+ __scenario_type__ = "vtc_throughput"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'rfc2544_throughput_benchmark.' \
+ 'RFC2544ThroughputBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['packet_size'] = str(self.options['packet_size'])
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e.message))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py b/yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py
new file mode 100644
index 000000000..ad3832fb5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py
@@ -0,0 +1,91 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import experimental_framework.api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcThroughputNoisy(base.Scenario):
+ """Execute the throughput TC on the vTC with noisy neighbors
+ """
+ __scenario_type__ = "vtc_throughput_noisy"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'multi_tenancy_throughput_benchmark.' \
+ 'MultiTenancyThroughputBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['packet_size'] = str(self.options['packet_size'])
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+ test_case['params']['num_of_neighbours'] = \
+ str(self.options['num_of_neighbours'])
+ test_case['params']['amount_of_ram'] = \
+ str(self.options['amount_of_ram'])
+ test_case['params']['number_of_cores'] = \
+ str(self.options['number_of_cores'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e.message))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/parser/__init__.py b/yardstick/benchmark/scenarios/parser/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/__init__.py
diff --git a/yardstick/benchmark/scenarios/parser/parser.py b/yardstick/benchmark/scenarios/parser/parser.py
new file mode 100644
index 000000000..006258d05
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser.py
@@ -0,0 +1,80 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import subprocess
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Parser(base.Scenario):
+ """Run the Parser Yang-to-Tosca module as a tool and
+ validate its output against the expected outcome.
+
+ more info https://wiki.opnfv.org/parser
+ """
+ __scenario_type__ = "Parser"
+
+ SETUP_SCRIPT = "parser_setup.sh"
+ TEARDOWN_SCRIPT = "parser_teardown.sh"
+ PARSER_SCRIPT = "parser.sh"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+ self.setup_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.parser",
+ Parser.SETUP_SCRIPT)
+ cmd = "%s" % (self.setup_script)
+
+ subprocess.call(cmd, shell=True)
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the translation"""
+ options = self.scenario_cfg['options']
+ yangfile = options.get("yangfile", '~/yardstick/samples/yang.yaml')
+ toscafile = options.get("toscafile", '~/yardstick/samples/tosca.yaml')
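+ # e.g. options in the task file (see samples/parser.yaml; the
+ # defaults above serve as illustrative paths):
+ # options:
+ # yangfile: ~/yardstick/samples/yang.yaml
+ # toscafile: ~/yardstick/samples/tosca.yaml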
+
+ self.parser_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.parser",
+ Parser.PARSER_SCRIPT)
+
+ if not self.setup_done:
+ self.setup()
+
+ cmd1 = "%s %s %s" % (self.parser_script, yangfile, toscafile)
+ cmd2 = "chmod 777 %s" % (self.parser_script)
+ subprocess.call(cmd2, shell=True)
+ output = subprocess.call(cmd1, shell=True)
+ print "yangtotosca finished"
+
+ result['yangtotosca'] = "success" if output == 0 else "fail"
+
+ def teardown(self):
+ ''' for scenario teardown remove parser and pyang '''
+ self.teardown_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.parser",
+ Parser.TEARDOWN_SCRIPT)
+ subprocess.call(self.teardown_script, shell=True)
+ self.teardown_done = True
+
+
+def _test():
+ '''internal test function'''
+ pass
+
+if __name__ == '__main__':
+ _test()
diff --git a/yardstick/benchmark/scenarios/parser/parser.sh b/yardstick/benchmark/scenarios/parser/parser.sh
new file mode 100755
index 000000000..4408e637c
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+yangfile=$1
+base_dir=$(dirname $yangfile)
+shift
+toscafile=$1
+OUTPUT_FILE=/tmp/parser-out.log
+
+# run parser test
+run_parser()
+{
+ cd /tmp/parser/yang2tosca
+ python tosca_translator.py --filename $yangfile > $OUTPUT_FILE
+}
+
+# compare the generated tosca file with the expected one
+check_result()
+{
+
+ if (diff -q $toscafile ${yangfile%'.yaml'}"_tosca.yaml" >> $OUTPUT_FILE);
+ then
+ exit 0
+ else
+ exit 1
+ fi
+
+}
+
+# main entry
+main()
+{
+ # run the test
+ run_parser
+
+ # output result
+ check_result
+}
+
+main
diff --git a/yardstick/benchmark/scenarios/parser/parser_setup.sh b/yardstick/benchmark/scenarios/parser/parser_setup.sh
new file mode 100755
index 000000000..44356447d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser_setup.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+git clone https://github.com/mbj4668/pyang.git /tmp/pyang
+cd /tmp/pyang
+python setup.py install
+git clone https://gerrit.opnfv.org/gerrit/parser /tmp/parser
+
diff --git a/yardstick/benchmark/scenarios/parser/parser_teardown.sh b/yardstick/benchmark/scenarios/parser/parser_teardown.sh
new file mode 100755
index 000000000..727e9decd
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser_teardown.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+rm -rf /tmp/pyang
+rm -rf /tmp/parser
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index a56824aac..17e8f4c42 100755
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -16,7 +16,9 @@ import atexit
import ipaddress
import time
import logging
+import uuid
from itertools import ifilter
+
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.runners import base as base_runner
from yardstick.common.task_template import TaskTemplate
@@ -81,7 +83,9 @@ class TaskCommands(object):
for i in range(0, len(task_files)):
one_task_start_time = time.time()
parser.path = task_files[i]
- scenarios, run_in_parallel = parser.parse_task(task_args[i],
+ task_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+ scenarios, run_in_parallel = parser.parse_task(task_name,
+ task_args[i],
task_args_fnames[i])
self._run(scenarios, run_in_parallel, args.output_file)
@@ -199,7 +203,7 @@ class TaskParser(object):
return suite_params
- def parse_task(self, task_args=None, task_args_file=None):
+ def parse_task(self, task_name, task_args=None, task_args_file=None):
'''parses the task file and returns context and scenario instances'''
print "Parsing task config:", self.path
@@ -233,8 +237,10 @@ class TaskParser(object):
# TODO: support hybrid context type
if "context" in cfg:
context_cfgs = [cfg["context"]]
- else:
+ elif "contexts" in cfg:
context_cfgs = cfg["contexts"]
+ else:
+ context_cfgs = [{"type": "Dummy"}]
for cfg_attrs in context_cfgs:
context_type = cfg_attrs.get("type", "Heat")
@@ -248,6 +254,12 @@ class TaskParser(object):
run_in_parallel = cfg.get("run_in_parallel", False)
+ # add tc and task id for influxdb extended tags
+ task_id = str(uuid.uuid4())
+ for scenario in cfg["scenarios"]:
+ scenario["tc"] = task_name
+ scenario["task_id"] = task_id
+
# TODO we need something better here, a class that represent the file
return cfg["scenarios"], run_in_parallel
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
new file mode 100644
index 000000000..2f3ff089f
--- /dev/null
+++ b/yardstick/dispatcher/influxdb.py
@@ -0,0 +1,149 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import json
+import logging
+import requests
+import time
+
+from oslo_config import cfg
+
+from yardstick.dispatcher.base import Base as DispatchBase
+from yardstick.dispatcher.influxdb_line_protocol import make_lines
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+influx_dispatcher_opts = [
+ cfg.StrOpt('target',
+ default='http://127.0.0.1:8086',
+ help='The target where the http request will be sent. '
+ 'If this is not set, no data will be posted. For '
+ 'example: target = http://hostname:1234/path'),
+ cfg.StrOpt('db_name',
+ default='yardstick',
+ help='The database name to store test results.'),
+ cfg.IntOpt('timeout',
+ default=5,
+ help='The max time in seconds to wait for a request to '
+ 'timeout.'),
+]
+
+CONF.register_opts(influx_dispatcher_opts, group="dispatcher_influxdb")
+
+
+class InfluxdbDispatcher(DispatchBase):
+ """Dispatcher class for posting data into an influxdb target.
+ """
+
+ __dispatcher_type__ = "Influxdb"
+
+ def __init__(self, conf):
+ super(InfluxdbDispatcher, self).__init__(conf)
+ self.timeout = CONF.dispatcher_influxdb.timeout
+ self.target = CONF.dispatcher_influxdb.target
+ self.db_name = CONF.dispatcher_influxdb.db_name
+ self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name)
+ self.raw_result = []
+ self.case_name = ""
+ self.tc = ""
+ self.task_id = -1
+ self.static_tags = {
+ "pod_name": os.environ.get('POD_NAME', 'unknown'),
+ "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
+ "version": os.environ.get('YARDSTICK_VERSION', 'unknown')
+ }
+
+ def _dict_key_flatten(self, data):
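+ # Flatten nested dicts and lists into dotted / indexed keys, e.g.
+ # {"a": {"b": 1}, "c": [2, 3]} -> {"a.b": 1, "c0": 2, "c1": 3}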
+ next_data = {}
+
+ if not [v for v in data.values()
+ if type(v) == dict or type(v) == list]:
+ return data
+
+ for k, v in data.iteritems():
+ if type(v) == dict:
+ for n_k, n_v in v.iteritems():
+ next_data["%s.%s" % (k, n_k)] = n_v
+ elif type(v) == list:
+ for index, item in enumerate(v):
+ next_data["%s%d" % (k, index)] = item
+ else:
+ next_data[k] = v
+
+ return self._dict_key_flatten(next_data)
+
+ def _get_nano_timestamp(self, results):
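+ # Convert the benchmark timestamp to an InfluxDB nanosecond string,
+ # e.g. 1447000000.5 -> "1447000000500000000"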
+ try:
+ timestamp = results["benchmark"]["timestamp"]
+ except Exception:
+ timestamp = time.time()
+
+ return str(int(float(timestamp) * 1000000000))
+
+ def _get_extended_tags(self, data):
+ tags = {
+ "runner_id": data["runner_id"],
+ "tc": self.tc,
+ "task_id": self.task_id
+ }
+
+ return tags
+
+ def _data_to_line_protocol(self, data):
+ msg = {}
+ point = {}
+ point["measurement"] = self.case_name
+ point["fields"] = self._dict_key_flatten(data["benchmark"]["data"])
+ point["time"] = self._get_nano_timestamp(data)
+ point["tags"] = self._get_extended_tags(data)
+ msg["points"] = [point]
+ msg["tags"] = self.static_tags
+
+ return make_lines(msg).encode('utf-8')
+
+ def record_result_data(self, data):
+ LOG.debug('Test result : %s' % json.dumps(data))
+ self.raw_result.append(data)
+ if self.target == '':
+ # if the target was not set, do not do anything
+ LOG.error('Dispatcher target was not set, no data will '
+ 'be posted.')
+ return -1
+
+ if isinstance(data, dict) and "scenario_cfg" in data:
+ self.case_name = data["scenario_cfg"]["type"]
+ self.tc = data["scenario_cfg"]["tc"]
+ self.task_id = data["scenario_cfg"]["task_id"]
+ return 0
+
+ if self.case_name == "":
+ LOG.error('Test result : %s' % json.dumps(data))
+ LOG.error('The case_name cannot be found, no data will be posted.')
+ return -1
+
+ try:
+ line = self._data_to_line_protocol(data)
+ LOG.debug('Test result line format : %s' % line)
+ res = requests.post(self.influxdb_url,
+ data=line,
+ timeout=self.timeout)
+ if res.status_code != 204:
+ LOG.error('Test result posting finished with status code'
+ ' %d.' % res.status_code)
+ except Exception as err:
+ LOG.exception('Failed to record result data: %s',
+ err)
+ return -1
+ return 0
+
+ def flush_result_data(self):
+ LOG.debug('Test result all : %s' % json.dumps(self.raw_result))
+ return 0
diff --git a/yardstick/dispatcher/influxdb_line_protocol.py b/yardstick/dispatcher/influxdb_line_protocol.py
new file mode 100644
index 000000000..3e830ed5e
--- /dev/null
+++ b/yardstick/dispatcher/influxdb_line_protocol.py
@@ -0,0 +1,114 @@
+# yardstick comment: this file is a modified copy of
+# influxdb-python/influxdb/line_protocol.py
+
+from __future__ import unicode_literals
+from copy import copy
+
+from six import binary_type, text_type, integer_types
+
+
+def _escape_tag(tag):
+ tag = _get_unicode(tag, force=True)
+ return tag.replace(
+ "\\", "\\\\"
+ ).replace(
+ " ", "\\ "
+ ).replace(
+ ",", "\\,"
+ ).replace(
+ "=", "\\="
+ )
+
+
+def _escape_value(value):
+ value = _get_unicode(value)
+ if isinstance(value, text_type) and value != '':
+ return "\"{}\"".format(
+ value.replace(
+ "\"", "\\\""
+ ).replace(
+ "\n", "\\n"
+ )
+ )
+ elif isinstance(value, integer_types) and not isinstance(value, bool):
+ return str(value) + 'i'
+ else:
+ return str(value)
+
+
+def _get_unicode(data, force=False):
+ """
+ Try to return a text aka unicode object from the given data.
+ """
+ if isinstance(data, binary_type):
+ return data.decode('utf-8')
+ elif data is None:
+ return ''
+ elif force:
+ return str(data)
+ else:
+ return data
+
+
+def make_lines(data):
+ """
+ Extracts the points from the given dict and returns a Unicode string
+ matching the line protocol introduced in InfluxDB 0.9.0.
+
+ line protocol format:
+ <measurement>[,<tag-key>=<tag-value>...] <field-key>=<field-value>\
+ [,<field2-key>=<field2-value>...] [unix-nano-timestamp]
+
+ Ref:
+ https://influxdb.com/docs/v0.9/write_protocols/write_syntax.html
+ https://influxdb.com/docs/v0.9/write_protocols/line.html
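+
+ Example (illustrative):
+ make_lines({"tags": {"pod": "p1"},
+ "points": [{"measurement": "ping",
+ "fields": {"rtt": 0.5},
+ "time": "1445000000000000000"}]})
+ returns 'ping,pod=p1 rtt=0.5 1445000000000000000\n'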
+ """
+ lines = []
+ static_tags = data.get('tags', None)
+ for point in data['points']:
+ elements = []
+
+ # add measurement name
+ measurement = _escape_tag(_get_unicode(
+ point.get('measurement', data.get('measurement'))
+ ))
+ key_values = [measurement]
+
+ # add tags
+ if static_tags is None:
+ tags = point.get('tags', {})
+ else:
+ tags = copy(static_tags)
+ tags.update(point.get('tags', {}))
+
+ # tags should be sorted client-side to take load off server
+ for tag_key in sorted(tags.keys()):
+ key = _escape_tag(tag_key)
+ value = _escape_tag(tags[tag_key])
+
+ if key != '' and value != '':
+ key_values.append("{key}={value}".format(key=key, value=value))
+ key_values = ','.join(key_values)
+ elements.append(key_values)
+
+ # add fields
+ field_values = []
+ for field_key in sorted(point['fields'].keys()):
+ key = _escape_tag(field_key)
+ value = _escape_value(point['fields'][field_key])
+ if key != '' and value != '':
+ field_values.append("{key}={value}".format(
+ key=key,
+ value=value
+ ))
+ field_values = ','.join(field_values)
+ elements.append(field_values)
+
+ # add timestamp
+ if 'time' in point:
+ elements.append(point['time'])
+
+ line = ' '.join(elements)
+ lines.append(line)
+ lines = '\n'.join(lines)
+ return lines + '\n'
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 253fd2e3d..339f834b7 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -63,6 +63,7 @@ import socket
import time
import paramiko
+from scp import SCPClient
import six
import logging
@@ -254,3 +255,9 @@ class SSH(object):
time.sleep(interval)
if time.time() > (start_time + timeout):
raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
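+ """Copy a local file or directory to remote_path via scp.
+
+ Usage (illustrative): client.put("/tmp/script.sh", "script.sh")
+ """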
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
diff --git a/yardstick/vTC/__init__.py b/yardstick/vTC/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/vTC/__init__.py
diff --git a/yardstick/vTC/apexlake/.gitignore b/yardstick/vTC/apexlake/.gitignore
new file mode 100644
index 000000000..ddcd58683
--- /dev/null
+++ b/yardstick/vTC/apexlake/.gitignore
@@ -0,0 +1,2 @@
+benchmark.log
+
diff --git a/yardstick/vTC/apexlake/MANIFEST.in b/yardstick/vTC/apexlake/MANIFEST.in
new file mode 100644
index 000000000..57649e597
--- /dev/null
+++ b/yardstick/vTC/apexlake/MANIFEST.in
@@ -0,0 +1,7 @@
+recursive-include bin *.py
+recursive-include benchmarks *.py
+recursive-include heat_templates *
+recursive-include packet_generators *
+recursive-include etc *.cfg *.json
+include *.py
+include README.md
diff --git a/yardstick/vTC/apexlake/README.md b/yardstick/vTC/apexlake/README.md
new file mode 100644
index 000000000..ca4f4ded6
--- /dev/null
+++ b/yardstick/vTC/apexlake/README.md
@@ -0,0 +1,116 @@
+Welcome to ApexLake's documentation!
+====================================
+ApexLake is a framework that provides automatic execution of experiments and related data collection to help
+the user validate the infrastructure from a Virtual Network Function perspective.
+
+Install framework and dependencies
+----------------------------------
+Before starting the framework, a set of dependencies must be installed.
+The following instructions are to be executed on the Linux shell to install the dependencies and configure the environment.
+
+1. Install dependencies
+ - # apt-get install python-dev
+ - # apt-get install python-pip
+ - # apt-get install python-mock
+ - # apt-get install tcpreplay
+ - # apt-get install libpcap-dev
+
+2. Install the framework on the system
+ - # python setup.py install
+
+3. Source OpenStack openrc file
+ - $ source openrc
+
+4. Create 2 Networks (and subnets) based on VLANs (provider:network_type = vlan) in Neutron
+ - $ neutron net-create apexlake_inbound_network --provider:network_type vlan --provider:physical_network physnet1
+ - $ neutron subnet-create apexlake_inbound_network 192.168.0.0/24 --name apexlake_inbound_subnet
+ - $ neutron net-create apexlake_outbound_network --provider:network_type vlan --provider:physical_network physnet1
+ - $ neutron subnet-create apexlake_outbound_network 192.168.1.0/24 --name apexlake_outbound_subnet
+
+5. Provide ApexLake with the VLAN tags related to the networks, either:
+ - into the "conf.cfg" configuration file, or
+ - through the Python API.
+
+
+Install and configure DPDK Pktgen
++++++++++++++++++++++++++++++++++
+The execution of the framework is based on DPDK Pktgen.
+If DPDK Pktgen has not been installed on the system by the user, it is necessary to download, compile and configure it.
+The user can create a directory and download the dpdk packet generator source code:
+ - $ cd experimental_framework/libraries
+ - $ mkdir dpdk_pktgen
+ - $ git clone https://github.com/pktgen/Pktgen-DPDK.git
+
+For the installation and configuration of DPDK and DPDK Pktgen please follow the official DPDK Pktgen README file.
+Once the installation is completed, it is necessary to load the DPDK kernel driver, as follows:
+ - # insmod uio
+ - # insmod DPDK_DIR/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+
+It is required to properly set the configuration file according to the system Pktgen runs on.
+An example is provided in the following:
+
+ - [PacketGen]
+ - packet_generator = dpdk_pktgen
+ - pktgen_directory = /home/user/apexlake/experimental_framework/libraries/dpdk_pktgen/dpdk/examples/pktgen/
+ -- This is the directory where the packet generator is installed (if the user previously installed dpdk-pktgen, it is required to provide the directory where it is installed).
+ - dpdk_directory = /home/user/apexlake/experimental_framework/libraries/Pktgen-DPDK/dpdk/
+ -- This is the directory where DPDK is installed
+ - program_name = app/app/x86_64-native-linuxapp-gcc/pktgen
+ -- This is the name of the dpdk-pktgen program that starts the packet generator
+ - coremask = 1f
+ -- DPDK coremask (see DPDK-Pktgen readme)
+ - memory_channels = 3
+ -- DPDK memory channels (see DPDK-Pktgen readme)
+ - name_if_1 = p1p1
+ -- Name of the interface of the pktgen to be used to send traffic
+ - name_if_2 = p1p2
+ -- Name of the interface of the pktgen to be used to receive traffic
+ - bus_slot_nic_1 = 01:00.0
+ -- PCI bus address correspondent to the if_1
+ - bus_slot_nic_2 = 01:00.1
+ -- PCI bus address correspondent to the if_2
+
+
+To find the parameters related to the NIC names and PCI bus addresses, the user may find it useful to run the DPDK nic_bind tool as follows:
+
+ - $ DPDK_DIR/tools/dpdk_nic_bind.py --status
+
+which lists the NICs available on the system, showing the available drivers and bus addresses for each interface.
+Please make sure to select NICs which are DPDK compatible.
+
+Installation and configuration of smcroute
+++++++++++++++++++++++++++++++++++++++++++
+The user is required to install smcroute which is used by the framework to support multicast communications.
+In the following, a list of commands to be run to download and install smcroute is provided.
+
+ - $ cd ~
+ - $ git clone https://github.com/troglobit/smcroute.git
+ - $ cd smcroute
+ - $ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ - $ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ - $ ./autogen.sh
+ - $ ./configure
+ - $ make
+ - $ sudo make install
+ - $ cd ..
+
+It is also required to create a configuration file. First, set the name of the NIC in a variable:
+
+ - $ SMCROUTE_NIC=(name of the nic)
+
+where "name of the nic" is the value previously used for the variable "name_if_2".
+In the example it would be:
+
+ - $ SMCROUTE_NIC=p1p2
+
+Then create the smcroute configuration file /etc/smcroute.conf:
+
+ - $ echo mgroup from $SMCROUTE_NIC group 224.192.16.1 > /etc/smcroute.conf
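+
+With SMCROUTE_NIC=p1p2 (as in the example above) the resulting /etc/smcroute.conf would contain:
+
+ - mgroup from p1p2 group 224.192.16.1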
+
+
+Experiment using SR-IOV configuration on the compute node
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+In order to enable SR-IOV interfaces on the physical NIC of the compute node, a compatible NIC is required.
+NIC configuration depends on the model and vendor. After the NIC has been configured to support SR-IOV, OpenStack must be configured accordingly.
+For further information, please look at the following link:
+https://wiki.openstack.org/wiki/SR-IOV-Passthrough-For-Networking
diff --git a/yardstick/vTC/apexlake/__init__.py b/yardstick/vTC/apexlake/__init__.py
new file mode 100644
index 000000000..8898092d0
--- /dev/null
+++ b/yardstick/vTC/apexlake/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Benchmarking Framework
+"""
diff --git a/yardstick/vTC/apexlake/apexlake.conf b/yardstick/vTC/apexlake/apexlake.conf
new file mode 100644
index 000000000..56937d2ef
--- /dev/null
+++ b/yardstick/vTC/apexlake/apexlake.conf
@@ -0,0 +1,69 @@
+[General]
+## template_base is the name of the base template from which all the others are generated
+#template_base_name = vTC.yaml
+## Benchmarks to be executed on the VNF under test (uncomment one of the following)
+#benchmarks = instantiation_validation_benchmark.InstantiationValidationBenchmark
+#benchmarks = instantiation_validation_noisy_neighbors_benchmark.InstantiationValidationNoisyNeighborsBenchmark
+#benchmarks = rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark
+#benchmarks = multi_tenancy_throughput_benchmark.MultiTenancyThroughputBenchmark
+## Number of iterations
+#iterations = 1
+#debug = true
+
+
+[OpenStack]
+## ip_controller is the IP address of the OpenStack Controller
+#ip_controller = 10.2.1.1
+## Heat URL is the URL for the Heat endpoint
+#heat_url = http://IP_ADDRESS_CONTROLLER:8004/v1/TENANT_ID
+## user is the OpenStack user name
+#user = admin
+## password is the OpenStack password
+#password = password
+## auth_uri is the authentication URI provided by keystone
+#auth_uri = http://IP_ADDRESS_CONTROLLER:5000/v2.0
+## project is the name of the project in which the VNF instances are created
+#project = demo
+
+
+[Experiment-VNF]
+## List of all the variables and the values that will be tested by the framework
+#VNIC_TYPE = @string "direct", "normal"
+#VTC_FLAVOR = @string "m1.small" "m1.medium" "m1.large"
+
+
+[InfluxDB]
+influxdb_ip_address = 192.168.1.1
+influxdb_port = 8086
+influxdb_db_name = database
+
+
+[PacketGen]
+packet_generator = dpdk_pktgen
+pktgen_directory = /root/programs/Pktgen-DPDK/dpdk/examples/pktgen/
+dpdk_directory = /root/programs/Pktgen-DPDK/dpdk/
+program_name = app/app/x86_64-native-linuxapp-gcc/pktgen
+coremask = 1f
+memory_channels = 3
+bus_slot_nic_1 = 01:00.0
+bus_slot_nic_2 = 01:00.1
+name_if_1 = p2p1
+name_if_2 = p2p2
+
+
+[Deployment-parameters]
+#default_net = monitoring
+#default_subnet = monitoring_subnet
+#source_net = inbound_traffic_network
+#source_subnet = inbound_traffic_subnet
+#destination_net = destination_B_network
+#destination_subnet = destination_B_subnet
+#key_name = destination_B_subnet
+
+
+[Testcase-parameters]
+#packet_size = 1280
+#throughput = 1
+#vlan_sender = 1000
+#vlan_receiver = 1001 \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/bin/run_tests.sh b/yardstick/vTC/apexlake/bin/run_tests.sh
index 01592ddc7..6707ad75e 100755
--- a/yardstick/vTC/apexlake/bin/run_tests.sh
+++ b/yardstick/vTC/apexlake/bin/run_tests.sh
@@ -1 +1,2 @@
+export PYTHONPATH=`pwd`
nosetests --with-coverage --cover-erase --cover-package experimental_framework
diff --git a/yardstick/vTC/apexlake/docs/source/api.rst b/yardstick/vTC/apexlake/docs/source/api.rst
new file mode 100644
index 000000000..38085900b
--- /dev/null
+++ b/yardstick/vTC/apexlake/docs/source/api.rst
@@ -0,0 +1,5 @@
+.. automodule:: experimental_framework.api
+ :members:
+ :undoc-members:
+ :inherited-members:
+ :show-inheritance:
diff --git a/yardstick/vTC/apexlake/experimental_framework/__init__.py b/yardstick/vTC/apexlake/experimental_framework/__init__.py
new file mode 100644
index 000000000..d4ab29e9d
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Experimental Framework
+'''
diff --git a/yardstick/vTC/apexlake/experimental_framework/api.py b/yardstick/vTC/apexlake/experimental_framework/api.py
new file mode 100644
index 000000000..1851f1b09
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/api.py
@@ -0,0 +1,148 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import experimental_framework.benchmarking_unit as b_unit
+from experimental_framework import heat_template_generation, common
+
+
+class FrameworkApi(object):
+
+ @staticmethod
+ def init():
+ """
+ Initializes the Framework
+
+ :return: None
+ """
+ common.init(api=True)
+
+ # @staticmethod
+ # def get_available_test_cases():
+ # """
+ # Returns a list of available test cases.
+ # This list include eventual modules developed by the user, if any.
+ # Each test case is returned as a string that represents the full name
+ # of the test case and that can be used to get more information
+ # calling get_test_case_features(test_case_name)
+ #
+ # :return: list of strings
+ # """
+ # return b_unit.BenchmarkingUnit.get_available_test_cases()
+
+ @staticmethod
+ def get_test_case_features(test_case):
+ """
+ Returns a list of features (description, requested parameters,
+ allowed values, etc.) for a specified test case.
+
+ :param test_case: name of the test case (string)
+ The string represents the test case and can be
+ obtained calling "get_available_test_cases()"
+ method.
+
+ :return: dict() containing the features of the test case
+ """
+ if not isinstance(test_case, str):
+ raise ValueError('The provided test_case parameter has to be '
+ 'a string')
+ benchmark = b_unit.BenchmarkingUnit.get_required_benchmarks(
+ [test_case])[0]
+ return benchmark.get_features()
+
+ @staticmethod
+ def execute_framework(
+ test_cases,
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials
+ ):
+ """
+ Executes the framework according to the inputs
+
+ :param test_cases: Test cases to be run on the workload
+ (list() of dict())
+
+ Example:
+ test_case = dict()
+ test_case['name'] = 'module.Class'
+ test_case['params'] = dict()
+ test_case['params']['throughput'] = '1'
+ test_case['params']['vlan_sender'] = '1007'
+ test_case['params']['vlan_receiver'] = '1006'
+ test_cases = [test_case]
+
+ :param iterations: Number of cycles to be executed (int)
+
+ :param heat_template: (string) File name of the heat template of the
+ workload to be deployed. It contains the
+ parameters to be evaluated in the form of
+ #parameter_name. (See heat_templates/vTC.yaml as
+ example).
+
+ :param heat_template_parameters: (dict) Parameters to be provided
+ as input to the heat template.
+ See http://docs.openstack.org/developer/heat/
+ template_guide/hot_guide.html - section
+ "Template input parameters" for further info.
+
+ :param deployment_configuration: (dict[string] = list(strings))
+ Dictionary of parameters representing the
+ deployment configuration of the workload.
+ The key is a string corresponding to the name
+ of the parameter; the value is a list of
+ strings representing the values to be assumed
+ by a specific param.
+ The parameters are user defined: they have to
+ correspond to the place holders (#parameter_name)
+ specified in the heat template.
+
+ :return: dict() Containing results
+ """
+ common.init(api=True)
+
+ # Input Validation
+ common.InputValidation.validate_os_credentials(openstack_credentials)
+ credentials = openstack_credentials
+
+ msg = 'The provided heat_template does not exist'
+ template = "{}{}".format(common.get_template_dir(), heat_template)
+ common.InputValidation.validate_file_exist(template, msg)
+
+ msg = 'The provided iterations variable must be an integer value'
+ common.InputValidation.validate_integer(iterations, msg)
+
+ msg = 'The provided heat_template_parameters variable must be a ' \
+ 'dictionary'
+ common.InputValidation.validate_dictionary(heat_template_parameters,
+ msg)
+ log_msg = "Generation of all the heat templates " \
+ "required by the experiment"
+ common.LOG.info(log_msg)
+ heat_template_generation.generates_templates(heat_template,
+ deployment_configuration)
+ benchmarking_unit = \
+ b_unit.BenchmarkingUnit(
+ heat_template, credentials, heat_template_parameters,
+ iterations, test_cases)
+ try:
+ common.LOG.info("Benchmarking Unit initialization")
+ benchmarking_unit.initialize()
+ common.LOG.info("Benchmarking Unit Running")
+ results = benchmarking_unit.run_benchmarks()
+ finally:
+ common.LOG.info("Benchmarking Unit Finalization")
+ benchmarking_unit.finalize()
+ return results
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py b/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
new file mode 100644
index 000000000..1963696f8
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+The Benchmarking Unit manages the benchmarking of VNFs, orchestrating their
+initialization, execution and finalization.
+'''
+
+
+import json
+import time
+import inspect
+
+from experimental_framework.benchmarks import benchmark_base_class as base
+from experimental_framework import common
+# from experimental_framework import data_manager as data
+from experimental_framework import heat_template_generation as heat
+from experimental_framework import deployment_unit as deploy
+
+
+class BenchmarkingUnit:
+ """
+ Management of the overall Benchmarking process
+ """
+
+ def __init__(self, heat_template_name, openstack_credentials,
+ heat_template_parameters, iterations, benchmarks):
+ """
+ :param heat_template_name: (str) Name of the heat template.
+
+        :param openstack_credentials: (dict) Credentials for OpenStack.
+ Required fields are: 'ip_controller', 'heat_url',
+ 'user', 'password', 'auth_uri', 'project'.
+
+ :param heat_template_parameters: (dict) parameters to be given as
+ input to the heat template. Required keys depend on
+ the specific heat template.
+
+ :param iterations: (int) number of cycles to be executed.
+
+ :param benchmarks: (list[str]) List of the names of the
+ benchmarks/test_cases to be executed in the cycle.
+
+ :return: None
+ """
+ # Loads vars from configuration file
+ self.template_file_extension = common.TEMPLATE_FILE_EXTENSION
+ self.template_dir = common.get_template_dir()
+ self.results_directory = str(common.RESULT_DIR) + str(time.time())
+
+ # Initializes other internal variable from parameters
+ self.template_name = heat_template_name
+ self.iterations = iterations
+ self.required_benchmarks = benchmarks
+ self.template_files = []
+ self.benchmarks = list()
+ self.benchmark_names = list()
+ # self.data_manager = data.DataManager(self.results_directory)
+ self.heat_template_parameters = heat_template_parameters
+ self.template_files = \
+ heat.get_all_heat_templates(self.template_dir,
+ self.template_file_extension)
+ common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)
+
+ def initialize(self):
+ """
+ Initialize the environment in order to run the benchmarking
+
+ :return: None
+ """
+ for benchmark in self.required_benchmarks:
+ benchmark_class = BenchmarkingUnit.get_benchmark_class(
+ benchmark['name'])
+ # Need to generate a unique name for the benchmark
+ # (since there is the possibility to have different
+ # instances of the same benchmark)
+ self.benchmarks.append(benchmark_class(
+ self.get_benchmark_name(benchmark['name']),
+ benchmark['params']))
+
+ # for template_file_name in self.template_files:
+ # experiment_name = BenchmarkingUnit.extract_experiment_name(
+ # template_file_name)
+ # self.data_manager.create_new_experiment(experiment_name)
+ # for benchmark in self.benchmarks:
+ # self.data_manager.add_benchmark(experiment_name,
+ # benchmark.get_name())
+
+ def finalize(self):
+ """
+        Finalizes the Benchmarking Unit.
+        Destroys all the stacks deployed by the framework and saves the
+        results to a csv file.
+
+ :return: None
+ """
+ # self.data_manager.generate_result_csv_file()
+ common.DEPLOYMENT_UNIT.destroy_all_deployed_stacks()
+
+ def run_benchmarks(self):
+ """
+        Runs all the requested benchmarks and collects the results.
+
+        :return: (dict) results of the last benchmark executed
+ """
+ common.LOG.info('Run Benchmarking Unit')
+
+ experiment = dict()
+ result = dict()
+ for iteration in range(0, self.iterations):
+ common.LOG.info('Iteration ' + str(iteration))
+ for template_file_name in self.template_files:
+ experiment_name = BenchmarkingUnit.\
+ extract_experiment_name(template_file_name)
+ experiment['experiment_name'] = experiment_name
+ configuration = self.\
+ get_experiment_configuration(template_file_name)
+ # self.data_manager.add_configuration(experiment_name,
+ # configuration)
+ for key in configuration.keys():
+ experiment[key] = configuration[key]
+ # metadata = dict()
+ # metadata['experiment_name'] = experiment_name
+ # self.data_manager.add_metadata(experiment_name, metadata)
+
+ # For each benchmark in the cycle the workload is deployed
+ for benchmark in self.benchmarks:
+ log_msg = 'Benchmark {} started on {}'.format(
+ benchmark.get_name(), template_file_name
+ )
+ common.LOG.info(log_msg)
+
+ # Initialization of Benchmark
+ benchmark.init()
+ log_msg = 'Template {} deployment START'.\
+ format(experiment_name)
+ common.LOG.info(log_msg)
+
+ # Deployment of the workload
+ deployment_success = \
+ common.DEPLOYMENT_UNIT.deploy_heat_template(
+ self.template_dir + template_file_name,
+ experiment_name,
+ self.heat_template_parameters)
+
+ if deployment_success:
+ log_msg = 'Template {} deployment COMPLETED'.format(
+ experiment_name)
+ common.LOG.info(log_msg)
+ else:
+ log_msg = 'Template {} deployment FAILED'.format(
+ experiment_name)
+ common.LOG.info(log_msg)
+ continue
+
+ # Running the Benchmark/test case
+ result = benchmark.run()
+ # self.data_manager.add_data_points(experiment_name,
+ # benchmark.get_name(),
+ # result)
+
+ # Terminate the workload
+ log_msg = 'Destroying deployment for experiment {}'.\
+ format(experiment_name)
+ common.LOG.info(log_msg)
+ common.DEPLOYMENT_UNIT.destroy_heat_template(
+ experiment_name)
+
+ # Finalize the benchmark
+ benchmark.finalize()
+ log_msg = 'Benchmark {} terminated'.format(
+ benchmark.__class__.__name__)
+ common.LOG.info(log_msg)
+ # self.data_manager.generate_result_csv_file()
+
+ experiment['benchmark'] = benchmark.get_name()
+ for key in benchmark.get_params():
+ experiment[key] = benchmark.get_params()[key]
+ common.LOG.info('Benchmark Finished')
+ # self.data_manager.generate_result_csv_file()
+ common.LOG.info('Benchmarking Unit: Experiments completed!')
+ return result
+
+ def get_experiment_configuration(self, template_file_name):
+ """
+ Reads and returns the configuration for the specific experiment
+ (heat template)
+
+        :param template_file_name: (str) Name of the file of the heat
+                        template for which the configuration is requested
+
+ :return: dict() Configuration parameters and values
+ """
+ file_name = "{}{}.json".format(self.template_dir, template_file_name)
+ with open(file_name) as json_file:
+ configuration = json.load(json_file)
+ return configuration
+
+ def get_benchmark_name(self, name, instance=0):
+ """
+ Returns the name to be used for the benchmark/test case (TC).
+ This is required since each benchmark/TC could be run more than once
+ within the same cycle, with different initialization parameters.
+ In order to distinguish between them, a unique name is generated.
+
+ :param name: (str) original name of the benchmark/TC
+
+        :param instance: (int) number of instances already in the queue for
+ this type of benchmark/TC.
+
+ :return: (str) name to be assigned to the benchmark/TC
+ """
+ if name + "_" + str(instance) in self.benchmark_names:
+ instance += 1
+ return self.get_benchmark_name(name, instance)
+ self.benchmark_names.append(name + "_" + str(instance))
+ return name + "_" + str(instance)
+
+ @staticmethod
+ def extract_experiment_name(template_file_name):
+ """
+ Generates a unique experiment name for a given template.
+
+ :param template_file_name: (str) File name of the template used
+                        during the experiment
+
+ :return: (str) Experiment Name
+ """
+ strings = template_file_name.split('.')
+ return ".".join(strings[:(len(strings)-1)])
+
+ @staticmethod
+ def get_benchmark_class(complete_module_name):
+ """
+        Returns the benchmark class included in a given module.
+
+ :param complete_module_name: (str) Complete name of the module as
+ returned by get_available_test_cases.
+
+ :return: Class related to the benchmark/TC present in the requested
+ module.
+ """
+ strings = complete_module_name.split('.')
+ class_name = 'experimental_framework.benchmarks.{}'.format(strings[0])
+ pkg = __import__(class_name, globals(), locals(), [], -1)
+ module = getattr(getattr(pkg, 'benchmarks'), strings[0])
+ members = inspect.getmembers(module)
+ for m in members:
+ if inspect.isclass(m[1]):
+ class_name = m[1]("", dict()).__class__.__name__
+ if isinstance(m[1]("", dict()), base.BenchmarkBaseClass) and \
+ not class_name == 'BenchmarkBaseClass':
+ return m[1]
+
+ @staticmethod
+ def get_required_benchmarks(required_benchmarks):
+ """
+ Returns instances of required test cases.
+
+ :param required_benchmarks: (list() of strings) Benchmarks to be
+ executed by the experimental framework.
+
+ :return: list() of BenchmarkBaseClass
+ """
+ benchmarks = list()
+ for b in required_benchmarks:
+ class_ = BenchmarkingUnit.get_benchmark_class(b)
+ instance = class_("", dict())
+ benchmarks.append(instance)
+ return benchmarks
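As a small illustration of the naming helpers above (a sketch, not part of the patch): repeated registrations of the same benchmark get distinct numeric suffixes, and the experiment name is the template file name stripped of its extension.

    # Assuming `bu` is an already constructed BenchmarkingUnit:
    bu.get_benchmark_name('module.Class')    # -> 'module.Class_0'
    bu.get_benchmark_name('module.Class')    # -> 'module.Class_1'

    BenchmarkingUnit.extract_experiment_name('vTC_3.yaml')  # -> 'vTC_3'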
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/__init__.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/__init__.py
new file mode 100644
index 000000000..99635a45a
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Benchmarks to be executed within the framework
+"""
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/benchmark_base_class.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/benchmark_base_class.py
index 756962714..41235635c 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarks/benchmark_base_class.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/benchmark_base_class.py
@@ -44,6 +44,9 @@ class BenchmarkBaseClass(object):
def get_name(self):
return self.name
+ def get_params(self):
+ return self.params
+
def get_features(self):
features = dict()
features['description'] = 'Please implement the method ' \
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_benchmark.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_benchmark.py
index 29a87f2ee..049912305 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_benchmark.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_benchmark.py
@@ -36,8 +36,9 @@ class InstantiationValidationBenchmark(base.BenchmarkBaseClass):
def __init__(self, name, params):
base.BenchmarkBaseClass.__init__(self, name, params)
- self.base_dir = common.get_base_dir() + \
- fp.EXPERIMENTAL_FRAMEWORK_DIR + fp.DPDK_PKTGEN_DIR
+ self.base_dir = "{}{}{}".format(
+ common.get_base_dir(), fp.EXPERIMENTAL_FRAMEWORK_DIR,
+ fp.DPDK_PKTGEN_DIR)
self.results_file = self.base_dir + PACKETS_FILE_NAME
self.lua_file = self.base_dir + 'constant_traffic.lua'
self.res_dir = ''
@@ -143,7 +144,7 @@ class InstantiationValidationBenchmark(base.BenchmarkBaseClass):
if self.res_dir:
packet_checker_res = \
int(common.get_file_first_line(self.res_dir +
- '/packet_checker.res'))
+ 'packet_checker.res'))
pkt_gen_res = int(common.get_file_first_line(self.results_file))
if pkt_gen_res <= packet_checker_res or \
(float(pkt_gen_res - packet_checker_res) / pkt_gen_res) <= 0.1:
@@ -158,7 +159,7 @@ class InstantiationValidationBenchmark(base.BenchmarkBaseClass):
:return:
"""
# Kill any other process running from previous failed execution
- self.res_dir = os.getcwd()
+ self.res_dir = common.get_result_dir()
pids = self._get_pids()
for pid in pids:
os.kill(pid, signal.SIGTERM)
@@ -192,6 +193,9 @@ class InstantiationValidationBenchmark(base.BenchmarkBaseClass):
common.run_command(command)
# Start the packet checker
+    # TODO: compile ("make") the packet sniffer
+ command = "chmod +x {}".format(self.pkt_checker_command)
+ common.run_command(command)
command = self.pkt_checker_command
command += self.interface_name + '.' + self.params[VLAN_RECEIVER]
command += ' 128'
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_noisy_neighbors_benchmark.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_noisy_neighbors_benchmark.py
index 4e3b640d8..9610bc165 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_noisy_neighbors_benchmark.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_noisy_neighbors_benchmark.py
@@ -45,11 +45,11 @@ class InstantiationValidationNoisyNeighborsBenchmark(
features['allowed_values'][NUMBER_OF_CORES] = \
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
features['allowed_values'][AMOUNT_OF_RAM] = \
- ['250M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
+ ['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
'10G']
features['default_values'][NUM_OF_NEIGHBORS] = '1'
features['default_values'][NUMBER_OF_CORES] = '1'
- features['default_values'][AMOUNT_OF_RAM] = '250M'
+ features['default_values'][AMOUNT_OF_RAM] = '256M'
return features
def init(self):
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/multi_tenancy_throughput_benchmark.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/multi_tenancy_throughput_benchmark.py
index ba1e0cc81..3182837c5 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarks/multi_tenancy_throughput_benchmark.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/multi_tenancy_throughput_benchmark.py
@@ -40,11 +40,11 @@ class MultiTenancyThroughputBenchmark(base.RFC2544ThroughputBenchmark):
features['allowed_values']['number_of_cores'] = \
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
features['allowed_values']['amount_of_ram'] = \
- ['250M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
+ ['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
'10G']
features['default_values']['num_of_neighbours'] = '1'
features['default_values']['number_of_cores'] = '1'
- features['default_values']['amount_of_ram'] = '250M'
+ features['default_values']['amount_of_ram'] = '256M'
return features
def init(self):
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/rfc2544_throughput_benchmark.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/rfc2544_throughput_benchmark.py
index 2ac3ea9c4..e026fa377 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarks/rfc2544_throughput_benchmark.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/rfc2544_throughput_benchmark.py
@@ -1,3 +1,4 @@
+
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,9 +75,7 @@ class RFC2544ThroughputBenchmark(benchmark_base_class.BenchmarkBaseClass):
to measure the throughput of the workload
:return: Results of the testcase (type: dict)
"""
- ret_val = dict()
packet_size = self._extract_packet_size_from_params()
- ret_val[PACKET_SIZE] = packet_size
# Packetgen management
packetgen = dpdk.DpdkPacketGenerator()
@@ -93,12 +92,7 @@ class RFC2544ThroughputBenchmark(benchmark_base_class.BenchmarkBaseClass):
packetgen.send_traffic()
common.LOG.debug('Stop the packet generator')
- # Result Collection
- results = self._get_results()
- for metric_name in results.keys():
- ret_val[metric_name] = results[metric_name]
- self._reset_lua_file()
- return ret_val
+ return self._get_results()
def _extract_packet_size_from_params(self):
"""
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarks/test_benchmark.py b/yardstick/vTC/apexlake/experimental_framework/benchmarks/test_benchmark.py
index d530168da..cbb930d21 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarks/test_benchmark.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarks/test_benchmark.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import time
from experimental_framework.benchmarks import benchmark_base_class as base
@@ -33,4 +34,5 @@ class TestBenchmark(base.BenchmarkBaseClass):
return features
def run(self):
+ time.sleep(10)
return dict()
diff --git a/yardstick/vTC/apexlake/experimental_framework/common.py b/yardstick/vTC/apexlake/experimental_framework/common.py
new file mode 100644
index 000000000..afe70241a
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/common.py
@@ -0,0 +1,600 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import ConfigParser
+import logging
+import fileinput
+from experimental_framework.constants import conf_file_sections as cf
+from experimental_framework.constants import framework_parameters as fp
+
+
+# ------------------------------------------------------
+# List of common variables
+# ------------------------------------------------------
+
+LOG = None
+CONF_FILE = None
+DEPLOYMENT_UNIT = None
+ITERATIONS = None
+
+BASE_DIR = None
+RESULT_DIR = None
+TEMPLATE_DIR = None
+TEMPLATE_NAME = None
+TEMPLATE_FILE_EXTENSION = None
+
+PKTGEN = None
+PKTGEN_DIR = None
+PKTGEN_DPDK_DIRECTORY = None
+PKTGEN_PROGRAM = None
+PKTGEN_COREMASK = None
+PKTGEN_MEMCHANNEL = None
+PKTGEN_BUS_SLOT_NIC_1 = None
+PKTGEN_BUS_SLOT_NIC_2 = None
+PKTGEN_NAME_NIC_1 = None
+PKTGEN_NAME_NIC_2 = None
+
+# TODO: remove Influx
+INFLUXDB_IP = None
+INFLUXDB_PORT = None
+INFLUXDB_DB_NAME = None
+
+
+# ------------------------------------------------------
+# Initialization and Input validation
+# ------------------------------------------------------
+
+def init(api=False):
+ global BASE_DIR
+ # BASE_DIR = os.getcwd()
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ BASE_DIR = BASE_DIR.replace('/experimental_framework', '')
+ BASE_DIR = InputValidation.validate_directory_exist_and_format(
+ BASE_DIR, "Error 000001")
+
+ init_conf_file(api)
+ init_log()
+ init_general_vars(api)
+ if len(CONF_FILE.get_variable_list(cf.CFS_PKTGEN)) > 0:
+ init_pktgen()
+
+
+def init_conf_file(api=False):
+ global CONF_FILE
+ if api:
+ CONF_FILE = ConfigurationFile(cf.get_sections_api(),
+ '/tmp/apexlake/apexlake.conf')
+ else:
+ CONF_FILE = ConfigurationFile(cf.get_sections(),
+ '/tmp/apexlake/apexlake.conf')
+
+
+def init_general_vars(api=False):
+ global TEMPLATE_FILE_EXTENSION
+ global TEMPLATE_NAME
+ global TEMPLATE_DIR
+ global RESULT_DIR
+ global ITERATIONS
+
+ TEMPLATE_FILE_EXTENSION = '.yaml'
+
+ # Check Section in Configuration File
+ InputValidation.\
+ validate_configuration_file_section(
+ cf.CFS_GENERAL,
+ "Section " + cf.CFS_GENERAL +
+ "is not present in configuration file")
+
+ TEMPLATE_DIR = '/tmp/apexlake/heat_templates/'
+ # if not os.path.exists(TEMPLATE_DIR):
+ # os.makedirs(TEMPLATE_DIR)
+ # cmd = "cp /tmp/apexlake/heat_templates/*.yaml {}".format(TEMPLATE_DIR)
+ # run_command(cmd)
+
+ if not api:
+ # Validate template name
+ InputValidation.\
+ validate_configuration_file_parameter(
+ cf.CFS_GENERAL,
+ cf.CFSG_TEMPLATE_NAME,
+ "Parameter " + cf.CFSG_TEMPLATE_NAME +
+ "is not present in configuration file")
+ TEMPLATE_NAME = CONF_FILE.get_variable(cf.CFS_GENERAL,
+ cf.CFSG_TEMPLATE_NAME)
+ InputValidation.validate_file_exist(
+ TEMPLATE_DIR + TEMPLATE_NAME,
+ "The provided template file does not exist")
+
+ RESULT_DIR = "/tmp/apexlake/results/"
+
+ # Validate and assign Iterations
+ if cf.CFSG_ITERATIONS in CONF_FILE.get_variable_list(cf.CFS_GENERAL):
+ ITERATIONS = int(CONF_FILE.get_variable(cf.CFS_GENERAL,
+ cf.CFSG_ITERATIONS))
+ else:
+ ITERATIONS = 1
+
+
+def init_log():
+ global LOG
+ LOG = logging.getLogger()
+ LOG.setLevel(level=logging.INFO)
+ log_formatter = logging.Formatter("%(asctime)s --- %(message)s")
+ file_handler = logging.FileHandler("{0}/{1}.log".format("./", "benchmark"))
+ file_handler.setFormatter(log_formatter)
+ file_handler.setLevel(logging.DEBUG)
+ LOG.addHandler(file_handler)
+
+
+# ------------------------------------------------------
+# InfluxDB conf variables
+# ------------------------------------------------------
+def init_influxdb():
+ global INFLUXDB_IP
+ global INFLUXDB_PORT
+ global INFLUXDB_DB_NAME
+
+ INFLUXDB_IP = CONF_FILE.get_variable(cf.CFS_INFLUXDB, cf.CFSI_IDB_IP)
+ INFLUXDB_PORT = CONF_FILE.get_variable(cf.CFS_INFLUXDB, cf.CFSI_IDB_PORT)
+ INFLUXDB_DB_NAME = CONF_FILE.get_variable(cf.CFS_INFLUXDB,
+ cf.CFSI_IDB_DB_NAME)
+
+
+# ------------------------------------------------------
+# Packet Generator conf variables
+# ------------------------------------------------------
+def init_pktgen():
+ global PKTGEN
+ global PKTGEN_DIR
+ global PKTGEN_PROGRAM
+ global PKTGEN_COREMASK
+ global PKTGEN_MEMCHANNEL
+ global PKTGEN_BUS_SLOT_NIC_1
+ global PKTGEN_BUS_SLOT_NIC_2
+ global PKTGEN_DPDK_DIRECTORY
+ global PKTGEN_NAME_NIC_1
+ global PKTGEN_NAME_NIC_2
+
+ msg = "Section {} is not present in the configuration file".\
+ format(cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_section(cf.CFS_PKTGEN, msg)
+
+ pktgen_var_list = CONF_FILE.get_variable_list(cf.CFS_PKTGEN)
+ PKTGEN = 'dpdk_pktgen' # default value
+ if cf.CFSP_PACKET_GENERATOR in pktgen_var_list:
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_PACKET_GENERATOR, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_PACKET_GENERATOR, msg)
+ PKTGEN = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_PACKET_GENERATOR)
+
+ if PKTGEN not in fp.get_supported_packet_generators():
+ raise ValueError('The specified packet generator is not supported '
+ 'by the framework')
+
+ # Check if the packet gen is dpdk_pktgen
+ if PKTGEN == cf.CFSP_PG_DPDK:
+ # Validation of DPDK pktgen directory
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_PKTGEN_DIRECTORY, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_PKTGEN_DIRECTORY, msg)
+ PKTGEN_DIR = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_PKTGEN_DIRECTORY)
+ msg = "The directory {} does not exist.".format(PKTGEN_DIR)
+ PKTGEN_DIR = InputValidation.validate_directory_exist_and_format(
+ PKTGEN_DIR, msg)
+
+ # Validation of the DPDK program name
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_PROGRAM_NAME, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_PROGRAM_NAME, msg)
+ PKTGEN_PROGRAM = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_PROGRAM_NAME)
+
+ # Validation of the DPDK Coremask parameter
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_COREMASK, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_COREMASK, msg)
+ PKTGEN_COREMASK = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_COREMASK)
+
+ # Validation of the DPDK Memory Channel parameter
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_MEMORY_CHANNEL, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_MEMORY_CHANNEL, msg)
+ PKTGEN_MEMCHANNEL = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_MEMORY_CHANNEL)
+
+ # Validation of the DPDK Bus Slot 1
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_BUS_SLOT_NIC_1, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_1, msg)
+ PKTGEN_BUS_SLOT_NIC_1 = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_1)
+
+ # Validation of the DPDK Bus Slot 2
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_BUS_SLOT_NIC_2, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_2, msg)
+ PKTGEN_BUS_SLOT_NIC_2 = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_2)
+
+ # Validation of the DPDK NIC 1
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_NAME_IF_1, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_1, msg)
+ PKTGEN_NAME_NIC_1 = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_1)
+
+ # Validation of the DPDK NIC 2
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_NAME_IF_2, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_2, msg)
+ PKTGEN_NAME_NIC_2 = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_2)
+
+ # Validation of DPDK directory parameter
+ msg = "Parameter {} is not present in section {}".format(
+ cf.CFSP_DPDK_DPDK_DIRECTORY, cf.CFS_PKTGEN)
+ InputValidation.validate_configuration_file_parameter(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_DPDK_DIRECTORY, msg)
+ PKTGEN_DPDK_DIRECTORY = CONF_FILE.get_variable(
+ cf.CFS_PKTGEN, cf.CFSP_DPDK_DPDK_DIRECTORY)
+ msg = "Directory {} does not exist".format(
+            PKTGEN_DPDK_DIRECTORY)
+ PKTGEN_DPDK_DIRECTORY = InputValidation.\
+ validate_directory_exist_and_format(PKTGEN_DPDK_DIRECTORY, msg)
+
+
+# ------------------------------------------------------
+# Configuration file access
+# ------------------------------------------------------
+
+class ConfigurationFile:
+ """
+ Used to extract data from the configuration file
+ """
+
+ def __init__(self, sections, config_file='conf.cfg'):
+ """
+ Reads configuration file sections
+
+ :param sections: list of strings representing the sections to be
+ loaded
+ :param config_file: name of the configuration file (string)
+ :return: None
+ """
+ InputValidation.validate_string(
+ config_file, "The configuration file name must be a string")
+ # config_file = BASE_DIR + config_file
+ InputValidation.validate_file_exist(
+ config_file, 'The provided configuration file does not exist')
+ self.config = ConfigParser.ConfigParser()
+ self.config.read(config_file)
+ for section in sections:
+ setattr(
+ self, section, ConfigurationFile.
+ _config_section_map(section, self.config))
+
+ @staticmethod
+ def _config_section_map(section, config_file):
+ """
+ Returns a dictionary with the configuration values for the specific
+ section
+
+ :param section: section to be loaded (string)
+ :param config_file: name of the configuration file (string)
+ :return: dict
+ """
+ dict1 = dict()
+ options = config_file.options(section)
+ for option in options:
+ dict1[option] = config_file.get(section, option)
+ return dict1
+
+ def get_variable(self, section, variable_name):
+ """
+        Returns the value corresponding to a variable
+
+ :param section: section to be loaded (string)
+ :param variable_name: name of the variable (string)
+ :return: string
+ """
+ message = "The variable name must be a string"
+ InputValidation.validate_string(variable_name, message)
+ if variable_name in self.get_variable_list(section):
+ sect = getattr(self, section)
+ return sect[variable_name]
+ else:
+ exc_msg = 'Parameter {} is not in the {} section of the ' \
+ 'conf file'.format(variable_name, section)
+ raise ValueError(exc_msg)
+
+ def get_variable_list(self, section):
+ """
+ Returns the list of the available variables in a section
+ :param section: section to be loaded (string)
+ :return: list
+ """
+ try:
+ return getattr(self, section)
+        except AttributeError:
+ msg = 'Section {} not found in the configuration file'.\
+ format(section)
+ raise ValueError(msg)
+
+
+# ------------------------------------------------------
+# Get OpenStack Credentials
+# ------------------------------------------------------
+def get_credentials():
+ """
+ Returns the credentials for OpenStack access from the configuration file
+ :return: dictionary
+ """
+ credentials = dict()
+ credentials[cf.CFSO_IP_CONTROLLER] = CONF_FILE.get_variable(
+ cf.CFS_OPENSTACK, cf.CFSO_IP_CONTROLLER)
+ credentials[cf.CFSO_HEAT_URL] = CONF_FILE.get_variable(
+ cf.CFS_OPENSTACK, cf.CFSO_HEAT_URL)
+ credentials[cf.CFSO_USER] = CONF_FILE.get_variable(
+ cf.CFS_OPENSTACK, cf.CFSO_USER)
+ credentials[cf.CFSO_PASSWORD] = CONF_FILE.get_variable(
+ cf.CFS_OPENSTACK, cf.CFSO_PASSWORD)
+ credentials[cf.CFSO_AUTH_URI] = CONF_FILE.get_variable(
+ cf.CFS_OPENSTACK, cf.CFSO_AUTH_URI)
+ credentials[cf.CFSO_PROJECT] = CONF_FILE.get_variable(
+ cf.CFS_OPENSTACK, cf.CFSO_PROJECT)
+ return credentials
+
+
+# ------------------------------------------------------
+# Manage files
+# ------------------------------------------------------
+
+def get_heat_template_params():
+ """
+    Returns the deployment parameters for the heat template from the
+    configuration file
+
+ :return: dict
+ """
+ heat_parameters_list = CONF_FILE.get_variable_list(
+ cf.CFS_DEPLOYMENT_PARAMETERS)
+ testcase_parameters = dict()
+ for param in heat_parameters_list:
+ testcase_parameters[param] = CONF_FILE.get_variable(
+ cf.CFS_DEPLOYMENT_PARAMETERS, param)
+ return testcase_parameters
+
+
+def get_testcase_params():
+ """
+    Returns the testcase parameters from the configuration file
+
+ :return: dict
+ """
+ testcase_parameters = dict()
+ parameters = CONF_FILE.get_variable_list(cf.CFS_TESTCASE_PARAMETERS)
+ for param in parameters:
+ testcase_parameters[param] = CONF_FILE.get_variable(
+ cf.CFS_TESTCASE_PARAMETERS, param)
+ return testcase_parameters
+
+
+def get_file_first_line(file_name):
+ """
+ Returns the first line of a file
+
+ :param file_name: name of the file to be read (str)
+ :return: str
+ """
+ message = "The name of the file must be a string"
+ InputValidation.validate_string(file_name, message)
+ message = 'The file {} does not exist'.format(file_name)
+ InputValidation.validate_file_exist(file_name, message)
+    with open(file_name, 'r') as res:
+        return res.readline()
+
+
+def replace_in_file(file, text_to_search, text_to_replace):
+ """
+ Replaces a string within a file
+
+ :param file: name of the file (str)
+ :param text_to_search: text to be replaced
+ :param text_to_replace: new text that will replace the previous
+ :return: None
+ """
+ message = 'The text to be replaced in the file must be a string'
+ InputValidation.validate_string(text_to_search, message)
+ message = 'The text to replace in the file must be a string'
+ InputValidation.validate_string(text_to_replace, message)
+ message = "The name of the file must be a string"
+ InputValidation.validate_string(file, message)
+ message = "The file does not exist"
+ InputValidation.validate_file_exist(file, message)
+ for line in fileinput.input(file, inplace=True):
+ print(line.replace(text_to_search, text_to_replace).rstrip())
+
+
+# ------------------------------------------------------
+# Shell interaction
+# ------------------------------------------------------
+def run_command(command):
+ LOG.info("Running command: {}".format(command))
+ return os.system(command)
+
+
+def push_data_influxdb(data):
+ ip = INFLUXDB_IP
+ port = INFLUXDB_PORT
+ db_name = INFLUXDB_DB_NAME
+ command = "curl -i -XPOST 'http://{}:{}/write?db={}' " \
+ "--data-binary {}".format(ip, port, db_name, data)
+ run_command(command)
+
+
+# ------------------------------------------------------
+# Expose variables to other modules
+# ------------------------------------------------------
+
+def get_base_dir():
+ return BASE_DIR
+
+
+def get_template_dir():
+ return TEMPLATE_DIR
+
+
+def get_result_dir():
+ return RESULT_DIR
+
+
+def get_dpdk_pktgen_vars():
+    if PKTGEN != 'dpdk_pktgen':
+ return dict()
+ ret_val = dict()
+ ret_val[cf.CFSP_DPDK_PKTGEN_DIRECTORY] = PKTGEN_DIR
+ ret_val[cf.CFSP_DPDK_DPDK_DIRECTORY] = PKTGEN_DPDK_DIRECTORY
+ ret_val[cf.CFSP_DPDK_PROGRAM_NAME] = PKTGEN_PROGRAM
+ ret_val[cf.CFSP_DPDK_COREMASK] = PKTGEN_COREMASK
+ ret_val[cf.CFSP_DPDK_MEMORY_CHANNEL] = PKTGEN_MEMCHANNEL
+ ret_val[cf.CFSP_DPDK_BUS_SLOT_NIC_1] = PKTGEN_BUS_SLOT_NIC_1
+ ret_val[cf.CFSP_DPDK_BUS_SLOT_NIC_2] = PKTGEN_BUS_SLOT_NIC_2
+ ret_val[cf.CFSP_DPDK_NAME_IF_1] = PKTGEN_NAME_NIC_1
+ ret_val[cf.CFSP_DPDK_NAME_IF_2] = PKTGEN_NAME_NIC_2
+ return ret_val
+
+
+# ------------------------------------------------------
+# Configuration Variables from Config File
+# ------------------------------------------------------
+def get_deployment_configuration_variables_from_conf_file():
+ variables = dict()
+ types = dict()
+ all_variables = CONF_FILE.get_variable_list(cf.CFS_EXPERIMENT_VNF)
+ for var in all_variables:
+ v = CONF_FILE.get_variable(cf.CFS_EXPERIMENT_VNF, var)
+        var_type = re.findall(r'@\w*', v)
+ values = re.findall(r'\"(.+?)\"', v)
+ variables[var] = values
+ try:
+            types[var] = var_type[0][1:]
+ except IndexError:
+ LOG.debug("No type has been specified for variable " + var)
+ return variables
+
+
+# ------------------------------------------------------
+# benchmarks from Config File
+# ------------------------------------------------------
+def get_benchmarks_from_conf_file():
+ requested_benchmarks = list()
+ benchmarks = \
+ CONF_FILE.get_variable(cf.CFS_GENERAL, cf.CFSG_BENCHMARKS).split(', ')
+ for benchmark in benchmarks:
+ requested_benchmarks.append(benchmark)
+ return requested_benchmarks
+
+
+class InputValidation(object):
+
+ @staticmethod
+ def validate_string(param, message):
+ if not isinstance(param, str):
+ raise ValueError(message)
+ return True
+
+ @staticmethod
+ def validate_integer(param, message):
+ if not isinstance(param, int):
+ raise ValueError(message)
+ return True
+
+ @staticmethod
+ def validate_dictionary(param, message):
+ if not isinstance(param, dict):
+ raise ValueError(message)
+ return True
+
+ @staticmethod
+ def validate_file_exist(file_name, message):
+ if not os.path.isfile(file_name):
+ raise ValueError(message + ' ' + file_name)
+ return True
+
+ @staticmethod
+ def validate_directory_exist_and_format(directory, message):
+ if not os.path.isdir(directory):
+ raise ValueError(message)
+ if not directory.endswith('/'):
+ return directory + '/'
+ return directory
+
+ @staticmethod
+ def validate_configuration_file_parameter(section, parameter, message):
+ params = CONF_FILE.get_variable_list(section)
+ if parameter not in params:
+ raise ValueError(message)
+ return True
+
+ @staticmethod
+ def validate_configuration_file_section(section, message):
+ if section not in cf.get_sections():
+ raise ValueError(message)
+ return True
+
+ @staticmethod
+ def validate_boolean(boolean, message):
+ if isinstance(boolean, bool):
+ return boolean
+ if isinstance(boolean, str):
+ if boolean == 'True':
+ return True
+ if boolean == 'False':
+ return False
+ raise ValueError(message)
+
+ @staticmethod
+ def validate_os_credentials(credentials):
+ if not isinstance(credentials, dict):
+ raise ValueError(
+ 'The provided openstack_credentials '
+ 'variable must be in dictionary format')
+
+ credential_keys = ['ip_controller', 'heat_url', 'user', 'password',
+ 'auth_uri', 'project']
+ missing = [
+ credential_key
+ for credential_key in credential_keys
+ if credential_key not in credentials.keys()
+ ]
+ if len(missing) == 0:
+ return True
+ msg = 'OpenStack Credentials Error! ' \
+ 'The following parameters are missing: {}'.\
+ format(", ".join(missing))
+ raise ValueError(msg)
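A short usage sketch for the helpers above (illustrative; common.init() expects /tmp/apexlake/apexlake.conf to be in place, as installed by setup.py):

    import experimental_framework.common as common
    from experimental_framework.constants import conf_file_sections as cf

    common.init(api=True)  # load the conf file and set the globals

    # Read a variable through the ConfigurationFile wrapper
    benchmarks = common.CONF_FILE.get_variable(cf.CFS_GENERAL,
                                               cf.CFSG_BENCHMARKS)

    # Validation helpers raise ValueError with the supplied message
    common.InputValidation.validate_integer(3, 'must be an int')
    common.InputValidation.validate_os_credentials({'user': 'admin'})
    # ValueError: OpenStack Credentials Error! The following parameters
    # are missing: ip_controller, heat_url, password, auth_uri, project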
diff --git a/yardstick/vTC/apexlake/experimental_framework/constants/conf_file_sections.py b/yardstick/vTC/apexlake/experimental_framework/constants/conf_file_sections.py
index eed00bce0..f397984e9 100644
--- a/yardstick/vTC/apexlake/experimental_framework/constants/conf_file_sections.py
+++ b/yardstick/vTC/apexlake/experimental_framework/constants/conf_file_sections.py
@@ -23,6 +23,7 @@ CFS_EXPERIMENT_VNF = 'Experiment-VNF'
CFS_EXPERIMENT_GENERIC = 'Experiment-generic'
CFS_TESTCASE_PARAMETERS = 'Testcase-parameters'
CFS_DEPLOYMENT_PARAMETERS = 'Deployment-parameters'
+CFS_INFLUXDB = 'InfluxDB'
def get_sections():
@@ -31,9 +32,10 @@ def get_sections():
CFS_GENERAL,
CFS_OPENSTACK,
CFS_EXPERIMENT_VNF,
- CFS_EXPERIMENT_GENERIC,
+ # CFS_EXPERIMENT_GENERIC,
CFS_TESTCASE_PARAMETERS,
- CFS_DEPLOYMENT_PARAMETERS
+ CFS_DEPLOYMENT_PARAMETERS,
+ CFS_INFLUXDB
# Add here eventually new sections in configuration file ...
]
@@ -42,8 +44,7 @@ def get_sections_api():
return [
CFS_PKTGEN,
CFS_GENERAL,
- # TODO: TO BE REMOVED AFTER TESTING THE API
- CFS_OPENSTACK
+ CFS_INFLUXDB
# Add here eventually new sections in configuration file ...
]
@@ -55,17 +56,30 @@ CFSG_TEMPLATE_DIR = 'template_dir'
CFSG_TEMPLATE_NAME = 'template_base_name'
CFSG_RESULT_DIRECTORY = 'results_directory'
CFSG_BENCHMARKS = 'benchmarks'
+CFSG_DEBUG = 'debug'
+
+
+# ------------------------------------------------------
+# InfluxDB
+# ------------------------------------------------------
+CFSI_IDB_IP = 'influxdb_ip_address'
+CFSI_IDB_PORT = 'influxdb_port'
+CFSI_IDB_DB_NAME = 'influxdb_db_name'
# ------------------------------------------------------
# Packet generator section parameters
# ------------------------------------------------------
CFSP_PACKET_GENERATOR = 'packet_generator'
-CFSP_DPDK_DIRECTORY = 'directory'
+CFSP_DPDK_PKTGEN_DIRECTORY = 'pktgen_directory'
+CFSP_DPDK_DPDK_DIRECTORY = 'dpdk_directory'
CFSP_DPDK_PROGRAM_NAME = 'program_name'
CFSP_DPDK_COREMASK = 'coremask'
CFSP_DPDK_MEMORY_CHANNEL = 'memory_channels'
-CFSP_DPDK_CORE_NICS = 'core_nics'
+CFSP_DPDK_BUS_SLOT_NIC_1 = 'bus_slot_nic_1'
+CFSP_DPDK_BUS_SLOT_NIC_2 = 'bus_slot_nic_2'
+CFSP_DPDK_NAME_IF_1 = 'name_if_1'
+CFSP_DPDK_NAME_IF_2 = 'name_if_2'
# ------------------------------------------------------
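For reference, the InfluxDB constants introduced above map onto a configuration file fragment like the following (values are placeholders; only the 'InfluxDB' section name and the three keys are defined by this patch):

    [InfluxDB]
    influxdb_ip_address = 127.0.0.1
    influxdb_port = 8086
    influxdb_db_name = apexlake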
diff --git a/yardstick/vTC/apexlake/experimental_framework/deployment_unit.py b/yardstick/vTC/apexlake/experimental_framework/deployment_unit.py
new file mode 100644
index 000000000..186258f7d
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/deployment_unit.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+
+from experimental_framework import heat_manager
+from experimental_framework import common
+
+MAX_RETRY = 3
+
+
+class DeploymentUnit:
+ """
+    This unit is in charge of managing the deployment of the workloads under
+    test and of any other workloads required by the benchmark
+ """
+
+ def __init__(self, openstack_credentials):
+ self.heat_manager = heat_manager.HeatManager(openstack_credentials)
+ self.deployed_stacks = list()
+
+ def destroy_heat_template(self, stack_name):
+ """
+ Destroys a stack
+        :param stack_name: Name of the stack to be destroyed (string)
+        :return: (bool) True if the stack was destroyed, False on failure
+ """
+ try:
+ if self.heat_manager.check_stack_status(stack_name):
+ if stack_name in self.deployed_stacks:
+ self.deployed_stacks.remove(stack_name)
+ self.heat_manager.delete_stack(stack_name)
+
+ status = self.heat_manager.check_stack_status(stack_name)
+ while status and 'DELETE_IN_PROGRESS' in status:
+ common.LOG.info(status)
+ time.sleep(5)
+ status = self.heat_manager.check_stack_status(stack_name)
+ return True
+        except Exception:
+ return False
+
+ def destroy_all_deployed_stacks(self):
+ """
+ Destroys all the stacks currently deployed
+ :return: None
+ """
+ for stack in self.deployed_stacks:
+ if self.heat_manager.is_stack_deployed(stack):
+ self.destroy_heat_template(stack)
+
+ def deploy_heat_template(self, template_file, stack_name, parameters,
+ attempt=0):
+ """
+        Deploys a heat template, retrying up to 3 times on failure
+ :param template_file: full path file name of the heat template
+ :param stack_name: name of the stack to deploy
+ :param parameters: parameters to be given to the heat template
+ :param attempt: number of current attempt
+        :return: (bool) True if the creation completed,
+                 False if it failed
+ """
+ if not os.path.isfile(template_file):
+ raise ValueError('The specified file does not exist ("' +
+ template_file + '")')
+ self.heat_manager.validate_heat_template(template_file)
+ try:
+ self.heat_manager.create_stack(template_file, stack_name,
+ parameters)
+ deployed = True
+        except Exception:
+ deployed = False
+
+ if not deployed and 'COMPLETE' in \
+ self.heat_manager.check_stack_status(stack_name):
+ try:
+ self.destroy_heat_template(stack_name)
+            except Exception:
+ pass
+
+ status = self.heat_manager.check_stack_status(stack_name)
+ while status and 'CREATE_IN_PROGRESS' in status:
+ time.sleep(5)
+ status = self.heat_manager.check_stack_status(stack_name)
+ if status and ('FAILED' in status or 'NOT_FOUND' in status):
+ if attempt < MAX_RETRY:
+ attempt += 1
+ try:
+ self.destroy_heat_template(stack_name)
+ except Exception as e:
+ common.LOG.debug(e.message)
+ return self.deploy_heat_template(template_file, stack_name,
+ parameters, attempt)
+ else:
+ try:
+ self.destroy_heat_template(stack_name)
+ except Exception as e:
+ common.LOG.debug(e.message)
+                return False
+ if self.heat_manager.check_stack_status(stack_name) and \
+ 'COMPLETE' in self.heat_manager.\
+ check_stack_status(stack_name):
+ self.deployed_stacks.append(stack_name)
+ return True
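A minimal usage sketch for the DeploymentUnit (illustrative; the template path, stack name and parameters are made up):

    from experimental_framework import common
    from experimental_framework import deployment_unit as deploy

    common.init()  # loads credentials and the other globals
    du = deploy.DeploymentUnit(common.get_credentials())

    # deploy_heat_template polls the stack status and retries up to
    # MAX_RETRY (3) times when the stack ends up FAILED or NOT_FOUND
    ok = du.deploy_heat_template(
        '/tmp/apexlake/heat_templates/vTC_0.yaml',
        'experiment_stack_0',
        {'default_net': 'private'})
    if ok:
        du.destroy_heat_template('experiment_stack_0')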
diff --git a/yardstick/vTC/apexlake/experimental_framework/heat_manager.py b/yardstick/vTC/apexlake/experimental_framework/heat_manager.py
index 41fc585f7..607fa77f3 100644
--- a/yardstick/vTC/apexlake/experimental_framework/heat_manager.py
+++ b/yardstick/vTC/apexlake/experimental_framework/heat_manager.py
@@ -19,7 +19,7 @@ from keystoneclient.v2_0 import client as keystoneClient
from heatclient import client as heatClient
from heatclient.common import template_utils
-from experimental_framework import common
+import experimental_framework.common as common
class HeatManager:
@@ -33,15 +33,14 @@ class HeatManager:
self.project_id = credentials['project']
self.heat = None
- # TODO: verify that init_heat is useless in the constructor
- # self.init_heat()
-
def init_heat(self):
keystone = keystoneClient.Client(username=self.user,
password=self.password,
tenant_name=self.project_id,
auth_url=self.auth_uri)
auth_token = keystone.auth_token
+ self.heat_url = keystone.service_catalog.url_for(
+ service_type='orchestration')
self.heat = heatClient.Client('1', endpoint=self.heat_url,
token=auth_token)
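With this change the Heat endpoint is resolved from the Keystone service catalog at init_heat() time rather than taken verbatim from the configuration. A standalone sketch of the same lookup (credentials are placeholders; python-keystoneclient v2.0 API):

    from keystoneclient.v2_0 import client as keystoneClient

    keystone = keystoneClient.Client(username='admin', password='secret',
                                     tenant_name='demo',
                                     auth_url='http://10.0.0.1:5000/v2.0')
    heat_url = keystone.service_catalog.url_for(
        service_type='orchestration')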
diff --git a/yardstick/vTC/apexlake/experimental_framework/heat_template_generation.py b/yardstick/vTC/apexlake/experimental_framework/heat_template_generation.py
index 15c4eff36..e0c1a667f 100644
--- a/yardstick/vTC/apexlake/experimental_framework/heat_template_generation.py
+++ b/yardstick/vTC/apexlake/experimental_framework/heat_template_generation.py
@@ -14,8 +14,7 @@
'''
-This file contains the code to Generate the heat templates from the base
-template
+Generation of the heat templates from the base template
'''
import json
@@ -160,7 +159,8 @@ def generates_templates(base_heat_template, deployment_configuration):
# Delete the templates eventually generated in previous running of the
# framework
common.LOG.info("Removing the heat templates previously generated")
- os.system("rm " + template_dir + template_name + "_*")
+ command = "rm {}{}_*".format(template_dir, template_name)
+ os.system(command)
# Creation of the tree with all the new configurations
common.LOG.info("Creation of the tree with all the new configurations")
@@ -188,10 +188,7 @@ def generates_templates(base_heat_template, deployment_configuration):
base_template = template_base_name
else:
base_template = template_dir + template_base_name
- if os.path.isabs(template_name):
- new_template = template_name
- else:
- new_template = template_dir + template_name
+ new_template = template_dir + template_name
new_template += "_" + str(counter) + template_file_extension
shutil.copy(base_template, new_template)
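For instance (a sketch with made-up values), a deployment configuration with two values for one placeholder makes generates_templates() emit one numbered template per configuration next to the base template:

    from experimental_framework import common
    from experimental_framework import heat_template_generation as heat

    common.init(api=True)
    # Rewrites #vnic_type in vTC.yaml, producing vTC_0.yaml, vTC_1.yaml
    heat.generates_templates('vTC.yaml', {'vnic_type': ['normal', 'direct']})
    print(heat.get_all_heat_templates(common.get_template_dir(),
                                      common.TEMPLATE_FILE_EXTENSION))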
diff --git a/yardstick/vTC/apexlake/experimental_framework/libraries/packet_checker/test_sniff.c b/yardstick/vTC/apexlake/experimental_framework/libraries/packet_checker/test_sniff.c
index f85acfa11..a4eda3cff 100644
--- a/yardstick/vTC/apexlake/experimental_framework/libraries/packet_checker/test_sniff.c
+++ b/yardstick/vTC/apexlake/experimental_framework/libraries/packet_checker/test_sniff.c
@@ -135,7 +135,7 @@ int main(int argc,char **argv)
int write_file()
{
- FILE *f = fopen("packet_checker.res", "w");
+ FILE *f = fopen("/tmp/apexlake/results/packet_checker.res", "w");
if (f == NULL)
{
printf("Error opening file!\n");
diff --git a/yardstick/vTC/apexlake/experimental_framework/packet_generators/dpdk_packet_generator.py b/yardstick/vTC/apexlake/experimental_framework/packet_generators/dpdk_packet_generator.py
index ae54502e9..347d51af3 100644
--- a/yardstick/vTC/apexlake/experimental_framework/packet_generators/dpdk_packet_generator.py
+++ b/yardstick/vTC/apexlake/experimental_framework/packet_generators/dpdk_packet_generator.py
@@ -15,6 +15,7 @@
import os
import base_packet_generator
import experimental_framework.common as common
+import time
from experimental_framework.constants import conf_file_sections as conf_file
from experimental_framework.constants import framework_parameters as fp
@@ -141,6 +142,8 @@ class DpdkPacketGenerator(base_packet_generator.BasePacketGenerator):
def _change_vlan(pcap_directory, pcap_file, vlan):
common.LOG.info("Changing VLAN Tag on Packet: " + pcap_file +
". New VLAN Tag is " + vlan)
+ command = "chmod +x {}{}".format(pcap_directory, 'vlan_tag.sh')
+ common.run_command(command)
command = pcap_directory + 'vlan_tag.sh '
command += pcap_directory + pcap_file + ' ' + vlan
common.run_command(command)
@@ -244,6 +247,7 @@ class DpdkPacketGenerator(base_packet_generator.BasePacketGenerator):
common.run_command(dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] +
'tools/dpdk_nic_bind.py --unbind ' +
dpdk_vars[conf_file.CFSP_DPDK_BUS_SLOT_NIC_1])
+ time.sleep(5)
common.run_command(dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] +
'tools/dpdk_nic_bind.py --bind=ixgbe ' +
dpdk_vars[conf_file.CFSP_DPDK_BUS_SLOT_NIC_1])
@@ -255,6 +259,7 @@ class DpdkPacketGenerator(base_packet_generator.BasePacketGenerator):
common.run_command(dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] +
'tools/dpdk_nic_bind.py --unbind ' +
dpdk_vars[conf_file.CFSP_DPDK_BUS_SLOT_NIC_2])
+ time.sleep(5)
common.run_command(dpdk_vars[conf_file.CFSP_DPDK_DPDK_DIRECTORY] +
'tools/dpdk_nic_bind.py --bind=ixgbe ' +
dpdk_vars[conf_file.CFSP_DPDK_BUS_SLOT_NIC_2])
diff --git a/yardstick/vTC/apexlake/heat_templates/stress_workload.yaml b/yardstick/vTC/apexlake/heat_templates/stress_workload.yaml
new file mode 100644
index 000000000..1a2f7dbb3
--- /dev/null
+++ b/yardstick/vTC/apexlake/heat_templates/stress_workload.yaml
@@ -0,0 +1,112 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a CPU/memory stress workload
+
+parameters:
+ nic_type:
+ type: string
+ default: normal
+ name:
+ type: string
+ default: cpu_stress
+ cores:
+ type: string
+ memory:
+ type: string
+
+resources:
+ internal_net:
+ type: OS::Neutron::Net
+ properties:
+ name: traffic_network
+ internal_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: internal_net }
+ cidr: 10.100.0.0/24
+
+ router:
+ properties:
+ admin_state_up: true
+ name: router
+ type: OS::Neutron::Router
+
+ router_gateway:
+ properties:
+ network: external
+ router_id: { get_resource: router }
+ type: OS::Neutron::RouterGateway
+
+ router_interface_0:
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: internal_subnet }
+ type: OS::Neutron::RouterInterface
+
+ vm1_port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: internal_net }
+ binding:vnic_type: { get_param: nic_type }
+ fixed_ips:
+ - subnet: { get_resource: internal_subnet }
+
+ flavor_1:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 4096
+ vcpus: 4
+
+ vm1:
+ type: OS::Nova::Server
+ properties:
+ name: traffic_vm1
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ ifconfig eth1 up
+ dhclient eth1
+
+ sed -i 's/localhost/localhost traffic_vm1/g' /etc/hosts
+            touch /etc/resolvconf/resolv.conf.d/tail
+ echo 'nameserver 8.8.8.8' > /etc/resolvconf/resolv.conf.d/tail
+ resolvconf -u
+
+ # Installation of stress
+ apt-get install -y stress
+
+ cd /home/clouduser
+ # Setup merlin
+ rm -rf merlin
+ mkdir merlin
+ cd merlin
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ apt-get install -y zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role=cpu_stress
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # workload setup
+            nohup stress -c $CORES --vm 1 --vm-bytes $MEMORY &
+
+ params:
+ $NAME: { get_param: name }
+ $CORES: { get_param: cores }
+ $MEMORY: { get_param: memory }
+
+ flavor: { get_resource: flavor_1 }
+ networks:
+ - port: { get_resource: vm1_port_1 }
diff --git a/yardstick/vTC/apexlake/heat_templates/vTC.yaml b/yardstick/vTC/apexlake/heat_templates/vTC.yaml
new file mode 100644
index 000000000..e0163e872
--- /dev/null
+++ b/yardstick/vTC/apexlake/heat_templates/vTC.yaml
@@ -0,0 +1,167 @@
+heat_template_version: 2014-10-16
+description: HOT template to deploy a virtual Traffic Classifier
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 2000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+ ### DEFAULT NETWORK FOR MNGM
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: #vnic_type
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: #vnic_type
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ #key_name: { get_param: key_name }
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ # Creation of a user
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ # Install vTC Dependencies
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev bison flex byacc libjson0-dev libcurl4-gnutls-dev jq dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+ apt-get install -y byacc libtool libcurl4-openssl-dev
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ cd /home/clouduser
+
+ # Setup multicast
+ echo mgroup from eth1 group 224.192.16.1 > /etc/smcroute.conf
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ rm resp.json
+ curl -X POST -u "mPkgwvJPsTFS8hYmHk:SDczcrK4cvnkMRWSEchB3ANcWbqFXqPx" https://bitbucket.org/site/oauth2/access_token -d grant_type=refresh_token -d refresh_token=38uFQuhEdPvCTbhc7k >> resp.json
+ access_token=`jq -r '.access_token' resp.json`
+ git clone https://x-token-auth:${access_token}@bitbucket.org/akiskourtis/vtc.git
+ cd vtc
+ git checkout -b stable
+ #Build nDPI library
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+
+ #Build PF_RING library
+ cd ..
+ cd PF_RING
+ make
+ #Build PF_RING examples, including the modified pfbridge, with nDPI integrated.
+ cd userland/examples/
+ sed -i 's#EXTRA_LIBS =#EXTRA_LIBS='"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c#' ./Makefile
+ sed -i 's# -Ithird-party# -Ithird-party/ -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'#' ./Makefile
+ echo $NDPI_DIR
+ make
+ cd ../..
+ cd ..
+ cd ..
+
+ # To use PF_RING driver use the following
+ #sudo rmmod pf_ring
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=16384 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ sleep 5
+
+ # To use the Linux kernel driver use the following
+ cd /home/clouduser/
+ sudo nohup ./vtc/nDPI/example/ndpiReader -i eth1 -a eth2 &
+ sleep 5
+
+ curl --data-ascii "{\"classification_rules\":[{\"RuleId\":\"1\",\"ToS\":\"16\"}]}" http://localhost:9999/classifier/classification_rules/224.192.16.1 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ #$IP_FAMILY: { get_param: ip_family }
+ flavor: #vtc_flavor
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
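Note that the #vnic_type and #vtc_flavor tokens above are not valid HOT: they are the framework's placeholders, rewritten by the heat template generation step before the template ever reaches Heat. The substitution is equivalent to the following (illustrative file name and values):

    from experimental_framework import common

    template = '/tmp/apexlake/heat_templates/vTC_0.yaml'
    common.replace_in_file(template, '#vnic_type', 'normal')
    common.replace_in_file(template, '#vtc_flavor', 'm1.large')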
diff --git a/yardstick/vTC/apexlake/setup.py b/yardstick/vTC/apexlake/setup.py
new file mode 100644
index 000000000..8ab3f4845
--- /dev/null
+++ b/yardstick/vTC/apexlake/setup.py
@@ -0,0 +1,38 @@
+"""
+Experimental Framework
+"""
+
+from distutils.core import setup
+
+
+# TODO: Add instructions to compile test_sniff
+
+setup(name='apexlake',
+ version='1.0',
+ description='Framework to automatically run experiments/benchmarks '
+ 'with VMs within OpenStack environments',
+ author='Intel Research and Development Ireland Ltd',
+ author_email='vincenzo.m.riccobene@intel.com',
+ license='Apache 2.0',
+ url='www.intel.com',
+ packages=['experimental_framework',
+ 'experimental_framework.benchmarks',
+ 'experimental_framework.packet_generators',
+ 'experimental_framework.libraries',
+ 'experimental_framework.constants'],
+ include_package_data=True,
+ package_data={
+ 'experimental_framework': [
+ 'packet_generators/dpdk_pktgen/*.lua',
+ 'packet_generators/pcap_files/*.pcap',
+ 'packet_generators/pcap_files/*.sh',
+ 'libraries/packet_checker/*'
+ ]
+ },
+ data_files=[
+ ('/tmp/apexlake/', ['apexlake.conf']),
+ ('/tmp/apexlake/heat_templates/',
+ ['heat_templates/vTC.yaml']),
+ ('/tmp/apexlake/heat_templates/',
+ ['heat_templates/stress_workload.yaml'])
+ ])
diff --git a/yardstick/vTC/apexlake/tests/api_test.py b/yardstick/vTC/apexlake/tests/api_test.py
new file mode 100644
index 000000000..4b70b9bd6
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/api_test.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import unittest
+import mock
+import os
+import experimental_framework.common as common
+from experimental_framework.api import FrameworkApi
+from experimental_framework.benchmarking_unit import BenchmarkingUnit
+import experimental_framework.benchmarks.\
+ instantiation_validation_benchmark as iv
+
+
+class DummyBenchmarkingUnit(BenchmarkingUnit):
+
+ def __init__(self):
+ BenchmarkingUnit.__init__(self)
+
+ @staticmethod
+ def get_available_test_cases():
+ return ['BenchA', 'BenchB']
+
+ @staticmethod
+ def get_required_benchmarks(required_benchmarks):
+ common.BASE_DIR = "base_dir/"
+ return [iv.InstantiationValidationBenchmark('benchmark', dict())]
+
+
+class DummyBenchmarkingUnit2(BenchmarkingUnit):
+
+ counter_init = 0
+ counter_finalize = 0
+ counter_run = 0
+
+ def __init__(self, base_heat_template, credentials,
+ heat_template_parameters, iterations, test_cases):
+ DummyBenchmarkingUnit2.counter_init = 0
+ DummyBenchmarkingUnit2.counter_finalize = 0
+ DummyBenchmarkingUnit2.counter_run = 0
+
+ def initialize(self):
+ DummyBenchmarkingUnit2.counter_init += 1
+
+ def run_benchmarks(self):
+ DummyBenchmarkingUnit2.counter_run += 1
+
+ def finalize(self):
+ DummyBenchmarkingUnit2.counter_finalize += 1
+
+
+class TestGeneratesTemplate(unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ @mock.patch('experimental_framework.common.init')
+ def test_init_for_success(self, mock_init):
+ FrameworkApi.init()
+ mock_init.assert_called_once_with(api=True)
+
+ # @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
+ # 'get_available_test_cases',
+ # side_effect=DummyBenchmarkingUnit.get_available_test_cases)
+ # def test_get_available_test_cases_for_success(self, mock_bench):
+ # expected = ['BenchA', 'BenchB']
+ # output = FrameworkApi.get_available_test_cases()
+ # self.assertEqual(expected, output)
+
+ @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
+ 'get_required_benchmarks',
+ side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
+ def test_get_test_case_features_for_success(self, mock_get_req_bench):
+ expected = dict()
+ expected['description'] = 'Instantiation Validation Benchmark'
+ expected['parameters'] = [
+ iv.THROUGHPUT,
+ iv.VLAN_SENDER,
+ iv.VLAN_RECEIVER]
+ expected['allowed_values'] = dict()
+ expected['allowed_values'][iv.THROUGHPUT] = \
+ map(str, range(0, 100))
+ expected['allowed_values'][iv.VLAN_SENDER] = \
+ map(str, range(-1, 4096))
+ expected['allowed_values'][iv.VLAN_RECEIVER] = \
+ map(str, range(-1, 4096))
+ expected['default_values'] = dict()
+ expected['default_values'][iv.THROUGHPUT] = '1'
+ expected['default_values'][iv.VLAN_SENDER] = '-1'
+ expected['default_values'][iv.VLAN_RECEIVER] = '-1'
+
+ test_case = 'instantiation_validation_benchmark.' \
+ 'InstantiationValidationBenchmark'
+ output = FrameworkApi.get_test_case_features(test_case)
+ self.assertEqual(expected, output)
+
+ def test__get_test_case_features__for_failure(self):
+ self.assertRaises(
+ ValueError, FrameworkApi.get_test_case_features, 111)
+
+ @mock.patch('experimental_framework.common.init')
+ @mock.patch('experimental_framework.common.LOG')
+ @mock.patch('experimental_framework.common.get_credentials')
+ @mock.patch('experimental_framework.heat_template_generation.'
+ 'generates_templates')
+ @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit',
+ side_effect=DummyBenchmarkingUnit2)
+ def test_execute_framework_for_success(self, mock_b_unit, mock_heat,
+ mock_credentials, mock_log,
+ mock_common_init):
+ common.TEMPLATE_DIR = "{}/{}/".format(
+ os.getcwd(), 'tests/data/generated_templates'
+ )
+
+ test_cases = dict()
+ iterations = 1
+ heat_template = 'VTC_base_single_vm_wait.tmp'
+ heat_template_parameters = dict()
+ deployment_configuration = ''
+ openstack_credentials = dict()
+ openstack_credentials['ip_controller'] = ''
+ openstack_credentials['heat_url'] = ''
+ openstack_credentials['user'] = ''
+ openstack_credentials['password'] = ''
+ openstack_credentials['auth_uri'] = ''
+ openstack_credentials['project'] = ''
+ FrameworkApi.execute_framework(
+ test_cases, iterations, heat_template,
+ heat_template_parameters, deployment_configuration,
+ openstack_credentials)
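Taken together these tests exercise the public surface of FrameworkApi: init(), get_test_case_features() and execute_framework(). A minimal unmocked sketch of the same call sequence; all argument values are illustrative and follow the positional layout used in test_execute_framework_for_success:

# Sketch of the FrameworkApi call sequence exercised above;
# every value is a placeholder.
from experimental_framework.api import FrameworkApi

FrameworkApi.init()
features = FrameworkApi.get_test_case_features(
    'instantiation_validation_benchmark.InstantiationValidationBenchmark')
print(features['description'])

credentials = {'ip_controller': '', 'heat_url': '', 'user': '',
               'password': '', 'auth_uri': '', 'project': ''}
FrameworkApi.execute_framework(
    dict(),                         # test cases
    1,                              # iterations
    'VTC_base_single_vm_wait.tmp',  # heat template
    dict(),                         # heat template parameters
    '',                             # deployment configuration
    credentials)                    # openstack credentials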
diff --git a/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py b/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
new file mode 100644
index 000000000..ccf64066a
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
@@ -0,0 +1,477 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'vmriccox'
+
+
+import unittest
+import mock
+from experimental_framework.benchmarking_unit import BenchmarkingUnit
+# from experimental_framework.data_manager import DataManager
+from experimental_framework.deployment_unit import DeploymentUnit
+import experimental_framework.common as common
+from experimental_framework.benchmarks.rfc2544_throughput_benchmark import \
+ RFC2544ThroughputBenchmark
+
+
+# class DummyDataManager(DataManager):
+#
+# def __init__(self, experiment_directory):
+# self.experiment_directory = experiment_directory
+# self.experiments = dict()
+# self.new_exp_counter = 0
+# self.add_bench_counter = 0
+# self.close_experiment_1_counter = 0
+# self.close_experiment_2_counter = 0
+# self.generate_csv_counter = 0
+#
+# def create_new_experiment(self, experiment_name, get_counter=None):
+# if not get_counter:
+# self.new_exp_counter += 1
+# else:
+# return self.new_exp_counter
+#
+# def add_benchmark(self, experiment_name, benchmark_name, get_counter=None):
+# if not get_counter:
+# self.add_bench_counter += 1
+# else:
+# return self.add_bench_counter
+#
+# def close_experiment(self, experiment, get_counter=None):
+# if get_counter:
+# return [self.close_experiment_1_counter,
+# self.close_experiment_2_counter]
+# if experiment == 'VTC_base_single_vm_wait_1':
+# self.close_experiment_1_counter += 1
+# if experiment == 'VTC_base_single_vm_wait_2':
+# self.close_experiment_2_counter += 1
+#
+# def generate_result_csv_file(self, get_counter=None):
+# if get_counter:
+# return self.generate_csv_counter
+# else:
+# self.generate_csv_counter += 1
+#
+# def add_metadata(self, experiment_name, metadata):
+# pass
+#
+# def add_configuration(self, experiment_name, configuration):
+# pass
+#
+# def add_data_points(self, experiment_name, benchmark_name, result):
+# pass
+
+
+class Dummy_2544(RFC2544ThroughputBenchmark):
+
+ def __init__(self, name, params):
+ self.name = name
+ self.init_counter = 0
+ self.finalize_counter = 0
+ self.run_counter = 0
+ self.params = params
+
+ def init(self, get_counter=None):
+ if get_counter:
+ return self.init_counter
+ else:
+ self.init_counter += 1
+
+ def finalize(self, get_counter=None):
+ if get_counter:
+ return self.finalize_counter
+ else:
+ self.finalize_counter += 1
+
+ def run(self, get_counter=None):
+ if get_counter:
+ return self.run_counter
+ else:
+ self.run_counter += 1
+ return {'throughput': 10}
+
+
+class DummyDeploymentUnit(DeploymentUnit):
+
+ def __init__(self, openstack_credentials):
+ pass
+
+ def deploy_heat_template(self, template_file, stack_name, parameters,
+ attempt=0):
+ return False
+
+
+class TestBenchmarkingUnit(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.heat.'
+ 'get_all_heat_templates')
+ def test___init__(self, mock_heat, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/results/'
+ common.TEMPLATE_FILE_EXTENSION = '.ext'
+ common.RESULT_DIR = 'tests/data/results/'
+ heat_template_name = 'name'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = ['bench_1', 'bench_2']
+ bu = BenchmarkingUnit(heat_template_name,
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+ self.assertEqual(bu.required_benchmarks, benchmarks)
+ bu.heat_template_parameters = heat_template_parameters
+ # mock_data_manager.assert_called_once_with('tests/data/results/12345')
+ mock_dep_unit.assert_called_once_with(openstack_credentials)
+ mock_heat.assert_called_once_with('tests/data/results/', '.ext')
+
+ @mock.patch('experimental_framework.benchmarks.'
+ 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.'
+ 'heat.get_all_heat_templates')
+ def test_initialize_for_success(self, mock_heat, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir,
+ mock_time, mock_rfc2544):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/test_templates/'
+ common.TEMPLATE_FILE_EXTENSION = '.yaml'
+ common.RESULT_DIR = 'tests/data/results/'
+
+ heat_template_name = 'VTC_base_single_vm_wait_'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = [
+ {
+ 'name':
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+ 'params': dict()
+ }
+ ]
+ bu = BenchmarkingUnit(heat_template_name,
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+ self.assertEqual(bu.required_benchmarks, benchmarks)
+ bu.heat_template_parameters = heat_template_parameters
+ bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+ 'VTC_base_single_vm_wait_2.yaml']
+ bu.initialize()
+ self.assertTrue(len(bu.benchmarks) == 1)
+ self.assertEqual(bu.benchmarks[0].__class__,
+ Dummy_2544)
+ # self.assertEqual(bu.data_manager.create_new_experiment('', True), 2)
+ # self.assertEqual(bu.data_manager.add_benchmark('', '', True), 2)
+
+ @mock.patch('experimental_framework.benchmarks.'
+ 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.'
+ 'heat.get_all_heat_templates')
+ def test_finalize_for_success(
+ self, mock_heat, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/test_templates/'
+ common.TEMPLATE_FILE_EXTENSION = '.yaml'
+ common.RESULT_DIR = 'tests/data/results/'
+
+ heat_template_name = 'VTC_base_single_vm_wait_'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = [
+ {
+ 'name':
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+ 'params': dict()
+ }
+ ]
+ bu = BenchmarkingUnit(heat_template_name,
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+ bu.heat_template_parameters = heat_template_parameters
+ bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+ 'VTC_base_single_vm_wait_2.yaml']
+ bu.finalize()
+ # self.assertEqual(bu.data_manager.close_experiment('', True), [1, 1])
+ # self.assertEqual(bu.data_manager.generate_result_csv_file(True), 1)
+
+ @mock.patch('experimental_framework.common.push_data_influxdb')
+ @mock.patch('experimental_framework.common.LOG')
+ @mock.patch('experimental_framework.benchmarks.'
+ 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.'
+ 'heat.get_all_heat_templates')
+ def test_run_benchmarks_for_success(self, mock_heat, mock_common_dep_unit,
+ mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time,
+ mock_rfc2544, mock_log, mock_influx):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/test_templates/'
+ common.TEMPLATE_FILE_EXTENSION = '.yaml'
+ common.RESULT_DIR = 'tests/data/results/'
+ common.INFLUXDB_IP = 'InfluxIP'
+ common.INFLUXDB_PORT = '8086'
+ common.INFLUXDB_DB_NAME = 'test_db'
+
+ heat_template_name = 'VTC_base_single_vm_wait_'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = [
+ {
+ 'name':
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+ 'params': dict()
+ }
+ ]
+ bu = BenchmarkingUnit(heat_template_name,
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+ # bu.data_manager = DummyDataManager('tests/data/results/12345')
+ bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+ 'VTC_base_single_vm_wait_2.yaml']
+ bu.benchmarks = [Dummy_2544('dummy', {'param1': 'val1'})]
+ bu.run_benchmarks()
+ self.assertEqual(bu.benchmarks[0].init(True), 2)
+ self.assertEqual(bu.benchmarks[0].finalize(True), 2)
+ self.assertEqual(bu.benchmarks[0].run(True), 2)
+ # expected_metric = \
+ # 'throughput,vnic_type=direct,ram=1024,benchmark=dummy,' \
+ # 'vcpus=2,experiment_name=VTC_base_single_vm_wait_2,' \
+ # 'param1=val1 value=10 12345000000000'
+ # mock_influx.assert_called_with(expected_metric)
+
+ @mock.patch('experimental_framework.common.LOG')
+ @mock.patch('experimental_framework.benchmarks.'
+ 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.'
+ 'heat.get_all_heat_templates')
+ def test_run_benchmarks_2_for_success(
+ self, mock_heat, mock_common_dep_unit, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544,
+ mock_log):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/test_templates/'
+ common.TEMPLATE_FILE_EXTENSION = '.yaml'
+ common.RESULT_DIR = 'tests/data/results/'
+
+ heat_template_name = 'VTC_base_single_vm_wait_'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = [
+ {
+ 'name':
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+ 'params': dict()
+ }
+ ]
+ bu = BenchmarkingUnit(heat_template_name,
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+ # bu.data_manager = DummyDataManager('tests/data/results/12345')
+ bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
+ 'VTC_base_single_vm_wait_2.yaml']
+ bu.benchmarks = [Dummy_2544('dummy', dict())]
+ common.DEPLOYMENT_UNIT = DummyDeploymentUnit(dict())
+ bu.run_benchmarks()
+ self.assertEqual(bu.benchmarks[0].init(True), 2)
+ self.assertEqual(bu.benchmarks[0].finalize(True), 0)
+ self.assertEqual(bu.benchmarks[0].run(True), 0)
+
+ @mock.patch('experimental_framework.common.LOG')
+ @mock.patch('experimental_framework.benchmarks.'
+ 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.'
+ 'heat.get_all_heat_templates')
+ def test_get_benchmark_name_for_success(
+ self, mock_heat, mock_common_dep_unit, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544,
+ mock_log):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/test_templates/'
+ common.TEMPLATE_FILE_EXTENSION = '.yaml'
+ common.RESULT_DIR = 'tests/data/results/'
+
+ heat_template_name = 'VTC_base_single_vm_wait_'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = [
+ {
+ 'name':
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+ 'params': dict()
+ }
+ ]
+ bu = BenchmarkingUnit(heat_template_name,
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+
+ expected = 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark_0'
+ output = bu.get_benchmark_name(
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark')
+ self.assertEqual(expected, output)
+
+ expected = 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark_1'
+ output = bu.get_benchmark_name(
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark')
+ self.assertEqual(expected, output)
+
+ @mock.patch('experimental_framework.common.LOG')
+ @mock.patch('experimental_framework.benchmarks.'
+ 'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
+ @mock.patch('time.time')
+ @mock.patch('experimental_framework.common.get_template_dir')
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
+ @mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
+ @mock.patch('experimental_framework.benchmarking_unit.'
+ 'heat.get_all_heat_templates')
+ def test_get_required_benchmarks_for_success(
+ self, mock_heat, mock_common_dep_unit, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544,
+ mock_log):
+ mock_heat.return_value = list()
+ mock_time.return_value = '12345'
+ mock_temp_dir.return_value = 'tests/data/test_templates/'
+ common.TEMPLATE_FILE_EXTENSION = '.yaml'
+ common.RESULT_DIR = 'tests/data/results/'
+ openstack_credentials = {
+ 'name': 'aaa',
+ 'surname': 'bbb'
+ }
+ heat_template_parameters = {
+ 'param_1': 'name_1',
+ 'param_2': 'name_2'
+ }
+ iterations = 1
+ benchmarks = [
+ {
+ 'name':
+ 'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark',
+ 'params': dict()
+ }
+ ]
+ bu = BenchmarkingUnit('',
+ openstack_credentials,
+ heat_template_parameters,
+ iterations,
+ benchmarks)
+ req_benchs = \
+ ['rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark']
+ output = bu.get_required_benchmarks(req_benchs)
+ self.assertEqual(len(req_benchs), 1)
+ self.assertEqual(output[0].__class__, Dummy_2544)
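Dummy_2544 doubles as actor and probe: called bare, a method increments its counter; called with get_counter=True it returns the count, which lets run_benchmarks' call totals be asserted without extra mock plumbing. The pattern in isolation:

# The probe pattern used by Dummy_2544 above.
class Probe(object):
    def __init__(self):
        self.run_counter = 0

    def run(self, get_counter=None):
        if get_counter:
            return self.run_counter
        self.run_counter += 1

probe = Probe()
probe.run()
probe.run()
assert probe.run(True) == 2  # two real calls were recorded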
diff --git a/yardstick/vTC/apexlake/tests/common_test.py b/yardstick/vTC/apexlake/tests/common_test.py
new file mode 100644
index 000000000..293754b16
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/common_test.py
@@ -0,0 +1,648 @@
+__author__ = 'vmriccox'
+
+import unittest
+import mock
+import os
+import logging
+import ConfigParser
+import experimental_framework.common as common
+import experimental_framework.constants.conf_file_sections as cf
+
+
+def reset_common():
+ common.LOG = None
+ common.CONF_FILE = None
+ common.DEPLOYMENT_UNIT = None
+ common.ITERATIONS = None
+ common.BASE_DIR = None
+ common.RESULT_DIR = None
+ common.TEMPLATE_DIR = None
+ common.TEMPLATE_NAME = None
+ common.TEMPLATE_FILE_EXTENSION = None
+ common.PKTGEN = None
+ common.PKTGEN_DIR = None
+ common.PKTGEN_DPDK_DIRECTORY = None
+ common.PKTGEN_PROGRAM = None
+ common.PKTGEN_COREMASK = None
+ common.PKTGEN_MEMCHANNEL = None
+ common.PKTGEN_BUS_SLOT_NIC_1 = None
+ common.PKTGEN_BUS_SLOT_NIC_2 = None
+ common.INFLUXDB_IP = None
+ common.INFLUXDB_PORT = None
+ common.INFLUXDB_DB_NAME = None
+
+
+class DummyConfigurationFile(common.ConfigurationFile):
+ def __init__(self, sections, conf_file=''):
+ pass
+
+ def get_variable(self, section, variable_name):
+ return 'vTC.yaml'
+
+ def get_variable_list(self, section):
+ return ['template_base_name']
+
+
+class DummyConfigurationFile2(common.ConfigurationFile):
+ def __init__(self, sections):
+ self.pktgen_counter = 0
+
+ def get_variable(self, section, variable_name):
+ if variable_name == cf.CFSG_TEMPLATE_NAME:
+ return 'vTC.yaml'
+ if variable_name == cf.CFSG_ITERATIONS:
+ return '2'
+ if variable_name == cf.CFSG_DEBUG:
+ return 'True'
+ if variable_name == cf.CFSP_PACKET_GENERATOR:
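+ # Return a supported generator on the first lookup and an unsupported
+ # one on the next; test_init_pktgen_for_failure relies on this.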
+ if self.pktgen_counter == 1:
+ return 'non_supported'
+ self.pktgen_counter += 1
+ return 'dpdk_pktgen'
+ if variable_name == cf.CFSP_DPDK_PKTGEN_DIRECTORY:
+ return os.getcwd()
+ if variable_name == cf.CFSP_DPDK_PROGRAM_NAME:
+ return 'program'
+ if variable_name == cf.CFSP_DPDK_COREMASK:
+ return 'coremask'
+ if variable_name == cf.CFSP_DPDK_MEMORY_CHANNEL:
+ return 'memchannel'
+ if variable_name == cf.CFSP_DPDK_BUS_SLOT_NIC_1:
+ return 'bus_slot_nic_1'
+ if variable_name == cf.CFSP_DPDK_BUS_SLOT_NIC_2:
+ return 'bus_slot_nic_2'
+ if variable_name == cf.CFSP_DPDK_DPDK_DIRECTORY:
+ return os.getcwd()
+
+ def get_variable_list(self, section):
+ if section == cf.CFS_PKTGEN:
+ return [
+ cf.CFSP_DPDK_NAME_IF_2,
+ cf.CFSP_DPDK_NAME_IF_1,
+ cf.CFSP_DPDK_BUS_SLOT_NIC_1,
+ cf.CFSP_DPDK_BUS_SLOT_NIC_2,
+ cf.CFSP_DPDK_COREMASK,
+ cf.CFSP_DPDK_DPDK_DIRECTORY,
+ cf.CFSP_DPDK_PKTGEN_DIRECTORY,
+ cf.CFSP_DPDK_MEMORY_CHANNEL,
+ cf.CFSP_DPDK_PROGRAM_NAME,
+ cf.CFSP_PACKET_GENERATOR
+ ]
+ else:
+ return [
+ 'template_base_name',
+ 'iterations',
+ cf.CFSG_DEBUG
+ ]
+
+
+class TestCommonInit(unittest.TestCase):
+
+ def setUp(self):
+ common.CONF_FILE = DummyConfigurationFile('')
+ self.dir = '{}/{}'.format(os.getcwd(),
+ 'experimental_framework/')
+
+ def tearDown(self):
+ reset_common()
+ # common.CONF_FILE = None
+
+ @mock.patch('os.getcwd')
+ @mock.patch('experimental_framework.common.init_conf_file')
+ @mock.patch('experimental_framework.common.init_general_vars')
+ @mock.patch('experimental_framework.common.init_log')
+ @mock.patch('experimental_framework.common.init_pktgen')
+ @mock.patch('experimental_framework.common.CONF_FILE')
+ def test_init_for_success(self, mock_conf_file, init_pktgen, init_log,
+ init_general_vars, init_conf_file, mock_getcwd):
+ mock_getcwd.return_value = self.dir
+ common.init(True)
+ init_pktgen.assert_called_once()
+ init_conf_file.assert_called_once()
+ init_general_vars.assert_called_once()
+ init_log.assert_called_once()
+ expected = self.dir.split('experimental_framework/')[0]
+ self.assertEqual(common.BASE_DIR, expected)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('os.makedirs')
+ @mock.patch('experimental_framework.common.LOG')
+ def test_init_general_vars_for_success(self, mock_log, mock_makedirs,
+ mock_path_exists):
+ common.BASE_DIR = "{}/".format(os.getcwd())
+ mock_path_exists.return_value = False
+ common.init_general_vars()
+ self.assertEqual(common.TEMPLATE_FILE_EXTENSION, '.yaml')
+ self.assertEqual(common.TEMPLATE_DIR, '/tmp/apexlake/heat_templates/')
+ self.assertEqual(common.TEMPLATE_NAME, 'vTC.yaml')
+ self.assertEqual(common.RESULT_DIR, '/tmp/apexlake/results/')
+ self.assertEqual(common.ITERATIONS, 1)
+ # mock_makedirs.assert_called_once_with('/tmp/apexlake/heat_templates/')
+
+
+class TestCommonInit2(unittest.TestCase):
+
+ def setUp(self):
+ common.CONF_FILE = DummyConfigurationFile2('')
+ self.dir = '{}/{}'.format(os.getcwd(), 'experimental_framework/')
+
+ def tearDown(self):
+ reset_common()
+ common.CONF_FILE = None
+
+ @mock.patch('experimental_framework.common.LOG')
+ def test_init_general_vars_2_for_success(self, mock_log):
+ common.BASE_DIR = "{}/".format(os.getcwd())
+ common.init_general_vars()
+ self.assertEqual(common.TEMPLATE_FILE_EXTENSION, '.yaml')
+ self.assertEqual(common.TEMPLATE_DIR, '/tmp/apexlake/heat_templates/')
+ self.assertEqual(common.TEMPLATE_NAME, 'vTC.yaml')
+ self.assertEqual(common.RESULT_DIR, '/tmp/apexlake/results/')
+ self.assertEqual(common.ITERATIONS, 2)
+
+ def test_init_log_2_for_success(self):
+ common.init_log()
+ self.assertIsInstance(common.LOG, logging.RootLogger)
+
+ def test_init_pktgen_for_success(self):
+ common.init_pktgen()
+ self.assertEqual(common.PKTGEN, 'dpdk_pktgen')
+ directory = self.dir.split('experimental_framework/')[0]
+ self.assertEqual(common.PKTGEN_DIR, directory)
+ self.assertEqual(common.PKTGEN_PROGRAM, 'program')
+ self.assertEqual(common.PKTGEN_COREMASK, 'coremask')
+ self.assertEqual(common.PKTGEN_MEMCHANNEL, 'memchannel')
+ self.assertEqual(common.PKTGEN_BUS_SLOT_NIC_1, 'bus_slot_nic_1')
+ self.assertEqual(common.PKTGEN_BUS_SLOT_NIC_2, 'bus_slot_nic_2')
+ expected_dir = "{}/".format(os.getcwd())
+ self.assertEqual(common.PKTGEN_DPDK_DIRECTORY, expected_dir)
+
+ def test_init_pktgen_for_failure(self):
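+ # The extra lookup advances the dummy's pktgen_counter so that
+ # init_pktgen() sees the unsupported generator and must raise.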
+ common.CONF_FILE.get_variable('', cf.CFSP_PACKET_GENERATOR)
+ self.assertRaises(ValueError, common.init_pktgen)
+
+
+class TestConfFileInitialization(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ reset_common()
+
+ @mock.patch('experimental_framework.common.ConfigurationFile',
+ side_effect=DummyConfigurationFile)
+ def test_init_conf_file_for_success(self, conf_file):
+ common.CONF_FILE = None
+ common.init_conf_file(False)
+ self.assertIsInstance(common.CONF_FILE,
+ DummyConfigurationFile)
+
+ common.CONF_FILE = None
+ common.init_conf_file(True)
+ self.assertIsInstance(common.CONF_FILE,
+ DummyConfigurationFile)
+
+ @mock.patch('experimental_framework.common.CONF_FILE')
+ def test_init_log_for_success(self, mock_conf_file):
+ mock_conf_file.get_variable_list.return_value = 'value'
+ common.init_log()
+ self.assertIsInstance(common.LOG, logging.RootLogger)
+
+ @mock.patch('experimental_framework.common.CONF_FILE')
+ def test_init_influxdb_for_success(self, mock_conf_file):
+ mock_conf_file.get_variable.return_value = 'value'
+ common.init_influxdb()
+ self.assertEqual(common.INFLUXDB_IP, 'value')
+ self.assertEqual(common.INFLUXDB_PORT, 'value')
+ self.assertEqual(common.INFLUXDB_DB_NAME, 'value')
+
+
+class DummyConfigurationFile3(common.ConfigurationFile):
+ counter = 0
+
+ def __init__(self, sections, config_file='conf.cfg'):
+ common.ConfigurationFile.__init__(self, sections, config_file)
+
+ @staticmethod
+ def _config_section_map(section, config_file, get_counter=None):
+ if get_counter:
+ return DummyConfigurationFile3.counter
+ else:
+ DummyConfigurationFile3.counter += 1
+ return dict()
+
+
+class TestConfigFileClass(unittest.TestCase):
+
+ def setUp(self):
+ self.sections = [
+ 'General',
+ 'OpenStack',
+ 'Experiment-VNF',
+ 'PacketGen',
+ 'Deployment-parameters',
+ 'Testcase-parameters'
+ ]
+ c_file = './tests/data/common/conf.cfg'
+ common.BASE_DIR = os.getcwd()
+ self.conf_file = common.ConfigurationFile(self.sections, c_file)
+
+ def tearDown(self):
+ reset_common()
+ common.BASE_DIR = None
+
+ @mock.patch('experimental_framework.common.ConfigurationFile.'
+ '_config_section_map',
+ side_effect=DummyConfigurationFile3._config_section_map)
+ def test___init___for_success(self, mock_conf_map):
+ sections = ['General', 'OpenStack', 'Experiment-VNF', 'PacketGen',
+ 'Deployment-parameters', 'Testcase-parameters']
+ c = DummyConfigurationFile3(
+ sections, config_file='./tests/data/common/conf.cfg')
+ self.assertEqual(
+ DummyConfigurationFile3._config_section_map('', '', True),
+ 6)
+ for section in sections:
+ self.assertEqual(getattr(c, section), dict())
+
+ def test__config_section_map_for_success(self):
+ general_section = 'General'
+ # openstack_section = 'OpenStack'
+ config_file = 'tests/data/common/conf.cfg'
+ config = ConfigParser.ConfigParser()
+ config.read(config_file)
+
+ expected = {
+ 'benchmarks': 'b_marks',
+ 'iterations': '1',
+ 'template_base_name': 't_name'
+ }
+ output = common.\
+ ConfigurationFile._config_section_map(general_section, config)
+ self.assertEqual(expected, output)
+
+ @mock.patch('experimental_framework.common.'
+ 'ConfigurationFile.get_variable_list')
+ def test_get_variable_for_success(self, mock_get_var_list):
+ section = self.sections[0]
+ variable_name = 'template_base_name'
+ expected = 't_name'
+ mock_get_var_list.return_value = [variable_name]
+ output = self.conf_file.get_variable(section, variable_name)
+ self.assertEqual(expected, output)
+
+ @mock.patch('experimental_framework.common.'
+ 'ConfigurationFile.get_variable_list')
+ def test_get_variable_for_failure(self, mock_get_var_list):
+ section = self.sections[0]
+ variable_name = 'something_else'
+ self.assertRaises(
+ ValueError,
+ self.conf_file.get_variable,
+ section, variable_name
+ )
+
+ def test_get_variable_list_for_success(self):
+ section = self.sections[0]
+ expected = {
+ 'benchmarks': 'b_marks',
+ 'iterations': '1',
+ 'template_base_name': 't_name'
+ }
+ output = self.conf_file.get_variable_list(section)
+ self.assertEqual(expected, output)
+
+ def test_get_variable_list_for_failure(self):
+ section = 'something_else'
+ self.assertRaises(
+ ValueError,
+ self.conf_file.get_variable_list,
+ section)
+
+
+class DummyConfigurationFile4(common.ConfigurationFile):
+
+ def get_variable(self, section, variable_name):
+ if variable_name == 'vnic2_type':
+ return '"value"'
+ elif variable_name == cf.CFSG_BENCHMARKS:
+ return "BenchmarkClass1, BenchmarkClass2"
+ return '@string "value"'
+
+ # def get_variable_list(self, section):
+ # return list()
+
+
+class TestCommonMethods(unittest.TestCase):
+
+ def setUp(self):
+ self.sections = [
+ 'General',
+ 'OpenStack',
+ 'Experiment-VNF',
+ 'PacketGen',
+ 'Deployment-parameters',
+ 'Testcase-parameters'
+ ]
+ config_file = './tests/data/common/conf.cfg'
+ common.BASE_DIR = os.getcwd()
+ common.CONF_FILE = DummyConfigurationFile4(self.sections, config_file)
+
+ def tearDown(self):
+ reset_common()
+ common.CONF_FILE = None
+
+ def test_get_credentials_for_success(self):
+ expected = {
+ 'ip_controller': '@string "value"',
+ 'project': '@string "value"',
+ 'auth_uri': '@string "value"',
+ 'user': '@string "value"',
+ 'heat_url': '@string "value"',
+ 'password': '@string "value"'
+ }
+ output = common.get_credentials()
+ self.assertEqual(expected, output)
+
+ def test_get_heat_template_params_for_success(self):
+ expected = {
+ 'param_1': '@string "value"',
+ 'param_2': '@string "value"',
+ 'param_3': '@string "value"',
+ 'param_4': '@string "value"'
+ }
+ output = common.get_heat_template_params()
+ self.assertEqual(expected, output)
+
+ def test_get_testcase_params_for_success(self):
+ expected = {'test_case_param': '@string "value"'}
+ output = common.get_testcase_params()
+ self.assertEqual(expected, output)
+
+ def test_get_file_first_line_for_success(self):
+ file = 'tests/data/common/conf.cfg'
+ expected = '[General]\n'
+ output = common.get_file_first_line(file)
+ self.assertEqual(expected, output)
+
+ def test_replace_in_file_for_success(self):
+ filename = 'tests/data/common/file_replacement.txt'
+ text_to_search = 'replacement of'
+ text_to_replace = '***'
+ common.replace_in_file(filename, text_to_search, text_to_replace)
+ after = open(filename, 'r').readline()
+ self.assertEqual(after, 'Test for the *** strings into a file\n')
+ text_to_search = '***'
+ text_to_replace = 'replacement of'
+ common.replace_in_file(filename, text_to_search, text_to_replace)
+
+ @mock.patch('os.system')
+ @mock.patch('experimental_framework.common.LOG')
+ def test_run_command_for_success(self, mock_log, mock_os_system):
+ command = 'command to be run'
+ common.run_command(command)
+ mock_os_system.assert_called_once_with(command)
+
+ @mock.patch('experimental_framework.common.run_command')
+ def test_push_data_influxdb_for_success(self, mock_run_cmd):
+ data = 'string that describes the data'
+ expected = "curl -i -XPOST 'http://None:None/write?db=None' " \
+ "--data-binary string that describes the data"
+ common.push_data_influxdb(data)
+ mock_run_cmd.assert_called_once_with(expected)
+
+ def test_get_base_dir_for_success(self):
+ base_dir = common.BASE_DIR
+ common.BASE_DIR = 'base_dir'
+ expected = 'base_dir'
+ output = common.get_base_dir()
+ self.assertEqual(expected, output)
+ common.BASE_DIR = base_dir
+
+ def test_get_template_dir_for_success(self):
+ template_dir = common.TEMPLATE_DIR
+ common.TEMPLATE_DIR = 'base_dir'
+ expected = 'base_dir'
+ output = common.get_template_dir()
+ self.assertEqual(expected, output)
+ common.TEMPLATE_DIR = template_dir
+
+ def test_get_dpdk_pktgen_vars_test(self):
+ # Test 1
+ common.PKTGEN = 'dpdk_pktgen'
+ common.PKTGEN_DIR = 'var'
+ common.PKTGEN_PROGRAM = 'var'
+ common.PKTGEN_COREMASK = 'var'
+ common.PKTGEN_MEMCHANNEL = 'var'
+ common.PKTGEN_BUS_SLOT_NIC_1 = 'var'
+ common.PKTGEN_BUS_SLOT_NIC_2 = 'var'
+ common.PKTGEN_NAME_NIC_1 = 'var'
+ common.PKTGEN_NAME_NIC_2 = 'var'
+ common.PKTGEN_DPDK_DIRECTORY = 'var'
+ expected = {
+ 'bus_slot_nic_1': 'var',
+ 'bus_slot_nic_2': 'var',
+ 'name_if_1': 'var',
+ 'name_if_2': 'var',
+ 'coremask': 'var',
+ 'dpdk_directory': 'var',
+ 'memory_channels': 'var',
+ 'pktgen_directory': 'var',
+ 'program_name': 'var'
+ }
+ output = common.get_dpdk_pktgen_vars()
+ self.assertEqual(expected, output)
+
+ # Test 2
+ common.PKTGEN = 'something_else'
+ common.PKTGEN_DIR = 'var'
+ common.PKTGEN_PROGRAM = 'var'
+ common.PKTGEN_COREMASK = 'var'
+ common.PKTGEN_MEMCHANNEL = 'var'
+ common.PKTGEN_BUS_SLOT_NIC_1 = 'var'
+ common.PKTGEN_BUS_SLOT_NIC_2 = 'var'
+ common.PKTGEN_DPDK_DIRECTORY = 'var'
+ expected = {}
+ output = common.get_dpdk_pktgen_vars()
+ self.assertEqual(expected, output)
+
+ @mock.patch('experimental_framework.common.LOG')
+ def test_get_deployment_configuration_variables_for_success(self,
+ mock_log):
+ expected = {
+ 'vcpu': ['value'],
+ 'vnic1_type': ['value'],
+ 'ram': ['value'],
+ 'vnic2_type': ['value']
+ }
+ output = common.get_deployment_configuration_variables_from_conf_file()
+ self.assertEqual(expected, output)
+
+ def test_get_benchmarks_from_conf_file_for_success(self):
+ expected = ['BenchmarkClass1', 'BenchmarkClass2']
+ output = common.get_benchmarks_from_conf_file()
+ self.assertEqual(expected, output)
+
+
+class TestInputValidation(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ reset_common()
+
+ def test_validate_string_for_success(self):
+ output = common.InputValidation.validate_string('string', '')
+ self.assertTrue(output)
+
+ def test_validate_string_for_failure(self):
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_string,
+ list(), ''
+ )
+
+ def test_validate_int_for_success(self):
+ output = common.InputValidation.validate_integer(1111, '')
+ self.assertTrue(output)
+
+ def test_validate_int_for_failure(self):
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_integer,
+ list(), ''
+ )
+
+ def test_validate_dict_for_success(self):
+ output = common.InputValidation.validate_dictionary(dict(), '')
+ self.assertTrue(output)
+
+ def test_validate_dict_for_failure(self):
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_dictionary,
+ list(), ''
+ )
+
+ def test_validate_file_exist_for_success(self):
+ filename = 'tests/data/common/file_replacement.txt'
+ output = common.InputValidation.validate_file_exist(filename, '')
+ self.assertTrue(output)
+
+ def test_validate_file_exist_for_failure(self):
+ filename = 'tests/data/common/file_replacement'
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_file_exist,
+ filename, ''
+ )
+
+ def test_validate_directory_exist_and_format_for_success(self):
+ directory = 'tests/data/common/'
+ output = common.InputValidation.\
+ validate_directory_exist_and_format(directory, '')
+ self.assertTrue(output)
+
+ def test_validate_directory_exist_and_format_for_failure(self):
+ directory = 'tests/data/com/'
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_directory_exist_and_format,
+ directory, ''
+ )
+
+ @mock.patch('experimental_framework.common.CONF_FILE')
+ def test_validate_configuration_file_parameter_for_success(self,
+ mock_conf):
+ mock_conf.get_variable_list.return_value = ['param']
+ section = ''
+ parameter = 'param'
+ message = ''
+ output = common.InputValidation.\
+ validate_configuration_file_parameter(section, parameter, message)
+ self.assertTrue(output)
+
+ @mock.patch('experimental_framework.common.CONF_FILE')
+ def test_validate_configuration_file_parameter_for_failure(
+ self, mock_conf_file):
+ section = ''
+ parameter = 'something_else'
+ message = ''
+ mock_conf_file.get_variable_list.return_value = ['parameter']
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.
+ validate_configuration_file_parameter,
+ section, parameter, message
+ )
+
+ def test_validate_configuration_file_section_for_success(self):
+ section = 'General'
+ message = ''
+ output = common.InputValidation.\
+ validate_configuration_file_section(section, message)
+ self.assertTrue(output)
+
+ def test_validate_configuration_file_section_for_failure(self):
+ section = 'Something-Else'
+ message = ''
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_configuration_file_section,
+ section, message
+ )
+
+ def test_validate_boolean_for_success(self):
+ message = ''
+ boolean = True
+ output = common.InputValidation.validate_boolean(boolean, message)
+ self.assertTrue(output)
+
+ boolean = 'True'
+ output = common.InputValidation.validate_boolean(boolean, message)
+ self.assertTrue(output)
+
+ boolean = 'False'
+ output = common.InputValidation.validate_boolean(boolean, message)
+ self.assertFalse(output)
+
+ def test_validate_boolean_for_failure(self):
+ message = ''
+ boolean = 'string'
+ self.assertRaises(
+ ValueError,
+ common.InputValidation.validate_boolean,
+ boolean, message
+ )
+
+ def test_validate_os_credentials_for_failure(self):
+ # Test 1
+ credentials = list()
+ self.assertRaises(ValueError,
+ common.InputValidation.validate_os_credentials,
+ credentials)
+
+ # Test 2
+ credentials = dict()
+ credentials['ip_controller'] = ''
+ credentials['heat_url'] = ''
+ credentials['user'] = ''
+ credentials['password'] = ''
+ credentials['auth_uri'] = ''
+ # credentials['project'] = ''
+ self.assertRaises(ValueError,
+ common.InputValidation.validate_os_credentials,
+ credentials)
+
+ def test_validate_os_credentials_for_success(self):
+ credentials = dict()
+ credentials['ip_controller'] = ''
+ credentials['heat_url'] = ''
+ credentials['user'] = ''
+ credentials['password'] = ''
+ credentials['auth_uri'] = ''
+ credentials['project'] = ''
+ self.assertTrue(
+ common.InputValidation.validate_os_credentials(credentials))
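TestInputValidation pins down a tri-state contract for validate_boolean: real booleans pass through, the strings 'True' and 'False' are coerced, and anything else raises ValueError. A sketch of that contract in use ('msg' is an illustrative error message):

from experimental_framework import common

assert common.InputValidation.validate_boolean(True, 'msg')
assert not common.InputValidation.validate_boolean('False', 'msg')
try:
    common.InputValidation.validate_boolean('string', 'msg')
except ValueError:
    print('non-boolean input rejected')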
diff --git a/yardstick/vTC/apexlake/tests/conf_file_sections_test.py b/yardstick/vTC/apexlake/tests/conf_file_sections_test.py
new file mode 100644
index 000000000..d2157fcce
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/conf_file_sections_test.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'vmriccox'
+
+
+import unittest
+from experimental_framework.constants import conf_file_sections as cfs
+
+
+class TestConfFileSection(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def test_get_sections_api_for_success(self):
+ expected = ['PacketGen', 'General', 'InfluxDB']
+ output = cfs.get_sections_api()
+ self.assertEqual(expected, output)
diff --git a/yardstick/vTC/apexlake/tests/data/common/conf.cfg b/yardstick/vTC/apexlake/tests/data/common/conf.cfg
new file mode 100644
index 000000000..9266647e8
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/common/conf.cfg
@@ -0,0 +1,43 @@
+[General]
+template_base_name = t_name
+benchmarks = b_marks
+iterations = 1
+
+[OpenStack]
+ip_controller =
+heat_url =
+user =
+password =
+auth_uri =
+project =
+
+
+[Experiment-VNF]
+VNIC1_TYPE = @string "normal", "direct" @costs '0', '1'
+VNIC2_TYPE = @string "normal", "direct" @costs '0', '1'
+VCPU = @numeric "4"
+RAM = @numeric "4096"
+
+
+[PacketGen]
+packet_generator = dpdk_pktgen
+pktgen_directory = pktgen_dir
+dpdk_directory = dpdk_dir
+program_name = app/app/x86_64-native-linuxapp-gcc/pktgen
+coremask = 1f
+memory_channels = 3
+bus_slot_nic_1 = 01:00.0
+name_if_1 = enp1s0f0
+bus_slot_nic_2 = 01:00.1
+name_if_2 = enp1s0f2
+
+
+[Deployment-parameters]
+param_1 = val_1
+param_2 = val_2
+param_3 = val_3
+param_4 = val_4
+
+
+[Testcase-parameters]
+test_case_param = 1280
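This fixture is what test__config_section_map_for_success parses; reading its [General] section with the stock ConfigParser already yields the flat dict the test expects. A standalone sketch (not the framework's own code):

try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2, as used in the tests

config = configparser.ConfigParser()
config.read('tests/data/common/conf.cfg')
general = dict(config.items('General'))
print(general)
# -> {'template_base_name': 't_name', 'benchmarks': 'b_marks',
#     'iterations': '1'}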
diff --git a/yardstick/vTC/apexlake/tests/data/common/file_replacement.txt b/yardstick/vTC/apexlake/tests/data/common/file_replacement.txt
new file mode 100644
index 000000000..8122d9a91
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/common/file_replacement.txt
@@ -0,0 +1 @@
+Test for the replacement of strings into a file
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait.tmp b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait.tmp
new file mode 100644
index 000000000..aa3959fc1
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait.tmp
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: #vnic_type
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: #vnic_type
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: #ram
+ vcpus: #vcpus
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's#EXTRA_LIBS =#EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c#g' ./Makefile
+ sed -i 's# -Ithird-party# -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'#g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs: \ No newline at end of file
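The .tmp base template keeps the #vnic_type, #ram and #vcpus placeholders; the _1/_2 .yaml fixtures that follow are its expanded variants (normal vs direct vNICs, both with 1024 MB RAM and 2 vCPUs). The real expansion lives in experimental_framework.heat_template_generation, which this patch does not show; a minimal sketch of the substitution the fixtures imply:

# Sketch only; the actual generator is heat_template_generation.
def expand(template_text, vnic_type, ram, vcpus):
    out = template_text.replace('#vnic_type', vnic_type)
    out = out.replace('#ram', str(ram))
    return out.replace('#vcpus', str(vcpus))

with open('VTC_base_single_vm_wait.tmp') as f:
    base = f.read()

variant_1 = expand(base, 'normal', 1024, 2)  # -> ..._1.yaml
variant_2 = expand(base, 'direct', 1024, 2)  # -> ..._2.yaml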
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml
new file mode 100644
index 000000000..5788980b0
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 1024
+ vcpus: 2
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's#EXTRA_LIBS =#EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c#g' ./Makefile
+ sed -i 's# -Ithird-party# -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'#g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml.json b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml.json
new file mode 100644
index 000000000..3af9a1cc7
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_1.yaml.json
@@ -0,0 +1 @@
+{"vnic_type": "normal", "ram": "1024", "vcpus": "2"} \ No newline at end of file
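Each generated template gets a .json sidecar recording its deployment metadata; the commented-out expected_metric in benchmarking_unit_test.py suggests these keys become InfluxDB tags on pushed results. A sketch of that mapping (measurement name and tag order are illustrative):

import json

with open('VTC_base_single_vm_wait_1.yaml.json') as f:
    meta = json.load(f)

tags = ','.join('%s=%s' % item for item in sorted(meta.items()))
metric = 'throughput,%s value=10 12345000000000' % tags
print(metric)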
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml
new file mode 100644
index 000000000..44a81d081
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: direct
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: direct
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 1024
+ vcpus: 2
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
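+ # Pick the first interface whose address matches the requested IP
+ # family; BB carries the interface name into the multicast setup below.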
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
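+ # Point the example Makefile at the nDPI build output; '|' is used as
+ # the sed delimiter because the substituted paths contain slashes.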
+ sed -i 's|EXTRA_LIBS =|EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c|g' ./Makefile
+ sed -i 's| -Ithird-party| -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'|g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml.json b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml.json
new file mode 100644
index 000000000..9f246891d
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/VTC_base_single_vm_wait_2.yaml.json
@@ -0,0 +1 @@
+{"vnic_type": "direct", "ram": "1024", "vcpus": "2"} \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml
new file mode 100644
index 000000000..5788980b0
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 1024
+ vcpus: 2
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's|EXTRA_LIBS =|EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c|g' ./Makefile
+ sed -i 's| -Ithird-party| -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'|g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
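+ # Signal the Heat WaitCondition that provisioning finished; wc_notify
+ # expands to the curl CLI exposed by the wait handle (see params).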
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml.json b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml.json
new file mode 100644
index 000000000..3af9a1cc7
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_1.yaml.json
@@ -0,0 +1 @@
+{"vnic_type": "normal", "ram": "1024", "vcpus": "2"} \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml
new file mode 100644
index 000000000..44a81d081
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: direct
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: direct
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 1024
+ vcpus: 2
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
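+ # Watchdog script: restart the smcroute daemon every 50 seconds so the
+ # multicast group registration stays alive.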
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's|EXTRA_LIBS =|EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c|g' ./Makefile
+ sed -i 's| -Ithird-party| -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'|g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
diff --git a/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml.json b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml.json
new file mode 100644
index 000000000..9f246891d
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/generated_templates/experiment_2.yaml.json
@@ -0,0 +1 @@
+{"vnic_type": "direct", "ram": "1024", "vcpus": "2"} \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/benchmark_1.csv b/yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/benchmark_1.csv
new file mode 100644
index 000000000..f5f2932d0
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/benchmark_1.csv
@@ -0,0 +1,3 @@
+conf_1;conf_3;conf_2;point_4;point_5;point_6;point_1;point_2;point_3
+conf_value_1;conf_value_3;conf_value_2;?;?;?;value_1;value_2;value_3
+conf_value_1;conf_value_3;conf_value_2;value_4;value_5;value_6;?;?;?
diff --git a/yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/metadata.json b/yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/metadata.json
new file mode 100644
index 000000000..ff4cebf41
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_experiments/experiment_1/metadata.json
@@ -0,0 +1 @@
+{"location": "tests/data/experiments/experiment_1/metadata.json", "item_2": "value_2", "item_3": "value_3", "item_1": "value_1"} \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/data/test_experiments/results_benchmark_1.csv b/yardstick/vTC/apexlake/tests/data/test_experiments/results_benchmark_1.csv
new file mode 100644
index 000000000..4662dd2be
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_experiments/results_benchmark_1.csv
@@ -0,0 +1,5 @@
+conf_5;conf_4;conf_6;conf_1;conf_3;conf_2;point_12;point_10;point_11;point_4;point_5;point_6;point_8;point_9;point_7;point_1;point_2;point_3
+?;?;?;conf_value_1;conf_value_3;conf_value_2;?;?;?;?;?;?;?;?;?;value_1;value_2;value_3
+?;?;?;conf_value_1;conf_value_3;conf_value_2;?;?;?;value_4;value_5;value_6;?;?;?;?;?;?
+conf_value_5;conf_value_4;conf_value_6;?;?;?;?;?;?;?;?;?;value_8;value_9;value_7;?;?;?
+conf_value_5;conf_value_4;conf_value_6;?;?;?;value_12;value_10;value_11;?;?;?;?;?;?;?;?;?
diff --git a/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait.tmp b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait.tmp
new file mode 100644
index 000000000..aa3959fc1
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait.tmp
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
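+ ### Tokens of the form #vnic_type, #ram and #vcpus below are
+ ### placeholders that the template generation step replaces with
+ ### concrete values when the per-experiment YAML files are produced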
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: #vnic_type
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: #vnic_type
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: #ram
+ vcpus: #vcpus
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's|EXTRA_LIBS =|EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c|g' ./Makefile
+ sed -i 's| -Ithird-party| -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'|g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs: \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml
new file mode 100644
index 000000000..5788980b0
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 1024
+ vcpus: 2
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's|EXTRA_LIBS =|EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c|g' ./Makefile
+ sed -i 's| -Ithird-party| -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'|g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
diff --git a/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml.json b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml.json
new file mode 100644
index 000000000..3af9a1cc7
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_1.yaml.json
@@ -0,0 +1 @@
+{"vnic_type": "normal", "ram": "1024", "vcpus": "2"} \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml
new file mode 100644
index 000000000..44a81d081
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml
@@ -0,0 +1,199 @@
+heat_template_version: 2014-10-16
+description: HOT template to create a DPI
+
+parameters:
+ default_net:
+ type: string
+ default_subnet:
+ type: string
+ source_net:
+ type: string
+ source_subnet:
+ type: string
+ destination_net:
+ type: string
+ destination_subnet:
+ type: string
+ internal_net:
+ type: string
+ internal_subnet:
+ type: string
+ node:
+ type: string
+ default: compB
+ name:
+ type: string
+ default: vtc
+ ip_family:
+ type: string
+ timeout:
+ type: number
+ description: Timeout for WaitCondition, depends on your image and environment
+ default: 1000
+
+resources:
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ count: 1
+ timeout: {get_param: timeout}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+
+ ### DEFAULT NETWORK FOR MERLIN DATA
+ port_1:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: default_net }
+ binding:vnic_type: normal
+ fixed_ips:
+ - subnet: { get_param: default_subnet }
+
+ ### NETWORK FOR RECEIVING TRAFFIC
+ port_2:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: source_net }
+ binding:vnic_type: direct
+ fixed_ips:
+ - subnet: { get_param: source_subnet }
+
+ ### NETWORK FOR SENDING TRAFFIC
+ port_3:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: destination_net }
+ binding:vnic_type: direct
+ fixed_ips:
+ - subnet: { get_param: destination_subnet }
+
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 20
+ ram: 1024
+ vcpus: 2
+ #extra_specs: { node: { get_param: node }, "hw:cpu_policy": "#core_pinning_enabled", "hw:cpu_threads_policy": "#core_pinning_mode", "hw:mem_page_size": "#hugepages" }
+ extra_specs: { node: { get_param: node } }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: vTC
+ key_name: test
+ image: ubuntu1404
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+
+ # Creation of a user
+ NAME=$name
+ echo "Creating custom user..."
+ useradd clouduser -g admin -s /bin/bash -m
+ echo clouduser:secrete | chpasswd
+ echo "Enabling ssh password login..."
+ sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+ sleep 1
+
+ # wake up interfaces
+ ifconfig eth1 up
+ ifconfig eth2 up
+ dhclient eth1
+ dhclient eth2
+
+ sed -i 's/localhost/localhost vtc/g' /etc/hosts
+ ip route del 0/0
+ route add default gw 192.168.200.1
+
+ AA=$(netstat -ie | grep -B1 $IP_FAMILY | awk '{ print $1 }')
+ BB=$(echo $AA | awk '{ print $1 }')
+
+ # Setup Instrumentation Agent
+ rm -rf cimmaron
+ mkdir cimmaron
+ cd cimmaron
+ apt-get install -y zip
+ wget http://10.2.1.65/~iolie/merlin/MerlinAgent-12-06-2015-TNovaVM-001.zip
+ unzip MerlinAgent-12-06-2015-TNovaVM-001.zip
+ ./updateConfiguration.py ./instrumentation.cfg tags source=tnova_vm
+ ./updateConfiguration.py ./instrumentation.cfg tags role="$NAME"
+ nohup ./Agent.py ./instrumentation.cfg >log.out 2>&1 &
+ cd ..
+
+ # Setup for PF_RING and bridge between interfaces
+ apt-get update
+ apt-get install -y git build-essential gcc libnuma-dev flex byacc libjson0-dev dh-autoreconf libpcap-dev libpulse-dev libtool pkg-config
+
+ # Setup multicast
+ echo smcroute -d mgroup from $BB group 224.192.16.1 > /etc/smcroute.conf
+ cd /home/clouduser/
+ git clone https://github.com/troglobit/smcroute.git
+ cd smcroute
+ sed -i 's/aclocal-1.11/aclocal/g' ./autogen.sh
+ sed -i 's/automake-1.11/automake/g' ./autogen.sh
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ touch multicast.sh
+ echo "#!/bin/bash" > multicast.sh
+ echo "while [ true ]" >> multicast.sh
+ echo "do" >> multicast.sh
+ echo " smcroute -k" >> multicast.sh
+ echo " smcroute -d" >> multicast.sh
+ echo " sleep 50" >> multicast.sh
+ echo "done" >> multicast.sh
+ chmod +x multicast.sh
+ ./multicast.sh &
+
+ # Setup for PF_RING and bridge between interfaces
+ # Akis Repository
+ #git clone https://akiskourtis:ptindpi@bitbucket.org/akiskourtis/vtc.git
+ #cd vtc
+ #git checkout stable
+
+ # Intel Repository
+ git clone http://vincenzox.m.riccobene%40intel.com:vincenzo@134.191.243.6:8081/t-nova/vtc_master.git
+ cd vtc_master
+
+ cd nDPI
+ NDPI_DIR=$(pwd)
+ echo $NDPI_DIR
+ NDPI_INCLUDE=$(pwd)/src/include
+ echo $NDPI_INCLUDE
+ ./autogen.sh
+ ./configure
+ make
+ make install
+ cd ..
+ cd PF_RING
+ make
+ cd userland/examples/
+ sed -i 's|EXTRA_LIBS =|EXTRA_LIBS = '"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c|g' ./Makefile
+ sed -i 's| -Ithird-party| -Ithird-party -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'|g' ./Makefile
+ make
+ cd ../..
+ cd ..
+ cd ..
+ #insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ #./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ insmod ./vtc_master/PF_RING/kernel/pf_ring.ko min_num_slots=8192 enable_debug=1 quick_mode=1 enable_tx_capture=0
+ ./vtc_master/PF_RING/userland/examples/pfbridge -a eth1 -b eth2 &
+ wc_notify --data-binary '{"status": "SUCCESS"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+ $name: { get_param: name }
+ $IP_FAMILY: { get_param: ip_family }
+
+ flavor: { get_resource: flavor }
+ networks:
+ - port: { get_resource: port_1 }
+ - port: { get_resource: port_2 }
+ - port: { get_resource: port_3 }
+outputs:
diff --git a/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml.json b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml.json
new file mode 100644
index 000000000..9f246891d
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/data/test_templates/VTC_base_single_vm_wait_2.yaml.json
@@ -0,0 +1 @@
+{"vnic_type": "direct", "ram": "1024", "vcpus": "2"} \ No newline at end of file
diff --git a/yardstick/vTC/apexlake/tests/deployment_unit_test.py b/yardstick/vTC/apexlake/tests/deployment_unit_test.py
new file mode 100644
index 000000000..46b32837b
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/deployment_unit_test.py
@@ -0,0 +1,273 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'vmriccox'
+
+
+import unittest
+import mock
+import experimental_framework.deployment_unit as mut
+
+
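+# Stub HeatManager: reports CREATE_IN_PROGRESS for the first two status
+# polls and CREATE_COMPLETE from the third poll on, so the tests can
+# drive DeploymentUnit's polling loop without a real Heat service.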
+class DummyHeatManager:
+
+ def __init__(self, param):
+ self.counts = 0
+
+ def validate_heat_template(self, template_file):
+ return True
+
+ def check_stack_status(self, stack_name):
+ # return 'CREATE_COMPLETE'
+ self.counts += 1
+ if self.counts >= 3:
+ return 'CREATE_COMPLETE'
+ else:
+ return 'CREATE_IN_PROGRESS'
+
+ def delete_stack(self, stack_name):
+ pass
+
+
+class DummyHeatManagerFailed(DummyHeatManager):
+
+ def check_stack_status(self, stack_name):
+ return 'CREATE_FAILED'
+
+ def create_stack(self, template_file, stack_name, parameters):
+ pass
+
+
+class DummyHeatManagerComplete(DummyHeatManager):
+
+ def check_stack_status(self, stack_name):
+ return 'CREATE_COMPLETE'
+
+ def create_stack(self, template_file, stack_name, parameters):
+ raise Exception()
+
+
+class DummyHeatManagerFailedException(DummyHeatManagerFailed):
+
+ def create_stack(self, template_file, stack_name, parameters):
+ raise Exception
+
+ def check_stack_status(self, stack_name):
+ return ''
+
+
+class DummyHeatManagerDestroy:
+
+ def __init__(self, credentials):
+ self.delete_stack_counter = 0
+ self.check_stack_status_counter = 0
+
+ def check_stack_status(self, stack_name):
+ if self.check_stack_status_counter < 2:
+ self.check_stack_status_counter += 1
+ return 'DELETE_IN_PROGRESS'
+ else:
+ return 'DELETE_COMPLETE'
+
+ def create_stack(self, template_file, stack_name, parameters):
+ pass
+
+ def delete_stack(self, stack_name=None):
+ if stack_name == 'stack':
+ self.delete_stack_counter += 1
+ else:
+ return self.delete_stack_counter
+
+ def is_stack_deployed(self, stack_name):
+ return True
+
+
+class DummyHeatManagerDestroyException(DummyHeatManagerDestroy):
+
+ def delete_stack(self, stack_name=None):
+ raise Exception
+
+
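+# Always reports CREATE_FAILED and counts create_stack invocations so the
+# tests can verify that a failed deployment is retried before giving up.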
+class DummyHeatManagerReiteration:
+
+ def __init__(self, param):
+ self.counts = 0
+
+ def validate_heat_template(self, template_file):
+ return True
+
+ def check_stack_status(self, stack_name):
+ return 'CREATE_FAILED'
+
+ def delete_stack(self, stack_name):
+ pass
+
+ def create_stack(self, template_file=None, stack_name=None,
+ parameters=None):
+ if template_file == 'template_reiteration' and \
+ stack_name == 'stack_reiteration' and \
+ parameters == 'parameters_reiteration':
+ self.counts += 1
+
+
+class DummyDeploymentUnit(mut.DeploymentUnit):
+
+ def destroy_heat_template(self, stack_name):
+ raise Exception
+
+
+class TestDeploymentUnit(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManager)
+ def test_constructor_for_sanity(self, mock_heat_manager):
+ du = mut.DeploymentUnit(dict())
+ self.assertTrue(isinstance(du.heat_manager, DummyHeatManager))
+ mock_heat_manager.assert_called_once_with(dict())
+ self.assertEqual(du.deployed_stacks, list())
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManager)
+ @mock.patch('os.path.isfile')
+ def test_deploy_heat_template_for_failure(self, mock_os_is_file,
+ mock_heat_manager):
+ mock_os_is_file.return_value = False
+ du = mut.DeploymentUnit(dict())
+ template_file = ''
+ stack_name = ''
+ parameters = ''
+ self.assertRaises(ValueError, du.deploy_heat_template, template_file,
+ stack_name, parameters, 0)
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManager)
+ @mock.patch('os.path.isfile')
+ def test_deploy_heat_template_for_success(self, mock_os_is_file,
+ mock_heat_manager):
+ mock_os_is_file.return_value = True
+ du = mut.DeploymentUnit(dict())
+ template_file = ''
+ stack_name = ''
+ parameters = ''
+ output = du.deploy_heat_template(template_file, stack_name,
+ parameters, 0)
+ self.assertEqual(output, True)
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManagerComplete)
+ @mock.patch('os.path.isfile')
+ def test_deploy_heat_template_2_for_success(self, mock_os_is_file,
+ mock_heat_manager):
+ mock_os_is_file.return_value = True
+ du = mut.DeploymentUnit(dict())
+ template_file = ''
+ stack_name = ''
+ parameters = ''
+ output = du.deploy_heat_template(template_file, stack_name,
+ parameters, 0)
+ self.assertEqual(output, True)
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManagerComplete)
+ @mock.patch('os.path.isfile')
+ @mock.patch('experimental_framework.deployment_unit.DeploymentUnit',
+ side_effect=DummyDeploymentUnit)
+ def test_deploy_heat_template_3_for_success(self, mock_dep_unit,
+ mock_os_is_file,
+ mock_heat_manager):
+ mock_os_is_file.return_value = True
+ du = mut.DeploymentUnit(dict())
+ template_file = ''
+ stack_name = ''
+ parameters = ''
+ output = du.deploy_heat_template(template_file, stack_name,
+ parameters, 0)
+ self.assertEqual(output, True)
+
+ @mock.patch('experimental_framework.common.LOG')
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManagerFailed)
+ @mock.patch('os.path.isfile')
+ def test_deploy_heat_template_for_success_2(self, mock_os_is_file,
+ mock_heat_manager, mock_log):
+ mock_os_is_file.return_value = True
+ du = DummyDeploymentUnit(dict())
+ template_file = ''
+ stack_name = ''
+ parameters = ''
+ output = du.deploy_heat_template(template_file, stack_name,
+ parameters, 0)
+ self.assertEqual(output, False)
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManagerDestroy)
+ @mock.patch('experimental_framework.common.LOG')
+ def test_destroy_heat_template_for_success(self, mock_log,
+ mock_heat_manager):
+ openstack_credentials = dict()
+ du = mut.DeploymentUnit(openstack_credentials)
+ du.deployed_stacks = ['stack']
+ stack_name = 'stack'
+ self.assertTrue(du.destroy_heat_template(stack_name))
+ self.assertEqual(du.heat_manager.delete_stack(None), 1)
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManagerDestroyException)
+ @mock.patch('experimental_framework.common.LOG')
+ def test_destroy_heat_template_for_success_2(self, mock_log,
+ mock_heat_manager):
+ openstack_credentials = dict()
+ du = mut.DeploymentUnit(openstack_credentials)
+ du.deployed_stacks = ['stack']
+ stack_name = 'stack'
+ self.assertFalse(du.destroy_heat_template(stack_name))
+
+ def test_destroy_all_deployed_stacks_for_success(self):
+ du = DeploymentUnitDestroy()
+ du.destroy_all_deployed_stacks()
+ self.assertTrue(du.destroy_heat_template())
+
+ @mock.patch('experimental_framework.heat_manager.HeatManager',
+ side_effect=DummyHeatManagerReiteration)
+ @mock.patch('os.path.isfile')
+ def test_deploy_heat_template_for_success_3(self, mock_os_is_file,
+ mock_heat_manager):
+ mock_os_is_file.return_value = True
+ du = mut.DeploymentUnit(dict())
+ template = 'template_reiteration'
+ stack = 'stack_reiteration'
+ parameters = 'parameters_reiteration'
+ output = du.deploy_heat_template(template, stack, parameters, 0)
+ self.assertFalse(output)
+ self.assertEqual(du.heat_manager.counts, 4)
+
+
+class DeploymentUnitDestroy(mut.DeploymentUnit):
+
+ def __init__(self):
+ self.deployed_stacks = ['stack']
+ self.heat_manager = DummyHeatManagerDestroy(dict())
+ self.destroy_all_deployed_stacks_called_correctly = False
+
+ def destroy_heat_template(self, template_name=None):
+ if template_name == 'stack':
+ self.destroy_all_deployed_stacks_called_correctly = True
+ return self.destroy_all_deployed_stacks_called_correctly
diff --git a/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py b/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
index ad1cdcd2b..29b3e369f 100644
--- a/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
+++ b/yardstick/vTC/apexlake/tests/dpdk_packet_generator_test.py
@@ -702,4 +702,4 @@ class TestDpdkPacketGenOthers(unittest.TestCase):
def test__change_vlan_for_success(self, mock_run_command, mock_log):
mut.DpdkPacketGenerator._change_vlan('/directory/', 'pcap_file', '10')
expected_param = '/directory/vlan_tag.sh /directory/pcap_file 10'
- mock_run_command.assert_called_once_with(expected_param)
+ mock_run_command.assert_called_with(expected_param)
diff --git a/yardstick/vTC/apexlake/tests/generates_template_test.py b/yardstick/vTC/apexlake/tests/generates_template_test.py
index 85435db6a..f4525a23d 100644
--- a/yardstick/vTC/apexlake/tests/generates_template_test.py
+++ b/yardstick/vTC/apexlake/tests/generates_template_test.py
@@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+__author__ = 'gpetralx'
+
+
import unittest
import experimental_framework.heat_template_generation as heat_gen
import mock
@@ -19,6 +22,29 @@ import os
import experimental_framework.common as common
+def reset_common():
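+ """Reset the experimental_framework.common globals touched by these
+ tests back to None so configuration cannot leak between test cases."""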
+ common.LOG = None
+ common.CONF_FILE = None
+ common.DEPLOYMENT_UNIT = None
+ common.ITERATIONS = None
+ common.BASE_DIR = None
+ common.RESULT_DIR = None
+ common.TEMPLATE_DIR = None
+ common.TEMPLATE_NAME = None
+ common.TEMPLATE_FILE_EXTENSION = None
+ common.PKTGEN = None
+ common.PKTGEN_DIR = None
+ common.PKTGEN_DPDK_DIRECTORY = None
+ common.PKTGEN_PROGRAM = None
+ common.PKTGEN_COREMASK = None
+ common.PKTGEN_MEMCHANNEL = None
+ common.PKTGEN_BUS_SLOT_NIC_1 = None
+ common.PKTGEN_BUS_SLOT_NIC_2 = None
+ common.INFLUXDB_IP = None
+ common.INFLUXDB_PORT = None
+ common.INFLUXDB_DB_NAME = None
+
+
class TestGeneratesTemplate(unittest.TestCase):
def setUp(self):
self.deployment_configuration = {
@@ -27,16 +53,15 @@ class TestGeneratesTemplate(unittest.TestCase):
'vcpus': ['2']
}
self.template_name = 'VTC_base_single_vm_wait.tmp'
- common.init()
-
- def test_dummy(self):
- self.assertTrue(True)
+ # common.init()
def tearDown(self):
- pass
+ reset_common()
+ @mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.common.get_template_dir')
- def test_generates_template_for_success(self, mock_template_dir):
+ def test_generates_template_for_success(self, mock_template_dir,
+ mock_log):
generated_templates_dir = 'tests/data/generated_templates/'
mock_template_dir.return_value = generated_templates_dir
test_templates = 'tests/data/test_templates/'
@@ -50,7 +75,7 @@ class TestGeneratesTemplate(unittest.TestCase):
generated.readlines())
t_name = '/tests/data/generated_templates/VTC_base_single_vm_wait.tmp'
- self.template_name = os.getcwd() + t_name
+ self.template_name = "{}{}".format(os.getcwd(), t_name)
heat_gen.generates_templates(self.template_name,
self.deployment_configuration)
for dirname, dirnames, filenames in os.walk(test_templates):
diff --git a/yardstick/vTC/apexlake/tests/heat_manager_test.py b/yardstick/vTC/apexlake/tests/heat_manager_test.py
index f89835cc7..a2798a737 100644
--- a/yardstick/vTC/apexlake/tests/heat_manager_test.py
+++ b/yardstick/vTC/apexlake/tests/heat_manager_test.py
@@ -144,11 +144,13 @@ class TestHeatManager(unittest.TestCase):
def test_delete_stack_for_success_2(self):
self.assertTrue(self.heat_manager.delete_stack('stack_1'))
+ @mock.patch('experimental_framework.common.LOG')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('heatclient.client.Client')
# @mock.patch('heatclient.client.Client', side_effect=DummyHeatClient)
def test_create_stack_for_success(self, mock_stack_create,
- mock_get_template_contents):
+ mock_get_template_contents,
+ mock_log):
return_value = ({'template': 'template'}, 'template')
mock_get_template_contents.return_value = return_value
self.heat_manager.create_stack('template', 'stack_n', 'parameters')
@@ -174,11 +176,18 @@ class TestHeatManager_2(unittest.TestCase):
self.assertFalse(self.heat_manager.delete_stack('stack_1'))
+class ServiceCatalog(object):
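+ # Minimal stand-in for keystoneclient's service catalog: just enough
+ # for HeatManager to resolve the orchestration endpoint URL.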
+ def url_for(self, service_type):
+ return 'http://heat_url'
+
+
class KeystoneMock(object):
@property
def auth_token(self):
return 'token'
+ service_catalog = ServiceCatalog()
+
class TestHeatInit(unittest.TestCase):
def setUp(self):
diff --git a/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py b/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py
index 569d24c5a..c44df6d9f 100644
--- a/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py
+++ b/yardstick/vTC/apexlake/tests/instantiation_validation_bench_test.py
@@ -14,7 +14,9 @@
import unittest
import mock
+import os
import experimental_framework.constants.conf_file_sections as cfs
+import experimental_framework.common as common
import experimental_framework.benchmarks.\
instantiation_validation_benchmark as iv_module
from experimental_framework.benchmarks.\
@@ -70,7 +72,6 @@ def dummy_run_command_2(command, get_counters=None):
elif command == "test_sniff interface.100 128 &":
command_counter[4] += 1
return
- raise Exception(command)
def dummy_replace_in_file(file, str_from, str_to, get_couters=None):
@@ -152,11 +153,12 @@ class DummyInstantiaionValidationBenchmark(InstantiationValidationBenchmark):
class InstantiationValidationInitTest(unittest.TestCase):
def setUp(self):
+ common.BASE_DIR = os.getcwd()
self.iv = InstantiationValidationBenchmark('InstantiationValidation',
dict())
def tearDown(self):
- pass
+ common.BASE_DIR = None
@mock.patch('experimental_framework.common.get_base_dir')
def test___init___for_success(self, mock_base_dir):
@@ -301,11 +303,13 @@ class InstantiationValidationInitTest(unittest.TestCase):
self.assertEqual(dummy_replace_in_file('', '', '', True),
[0, 0, 0, 1, 1, 1])
+ @mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.packet_generators.'
'dpdk_packet_generator.DpdkPacketGenerator',
side_effect=DummyDpdkPacketGenerator)
@mock.patch('experimental_framework.common.get_dpdk_pktgen_vars')
- def test_run_for_success(self, mock_common_get_vars, mock_pktgen):
+ def test_run_for_success(self, mock_common_get_vars, mock_pktgen,
+ mock_log):
rval = dict()
rval[cfs.CFSP_DPDK_BUS_SLOT_NIC_2] = 'bus_2'
rval[cfs.CFSP_DPDK_NAME_IF_2] = 'if_2'
diff --git a/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py b/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py
index bbdf73947..463035743 100644
--- a/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py
+++ b/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py
@@ -14,8 +14,9 @@
import unittest
import mock
-
-
+import os
+import experimental_framework.common as common
+import experimental_framework.deployment_unit as deploy
import experimental_framework.benchmarks.\
instantiation_validation_noisy_neighbors_benchmark as mut
@@ -25,11 +26,22 @@ class InstantiationValidationInitTest(unittest.TestCase):
def setUp(self):
name = 'instantiation_validation_noisy'
params = {'param': 'value'}
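+ # Empty placeholder credentials: DeploymentUnit is built for real
+ # here, but these tests never contact an actual OpenStack service.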
+ openstack_credentials = dict()
+ openstack_credentials['ip_controller'] = ''
+ openstack_credentials['project'] = ''
+ openstack_credentials['auth_uri'] = ''
+ openstack_credentials['user'] = ''
+ openstack_credentials['heat_url'] = ''
+ openstack_credentials['password'] = ''
+ common.DEPLOYMENT_UNIT = deploy.DeploymentUnit(openstack_credentials)
+ common.BASE_DIR = os.getcwd()
+ common.TEMPLATE_DIR = 'tests/data/generated_templates'
self.iv = mut.\
InstantiationValidationNoisyNeighborsBenchmark(name, params)
def tearDown(self):
- pass
+ common.BASE_DIR = None
+ common.TEMPLATE_DIR = None
@mock.patch('experimental_framework.benchmarks.'
'instantiation_validation_benchmark.'
@@ -66,14 +78,14 @@ class InstantiationValidationInitTest(unittest.TestCase):
expected['allowed_values'][mut.NUMBER_OF_CORES] = \
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
expected['allowed_values'][mut.AMOUNT_OF_RAM] = \
- ['250M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
+ ['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
'10G']
expected['default_values']['throughput'] = '1'
expected['default_values']['vlan_sender'] = '-1'
expected['default_values']['vlan_receiver'] = '-1'
expected['default_values'][mut.NUM_OF_NEIGHBORS] = '1'
expected['default_values'][mut.NUMBER_OF_CORES] = '1'
- expected['default_values'][mut.AMOUNT_OF_RAM] = '250M'
+ expected['default_values'][mut.AMOUNT_OF_RAM] = '256M'
output = self.iv.get_features()
self.assertEqual(expected['description'], output['description'])
diff --git a/yardstick/vTC/apexlake/tests/multi_tenancy_throughput_benchmark_test.py b/yardstick/vTC/apexlake/tests/multi_tenancy_throughput_benchmark_test.py
index 78aff35ba..babf04ab1 100644
--- a/yardstick/vTC/apexlake/tests/multi_tenancy_throughput_benchmark_test.py
+++ b/yardstick/vTC/apexlake/tests/multi_tenancy_throughput_benchmark_test.py
@@ -17,6 +17,8 @@ __author__ = 'gpetralx'
import unittest
import mock
+import os
+import experimental_framework.common as common
from experimental_framework.benchmarks \
import multi_tenancy_throughput_benchmark as bench
@@ -37,6 +39,7 @@ class TestMultiTenancyThroughputBenchmark(unittest.TestCase):
def setUp(self):
name = 'benchmark'
params = dict()
+ common.BASE_DIR = os.getcwd()
self.benchmark = bench.MultiTenancyThroughputBenchmark(name, params)
def tearDown(self):
diff --git a/yardstick/vTC/apexlake/tests/rfc2544_throughput_benchmark_test.py b/yardstick/vTC/apexlake/tests/rfc2544_throughput_benchmark_test.py
index bef9b7f30..ef3b0dabb 100644
--- a/yardstick/vTC/apexlake/tests/rfc2544_throughput_benchmark_test.py
+++ b/yardstick/vTC/apexlake/tests/rfc2544_throughput_benchmark_test.py
@@ -12,11 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-__author__ = 'vmriccox'
-
import unittest
import mock
+import os
from experimental_framework.benchmarks import rfc2544_throughput_benchmark \
as mut
import experimental_framework.common as common
@@ -29,11 +28,11 @@ class RFC2544ThroughputBenchmarkRunTest(unittest.TestCase):
params = dict()
params[mut.VLAN_SENDER] = '1'
params[mut.VLAN_RECEIVER] = '2'
+ common.BASE_DIR = os.getcwd()
self.benchmark = mut.RFC2544ThroughputBenchmark(name, params)
- common.init_log()
def tearDown(self):
- pass
+ common.BASE_DIR = None
def test_get_features_for_sanity(self):
output = self.benchmark.get_features()
@@ -51,6 +50,7 @@ class RFC2544ThroughputBenchmarkRunTest(unittest.TestCase):
def test_finalize(self):
self.assertEqual(self.benchmark.finalize(), None)
+ @mock.patch('experimental_framework.common.LOG')
@mock.patch('experimental_framework.benchmarks.'
'rfc2544_throughput_benchmark.RFC2544ThroughputBenchmark.'
'_reset_lua_file')
@@ -67,8 +67,8 @@ class RFC2544ThroughputBenchmarkRunTest(unittest.TestCase):
'rfc2544_throughput_benchmark.dpdk.DpdkPacketGenerator')
def test_run_for_success(self, mock_dpdk, mock_get_results,
mock_extract_size, conf_lua_file_mock,
- reset_lua_file_mock):
- expected = {'results': 0, 'packet_size': '1'}
+ reset_lua_file_mock, mock_common_log):
+ expected = {'results': 0}
mock_extract_size.return_value = '1'
mock_get_results.return_value = {'results': 0}
output = self.benchmark.run()
@@ -88,10 +88,11 @@ class RFC2544ThroughputBenchmarkOthers(unittest.TestCase):
def setUp(self):
name = 'benchmark'
params = {'packet_size': '128'}
+ common.BASE_DIR = os.getcwd()
self.benchmark = mut.RFC2544ThroughputBenchmark(name, params)
def tearDown(self):
- pass
+ common.BASE_DIR = None
def test__extract_packet_size_from_params_for_success(self):
expected = '128'
@@ -121,10 +122,10 @@ class RFC2544ThroughputBenchmarkOthers(unittest.TestCase):
class RFC2544ThroughputBenchmarkGetResultsTest(unittest.TestCase):
def setUp(self):
- pass
+ common.BASE_DIR = os.getcwd()
def tearDown(self):
- pass
+ common.BASE_DIR = None
@mock.patch('experimental_framework.common.get_file_first_line')
def test__get_results_for_success(self, mock_common_file_line):
diff --git a/yardstick/vTC/apexlake/tests/tree_node_test.py b/yardstick/vTC/apexlake/tests/tree_node_test.py
new file mode 100644
index 000000000..8b302c912
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/tree_node_test.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'gpetralx'
+
+
+import unittest
+import experimental_framework.heat_template_generation as heat_gen
+
+
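+# TreeNode is the configuration-tree helper behind heat template
+# generation: each node holds one variable name/value pair, and
+# root-to-leaf paths enumerate the deployment parameter combinations.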
+class TestTreeNode(unittest.TestCase):
+ def setUp(self):
+ self.tree = heat_gen.TreeNode()
+
+ def tearDown(self):
+ pass
+
+ def test_add_child_for_success(self):
+ child = heat_gen.TreeNode()
+ self.tree.add_child(child)
+ self.assertIn(child, self.tree.down)
+
+ def test_get_parent_for_success(self):
+ self.assertIsNone(self.tree.get_parent())
+ child = heat_gen.TreeNode()
+ self.tree.add_child(child)
+ self.assertEqual(self.tree, child.get_parent())
+
+ def test_get_children_for_success(self):
+ self.assertListEqual(list(), self.tree.get_children())
+ child = heat_gen.TreeNode()
+ self.tree.add_child(child)
+ children = [child]
+ self.assertListEqual(children, self.tree.get_children())
+
+ def test_variable_name_for_success(self):
+ self.assertEqual('', self.tree.get_variable_name())
+ variable_name = 'test'
+ self.tree.set_variable_name(variable_name)
+ self.assertEqual(variable_name, self.tree.get_variable_name())
+
+ def test_variable_value_for_success(self):
+ self.assertEqual(0, self.tree.get_variable_value())
+ variable_value = 1
+ self.tree.set_variable_value(variable_value)
+ self.assertEqual(variable_value, self.tree.get_variable_value())
+
+ def test_get_path_for_success(self):
+ child_1 = heat_gen.TreeNode()
+ self.tree.add_child(child_1)
+ child_2 = heat_gen.TreeNode()
+ child_1.add_child(child_2)
+ child_3 = heat_gen.TreeNode()
+ child_2.add_child(child_3)
+
+ path = [self.tree, child_1, child_2, child_3]
+
+ self.assertListEqual(path, child_3.get_path())
+
+ def test_str_for_success(self):
+ name = 'name'
+ value = 0
+ self.tree.set_variable_name(name)
+ self.tree.set_variable_value(value)
+ self.assertEqual(name + " --> " + str(value), str(self.tree))
+
+ def test_repr_for_success(self):
+ name = 'name'
+ value = 0
+ self.tree.set_variable_name(name)
+ self.tree.set_variable_value(value)
+ self.assertEqual(name + " = " + str(value), repr(self.tree))
+
+ def test_get_leaves_for_success(self):
+ child_1 = heat_gen.TreeNode()
+ self.tree.add_child(child_1)
+ child_2 = heat_gen.TreeNode()
+ child_1.add_child(child_2)
+ child_3 = heat_gen.TreeNode()
+ child_2.add_child(child_3)
+ child_4 = heat_gen.TreeNode()
+ child_2.add_child(child_4)
+ child_5 = heat_gen.TreeNode()
+ child_2.add_child(child_5)
+ leaves = [child_3, child_4, child_5]
+ self.assertListEqual(leaves, heat_gen.TreeNode.get_leaves(self.tree))