summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.coveragerc1
-rw-r--r--.gitignore3
-rw-r--r--README.rst2
-rw-r--r--ci/docker/yardstick-ci/Dockerfile2
-rw-r--r--docs/configguide/yardstick_testcases/01-introduction.rst38
-rw-r--r--docs/configguide/yardstick_testcases/02-methodology.rst181
-rw-r--r--docs/configguide/yardstick_testcases/03-list-of-tcs.rst72
-rw-r--r--docs/configguide/yardstick_testcases/04-vtc-overview.rst114
-rwxr-xr-xdocs/configguide/yardstick_testcases/Yardstick_task_templates.rst155
-rw-r--r--docs/configguide/yardstick_testcases/glossary.rst33
-rw-r--r--docs/configguide/yardstick_testcases/index.rst12
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc001.rst (renamed from docs/yardstick/opnfv_yardstick_tc001.rst)42
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc002.rst (renamed from docs/yardstick/opnfv_yardstick_tc002.rst)35
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst72
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst139
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst157
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst (renamed from docs/yardstick/opnfv_yardstick_tc008.rst)44
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst (renamed from docs/yardstick/opnfv_yardstick_tc009.rst)46
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst (renamed from docs/yardstick/opnfv_yardstick_tc010.rst)58
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc012.rst (renamed from docs/yardstick/opnfv_yardstick_tc012.rst)61
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst69
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst136
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst152
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst67
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst99
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst99
-rw-r--r--docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst60
-rw-r--r--docs/configguide/yardstick_testcases/testcase_description_v2_template.rst64
-rw-r--r--docs/templates/testcase_description_v2_template.rst43
-rw-r--r--docs/userguide/yardstick_framework/03-installation.rst (renamed from docs/user_guides/framework/03-installation.rst)3
-rw-r--r--docs/userguide/yardstick_framework/index.rst (renamed from docs/user_guides/framework/index.rst)0
-rw-r--r--docs/vTC/README.rst87
-rw-r--r--docs/vTC/abbreviations.rst5
-rw-r--r--docs/yardstick/index.rst21
-rw-r--r--etc/yardstick/nodes/compass_sclab_physical/pod.yaml42
-rw-r--r--etc/yardstick/yardstick.conf.sample5
-rwxr-xr-xrun_tests.sh2
-rw-r--r--samples/cyclictest-node-context.yaml50
-rwxr-xr-xsamples/ha-baremetal.yaml45
-rwxr-xr-xsamples/ha-service.yaml42
-rw-r--r--samples/parser.yaml21
-rw-r--r--samples/ping6.yaml28
-rwxr-xr-xsamples/serviceha.yaml14
-rw-r--r--samples/tosca.yaml149
-rw-r--r--samples/yang.yaml687
-rwxr-xr-xsetup.py7
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml48
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml26
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml32
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml32
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml31
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml28
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml27
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml85
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml85
-rw-r--r--tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml22
-rw-r--r--tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml4
-rw-r--r--tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml4
-rw-r--r--tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml4
-rw-r--r--tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml18
-rw-r--r--tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml18
-rw-r--r--tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml18
-rw-r--r--tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml18
-rw-r--r--tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml18
-rw-r--r--tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml4
-rw-r--r--tests/opnfv/test_suites/opnfv_vTC_daily.yaml16
-rw-r--r--tests/opnfv/test_suites/opnfv_vTC_weekly.yaml16
-rw-r--r--tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml4
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py77
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_basemonitor.py84
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_monitor.py83
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_monitor_command.py79
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_monitor_process.py56
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_serviceha.py11
-rw-r--r--tests/unit/benchmark/scenarios/compute/test_cyclictest.py159
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_ping6.py99
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py48
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py51
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py48
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py51
-rw-r--r--tests/unit/benchmark/scenarios/parser/__init__.py0
-rw-r--r--tests/unit/benchmark/scenarios/parser/test_parser.py58
-rw-r--r--tests/unit/dispatcher/__init__.py0
-rw-r--r--tests/unit/dispatcher/test_influxdb.py124
-rw-r--r--tests/unit/dispatcher/test_influxdb_line_protocol.py55
-rw-r--r--yardstick/__init__.py7
-rw-r--r--yardstick/benchmark/contexts/heat.py23
-rw-r--r--yardstick/benchmark/contexts/node.py11
-rw-r--r--[-rwxr-xr-x]yardstick/benchmark/runners/iteration.py101
-rw-r--r--yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py129
-rw-r--r--yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml9
-rw-r--r--yardstick/benchmark/scenarios/availability/attacker/baseattacker.py2
-rw-r--r--yardstick/benchmark/scenarios/availability/attacker_conf.yaml13
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash27
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash20
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash18
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/check_service.bash (renamed from yardstick/benchmark/scenarios/availability/attacker/scripts/check_service.bash)0
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash18
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash21
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/start_service.bash (renamed from yardstick/benchmark/scenarios/availability/attacker/scripts/start_service.bash)0
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash (renamed from yardstick/benchmark/scenarios/availability/attacker/scripts/stop_service.bash)0
-rwxr-xr-xyardstick/benchmark/scenarios/availability/monitor.py114
-rwxr-xr-xyardstick/benchmark/scenarios/availability/monitor/__init__.py0
-rw-r--r--yardstick/benchmark/scenarios/availability/monitor/basemonitor.py140
-rw-r--r--yardstick/benchmark/scenarios/availability/monitor/monitor_command.py108
-rw-r--r--yardstick/benchmark/scenarios/availability/monitor/monitor_process.py81
-rwxr-xr-xyardstick/benchmark/scenarios/availability/serviceha.py35
-rw-r--r--yardstick/benchmark/scenarios/compute/cyclictest.py105
-rw-r--r--yardstick/benchmark/scenarios/networking/ping6.py119
-rw-r--r--yardstick/benchmark/scenarios/networking/ping6_benchmark.bash20
-rw-r--r--yardstick/benchmark/scenarios/networking/ping6_metadata.txt82
-rw-r--r--yardstick/benchmark/scenarios/networking/ping6_setup.bash84
-rw-r--r--yardstick/benchmark/scenarios/networking/ping6_teardown.bash58
-rw-r--r--yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py85
-rw-r--r--yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py92
-rw-r--r--yardstick/benchmark/scenarios/networking/vtc_throughput.py85
-rw-r--r--yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py91
-rw-r--r--yardstick/benchmark/scenarios/parser/__init__.py0
-rw-r--r--yardstick/benchmark/scenarios/parser/parser.py80
-rwxr-xr-xyardstick/benchmark/scenarios/parser/parser.sh51
-rwxr-xr-xyardstick/benchmark/scenarios/parser/parser_setup.sh16
-rwxr-xr-xyardstick/benchmark/scenarios/parser/parser_teardown.sh13
-rwxr-xr-xyardstick/cmd/commands/task.py14
-rw-r--r--yardstick/dispatcher/influxdb.py149
-rw-r--r--yardstick/dispatcher/influxdb_line_protocol.py114
-rw-r--r--yardstick/ssh.py7
-rw-r--r--yardstick/vTC/__init__.py0
-rw-r--r--yardstick/vTC/apexlake/experimental_framework/api.py24
-rw-r--r--yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py2
-rw-r--r--yardstick/vTC/apexlake/experimental_framework/common.py12
-rw-r--r--yardstick/vTC/apexlake/setup.py6
-rw-r--r--yardstick/vTC/apexlake/tests/api_test.py57
-rw-r--r--yardstick/vTC/apexlake/tests/benchmarking_unit_test.py157
-rw-r--r--yardstick/vTC/apexlake/tests/common_test.py2
-rw-r--r--yardstick/vTC/apexlake/tests/experiment_test.py94
-rw-r--r--yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py4
136 files changed, 6555 insertions, 850 deletions
diff --git a/.coveragerc b/.coveragerc
index 07ca20984..d2e3447da 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -5,3 +5,4 @@ source = yardstick
[report]
ignore_errors = True
precision = 3
+omit = yardstick/vTC/*
diff --git a/.gitignore b/.gitignore
index 162687f8d..6f462f55a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,5 +26,6 @@ test.log
.testrepository/
cover/
.*.sw?
-/output/
+/docs_build/
+/docs_output/
/releng/
diff --git a/README.rst b/README.rst
index 8cb360040..582622264 100644
--- a/README.rst
+++ b/README.rst
@@ -80,7 +80,7 @@ Example setup known to work for development and test:
- Cloud: Mirantis OpenStack 6.0 deployed using Virtualbox
Install dependencies:
-$ sudo apt-get install python-virtualenv python-dev libffi-dev libssl-dev
+$ sudo apt-get install python-virtualenv python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev
$ sudo easy_install -U setuptools
Create a virtual environment:
diff --git a/ci/docker/yardstick-ci/Dockerfile b/ci/docker/yardstick-ci/Dockerfile
index 229b91227..a1cf9160f 100644
--- a/ci/docker/yardstick-ci/Dockerfile
+++ b/ci/docker/yardstick-ci/Dockerfile
@@ -28,6 +28,8 @@ RUN apt-get update && apt-get install -y \
libssl-dev \
python \
python-dev \
+ libxml2-dev \
+ libxslt1-dev \
python-setuptools && \
easy_install -U setuptools
diff --git a/docs/configguide/yardstick_testcases/01-introduction.rst b/docs/configguide/yardstick_testcases/01-introduction.rst
new file mode 100644
index 000000000..6cca2875e
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/01-introduction.rst
@@ -0,0 +1,38 @@
+============
+Introduction
+============
+
+**Welcome to Yardstick's documentation !**
+
+.. _Yardstick: https://wiki.opnfv.org/yardstick
+
+Yardstick_ is an OPNFV Project.
+
+The project's goal is to verify infrastructure compliance, from the perspective
+of a :term:`VNF`.
+
+The Project's scope is the development of a test framework, *Yardstick*, test
+cases and test stimuli to enable :term:`NFVI` verification.
+The Project also includes a sample :term:`VNF`, the :term:`VTC` and its
+experimental framework, *ApexLake* !
+
+The chapter :doc:`02-methodology` describes the methodology implemented by the
+Yardstick Project for :term:`NFVI` verification. The chapter
+:doc:`03-list-of-tcs` includes a list of available Yardstick test cases.
+
+Yardstick is used for verifying the OPNFV infrastructure and some of the OPNFV
+features, listed in :doc:`03-list-of-tcs`.
+
+The *Yardstick* framework is deployed in several OPNFV community labs. It is
+installer, infrastructure and application independent.
+
+.. _Pharos: https://wiki.opnfv.org/pharos
+
+.. seealso:: Pharos_ for information on OPNFV community labs.
+
+Contact Yardstick
+=================
+
+Feedback? `Contact us`_
+
+.. _Contact us: opnfv-users@lists.opnfv.org
diff --git a/docs/configguide/yardstick_testcases/02-methodology.rst b/docs/configguide/yardstick_testcases/02-methodology.rst
new file mode 100644
index 000000000..5097c566b
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/02-methodology.rst
@@ -0,0 +1,181 @@
+===========
+Methodology
+===========
+
+Abstract
+========
+
+This chapter describes the methodology implemented by the Yardstick project for
+verifying the NFV Infrastructure from the perspective of a VNF.
+
+ETSI-NFV
+========
+
+.. _NFV-TST001: https://docbox.etsi.org/ISG/NFV/Open/Drafts/TST001_-_Pre-deployment_Validation/
+
+The document ETSI GS NFV-TST001_, "Pre-deployment Testing; Report on Validation
+of NFV Environments and Services", recommends methods for pre-deployment
+testing of the functional components of an NFV environment.
+
+The Yardstick project implements the methodology described in chapter 6, "Pre-
+deployment validation of NFV infrastructure".
+
+The methodology consists in decomposing the typical VNF work-load performance
+metrics into a number of characteristics/performance vectors, which each can be
+represented by distinct test-cases.
+
+The methodology includes five steps:
+
+* *Step1:* Define Infrastructure - the HW, SW and corresponding configuration
+ target for validation; the OPNFV infrastructure, in OPNFV community labs.
+
+* *Step2:* Identify VNF type - the application for which the infrastructure is
+ to be validated, and its requirements on the underlying infrastructure.
+
+* *Step3:* Select test cases - depending on the workload that represents the
+ application for which the infrastructure is to be validated, the relevant
+ test cases amongst the list of available Yardstick test cases.
+
+* *Step4:* Execute tests - define the duration and number of iterations for the
+ selected test cases, tests runs are automated via OPNFV Jenkins Jobs.
+
+* *Step5:* Collect results - using the common API for result collection.
+
+Metrics
+=======
+
+The metrics, as defined by ETSI GS NFV-TST001, are shown in
+:ref:`Table1 <table2_1>`, :ref:`Table2 <table2_2>` and
+:ref:`Table3 <table2_3>`.
+
+In OPNFV Brahmaputra release, generic test cases covering aspects of the listed
+metrics are available; further OPNFV releases will provide extended testing of
+these metrics.
+The view of available Yardstick test cases cross ETSI definitions in
+:ref:`Table1 <table2_1>`, :ref:`Table2 <table2_2>` and :ref:`Table3 <table2_3>`
+is shown in :ref:`Table4 <table2_4>`.
+It shall be noticed that the Yardstick test cases are examples, the test
+duration and number of iterations are configurable, as are the System Under
+Test (SUT) and the attributes (or, in Yardstick nomenclature, the scenario
+options).
+
+.. _table2_1:
+
+**Table 1 - Performance/Speed Metrics**
+
++---------+-------------------------------------------------------------------+
+| Category| Performance/Speed |
+| | |
++---------+-------------------------------------------------------------------+
+| Compute | * Latency for random memory access |
+| | * Latency for cache read/write operations |
+| | * Processing speed (instructions per second) |
+| | * Throughput for random memory access (bytes per second) |
+| | |
++---------+-------------------------------------------------------------------+
+| Network | * Throughput per NFVI node (frames/byte per second) |
+| | * Throughput provided to a VM (frames/byte per second) |
+| | * Latency per traffic flow |
+| | * Latency between VMs |
+| | * Latency between NFVI nodes |
+| | * Packet delay variation (jitter) between VMs |
+| | * Packet delay variation (jitter) between NFVI nodes |
+| | |
++---------+-------------------------------------------------------------------+
+| Storage | * Sequential read/write IOPS |
+| | * Random read/write IOPS |
+| | * Latency for storage read/write operations |
+| | * Throughput for storage read/write operations |
+| | |
++---------+-------------------------------------------------------------------+
+
+.. _table2_2:
+
+**Table 2 - Capacity/Scale Metrics**
+
++---------+-------------------------------------------------------------------+
+| Category| Capacity/Scale |
+| | |
++---------+-------------------------------------------------------------------+
+| Compute | * Number of cores and threads - Available memory size         |
+| | * Cache size |
+| | * Processor utilization (max, average, standard deviation) |
+| | * Memory utilization (max, average, standard deviation) |
+| | * Cache utilization (max, average, standard deviation) |
+| | |
++---------+-------------------------------------------------------------------+
+| Network | * Number of connections |
+| | * Number of frames sent/received |
+| | * Maximum throughput between VMs (frames/byte per second) |
+| | * Maximum throughput between NFVI nodes (frames/byte per second) |
+| | * Network utilization (max, average, standard deviation) |
+| | * Number of traffic flows |
+| | |
++---------+-------------------------------------------------------------------+
+| Storage | * Storage/Disk size |
+| | * Capacity allocation (block-based, object-based) |
+| | * Block size |
+| | * Maximum sequential read/write IOPS |
+| | * Maximum random read/write IOPS |
+| | * Disk utilization (max, average, standard deviation) |
+| | |
++---------+-------------------------------------------------------------------+
+
+.. _table2_3:
+
+**Table 3 - Availability/Reliability Metrics**
+
++---------+-------------------------------------------------------------------+
+| Category| Availability/Reliability |
+| | |
++---------+-------------------------------------------------------------------+
+| Compute | * Processor availability (Error free processing time) |
+| | * Memory availability (Error free memory time) |
+| | * Processor mean-time-to-failure |
+| | * Memory mean-time-to-failure |
+| | * Number of processing faults per second |
+| | |
++---------+-------------------------------------------------------------------+
+| Network | * NIC availability (Error free connection time) |
+| | * Link availability (Error free transmission time) |
+| | * NIC mean-time-to-failure |
+| | * Network timeout duration due to link failure |
+| | * Frame loss rate |
+| | |
++---------+-------------------------------------------------------------------+
+| Storage | * Disk availability (Error free disk access time) |
+| | * Disk mean-time-to-failure |
+| | * Number of failed storage read/write operations per second |
+| | |
++---------+-------------------------------------------------------------------+
+
+.. _table2_4:
+
+**Table 4 - Yardstick Generic Test Cases**
+
++---------+-------------------+----------------+------------------------------+
+| Category| Performance/Speed | Capacity/Scale | Availability/Reliability |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+| Compute | TC003 | TC003 | TC013 [1]_ |
+| | TC004 | TC004 | TC015 [1]_ |
+| | TC014 | TC010 | |
+| | TC024 | TC012 | |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+| Network | TC002 | TC001 | TC016 [1]_ |
+| | TC011 | TC008 | TC018 [1]_ |
+| | | TC009 | |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+| Storage | TC005 | TC005 | TC017 [1]_ |
+| | | | |
++---------+-------------------+----------------+------------------------------+
+
+.. note:: The description in this OPNFV document is intended as a reference for
+ users to understand the scope of the Yardstick Project and the
+ deliverables of the Yardstick framework. For complete description of
+ the methodology, refer to the ETSI document.
+
+.. rubric:: Footnotes
+.. [1] To be included in future deliveries.
diff --git a/docs/configguide/yardstick_testcases/03-list-of-tcs.rst b/docs/configguide/yardstick_testcases/03-list-of-tcs.rst
new file mode 100644
index 000000000..f72d80b75
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/03-list-of-tcs.rst
@@ -0,0 +1,72 @@
+====================
+Yardstick Test Cases
+====================
+
+Abstract
+========
+
+This chapter lists available Yardstick test cases.
+Yardstick test cases are divided in two main categories:
+
+* *Generic NFVI Test Cases* - Test Cases developed to realize the methodology
+described in :doc:`02-methodology`
+
+* *OPNFV Feature Test Cases* - Test Cases developed to verify one or more
+aspect of a feature delivered by an OPNFV Project, including the test cases
+developed for the :term:`VTC`.
+
+Generic NFVI Test Case Descriptions
+===================================
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc001.rst
+ opnfv_yardstick_tc002.rst
+ opnfv_yardstick_tc005.rst
+ opnfv_yardstick_tc008.rst
+ opnfv_yardstick_tc009.rst
+ opnfv_yardstick_tc010.rst
+ opnfv_yardstick_tc012.rst
+ opnfv_yardstick_tc014.rst
+ opnfv_yardstick_tc037.rst
+ opnfv_yardstick_tc038.rst
+
+OPNFV Feature Test Cases
+========================
+
+IPv6
+----
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc027.rst
+
+Parser
+------
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc040.rst
+
+virtual Traffic Classifier
+--------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ opnfv_yardstick_tc006.rst
+ opnfv_yardstick_tc007.rst
+ opnfv_yardstick_tc020.rst
+ opnfv_yardstick_tc021.rst
+
+Templates
+=========
+
+.. toctree::
+ :maxdepth: 1
+
+ testcase_description_v2_template
+ Yardstick_task_templates
diff --git a/docs/configguide/yardstick_testcases/04-vtc-overview.rst b/docs/configguide/yardstick_testcases/04-vtc-overview.rst
new file mode 100644
index 000000000..95159a9bc
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/04-vtc-overview.rst
@@ -0,0 +1,114 @@
+==========================
+Virtual Traffic Classifier
+==========================
+
+Abstract
+========
+
+.. _TNOVA: http://www.t-nova.eu/
+.. _TNOVAresults: http://www.t-nova.eu/results/
+.. _Yardstick: https://wiki.opnfv.org/yardstick
+
+This chapter provides an overview of the virtual Traffic Classifier, a
+contribution to OPNFV Yardstick_ from the EU Project TNOVA_.
+Additional documentation is available in TNOVAresults_.
+
+Overview
+========
+
+The virtual Traffic Classifier :term:`VNF`, the :term:`VTC`, comprises of a
+:term:`VNFC`. The :term:`VNFC` contains both the Traffic Inspection module, and
+the Traffic forwarding module, needed to run the VNF. The exploitation of
+:term:`DPI` methods for traffic classification is built around two basic
+assumptions:
+
+* third parties unaffiliated with either source or recipient are able to
+inspect each IP packet’s payload
+
+* the classifier knows the relevant syntax of each application’s packet
+payloads (protocol signatures, data patterns, etc.).
+
+The proposed :term:`DPI` based approach will only use an indicative, small
+number of the initial packets from each flow in order to identify the content
+and not inspect each packet.
+
+In this respect it follows the :term:`PBFS`. This method uses a table to track
+each session based on the 5-tuples (src address, dest address, src port,dest
+port, transport protocol) that is maintained for each flow.
+
+Concepts
+========
+
+* *Traffic Inspection*: The process of packet analysis and application
+identification of network traffic that passes through the :term:`VTC`.
+
+* *Traffic Forwarding*: The process of packet forwarding from an incoming
+network interface to a pre-defined outgoing network interface.
+
+* *Traffic Rule Application*: The process of packet tagging, based on a
+predefined set of rules. Packet tagging may include e.g. :term:`ToS` field
+modification.
+
+Architecture
+============
+
+The Traffic Inspection module is the most computationally intensive component
+of the :term:`VNF`. It implements filtering and packet matching algorithms in
+order to support the enhanced traffic forwarding capability of the :term:`VNF`.
+The component supports a flow table (exploiting hashing algorithms for fast
+indexing of flows) and an inspection engine for traffic classification.
+
+The implementation used for these experiments exploits the nDPI library.
+The packet capturing mechanism is implemented using libpcap. When the
+:term:`DPI` engine identifies a new flow, the flow register is updated with the
+appropriate information and transmitted across the Traffic Forwarding module,
+which then applies any required policy updates.
+
+The Traffic Forwarding module is responsible for routing and packet forwarding.
+It accepts incoming network traffic, consults the flow table for classification
+information for each incoming flow and then applies pre-defined policies
+marking e.g. :term:`ToS`/:term:`DSCP` multimedia traffic for :term:`QoS`
+enablement on the forwarded traffic.
+It is assumed that the traffic is forwarded using the default policy until it
+is identified and new policies are enforced.
+
+The expected response delay is considered to be negligible, as only a small
+number of packets are required to identify each flow.
+
+Graphical Overview
+==================
+
+.. code-block:: console
+
+ +----------------------------+
+ | |
+ | Virtual Traffic Classifier |
+ | |
+ | Analysing/Forwarding |
+ | ------------> |
+ | ethA ethB |
+ | |
+ +----------------------------+
+ | ^
+ | |
+ v |
+ +----------------------------+
+ | |
+ | Virtual Switch |
+ | |
+ +----------------------------+
+
+Install
+=======
+
+run the build.sh with root privileges
+
+Run
+===
+
+sudo ./pfbridge -a eth1 -b eth2
+
+Development Environment
+=======================
+
+Ubuntu 14.04
diff --git a/docs/configguide/yardstick_testcases/Yardstick_task_templates.rst b/docs/configguide/yardstick_testcases/Yardstick_task_templates.rst
new file mode 100755
index 000000000..d2c2b7ec9
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/Yardstick_task_templates.rst
@@ -0,0 +1,155 @@
+Task Template Syntax
+====================
+
+Basic template syntax
+---------------------
+A nice feature of the input task format used in Yardstick is that it supports
+the template syntax based on Jinja2.
+This turns out to be extremely useful when, say, you have a fixed structure of
+your task but you want to parameterize this task in some way.
+For example, imagine your input task file (task.yaml) runs a set of Ping
+scenarios:
+
+::
+
+ # Sample benchmark task config file
+ # measure network latency using ping
+ schema: "yardstick:task:0.1"
+
+ scenarios:
+ -
+ type: Ping
+ options:
+ packetsize: 200
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+
+ sla:
+ max_rtt: 10
+ action: monitor
+
+ context:
+ ...
+
+Let's say you want to run the same set of scenarios with the same runner/
+context/sla, but you want to try another packetsize to compare the performance.
+The most elegant solution is then to turn the packetsize name into a template
+variable:
+
+::
+
+ # Sample benchmark task config file
+ # measure network latency using ping
+
+ schema: "yardstick:task:0.1"
+ scenarios:
+ -
+ type: Ping
+ options:
+ packetsize: {{packetsize}}
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+
+ sla:
+ max_rtt: 10
+ action: monitor
+
+ context:
+ ...
+
+and then pass the argument value for {{packetsize}} when starting a task with
+this configuration file.
+Yardstick provides you with different ways to do that:
+
+1.Pass the argument values directly in the command-line interface (with either
+a JSON or YAML dictionary):
+
+::
+
+ yardstick task start samples/ping-template.yaml
+ --task-args '{"packetsize":"200"}'
+
+2.Refer to a file that specifies the argument values (JSON/YAML):
+
+::
+
+ yardstick task start samples/ping-template.yaml --task-args-file args.yaml
+
+Using the default values
+------------------------
+Note that the Jinja2 template syntax allows you to set the default values for
+your parameters.
+With default values set, your task file will work even if you don't
+parameterize it explicitly while starting a task.
+The default values should be set using the {% set ... %} clause (task.yaml).
+For example:
+
+::
+
+ # Sample benchmark task config file
+ # measure network latency using ping
+ schema: "yardstick:task:0.1"
+ {% set packetsize = packetsize or "100" %}
+ scenarios:
+ -
+ type: Ping
+ options:
+ packetsize: {{packetsize}}
+ host: athena.demo
+ target: ares.demo
+
+ runner:
+ type: Duration
+ duration: 60
+ interval: 1
+ ...
+
+If you don't pass the value for {{packetsize}} while starting a task, the
+default one will be used.
+
+Advanced templates
+------------------
+
+Yardstick makes it possible to use all the power of Jinja2 template syntax,
+including the mechanism of built-in functions.
+As an example, let us make up a task file that will do a block storage
+performance test.
+The input task file (fio-template.yaml) below uses the Jinja2 for-endfor
+construct to accomplish that:
+
+::
+
+ #Test block sizes of 4KB, 8KB, 64KB, 1MB
+ #Test 5 workloads: read, write, randwrite, randread, rw
+ schema: "yardstick:task:0.1"
+
+ scenarios:
+ {% for bs in ['4k', '8k', '64k', '1024k' ] %}
+ {% for rw in ['read', 'write', 'randwrite', 'randread', 'rw' ] %}
+ -
+ type: Fio
+ options:
+ filename: /home/ec2-user/data.raw
+ bs: {{bs}}
+ rw: {{rw}}
+ ramp_time: 10
+ host: fio.demo
+ runner:
+ type: Duration
+ duration: 60
+ interval: 60
+
+ {% endfor %}
+ {% endfor %}
+ context:
+ ...
diff --git a/docs/configguide/yardstick_testcases/glossary.rst b/docs/configguide/yardstick_testcases/glossary.rst
new file mode 100644
index 000000000..8ce9a6ba0
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/glossary.rst
@@ -0,0 +1,33 @@
+==================
+Yardstick Glossary
+==================
+
+.. glossary::
+ :sorted:
+
+ DPI
+ Deep Packet Inspection
+
+ DSCP
+ Differentiated Services Code Point
+
+ PBFS
+ Packet Based per Flow State
+
+ QoS
+ Quality of Service
+
+ VNF
+ Virtual Network Function
+
+ VNFC
+ Virtual Network Function Component
+
+ NFVI
+ Network Function Virtualization Infrastructure
+
+ ToS
+ Type of Service
+
+ VTC
+ Virtual Traffic Classifier
diff --git a/docs/configguide/yardstick_testcases/index.rst b/docs/configguide/yardstick_testcases/index.rst
new file mode 100644
index 000000000..55d4ea3e1
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/index.rst
@@ -0,0 +1,12 @@
+==================
+Yardstick Overview
+==================
+
+.. toctree::
+ :maxdepth: 2
+
+ 01-introduction
+ 02-methodology
+ 04-vtc-overview
+ 03-list-of-tcs
+ glossary
diff --git a/docs/yardstick/opnfv_yardstick_tc001.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc001.rst
index 16c9d2c60..810bad489 100644
--- a/docs/yardstick/opnfv_yardstick_tc001.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc001.rst
@@ -1,12 +1,18 @@
*************************************
Yardstick Test Case Description TC001
*************************************
+
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
+-----------------------------------------------------------------------------+
|Network Performance |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC001_NW PERF |
+| | |
+--------------+--------------------------------------------------------------+
|metric | Number of flows and throughput |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | To evaluate the IaaS network performance with regards to |
| | flows and throughput, such as if and how different amounts |
@@ -19,6 +25,7 @@ Yardstick Test Case Description TC001
| | graphs ans similar shall be stored for comparison reasons and|
| | product evolution understanding between different OPNFV |
| | versions and/or configurations. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc001.yaml |
| | |
@@ -28,6 +35,7 @@ Yardstick Test Case Description TC001
| | twice. The client and server are distributed on different HW.|
| | For SLA max_ppm is set to 1000. The amount of configured |
| | ports map to between 110 up to 1001000 flows, respectively. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | pktgen |
| | |
@@ -36,30 +44,36 @@ Yardstick Test Case Description TC001
| | image. |
| | As an example see the /yardstick/tools/ directory for how |
| | to generate a Linux image with pktgen included.) |
+| | |
+--------------+--------------------------------------------------------------+
-|references |https://www.kernel.org/doc/Documentation/networking/pktgen.txt|
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
-| |ETSI-NFV-TST001 |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different packet sizes, amount |
| | of flows and test duration. Default values exist. |
| | |
-| |SLA (optional): |
-| | max_ppm: The number of packets per million packets sent |
-| | that are acceptable to lose, i.e. not received. |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received.|
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with pktgen included in it. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The hosts are installed, as | Logs are stored |
-| | | server and client. pktgen is | |
-| | | invoked and logs are produced | |
-| | | and stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/yardstick/opnfv_yardstick_tc002.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc002.rst
index bc795bf38..56350f5bb 100644
--- a/docs/yardstick/opnfv_yardstick_tc002.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc002.rst
@@ -2,12 +2,17 @@
Yardstick Test Case Description TC002
*************************************
+.. _cirros-image: https://download.cirros-cloud.net
+
+-----------------------------------------------------------------------------+
|Network Latency |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC002_NW LATENCY |
+| | |
+--------------+--------------------------------------------------------------+
|metric | RTT, Round Trip Time |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | To do a basic verification that network latency is within |
| | acceptable boundaries when packets travel between hosts |
@@ -16,11 +21,13 @@ Yardstick Test Case Description TC002
| | graphs and similar shall be stored for comparison reasons and|
| | product evolution understanding between different OPNFV |
| | versions and/or configurations. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc002.yaml |
| | |
| | Packet size 100 bytes. Total test duration 600 seconds. |
| | One ping each 10 seconds. SLA RTT is set to maximum 10 ms. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | ping |
| | |
@@ -28,11 +35,13 @@ Yardstick Test Case Description TC002
| | doesn't need to be installed. It is also part of the |
| | Yardstick Docker image. |
| | (For example also a Cirros image can be downloaded from |
-| | https://download.cirros-cloud.net, it includes ping) |
+| | cirros-image_, it includes ping) |
+| | |
+--------------+--------------------------------------------------------------+
|references | Ping man page |
| | |
| | ETSI-NFV-TST001 |
+| | |
+--------------+--------------------------------------------------------------+
|applicability | Test case can be configured with different packet sizes, |
| | burst sizes, ping intervals and test duration. |
@@ -46,20 +55,24 @@ Yardstick Test Case Description TC002
| | than this. Some may suffer bad also close to this RTT, while |
| | others may not suffer at all. It is a compromise that may |
| | have to be tuned for different configuration purposes. |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with ping included in it. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The hosts are installed, as | Logs are stored |
-| | | server and client. Ping is | |
-| | | invoked and logs are produced | |
-| | | and stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. Ping is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test should not PASS if any RTT is above the optional SLA |
| | value, or if there is a test case execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
-
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst
new file mode 100644
index 000000000..8b7474696
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc005.rst
@@ -0,0 +1,72 @@
+*************************************
+Yardstick Test Case Description TC005
+*************************************
+
+.. _fio: http://www.bluestop.org/fio/HOWTO.txt
+
++-----------------------------------------------------------------------------+
+|Storage Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC005_Storage Performance |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | IOPS, throughput and latency |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS storage performance with regards to |
+| | IOPS, throughput and latency. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons |
+| | and product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc005.yaml |
+| | |
+| | IO types: read, write, randwrite, randread, rw |
+| | IO block size: 4KB, 64KB, 1024KB, where each |
+| | runs for 30 seconds (10 for ramp time, 20 for runtime). |
+| | |
+| | For SLA minimum read/write iops is set to 100, minimum |
+| | read/write throughput is set to 400 KB/s, and maximum |
+| | read/write latency is set to 20000 usec. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | fio |
+| | |
+| | (fio is not always part of a Linux distribution, hence it |
+| | needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with fio included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | fio_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different read/write types, IO |
+| | block size, IO depth, ramp time (runtime required for stable |
+| | results) and test duration. Default values exist. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with fio included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. fio is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst
new file mode 100644
index 000000000..b68315078
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc006.rst
@@ -0,0 +1,139 @@
+*************************************
+Yardstick Test Case Description TC006
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC006_Virtual Traffic Classifier Data Plane |
+| | Throughput Benchmarking Test. |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Throughput |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To measure the throughput supported by the virtual Traffic |
+| | Classifier according to the RFC2544 methodology for a |
+| | user-defined set of vTC deployment configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc006.yaml |
+| | |
+| | packet_size: size of the packets to be used during the |
+| | throughput calculation. |
+| | Allowed values: [64, 128, 256, 512, 1024, 1280, 1518] |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | Default value: None |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input |
+| | Default value: None |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDK Pktgen: DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | RFC 2544: rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before to run the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The vTC is deployed, according to the user-defined |
+| | configuration |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is correctly deployed and configured as necessary |
+| | The initialization script has been correctly executed and |
+| | vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Test case is executed with the selected parameters: |
+| | - vTC flavor |
+| | - vNIC type |
+| | - packet size |
+| | The traffic is sent to the vTC using the maximum available |
+| | traffic rate for 60 seconds. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The vTC instance forwards all the packets back to the packet |
+| | generator for 60 seconds, as specified by RFC 2544. |
+| | |
+| | Steps 3 and 4 are executed different times, with different |
+| | rates in order to find the maximum supported traffic rate |
+| | according to the current definition of throughput in RFC |
+| | 2544. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The result of the test is a number between 0 and 100 which |
+| | represents the throughput in terms of percentage of the |
+| | available pktgen NIC bandwidth. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst
new file mode 100644
index 000000000..a7a4776d5
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc007.rst
@@ -0,0 +1,157 @@
+*************************************
+Yardstick Test Case Description TC007
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC007_Virtual Traffic Classifier Data Plane |
+| | Throughput Benchmarking Test in Presence of Noisy |
+| | neighbours |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Throughput |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To measure the throughput supported by the virtual Traffic |
+| | Classifier according to the RFC2544 methodology for a |
+| | user-defined set of vTC deployment configurations in the |
+| | presence of noisy neighbours. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc007.yaml |
+| | |
+| | packet_size: size of the packets to be used during the |
+| | throughput calculation. |
+| | Allowed values: [64, 128, 256, 512, 1024, 1280, 1518] |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input |
+| | |
+| | num_of_neighbours: Number of noisy neighbours (VMs) to be |
+| | instantiated during the experiment. |
+| | Allowed values: range (1, 10) |
+| | |
+| | amount_of_ram: RAM to be used by each neighbor. |
+| | Allowed values: ['250M', '1G', '2G', '3G', '4G', '5G', |
+| | '6G', '7G', '8G', '9G', '10G'] |
+| | Default value: 256M |
+| | |
+| | number_of_cores: Number of noisy neighbours (VMs) to be |
+| | instantiated during the experiment. |
+| | Allowed values: range (1, 10) |
+| | Default value: 1 |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before to run the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The noisy neighbours are deployed as required by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is deployed, according to the configuration required |
+| | by the user |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | The vTC is correctly deployed and configured as necessary. |
+| | The initialization script has been correctly executed and |
+| | the vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | Test case is executed with the parameters specified by the |
+| | user: |
+| | - vTC flavor |
+| | - vNIC type |
+| | - packet size |
+| | The traffic is sent to the vTC using the maximum available |
+| | traffic rate |
+| | |
++--------------+--------------------------------------------------------------+
+|step 5 | The vTC instance forwards all the packets back to the |
+| | packet generator for 60 seconds, as specified by RFC 2544. |
+| | |
+| | Steps 4 and 5 are executed several times, with different |
+| | traffic rates, in order to find the maximum |
+| | supported traffic rate, according to the current definition |
+| | of throughput in RFC 2544. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The result of the test is a number between 0 and 100 which |
+| | represents the throughput in terms of percentage of the |
+| | available pktgen NIC bandwidth. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/yardstick/opnfv_yardstick_tc008.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst
index f4971fbad..e176e633a 100644
--- a/docs/yardstick/opnfv_yardstick_tc008.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc008.rst
@@ -1,12 +1,18 @@
*************************************
Yardstick Test Case Description TC008
*************************************
+
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
+-----------------------------------------------------------------------------+
-|Network Performance |
-+==============+==============================================================+
+|Packet Loss Extended Test |
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC008_NW PERF, Packet loss Extended Test |
+| | |
+--------------+--------------------------------------------------------------+
|metric | Number of flows, packet size and throughput |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | To evaluate the IaaS network performance with regards to |
| | flows and throughput, such as if and how different amounts |
@@ -20,6 +26,7 @@ Yardstick Test Case Description TC008
| | graphs ans similar shall be stored for comparison reasons and|
| | product evolution understanding between different OPNFV |
| | versions and/or configurations. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc008.yaml |
| | |
@@ -34,6 +41,7 @@ Yardstick Test Case Description TC008
| | The client and server are distributed on different HW. |
| | |
| | For SLA max_ppm is set to 1000. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | pktgen |
| | |
@@ -42,30 +50,36 @@ Yardstick Test Case Description TC008
| | image. |
| | As an example see the /yardstick/tools/ directory for how |
| | to generate a Linux image with pktgen included.) |
+| | |
+--------------+--------------------------------------------------------------+
-|references |https://www.kernel.org/doc/Documentation/networking/pktgen.txt|
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
-| |ETSI-NFV-TST001 |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different packet sizes, amount |
| | of flows and test duration. Default values exist. |
| | |
-| |SLA (optional): |
-| | max_ppm: The number of packets per million packets sent |
-| | that are acceptable to lose, i.e. not received. |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received.|
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with pktgen included in it. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The hosts are installed, as | Logs are stored |
-| | | server and client. pktgen is | |
-| | | invoked and logs are produced | |
-| | | and stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/yardstick/opnfv_yardstick_tc009.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst
index 07d7fbfea..e4002a884 100644
--- a/docs/yardstick/opnfv_yardstick_tc009.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc009.rst
@@ -1,12 +1,18 @@
*************************************
Yardstick Test Case Description TC009
*************************************
+
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
+-----------------------------------------------------------------------------+
-|Network Performance |
-+==============+==============================================================+
+|Packet Loss |
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC009_NW PERF, Packet loss |
+| | |
+--------------+--------------------------------------------------------------+
-|metric | Number of flows and throughput |
+|metric | Number of flows, packets lost and throughput |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | To evaluate the IaaS network performance with regards to |
| | flows and throughput, such as if and how different amounts |
@@ -20,6 +26,7 @@ Yardstick Test Case Description TC009
| | graphs ans similar shall be stored for comparison reasons and|
| | product evolution understanding between different OPNFV |
| | versions and/or configurations. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | file: opnfv_yardstick_tc009.yaml |
| | |
@@ -33,6 +40,7 @@ Yardstick Test Case Description TC009
| | The client and server are distributed on different HW. |
| | |
| | For SLA max_ppm is set to 1000. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | pktgen |
| | |
@@ -41,30 +49,36 @@ Yardstick Test Case Description TC009
| | image. |
| | As an example see the /yardstick/tools/ directory for how |
| | to generate a Linux image with pktgen included.) |
+| | |
+--------------+--------------------------------------------------------------+
-|references |https://www.kernel.org/doc/Documentation/networking/pktgen.txt|
+|references | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
| | |
-| |ETSI-NFV-TST001 |
+--------------+--------------------------------------------------------------+
|applicability | Test can be configured with different packet sizes, amount |
| | of flows and test duration. Default values exist. |
| | |
-| |SLA (optional): |
-| | max_ppm: The number of packets per million packets sent |
-| | that are acceptable to lose, i.e. not received. |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, i.e. not received.|
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with pktgen included in it. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The hosts are installed, as | Logs are stored |
-| | | server and client. pktgen is | |
-| | | invoked and logs are produced | |
-| | | and stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Fails only if SLA is not passed, or if there is a test case |
| | execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/yardstick/opnfv_yardstick_tc010.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst
index c2f8eb4e2..ebb74ea30 100644
--- a/docs/yardstick/opnfv_yardstick_tc010.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc010.rst
@@ -1,17 +1,23 @@
-
*************************************
Yardstick Test Case Description TC010
*************************************
+
+.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
+
+-----------------------------------------------------------------------------+
|Memory Latency |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC010_Memory Latency |
+| | |
+--------------+--------------------------------------------------------------+
|metric | Latency in nanoseconds |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | Measure the memory read latency for varying memory sizes and |
| | strides. Whole memory hierarchy is measured including all |
| | levels of cache. |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc010.yaml |
| | |
@@ -21,6 +27,7 @@ Yardstick Test Case Description TC010
| | * Iterations: 10 - test is run 10 times iteratively. |
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | Lmbench |
| | |
@@ -28,38 +35,43 @@ Yardstick Test Case Description TC010
| | test uses lat_mem_rd tool from that suite. |
| | Lmbench is not always part of a Linux distribution, hence it |
| | needs to be installed in the test image |
-| | (See :ref:`guest-image` for how to generate a Linux image |
-| | for Glance with Lmbench included). |
+| | |
+--------------+--------------------------------------------------------------+
-|references |* http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html|
+|references | man-pages_ |
+| | |
+| | McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
+| | for Performance Analysis." USENIX annual technical |
+| | conference 1996. |
| | |
-| |* McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
-| | for Performance Analysis." *USENIX annual technical |
-| | conference*. 1996. |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different |
-| | * strides; |
-| | * stop_size; |
-| | * iterations and intervals; |
+|applicability | Test can be configured with different: |
+| | |
+| | * strides; |
+| | * stop_size; |
+| | * iterations and intervals. |
| | |
| | There are default values for each above-mentioned option. |
| | |
-| | * SLA (optional). |
-| | max_latency: The maximum memory latency that is |
-| | accepted. |
+| | SLA (optional) : max_latency: The maximum memory latency |
+| | that is accepted. |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with Lmbench included in the image. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The host is installed as client. | Logs are stored |
-| | | Lmbench's lat_mem_rd tool is | |
-| | | invoked and logs are produced and| |
-| | | stored. | |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The host is installed as client. Lmbench's lat_mem_rd tool |
+| | is invoked and logs are produced and stored. |
+| | |
+| | Result: logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory latency is above the SLA |
| | value or if there is a test case execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/yardstick/opnfv_yardstick_tc012.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc012.rst
index b5768c0c5..e7889c14e 100644
--- a/docs/yardstick/opnfv_yardstick_tc012.rst
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc012.rst
@@ -2,21 +2,26 @@
Yardstick Test Case Description TC012
*************************************
+.. _man-pages: http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html
+
+-----------------------------------------------------------------------------+
|Memory Bandwidth |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | OPNFV_YARDSTICK_TC012_Memory Bandwidth |
+| | |
+--------------+--------------------------------------------------------------+
|metric | Megabyte per second (MBps) |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | Measure the rate at which data can be read from and written |
| | to the memory (this includes all levels of memory). |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | File: opnfv_yardstick_tc012.yaml |
| | |
-| | * SLA (optional): 15000 (MBps) |
-| | min_bw: The minimum amount of memory bandwidth that is |
-| | accepted. |
+| | * SLA (optional): 15000 (MBps) min_bw: The minimum amount of |
+| | memory bandwidth that is accepted. |
| | * Size: 10 240 kB - test allocates twice that size (20 480kB)|
| | zeros it and then measures the time it takes to copy from |
| | one side to another. |
@@ -27,42 +32,50 @@ Yardstick Test Case Description TC012
| | * Iterations: 10 - test is run 10 times iteratively. |
| | * Interval: 1 - there is 1 second delay between each |
| | iteration. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | Lmbench |
| | |
| | Lmbench is a suite of operating system microbenchmarks. This |
| | test uses bw_mem tool from that suite. |
| | Lmbench is not always part of a Linux distribution, hence it |
-| | needs to be installed in the test image |
-| | (See :ref:`guest-image` for how to generate a Linux image |
-| | for Glance with Lmbench included). |
+| | needs to be installed in the test image. |
+| | |
+--------------+--------------------------------------------------------------+
-|references | * http://manpages.ubuntu.com/manpages/trusty/bw_mem.8.html |
+|references | man-pages_ |
+| | |
+| | McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools |
+| | for Performance Analysis." USENIX annual technical |
+| | conference. 1996. |
| | |
-| | * McVoy, Larry W., and Carl Staelin. "lmbench: Portable Tools|
-| | for Performance Analysis." |
-| | * USENIX annual technical conference. 1996. |
+--------------+--------------------------------------------------------------+
-|applicability | Test can be configured with different |
-| | * memory sizes; |
-| | * memory operations (such as rd, wr, rdwr, cp, frd, fwr, |
-| | fcp, bzero, bcopy); |
-| | * number of warmup iterations; |
-| | * iterations and intervals. |
+|applicability | Test can be configured with different: |
+| | |
+| | * memory sizes; |
+| | * memory operations (such as rd, wr, rdwr, cp, frd, fwr, |
+| | fcp, bzero, bcopy); |
+| | * number of warmup iterations; |
+| | * iterations and intervals. |
| | |
| | There are default values for each above-mentioned option. |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | The test case image needs to be installed into Glance |
|conditions | with Lmbench included in the image. |
| | |
| | No POD specific requirements have been identified. |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | The host is installed as client. | Logs are stored |
-| | | Lmbench's bw_mem tool is invoked | |
-| | | and logs are produced and stored.| |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The host is installed as client. Lmbench's bw_mem tool is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | Test fails if the measured memory bandwidth is below the SLA |
| | value or if there is a test case execution problem. |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst
new file mode 100644
index 000000000..68d36ecd2
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc014.rst
@@ -0,0 +1,69 @@
+*************************************
+Yardstick Test Case Description TC014
+*************************************
+
+.. _unixbench: https://github.com/kdlucas/byte-unixbench/blob/master/UnixBench
+
++-----------------------------------------------------------------------------+
+|Processing speed |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC014_Processing speed |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | score of single cpu running, score of parallel running |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS processing speed with regards to score |
+| | of single cpu running and parallel running |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons |
+| | and product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc014.yaml |
+| | |
+| | run_mode: Run unixbench in quiet mode or verbose mode |
+| | test_type: dhry2reg, whetstone and so on |
+| | |
+| | For SLA with single_score and parallel_score, both can be |
+| | set by user, default is NA |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | unixbench |
+| | |
+| | (unixbench is not always part of a Linux distribution, hence |
+| | it needs to be installed. As an example see the |
+| | /yardstick/tools/ directory for how to generate a Linux |
+| | image with unixbench included.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | unixbench_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different test types, dhry2reg, |
+| | whetstone and so on. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with unixbench included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as a client. unixbench is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst
new file mode 100644
index 000000000..9a5130f71
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc020.rst
@@ -0,0 +1,136 @@
+*************************************
+Yardstick Test Case Description TC020
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC0020_Virtual Traffic Classifier |
+| | Instantiation Test |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Failure |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To verify that a newly instantiated vTC is 'alive' and |
+| | functional and its instantiation is correctly supported by |
+| | the infrastructure. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc020.yaml |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | Default value: None |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input |
+| | Default value: None |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | Installation and configuration of smcroute is required |
+| | before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The vTC is deployed, according to the configuration provided |
+| | by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is correctly deployed and configured as necessary. |
+| | The initialization script has been correctly executed and |
+| | the vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | Test case is executed with the parameters specified by the |
+| | the user: |
+| | - vTC flavor |
+| | - vNIC type |
+| | A constant rate traffic is sent to the vTC for 10 seconds. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | The vTC instance tags all the packets and sends them back to |
+| | the packet generator for 10 seconds. |
+| | |
+| | The framework checks that the packet generator receives |
+| | back all the packets with the correct tag from the vTC. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The vTC is deemed to be successfully instantiated if all |
+| | packets are sent back with the right tag as requested, |
+| | else it is deemed DoA (Dead on arrival) |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst
new file mode 100644
index 000000000..a493ddfc0
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc021.rst
@@ -0,0 +1,152 @@
+*************************************
+Yardstick Test Case Description TC021
+*************************************
+
+.. _DPDKpktgen: https://github.com/Pktgen/Pktgen-DPDK/
+.. _rfc2544: https://www.ietf.org/rfc/rfc2544.txt
+
++-----------------------------------------------------------------------------+
+|Network Performance |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC0021_Virtual Traffic Classifier |
+| | Instantiation Test in Presence of Noisy Neighbours |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Failure |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To verify that a newly instantiated vTC is 'alive' and |
+| | functional and its instantiation is correctly supported by |
+| | the infrastructure in the presence of noisy neighbours. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc021.yaml |
+| | |
+| | vnic_type: type of VNIC to be used. |
+| | Allowed values are: |
+| | - normal: for default OvS port configuration |
+| | - direct: for SR-IOV port configuration |
+| | Default value: None |
+| | |
+| | vtc_flavor: OpenStack flavor to be used for the vTC |
+| | Default available values are: m1.small, m1.medium, |
+| | and m1.large, but the user can create his/her own |
+| | flavor and give it as input |
+| | Default value: None |
+| | |
+| | num_of_neighbours: Number of noisy neighbours (VMs) to be |
+| | instantiated during the experiment. |
+| | Allowed values: range (1, 10) |
+| | |
+| | amount_of_ram: RAM to be used by each neighbour. |
+| | Allowed values: ['250M', '1G', '2G', '3G', '4G', '5G', |
+| | '6G', '7G', '8G', '9G', '10G'] |
+| | Default value: 256M |
+| | |
+| | number_of_cores: Number of cores to be used by each noisy |
+| | neighbour (VM) during the experiment. |
+| | Allowed values: range (1, 10) |
+| | Default value: 1 |
+| | |
+| | vlan_sender: vlan tag of the network on which the vTC will |
+| | receive traffic (VLAN Network 1). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | vlan_receiver: vlan tag of the network on which the vTC |
+| | will send traffic back to the packet generator |
+| | (VLAN Network 2). |
+| | Allowed values: range (1, 4096) |
+| | |
+| | default_net_name: neutron name of the default network that |
+| | is used for access to the internet from the vTC |
+| | (vNIC 1). |
+| | |
+| | default_subnet_name: subnet name for vNIC1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_1_name: Neutron Name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_1_name: Subnet Neutron name for VLAN Network 1 |
+| | (information available through Neutron). |
+| | |
+| | vlan_net_2_name: Neutron Name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
+| | vlan_subnet_2_name: Subnet Neutron name for VLAN Network 2 |
+| | (information available through Neutron). |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | DPDK pktgen |
+| | |
+| | DPDK Pktgen is not part of a Linux distribution, |
+| | hence it needs to be installed by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | DPDK Pktgen: DPDKpktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
+| | RFC 2544: rfc2544_ |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different flavors, vNIC type |
+| | and packet sizes. Default values exist as specified above. |
+| | The vNIC type and flavor MUST be specified by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The vTC has been successfully instantiated and configured. |
+| | The user has correctly assigned the values to the deployment |
+| | configuration parameters. |
+| | |
+| | - Multicast traffic MUST be enabled on the network. |
+| | The Data network switches need to be configured in |
+| | order to manage multicast traffic. |
+| | Installation and configuration of smcroute is required |
+| | before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | - In the case of SR-IOV vNICs use, SR-IOV compatible NICs |
+| | must be used on the compute node. |
+| | - Yardstick needs to be installed on a host connected to the |
+| | data network and the host must have 2 DPDK-compatible |
+| | NICs. Proper configuration of DPDK and DPDK pktgen is |
+| | required before running the test case. |
+| | (For further instructions please refer to the ApexLake |
+| | documentation). |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected results |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The noisy neighbours are deployed as required by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | The vTC is deployed, according to the configuration provided |
+| | by the user. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 3 | The vTC is correctly deployed and configured as necessary. |
+| | The initialization script has been correctly executed and |
+| | the vTC is ready to receive and process the traffic. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 4 | Test case is executed with the selected parameters: |
+| | - vTC flavor |
+| | - vNIC type |
+| | A constant rate traffic is sent to the vTC for 10 seconds. |
+| | |
++--------------+--------------------------------------------------------------+
+|step 5 | The vTC instance tags all the packets and sends them back to |
+| | the packet generator for 10 seconds. |
+| | |
+| | The framework checks if the packet generator receives back |
+| | all the packets with the correct tag from the vTC. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | The vTC is deemed to be successfully instantiated if all |
+| | packets are sent back with the right tag as requested, |
+| | else it is deemed DoA (Dead on arrival) |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst
new file mode 100644
index 000000000..56c8227df
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc027.rst
@@ -0,0 +1,67 @@
+*************************************
+Yardstick Test Case Description TC027
+*************************************
+
+.. _ipv6: https://wiki.opnfv.org/ipv6_opnfv_project
+
++-----------------------------------------------------------------------------+
+|IPv6 connectivity between nodes on the tenant network |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC027_IPv6 connectivity |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | RTT, Round Trip Time |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To do a basic verification that IPv6 connectivity is within |
+| | acceptable boundaries when ipv6 packets travel between hosts |
+| | located on same or different compute blades. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc027.yaml |
+| | |
+| | Packet size 56 bytes. |
+| | SLA RTT is set to maximum 10 ms. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | ping6 |
+| | |
+| | Ping6 is normally part of Linux distribution, hence it |
+| | doesn't need to be installed. |
+| | |
++--------------+--------------------------------------------------------------+
+|references | ipv6_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test case can be configured with different run step |
+| | you can run setup, run benchmark, teardown independently |
+| | SLA is optional. The SLA in this test case serves as an |
+| | example. Considerably lower RTT is expected. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with ping6 included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. Ping is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Test should not PASS if any RTT is above the optional SLA |
+| | value, or if there is a test case execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst
new file mode 100644
index 000000000..5c91f6bf1
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc037.rst
@@ -0,0 +1,99 @@
+*************************************
+Yardstick Test Case Description TC037
+*************************************
+
+.. _cirros: https://download.cirros-cloud.net
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
++-----------------------------------------------------------------------------+
+|Latency, CPU Load, Throughput, Packet Loss |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC037_Latency,CPU Load,Throughput,Packet Loss|
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Number of flows, latency, throughput, CPU load, packet loss |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS network performance with regards to |
+| | flows and throughput, such as if and how different amounts |
+| | of flows matter for the throughput between hosts on different|
+| | compute blades. Typically e.g. the performance of a vSwitch |
+| | depends on the number of flows running through it. Also |
+| | performance of other equipment or entities can depend |
+| | on the number of flows or the packet sizes used. |
+| | The purpose is also to be able to spot trends. Test results, |
+| | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc037.yaml |
+| | |
+| | Packet size: 64 bytes |
+| | Number of ports: 1, 10, 50, 100, 300, 500, 750 and 1000. |
+| | The amount configured ports map from 2 up to 1001000 flows, |
+| | respectively. Each port amount is run two times, for 20 |
+| | seconds each. Then the next port_amount is run, and so on. |
+| | During the test CPU load on both client and server, and the |
+| | network latency between the client and server are measured. |
+| | The client and server are distributed on different HW. |
+| | For SLA max_ppm is set to 1000. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | pktgen |
+| | |
+| | (Pktgen is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen included.) |
+| | |
+| | ping |
+| | |
+| | Ping is normally part of any Linux distribution, hence it |
+| | doesn't need to be installed. It is also part of the |
+| | Yardstick Glance image. |
+| | (For example also a cirros_ image can be downloaded, it |
+| | includes ping) |
+| | |
+| | mpstat |
+| | |
+| | (Mpstat is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image.) |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Ping and Mpstat man pages |
+| | |
+| | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different packet sizes, amount |
+| | of flows and test duration. Default values exist. |
+| | |
+| | SLA (optional): max_ppm: The number of packets per million |
+| | packets sent that are acceptable to lose, not received. |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with pktgen included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst
new file mode 100644
index 000000000..93c2cf3d8
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc038.rst
@@ -0,0 +1,99 @@
+*************************************
+Yardstick Test Case Description TC038
+*************************************
+
+.. _cirros: https://download.cirros-cloud.net
+.. _pktgen: https://www.kernel.org/doc/Documentation/networking/pktgen.txt
+
++-----------------------------------------------------------------------------+
+|Latency, CPU Load, Throughput, Packet Loss (Extended measurements) |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC038_Latency,CPU Load,Throughput,Packet Loss|
+| | |
++--------------+--------------------------------------------------------------+
+|metric | Number of flows, latency, throughput, CPU load, packet loss |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | To evaluate the IaaS network performance with regards to |
+| | flows and throughput, such as if and how different amounts |
+| | of flows matter for the throughput between hosts on different|
+| | compute blades. Typically e.g. the performance of a vSwitch |
+| | depends on the number of flows running through it. Also |
+| | performance of other equipment or entities can depend |
+| | on the number of flows or the packet sizes used. |
+| | The purpose is also to be able to spot trends. Test results, |
+|              | graphs and similar shall be stored for comparison reasons and|
+| | product evolution understanding between different OPNFV |
+| | versions and/or configurations. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc038.yaml |
+| | |
+| | Packet size: 64 bytes |
+| | Number of ports: 1, 10, 50, 100, 300, 500, 750 and 1000. |
+| | The amount configured ports map from 2 up to 1001000 flows, |
+| | respectively. Each port amount is run ten times, for 20 |
+| | seconds each. Then the next port_amount is run, and so on. |
+| | During the test CPU load on both client and server, and the |
+| | network latency between the client and server are measured. |
+| | The client and server are distributed on different HW. |
+| | For SLA max_ppm is set to 1000. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | pktgen |
+| | |
+| | (Pktgen is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+| | image. |
+| | As an example see the /yardstick/tools/ directory for how |
+| | to generate a Linux image with pktgen included.) |
+| | |
+| | ping |
+| | |
+| | Ping is normally part of any Linux distribution, hence it |
+| | doesn't need to be installed. It is also part of the |
+| | Yardstick Glance image. |
+| | (For example also a cirros_ image can be downloaded, it |
+| | includes ping) |
+| | |
+| | mpstat |
+| | |
+| | (Mpstat is not always part of a Linux distribution, hence it |
+| | needs to be installed. It is part of the Yardstick Glance |
+|              | image.)                                                      |
+| | |
++--------------+--------------------------------------------------------------+
+|references | Ping and Mpstat man pages |
+| | |
+| | pktgen_ |
+| | |
+| | ETSI-NFV-TST001 |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different packet sizes, amount |
+| | of flows and test duration. Default values exist. |
+| | |
+| | SLA (optional): max_ppm: The number of packets per million |
+|              | packets sent that are acceptable to lose, not received.      |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | The test case image needs to be installed into Glance |
+|conditions | with pktgen included in it. |
+| | |
+| | No POD specific requirements have been identified. |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | The hosts are installed, as server and client. pktgen is |
+| | invoked and logs are produced and stored. |
+| | |
+| | Result: Logs are stored. |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | Fails only if SLA is not passed, or if there is a test case |
+| | execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst
new file mode 100644
index 000000000..044ccf193
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/opnfv_yardstick_tc040.rst
@@ -0,0 +1,60 @@
+*************************************
+Yardstick Test Case Description TC040
+*************************************
+
+.. _Parser: https://wiki.opnfv.org/parser
+
++-----------------------------------------------------------------------------+
+|Verify Parser Yang-to-Tosca |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | OPNFV_YARDSTICK_TC040 Verify Parser Yang-to-Tosca |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | 1. tosca file which is converted from yang file by Parser |
+| | 2. result whether the output is same with expected outcome |
++--------------+--------------------------------------------------------------+
+|test purpose | To verify the function of Yang-to-Tosca in Parser. |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc040.yaml |
+| | |
+| | yangfile: the path of the yangfile which you want to convert |
+| | toscafile: the path of the toscafile which is your expected |
+| | outcome. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | Parser |
+| | |
+| | (Parser is not part of a Linux distribution, hence it |
+| | needs to be installed. As an example see the |
+| | /yardstick/benchmark/scenarios/parser/parser_setup.sh for |
+|              | how to install it manually. Of course, it will be installed  |
+| | and uninstalled automatically when you run this test case |
+| | by yardstick) |
++--------------+--------------------------------------------------------------+
+|references | Parser_ |
+| | |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different path of yangfile and |
+| | toscafile to fit your real environment to verify Parser |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | No POD specific requirements have been identified. |
+|conditions    | It can be run without VM.                                    |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1 | parser is installed without VM, running Yang-to-Tosca module |
+| | to convert yang file to tosca file, validating output against|
+| | expected outcome. |
+| | |
+| | Result: Logs are stored. |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if output is different from expected outcome      |
+| | or if there is a test case execution problem. |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/configguide/yardstick_testcases/testcase_description_v2_template.rst b/docs/configguide/yardstick_testcases/testcase_description_v2_template.rst
new file mode 100644
index 000000000..1b8754b05
--- /dev/null
+++ b/docs/configguide/yardstick_testcases/testcase_description_v2_template.rst
@@ -0,0 +1,64 @@
+.. Template to be used for test case descriptions in Yardstick Project.
+ Write one .rst per test case.
+ Upload the .rst for the test case in /docs/source/yardstick directory.
+ Review in Gerrit.
+
+*************************************
+Yardstick Test Case Description TCXXX
+*************************************
+
++-----------------------------------------------------------------------------+
+|test case slogan e.g. Network Latency |
+| |
++--------------+--------------------------------------------------------------+
+|test case id | e.g. OPNFV_YARDSTICK_TC001_NW Latency |
+| | |
++--------------+--------------------------------------------------------------+
+|metric | what will be measured, e.g. latency |
+| | |
++--------------+--------------------------------------------------------------+
+|test purpose | describe what is the purpose of the test case |
+| | |
++--------------+--------------------------------------------------------------+
+|configuration | what .yaml file to use, state SLA if applicable, state |
+| | test duration, list and describe the scenario options used in|
+| | this TC and also list the options using default values. |
+| | |
++--------------+--------------------------------------------------------------+
+|test tool | e.g. ping |
+| | |
++--------------+--------------------------------------------------------------+
+|references | e.g. RFCxxx, ETSI-NFVyyy |
+| | |
++--------------+--------------------------------------------------------------+
+|applicability | describe variations of the test case which can be |
+|              | performed, e.g. run the test for different packet sizes      |
+| | |
++--------------+--------------------------------------------------------------+
+|pre-test | describe configuration in the tool(s) used to perform |
+|conditions | the measurements (e.g. fio, pktgen), POD-specific |
+| | configuration required to enable running the test |
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1        | use this to describe tests that require several steps e.g    |
+| | collect logs. |
+| | |
+| | Result: what happens in this step e.g. logs collected |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | remove interface |
+| | |
+| | Result: interface down. |
+| | |
++--------------+--------------------------------------------------------------+
+|step N | what is done in step N |
+| | |
+| | Result: what happens |
+| | |
++--------------+--------------------------------------------------------------+
+|test verdict | expected behavior, or SLA, pass/fail criteria |
+| | |
++--------------+--------------------------------------------------------------+
diff --git a/docs/templates/testcase_description_v2_template.rst b/docs/templates/testcase_description_v2_template.rst
index da90f561e..1b8754b05 100644
--- a/docs/templates/testcase_description_v2_template.rst
+++ b/docs/templates/testcase_description_v2_template.rst
@@ -9,37 +9,56 @@ Yardstick Test Case Description TCXXX
+-----------------------------------------------------------------------------+
|test case slogan e.g. Network Latency |
-+==============+==============================================================+
+| |
++--------------+--------------------------------------------------------------+
|test case id | e.g. OPNFV_YARDSTICK_TC001_NW Latency |
+| | |
+--------------+--------------------------------------------------------------+
|metric | what will be measured, e.g. latency |
+| | |
+--------------+--------------------------------------------------------------+
|test purpose | describe what is the purpose of the test case |
+| | |
+--------------+--------------------------------------------------------------+
|configuration | what .yaml file to use, state SLA if applicable, state |
| | test duration, list and describe the scenario options used in|
| | this TC and also list the options using default values. |
+| | |
+--------------+--------------------------------------------------------------+
|test tool | e.g. ping |
+| | |
+--------------+--------------------------------------------------------------+
|references | e.g. RFCxxx, ETSI-NFVyyy |
+| | |
+--------------+--------------------------------------------------------------+
|applicability | describe variations of the test case which can be |
| | performend, e.g. run the test for different packet sizes |
+| | |
+--------------+--------------------------------------------------------------+
|pre-test | describe configuration in the tool(s) used to perform |
|conditions | the measurements (e.g. fio, pktgen), POD-specific |
| | configuration required to enable running the test |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | use this to describe tests that | what happens in |
-| | | require several steps e.g. | this step |
-| | | step 1 collect logs | e.g. logs collected|
-| +------+----------------------------------+--------------------+
-| | 2 | remove interface | interface down |
-| +------+----------------------------------+--------------------+
-| | N | what is done in step N | what happens |
-+--------------+------+----------------------------------+--------------------+
+| | |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+--------------------------------------------------------------+
+|step 1        | use this to describe tests that require several steps e.g    |
+| | collect logs. |
+| | |
+| | Result: what happens in this step e.g. logs collected |
+| | |
++--------------+--------------------------------------------------------------+
+|step 2 | remove interface |
+| | |
+| | Result: interface down. |
+| | |
++--------------+--------------------------------------------------------------+
+|step N | what is done in step N |
+| | |
+| | Result: what happens |
+| | |
++--------------+--------------------------------------------------------------+
|test verdict | expected behavior, or SLA, pass/fail criteria |
+| | |
+--------------+--------------------------------------------------------------+
diff --git a/docs/user_guides/framework/03-installation.rst b/docs/userguide/yardstick_framework/03-installation.rst
index d2cae36b8..31f8a922e 100644
--- a/docs/user_guides/framework/03-installation.rst
+++ b/docs/userguide/yardstick_framework/03-installation.rst
@@ -92,7 +92,8 @@ via the OpenStack Dashboard.
Example command:
::
- glance image-create --name yardstick-trusty-server --is-public true \
+ glance --os-image-api-version 1 image-create \
+ --name yardstick-trusty-server --is-public true \
--disk-format qcow2 --container-format bare \
--file /tmp/workspace/yardstick/yardstick-trusty-server.img
diff --git a/docs/user_guides/framework/index.rst b/docs/userguide/yardstick_framework/index.rst
index f982c30ff..f982c30ff 100644
--- a/docs/user_guides/framework/index.rst
+++ b/docs/userguide/yardstick_framework/index.rst
diff --git a/docs/vTC/README.rst b/docs/vTC/README.rst
deleted file mode 100644
index ae6fefa59..000000000
--- a/docs/vTC/README.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-==========================
-Virtual Traffic Classifier
-==========================
-
-Overview
-========
-
-The virtual Traffic Classifier VNF [1], comprises in the current version of
-1 VNFC [2]. The VNFC contains both the Traffic Inspection module, and the
-Traffic forwarding module, needed to run the VNF. The exploitation of DPI
-methods for traffic classification is built around two basic assumptions:
-
-(i) third parties unaffiliated with either source or recipient are able to
-inspect each IP packet’s payload and
-(ii) the classifier knows the relevant syntax of each application’s packet
-payloads (protocol signatures, data patterns, etc.).
-
-The proposed DPI based approach will only use an indicative, small number of
-the initial packets from each flow in order to identify the content and not
-inspect each packet.
-
-In this respect it follows the Packet Based per Flow State (PBFS).
-This method uses a table to track each session based on the 5-tuples
-(src address,dest address,src port,dest port,transport protocol)
-that is maintained for each flow.
-
-Concepts
-========
-
-Traffic Inspection: The process of packet analysis and application
-identification of network traffic that passes through the vTC.
-
-Traffic Forwarding: The process of packet forwarding from an incoming
-network interface to a pre-defined outgoing network interface.
-
-Traffic Rule Application: The process of packet tagging, based on a
-predefined set of rules. Packet tagging may include e.g. ToS field
-modification.
-
-Architecture
-============
-
-The Traffic Inspection module is the most computationally intensive component
-of the VNF. It implements filtering and packet matching algorithms in order to
-support the enhanced traffic forwarding capability of the VNF. The component
-supports a flow table (exploiting hashing algorithms for fast indexing of
-flows) and an inspection engine for traffic classification.
-
-The implementation used for these experiments exploits the nDPI library.
-The packet capturing mechanism is implemented using libpcap. When the DPI
-engine identifies a new flow, the flow register is updated with the
-appropriate information and transmitted across the Traffic Forwarding module,
-which then applies any required policy updates.
-
-The Traffic Forwarding moudle is responsible for routing and packet forwarding.
-It accepts incoming network traffic, consults the flow table for classification
-information for each incoming flow and then applies pre-defined policies
-marking e.g. type of Service/Differentiated Services Code Point (TOS/DSCP)
-multimedia traffic for QoS enablement on the forwarded traffic.
-It is assumed that the traffic is forwarded using the default policy until it
-is identified and new policies are enforced.
-
-The expected response delay is considered to be negligible,as only a small
-number of packets are required to identify each flow.
-
-Graphical Overview
-==================
-
-Install
-=======
-
-run the build.sh with root privileges
-
-Run
-===
-
-sudo ./pfbridge -a eth1 -b eth2
-
-Custom Image
-============
-
-TBD
-
-Development Environment
-=======================
-
-Ubuntu 14.04 >= VM
diff --git a/docs/vTC/abbreviations.rst b/docs/vTC/abbreviations.rst
deleted file mode 100644
index a713ee66b..000000000
--- a/docs/vTC/abbreviations.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Abbreviations for the virtual Traffic Classifier
-================================================
-
-[1] VNF - Virtual Network Function
-[2] VNFC - Virtual Network Function Component
diff --git a/docs/yardstick/index.rst b/docs/yardstick/index.rst
deleted file mode 100644
index b14670bdd..000000000
--- a/docs/yardstick/index.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-======================
-Yardstick Config Guide
-======================
-
-Test Case Descriptions
-======================
-
-.. toctree::
- :maxdepth: 1
-
- opnfv_yardstick_tc001.rst
- opnfv_yardstick_tc002.rst
-
-Templates
-=========
-
-.. toctree::
- :maxdepth: 1
-
- ../templates/Yardstick_task_templates
- ../templates/testcase_description_v2_template
diff --git a/etc/yardstick/nodes/compass_sclab_physical/pod.yaml b/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
new file mode 100644
index 000000000..e062988c4
--- /dev/null
+++ b/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
@@ -0,0 +1,42 @@
+---
+# sample config file about the POD information, including the
+# name/IP/user/ssh key of Bare Metal and Controllers/Computes
+#
+# The options of this config file include:
+# name: the name of this node
+# role: node's role, support role: Master/Controller/Compute/BareMetal
+# ip: the node's IP address
+# user: the username for login
+# key_filename:the path of the private key file for login
+
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: 10.1.0.50
+ user: root
+ password: root
+-
+ name: node2
+ role: Controller
+ ip: 10.1.0.51
+ user: root
+ password: root
+-
+ name: node3
+ role: Controller
+ ip: 10.1.0.52
+ user: root
+ password: root
+-
+ name: node4
+ role: Compute
+ ip: 10.1.0.53
+ user: root
+ password: root
+-
+ name: node5
+ role: Compute
+ ip: 10.1.0.54
+ user: root
+ password: root
diff --git a/etc/yardstick/yardstick.conf.sample b/etc/yardstick/yardstick.conf.sample
index 82326dd1b..63462c573 100644
--- a/etc/yardstick/yardstick.conf.sample
+++ b/etc/yardstick/yardstick.conf.sample
@@ -11,3 +11,8 @@
# file_path = /tmp/yardstick.out
# max_bytes = 0
# backup_count = 0
+
+[dispatcher_influxdb]
+# timeout = 5
+# target = http://127.0.0.1:8086
+# db_name = yardstick
diff --git a/run_tests.sh b/run_tests.sh
index d1567af9c..e093a20d3 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -74,6 +74,8 @@ run_functional_test() {
fi
}
+export PYTHONPATH='yardstick/vTC/apexlake'
+
run_flake8
run_tests
run_coverage
diff --git a/samples/cyclictest-node-context.yaml b/samples/cyclictest-node-context.yaml
new file mode 100644
index 000000000..d74d1e5e3
--- /dev/null
+++ b/samples/cyclictest-node-context.yaml
@@ -0,0 +1,50 @@
+---
+# Sample benchmark task config file
+# Measure system high resolution by using Cyclictest
+#
+# For this sample just like running the command below on the test vm and
+# getting latencies info back to the yardstick.
+#
+# sudo bash cyclictest -a 1 -i 1000 -p 99 -l 1000 -t 1 -h 90 -m -n -q
+#
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Cyclictest
+ options:
+ affinity: 1
+ interval: 1000
+ priority: 99
+ loops: 1000
+ threads: 1
+ histogram: 90
+ host: kvm.LF
+ runner:
+ type: Duration
+ duration: 1
+ interval: 1
+ sla:
+ max_min_latency: 50
+ max_avg_latency: 100
+ max_max_latency: 1000
+ action: monitor
+ setup_options:
+ rpm_dir: "/opt/rpm"
+ script_dir: "/opt/scripts"
+ image_dir: "/opt/image"
+ host_setup_seqs:
+ - "host-setup0.sh"
+ - "reboot"
+ - "host-setup1.sh"
+ - "host-run-qemu.sh"
+ guest_setup_seqs:
+ - "guest-setup0.sh"
+ - "reboot"
+ - "guest-setup1.sh"
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/pod.yaml
diff --git a/samples/ha-baremetal.yaml b/samples/ha-baremetal.yaml
new file mode 100755
index 000000000..9f9baf50c
--- /dev/null
+++ b/samples/ha-baremetal.yaml
@@ -0,0 +1,45 @@
+---
+# Sample test case for ha
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "bare-metal-down"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "openstack-cmd"
+ command_name: "heat stack-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "openstack-cmd"
+ command_name: "neutron router-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/samples/ha-service.yaml b/samples/ha-service.yaml
new file mode 100755
index 000000000..e624f531e
--- /dev/null
+++ b/samples/ha-service.yaml
@@ -0,0 +1,42 @@
+---
+# Sample test case for ha
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: ServiceHA
+ options:
+ attackers:
+ - fault_type: "kill-process"
+ process_name: "nova-api"
+ host: node1
+
+ monitors:
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
+ monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "nova-api"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
+ nodes:
+ node1: node1.LF
+
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ outage_time: 5
+ action: monitor
+
+
+context:
+ type: Node
+ name: LF
+ file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/samples/parser.yaml b/samples/parser.yaml
new file mode 100644
index 000000000..32d9abed1
--- /dev/null
+++ b/samples/parser.yaml
@@ -0,0 +1,21 @@
+---
+# Sample task config file
+# running Parser Yang-to-Tosca module as a tool
+# validating output against expected outcome
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Parser
+ options:
+ yangfile: /root/yardstick/samples/yang.yaml
+ toscafile: /root/yardstick/samples/tosca.yaml
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ type: Dummy
diff --git a/samples/ping6.yaml b/samples/ping6.yaml
new file mode 100644
index 000000000..22b8bb9cc
--- /dev/null
+++ b/samples/ping6.yaml
@@ -0,0 +1,28 @@
+---
+# Sample test case for ipv6
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Ping6
+ options:
+ packetsize: 200
+ host: node1.IPV6
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+ run_step: 'setup,run,teardown'
+ sla:
+ max_rtt: 10
+ action: monitor
+
+
+context:
+ type: Node
+ name: IPV6
+ file: /root/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
+
+
diff --git a/samples/serviceha.yaml b/samples/serviceha.yaml
index ce4fefa23..e624f531e 100755
--- a/samples/serviceha.yaml
+++ b/samples/serviceha.yaml
@@ -13,8 +13,18 @@ scenarios:
host: node1
monitors:
- - monitor_cmd: "nova image-list"
+ - monitor_type: "openstack-cmd"
+ command_name: "nova image-list"
monitor_time: 10
+ sla:
+ max_outage_time: 5
+ - monitor_type: "process"
+ process_name: "nova-api"
+ host: node1
+ monitor_time: 10
+ sla:
+ max_recover_time: 5
+
nodes:
node1: node1.LF
@@ -30,5 +40,3 @@ context:
type: Node
name: LF
file: /root/yardstick/etc/yardstick/nodes/fuel_virtual/pod.yaml
-
-
diff --git a/samples/tosca.yaml b/samples/tosca.yaml
new file mode 100644
index 000000000..4472f7ef8
--- /dev/null
+++ b/samples/tosca.yaml
@@ -0,0 +1,149 @@
+tosca_definitions_version:tosca_simple_yaml_1_0_0
+description:
+ This module defines a VNF Deployment Unit.
+import:
+
+
+metadata:
+ ID:clearwater
+ Vendor:HP
+dsl_definitions:
+ compute_props_host_ellis:&compute_props_host_ellis
+ num_cpu:4
+ mem_size:4096
+ compute_props_host_bono:&compute_props_host_bono
+ num_cpu:3
+ mem_size:2048
+node_types:
+ tosca.nodes.compute.ellis:
+ derived_from:tosca.nodes.compute
+
+ tosca.nodes.compute.bono:
+ derived_from:tosca.nodes.compute
+
+topology_template:
+ # a description of the topology template
+ description:>
+ Vdus used in a vnfd
+ inputs:
+ storage_size:
+ type:scalar-unit.size
+ default:2048
+ description:The required storage resource
+ storage_location:
+ type:string
+ description:>
+ Block storage mount point (filesystem path).
+ node_templates:
+ ellis:
+ type:tosca.nodes.Compute
+ capabilities:
+ os:
+ properties:
+ architecture:
+ type:
+ distribution:
+ version:
+ host:
+ properties:*compute_props_host_ellis
+ scalable:
+ properties:
+ min_instances:1
+ default_instances:1
+ requirements:
+ - local_storage:
+ node:ellis_BlockStorage
+ relationship:
+ type:AttachesTo
+ properties:
+ location:{ get_input:storage_location }
+ interfaces:
+ Standard:
+ start:
+ implementation:start.sh
+ delete:
+ implementaion:stop.sh
+ stop:
+ implementaion:shutdown.sh
+ ellis_BlockStorage:
+ type:tosca.nodes.BlockStorage
+ properties:
+ size:{ get_input:storage_size }
+ bono:
+ type:tosca.nodes.Compute
+ capabilities:
+ os:
+ properties:
+ architecture:
+ type:
+ distribution:
+ version:
+ host:
+ properties:*compute_props_host_bono
+ scalable:
+ properties:
+ min_instances:3
+ default_instances:3
+ requirements:
+ - local_storage:
+ node:bono_BlockStorage
+ relationship:
+ type:AttachesTo
+ properties:
+ location:{ get_input:storage_location }
+ interfaces:
+ Standard:
+ start:
+ implementation:start.sh
+ delete:
+ implementaion:stop.sh
+ stop:
+ implementaion:shutdown.sh
+ bono_BlockStorage:
+ type:tosca.nodes.BlockStorage
+ properties:
+ size:{ get_input:storage_size }
+ clearwater_network1:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ ellis_port1:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:ellis
+ - link:
+ node:clearwater_network1
+ clearwater_network2:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ ellis_port2:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:ellis
+ - link:
+ node:clearwater_network2
+ clearwater_network1:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ bono_port1:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:bono
+ - link:
+ node:clearwater_network1
+ clearwater_network2:
+ type:tosca.nodes.network.Network
+ properties:
+ ip_version:4
+ bono_port2:
+ type:tosca.nodes.network.Port
+ requirements:
+ - binding:
+ node:bono
+ - link:
+ node:clearwater_network2 \ No newline at end of file
diff --git a/samples/yang.yaml b/samples/yang.yaml
new file mode 100644
index 000000000..86b7b2f31
--- /dev/null
+++ b/samples/yang.yaml
@@ -0,0 +1,687 @@
+ module clearwater {
+
+ namespace "http://localhost/ietf-inet-types.yang";
+
+ prefix "yang";
+
+ organization "HP";
+
+ contact "TBD";
+
+ description "This module defines a VNF Deployment Unit.";
+ revision "2014-05-18" {
+
+ description
+
+ "Initial version";
+
+ reference
+
+ "RFC XXXX";
+
+ }
+ container clearwater {
+
+ description
+
+ "Vdus used in a vnfd";
+
+ list ellis {
+ key id;
+ leaf id{
+ type string;
+ description "key ID for vdu1";
+ }
+ description
+
+ "Vdu key";
+
+
+
+ leaf instance-num {
+
+ type uint16{
+ range 1..6;}
+ default 1;
+
+
+ description
+
+ "Number of instances of the said VDU which shall be
+ instantiated";
+
+ }
+
+ leaf vm-image {
+
+ type string;
+ reference "uri";
+
+
+ description
+
+ "Reference to a VM image";
+
+ }
+ container resource {
+ description
+
+ "The required resource";
+
+ container cpu {
+
+ description
+
+ "The required computation resource";
+
+
+
+ leaf vCPU_num {
+
+ type uint32{
+ range 4;
+ }
+
+ description
+
+ "The number of virtual CPU units";
+
+ }
+
+
+
+ list vCPU-spec {
+
+ key "name";
+
+
+
+ description
+
+ "Processor characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of vCPU-spec";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of vCPU-spec";
+
+ }
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of vCPU-spec";
+
+ }
+
+ }
+
+ }
+
+ container memory {
+
+ description
+
+ "The required memory resource";
+
+ leaf memory_size {
+
+ type uint32{
+ range 4096;
+ }
+
+ description
+
+ "Memory size, unit:MBytes";
+
+ }
+
+ list memory-spec {
+
+ key name;
+
+
+
+ description
+
+ "Memory characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of memory-spec";
+
+ }
+
+
+
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of memory-spec";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of memory-spec";
+
+ }
+
+ }
+
+ }
+
+
+
+ container disk {
+
+ description
+
+ "The required storage resource";
+
+
+
+ leaf disk-size {
+
+ type uint32{
+ range 2048;
+ }
+ description
+
+ "Virtual storage size, unit:MBytes";
+ }
+
+
+
+ list disk-KQI {
+
+ key name;
+
+
+
+ description
+
+ "Storage characteristics in the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of disk-KQI";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of disk-KQI";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of disk-KQI";
+
+ }
+
+ }
+
+ }
+
+
+
+ container vnic {
+
+ description
+
+ "Virtual network interface card (vnic) resource";
+
+
+
+ leaf vnic-num {
+
+ type uint32{
+ range 2;
+ }
+
+ description
+
+ "The total number of virtual vnic";
+
+ }
+ }
+
+ }
+
+
+
+ container workflow-script {
+
+ description
+
+ "VDU workflow script";
+
+
+
+ leaf init {
+
+ type string;
+ default "start.sh";
+
+
+ description
+
+ "VDU initialization script";
+ }
+
+
+
+ leaf terminate {
+
+ type string;
+ default "stop.sh";
+
+
+ description
+
+ "VDU termination script";
+ }
+
+ leaf graceful-shutdown {
+
+ type string;
+ default "shutdown.sh";
+
+
+ description
+
+ "VDU graceful shutdown script";
+
+ }
+
+ }
+
+ }
+ list bono {
+ key id;
+ leaf id{
+ type string;
+ description "key ID for vdu2";
+ }
+ description
+
+ "Vdu key";
+
+
+
+ leaf instance-num {
+
+ type uint16;
+ default 3;
+
+
+ description
+
+ "Number of instances of the said VDU which shall be
+ instantiated";
+
+ }
+
+
+
+ leaf vm-image {
+
+ type string;
+ reference "URI";
+
+
+ description
+
+ "Reference to a VM image";
+
+ }
+
+
+
+ container resource {
+ description
+
+ "The required resource";
+
+
+
+ container cpu {
+
+ description
+
+ "The required computation resource";
+
+
+
+ leaf vCPU_num {
+
+ type uint32{
+ range 3;
+ }
+
+ description
+
+ "The number of virtual CPU units";
+
+ }
+
+
+
+ list vCPU-spec {
+
+ key "name";
+
+
+
+ description
+
+ "Processor characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of vCPU-spec";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of vCPU-spec";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of vCPU-spec";
+
+ }
+
+ }
+
+ }
+
+
+
+ container memory {
+
+ description
+
+ "The required memory resource";
+
+
+
+ leaf memory_size {
+
+ type uint32{
+ range 2048;
+ }
+
+ description
+
+ "Memory size, unit:MBytes";
+
+ }
+
+ list memory-spec {
+
+ key name;
+
+ description
+
+ "Memory characteristics for the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of memory-spec";
+
+ }
+
+
+
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of memory-spec";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of memory-spec";
+
+ }
+
+ }
+
+ }
+
+
+
+ container disk {
+
+ description
+
+ "The required storage resource";
+
+
+
+ leaf disk-size {
+
+ type uint32{
+ range 3000;
+ }
+
+ description
+
+ "Virtual storage size, unit:MBytes";
+
+ }
+
+
+
+ list disk-KQI {
+
+ key name;
+
+
+
+ description
+
+ "Storage characteristics in the VDU";
+
+
+
+ leaf name {
+
+ type string;
+
+ description
+
+ "Name of disk-KQI";
+
+ }
+ leaf description {
+
+ type string;
+
+ description
+
+ "Description of disk-KQI";
+
+ }
+
+
+
+ leaf value {
+
+ type uint32;
+
+ description
+
+ "Value of disk-KQI";
+
+ }
+
+ }
+
+ }
+
+
+
+ container vnic {
+
+ description
+
+ "Virtual network interface card (vnic) resource";
+
+
+
+ leaf vnic-num {
+
+ type uint32{
+ range 2;
+ }
+
+ description
+
+ "The total number of virtual vnic";
+
+ }
+ }
+
+ }
+
+
+
+ container workflow-script {
+
+ description
+
+ "VDU workflow script";
+
+
+
+ leaf init {
+
+ type string;
+ default "start.sh";
+
+
+ description
+
+ "VDU initialization script";
+
+ }
+
+
+
+ leaf terminate {
+
+ type string;
+ default "stop.sh";
+
+
+ description
+
+ "VDU termination script";
+
+ }
+
+ leaf graceful-shutdown {
+
+ type string;
+ default "shutdown.sh";
+
+
+ description
+
+ "VDU graceful shutdown script";
+
+ }
+
+ }
+
+ }
+
+ }
+
+ }
+
diff --git a/setup.py b/setup.py
index 654ea0fc4..eb55e9283 100755
--- a/setup.py
+++ b/setup.py
@@ -10,8 +10,12 @@ setup(
'yardstick': [
'benchmark/scenarios/availability/attacker/*.yaml',
'benchmark/scenarios/availability/attacker/scripts/*.bash',
+ 'benchmark/scenarios/availability/monitor/*.yaml',
+ 'benchmark/scenarios/availability/monitor/script_tools/*.bash',
'benchmark/scenarios/compute/*.bash',
'benchmark/scenarios/networking/*.bash',
+ 'benchmark/scenarios/networking/*.txt',
+ 'benchmark/scenarios/parser/*.sh',
'benchmark/scenarios/storage/*.bash',
'resources/files/*'
]
@@ -21,6 +25,7 @@ setup(
"coverage>=3.6",
"flake8",
"Jinja2>=2.6",
+ "lxml",
"PyYAML>=3.10",
"pbr<2.0,>=1.3",
"python-glanceclient>=0.12.0",
@@ -31,9 +36,11 @@ setup(
"mock>=1.0.1", # remove with python3
"paramiko",
"netifaces",
+ "scp",
"six",
"testrepository>=0.0.18",
"testtools>=1.4.0"
+ "nose"
],
extras_require={
'plot': ["matplotlib>=1.4.2"]
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
new file mode 100644
index 000000000..f89a3099e
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc005.yaml
@@ -0,0 +1,48 @@
+---
+# Yardstick TC005 config file
+# Measure Storage IOPS, throughput and latency using fio
+
+schema: "yardstick:task:0.1"
+scenarios:
+{% for rw in ['read', 'write', 'randwrite', 'randread', 'rw'] %}
+ {% for bs in ['4k', '64k', '1024k'] %}
+-
+ type: Fio
+ options:
+ filename: /home/ec2-user/data.raw
+ bs: {{bs}}
+ rw: {{rw}}
+ ramp_time: 10
+ duration: 20
+
+ host: fio.yardstick-TC005
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+ sla:
+ read_bw: 400
+ read_iops: 100
+ read_lat: 20000
+ write_bw: 400
+ write_iops: 100
+ write_lat: 20000
+ action: monitor
+ {% endfor %}
+{% endfor %}
+
+context:
+ name: yardstick-TC005
+ image: yardstick-trusty-server
+ flavor: m1.small
+ user: ec2-user
+
+ servers:
+ fio:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
new file mode 100644
index 000000000..3d4091293
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
@@ -0,0 +1,26 @@
+---
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_throughput
+
+ options:
+ packet_size: 1280
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml
new file mode 100644
index 000000000..30d59f797
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc007.yaml
@@ -0,0 +1,32 @@
+---
+# Sample benchmark task config file
+# vTC
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_throughput_noisy
+
+ options:
+ packet_size: 1280
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+ num_of_neighbours: 2
+ amount_of_ram: 1G
+ number_of_cores: 2
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
new file mode 100644
index 000000000..f1b995371
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc014.yaml
@@ -0,0 +1,32 @@
+---
+# Yardstick TC014 config file
+# Measure Processing speed using unixbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: UnixBench
+ options:
+ run_mode: 'verbose'
+ test_type: 'dhry2reg'
+ host: Chang'e.yardstick-TC014
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ name: yardstick-TC014
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ servers:
+ Chang'e:
+ floating_ip: true
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24' \ No newline at end of file
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml
new file mode 100644
index 000000000..8d9edfe7b
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc020.yaml
@@ -0,0 +1,31 @@
+---
+# Sample benchmark task config file
+# vTC
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_instantiation_validation
+
+ options:
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+# dummy context, will not be used by vTC
+context:
+ type: Node
+ name: LF
+ file: /etc/yardstick/nodes/fuel_virtual/pod.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml
new file mode 100644
index 000000000..c62ce2a32
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc021.yaml
@@ -0,0 +1,28 @@
+---
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: vtc_instantiation_validation_noisy
+
+ options:
+ vlan_sender: 1007
+ vlan_receiver: 1006
+ default_net_name: monitoring
+ default_subnet_name: monitoring_subnet
+ vlan_net_1_name: inbound_traffic_network
+ vlan_subnet_1_name: inbound_traffic_subnet
+ vlan_net_2_name: inbound_traffic_network
+ vlan_subnet_2_name: inbound_traffic_subnet
+ vnic_type: direct # [normal (OvS), direct (SR-IOV)]
+ vtc_flavor: m1.large
+ num_of_neighbours: 2
+ amount_of_ram: 1G
+ number_of_cores: 2
+
+ runner:
+ type: Iteration
+ iterations: 1
+
+context:
+ type: Dummy
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
new file mode 100644
index 000000000..9b5e86509
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml
@@ -0,0 +1,27 @@
+---
+# Yardstick TC027 config file
+# Measure IPV6 network latency using ping6
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: Ping6
+ host: node1.IPV6
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+ run_step: 'setup,run,teardown'
+ sla:
+ max_rtt: 10
+ action: monitor
+
+
+context:
+ type: Node
+ name: IPV6
+ file: /root/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml
+
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
new file mode 100644
index 000000000..a73dfee0a
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml
@@ -0,0 +1,85 @@
+---
+# Yardstick TC037 config file
+# Measure network throughput and packet loss using pktgen.
+# Different amounts of flows are tested with, from 2 up to 1001000.
+# All tests are run 2 times each. First 2 times with the least
+# amount of ports, then 2 times with the next amount of ports,
+# and so on until all packet sizes have been run with.
+#
+# During the measurements system load and network latency are
+# recorded/measured using ping and mpstat, respectively.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: demeter.yardstick-TC037
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: poseidon.yardstick-TC037
+-
+ type: Ping
+ run_in_background: true
+
+ options:
+ packetsize: 100
+
+ host: demeter.yardstick-TC037
+ target: poseidon.yardstick-TC037
+
+ sla:
+ max_rtt: 10
+ action: monitor
+{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC037
+ target: poseidon.yardstick-TC037
+
+ runner:
+ type: Iteration
+ iterations: 2
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC037
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
new file mode 100644
index 000000000..59608e312
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc038.yaml
@@ -0,0 +1,85 @@
+---
+# Yardstick TC038 config file
+# Measure network throughput and packet loss using pktgen.
+# Different amounts of flows are tested with, from 2 up to 1001000.
+# All tests are run 10 times each. First 10 times with the least
+# amount of ports, then 10 times with the next amount of ports,
+# and so on until all packet sizes have been run with.
+#
+# During the measurements system load and network latency are
+# recorded/measured using ping and mpstat, respectively.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: demeter.yardstick-TC038
+-
+ type: CPUload
+ run_in_background: true
+
+ options:
+ interval: 1
+
+ host: poseidon.yardstick-TC038
+-
+ type: Ping
+ run_in_background: true
+
+ options:
+ packetsize: 100
+
+ host: demeter.yardstick-TC038
+ target: poseidon.yardstick-TC038
+
+ sla:
+ max_rtt: 10
+ action: monitor
+{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
+-
+ type: Pktgen
+ options:
+ packetsize: 64
+ number_of_ports: {{num_ports}}
+ duration: 20
+
+ host: demeter.yardstick-TC038
+ target: poseidon.yardstick-TC038
+
+ runner:
+ type: Iteration
+ iterations: 10
+ interval: 1
+
+ sla:
+ max_ppm: 1000
+ action: monitor
+{% endfor %}
+
+context:
+ name: yardstick-TC038
+ image: yardstick-trusty-server
+ flavor: yardstick-flavor
+ user: ec2-user
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ demeter:
+ floating_ip: true
+ placement: "pgrp1"
+ poseidon:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ test:
+ cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml
new file mode 100644
index 000000000..0a6dee656
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml
@@ -0,0 +1,22 @@
+---
+# Yardstick TC040 config file
+# Running Parser Yang-to-Tosca module as a tool, validating output against expected outcome
+
+schema: "yardstick:task:0.1"
+
+
+scenarios:
+-
+ type: Parser
+ options:
+ yangfile: /home/opnfv/repos/yardstick/samples/yang.yaml
+ toscafile: /home/opnfv/repos/yardstick//samples/tosca.yaml
+
+ runner:
+ type: Iteration
+ iterations: 1
+ interval: 1
+
+context:
+ type: Dummy
+
diff --git a/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml b/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml
index 8279d2378..04bac491f 100644
--- a/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_ericsson-pod1_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml
index f3002ff9d..c3e68150d 100644
--- a/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_ericsson-pod2_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml b/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml
index e883f560f..ee13e6d9d 100644
--- a/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_huawei-us-deploy-bare-1_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml
new file mode 100644
index 000000000..1bb241ed8
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod2_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD2 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml
new file mode 100644
index 000000000..2ffacb1d0
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod5_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD5 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml
new file mode 100644
index 000000000..792bba2b0
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod6_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD6 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml b/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml
new file mode 100644
index 000000000..f10a854d2
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_intel-pod8_daily.yaml
@@ -0,0 +1,18 @@
+---
+# INTEL POD8 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_intel_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml b/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml
new file mode 100644
index 000000000..baade6987
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_opnfv-jump-1_daily.yaml
@@ -0,0 +1,18 @@
+---
+# LF POD 1 daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_lf_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc001.yaml
+-
+ file_name: opnfv_yardstick_tc002.yaml
+-
+ file_name: opnfv_yardstick_tc005.yaml
+-
+ file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml b/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml
index 4dece13f2..57c95cf69 100644
--- a/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_opnfv-jump-2_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/opnfv/test_suites/opnfv_vTC_daily.yaml b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml
new file mode 100644
index 000000000..37738b423
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml
@@ -0,0 +1,16 @@
+---
+# ERICSSON POD1 VTC daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_vTC_daily"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc006.yaml
+-
+ file_name: opnfv_yardstick_tc007.yaml
+-
+ file_name: opnfv_yardstick_tc020.yaml
+-
+ file_name: opnfv_yardstick_tc021.yaml
diff --git a/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml
new file mode 100644
index 000000000..216648d6f
--- /dev/null
+++ b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml
@@ -0,0 +1,16 @@
+---
+# ERICSSON POD1 VTC weekly task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "opnfv_vTC_weekly"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc006.yaml
+-
+ file_name: opnfv_yardstick_tc007.yaml
+-
+ file_name: opnfv_yardstick_tc020.yaml
+-
+ file_name: opnfv_yardstick_tc021.yaml
diff --git a/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml b/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml
index d917c0fb2..8016b46b2 100644
--- a/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_zte-build-1_daily.yaml
@@ -11,4 +11,8 @@ test_cases:
-
file_name: opnfv_yardstick_tc002.yaml
-
+ file_name: opnfv_yardstick_tc005.yaml
+-
file_name: opnfv_yardstick_tc012.yaml
+-
+ file_name: opnfv_yardstick_tc037.yaml
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
new file mode 100644
index 000000000..340f94cb0
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+ def test__fun_execute_shell_command_successful(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ self.assertEqual(exitcode, 0)
+
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ self.assertEqual(exitcode, -1)
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
+class AttackerBaremetalTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.attacker_cfg = {
+ 'fault_type': 'bear-metal-down',
+ 'host': 'node1',
+ }
+
+ def test__attacker_baremetal_all_successful(self, mock_ssh):
+
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.inject_fault()
+ ins.recover()
+
+ def test__attacker_baremetal_check_failuer(self, mock_ssh):
+
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+ mock_ssh.SSH().execute.return_value = (0, "error check", '')
+ ins.setup()
+
+ def test__attacker_baremetal_recover_successful(self, mock_ssh):
+
+ self.attacker_cfg["jump_host"] = 'node1'
+ self.context["node1"]["pwd"] = "123456"
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "running", '')
+ ins.setup()
+ ins.recover()
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
new file mode 100644
index 000000000..13295273b
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
+
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.BaseMonitor')
+class MonitorMgrTestCase(unittest.TestCase):
+
+ def setUp(self):
+ config = {
+ 'monitor_type': 'openstack-api',
+ }
+
+ self.monitor_configs = []
+ self.monitor_configs.append(config)
+
+ def test__MonitorMgr_setup_successful(self, mock_monitor):
+ instance = basemonitor.MonitorMgr()
+ instance.init_monitors(self.monitor_configs, None)
+ instance.start_monitors()
+ instance.wait_monitors()
+
+ ret = instance.verify_SLA()
+
+class BaseMonitorTestCase(unittest.TestCase):
+
+ class MonitorSimple(basemonitor.BaseMonitor):
+ __monitor_type__ = "MonitorForTest"
+ def setup(self):
+ self.monitor_result = False
+
+ def monitor_func(self):
+ return self.monitor_result
+
+ def setUp(self):
+ self.monitor_cfg = {
+ 'monitor_type': 'MonitorForTest',
+ 'command_name': 'nova image-list',
+ 'monitor_time': 0.01,
+ 'sla': {'max_outage_time': 5}
+ }
+
+ def test__basemonitor_start_wait_successful(self):
+ ins = basemonitor.BaseMonitor(self.monitor_cfg, None)
+ ins.start_monitor()
+ ins.wait_monitor()
+
+
+ def test__basemonitor_all_successful(self):
+ ins = self.MonitorSimple(self.monitor_cfg, None)
+ ins.setup()
+ ins.run()
+ ins.verify_SLA()
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.multiprocessing')
+ def test__basemonitor_func_false(self, mock_multiprocess):
+ ins = self.MonitorSimple(self.monitor_cfg, None)
+ ins.setup()
+ mock_multiprocess.Event().is_set.return_value = False
+ ins.run()
+ ins.verify_SLA()
+
+ def test__basemonitor_getmonitorcls_successfule(self):
+ cls = None
+ try:
+ cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
+ except Exception:
+ pass
+ self.assertIsNone(cls)
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor.py b/tests/unit/benchmark/scenarios/availability/test_monitor.py
deleted file mode 100644
index 793871ca3..000000000
--- a/tests/unit/benchmark/scenarios/availability/test_monitor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Unittest for yardstick.benchmark.scenarios.availability.monitor
-
-import mock
-import unittest
-
-from yardstick.benchmark.scenarios.availability import monitor
-
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.subprocess')
-class MonitorTestCase(unittest.TestCase):
-
- def test__fun_execute_shell_command_successful(self, mock_subprocess):
- cmd = "env"
- mock_subprocess.check_output.return_value = (0, 'unittest')
- exitcode, output = monitor._execute_shell_command(cmd)
- self.assertEqual(exitcode, 0)
-
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
- cmd = "env"
- mock_subprocess.check_output.side_effect = RuntimeError
- exitcode, output = monitor._execute_shell_command(cmd)
- self.assertEqual(exitcode, -1)
-
- def test__fun_monitor_process_successful(self, mock_subprocess):
- config = {
- 'monitor_cmd':'env',
- 'duration':0
- }
- mock_queue = mock.Mock()
- mock_event = mock.Mock()
-
- mock_subprocess.check_output.return_value = (0, 'unittest')
- monitor._monitor_process(config, mock_queue, mock_event)
-
- def test__fun_monitor_process_fail_cmd_execute_error(self, mock_subprocess):
- config = {
- 'monitor_cmd':'env',
- 'duration':0
- }
- mock_queue = mock.Mock()
- mock_event = mock.Mock()
-
- mock_subprocess.check_output.side_effect = RuntimeError
- monitor._monitor_process(config, mock_queue, mock_event)
-
- def test__fun_monitor_process_fail_no_monitor_cmd(self, mock_subprocess):
- config = {
- 'duration':0
- }
- mock_queue = mock.Mock()
- mock_event = mock.Mock()
-
- mock_subprocess.check_output.return_value = (-1, 'unittest')
- monitor._monitor_process(config, mock_queue, mock_event)
-
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.multiprocessing')
- def test_monitor_all_successful(self, mock_multip, mock_subprocess):
- config = {
- 'monitor_cmd':'env',
- 'duration':0
- }
- p = monitor.Monitor()
- p.setup(config)
- mock_multip.Queue().get.return_value = 'started'
- p.start()
-
- result = "monitor unitest"
- mock_multip.Queue().get.return_value = result
- p.stop()
-
- ret = p.get_result()
-
- self.assertEqual(result, ret)
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
new file mode 100644
index 000000000..c8cda7dc7
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_command
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class ExecuteShellTestCase(unittest.TestCase):
+
+ def test__fun_execute_shell_command_successful(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, output = monitor_command._execute_shell_command(cmd)
+ self.assertEqual(exitcode, 0)
+
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ cmd = "env"
+ mock_subprocess.check_output.side_effect = RuntimeError
+ exitcode, output = monitor_command._execute_shell_command(cmd)
+ self.assertEqual(exitcode, -1)
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+class MonitorOpenstackCmdTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.config = {
+ 'monitor_type': 'openstack-api',
+ 'command_name': 'nova image-list',
+ 'monitor_time': 1,
+ 'sla': {'max_outage_time': 5}
+ }
+
+
+ def test__monitor_command_monitor_func_successful(self, mock_subprocess):
+
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance.setup()
+ mock_subprocess.check_output.return_value = (0, 'unittest')
+ ret = instance.monitor_func()
+ self.assertEqual(ret, True)
+ instance._result = {"outage_time": 0}
+ instance.verify_SLA()
+
+ def test__monitor_command_monitor_func_failure(self, mock_subprocess):
+ mock_subprocess.check_output.return_value = (1, 'unittest')
+ instance = monitor_command.MonitorOpenstackCmd(self.config, None)
+ instance.setup()
+ mock_subprocess.check_output.side_effect = RuntimeError
+ ret = instance.monitor_func()
+ self.assertEqual(ret, False)
+ instance._result = {"outage_time": 10}
+ instance.verify_SLA()
+
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh')
+ def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess):
+
+ self.config["host"] = "node1"
+ instance = monitor_command.MonitorOpenstackCmd(self.config, self.context)
+ instance.setup()
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
new file mode 100644
index 000000000..dda104b4e
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_process
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.availability.monitor import monitor_process
+
+@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh')
+class MonitorProcessTestCase(unittest.TestCase):
+
+ def setUp(self):
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.monitor_cfg = {
+ 'monitor_type': 'process',
+ 'process_name': 'nova-api',
+ 'host': "node1",
+ 'monitor_time': 1,
+ 'sla': {'max_recover_time': 5}
+ }
+
+ def test__monitor_process_all_successful(self, mock_ssh):
+
+ ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "1", '')
+ ins.setup()
+ ins.monitor_func()
+ ins._result = {"outage_time": 0}
+ ins.verify_SLA()
+
+    def test__monitor_process_down_failure(self, mock_ssh):
+
+ ins = monitor_process.MonitorProcess(self.monitor_cfg, self.context)
+
+ mock_ssh.SSH().execute.return_value = (0, "0", '')
+ ins.setup()
+ ins.monitor_func()
+ ins._result = {"outage_time": 10}
+ ins.verify_SLA()
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 32adf3208..6e58b6e7a 100644
--- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -16,7 +16,7 @@ import unittest
from yardstick.benchmark.scenarios.availability import serviceha
-@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.monitor')
+@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
class ServicehaTestCase(unittest.TestCase):
@@ -53,15 +53,11 @@ class ServicehaTestCase(unittest.TestCase):
p.setup()
self.assertEqual(p.setup_done, True)
-
- result = {}
- result["outage_time"] = 0
- mock_monitor.Monitor().get_result.return_value = result
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
ret = {}
p.run(ret)
- self.assertEqual(ret, result)
p.teardown()
-
+"""
def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
p = serviceha.ServiceHA(self.args, self.ctx)
@@ -74,3 +70,4 @@ class ServicehaTestCase(unittest.TestCase):
ret = {}
self.assertRaises(AssertionError, p.run, ret)
+"""
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index a87b39142..807429025 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -22,41 +22,65 @@ from yardstick.benchmark.scenarios.compute import cyclictest
class CyclictestTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.scenario_cfg = {
+ "host": "kvm.LF",
+ "setup_options": {
+ "rpm_dir": "/opt/rpm",
+ "host_setup_seqs": [
+ "host-setup0.sh",
+ "host-setup1.sh",
+ "host-run-qemu.sh"
+ ],
+ "script_dir": "/opt/scripts",
+ "image_dir": "/opt/image",
+ "guest_setup_seqs": [
+ "guest-setup0.sh",
+ "guest-setup1.sh"
+ ]
+ },
+ "sla": {
+ "action": "monitor",
+ "max_min_latency": 50,
+ "max_avg_latency": 100,
+ "max_max_latency": 1000
+ },
+ "options": {
+ "priority": 99,
+ "threads": 1,
+ "loops": 1000,
+ "affinity": 1,
+ "interval": 1000,
+ "histogram": 90
+ }
+ }
+ self.context_cfg = {
"host": {
- "ip": "192.168.50.28",
- "user": "root",
- "key_filename": "mykey.key"
+ "ip": "10.229.43.154",
+ "key_filename": "/yardstick/resources/files/yardstick_key",
+ "role": "BareMetal",
+ "name": "kvm.LF",
+ "user": "root"
}
}
def test_cyclictest_successful_setup(self, mock_ssh):
- c = cyclictest.Cyclictest({}, self.ctx)
- c.setup()
-
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
- self.assertIsNotNone(c.client)
+
+ c.setup()
+ self.assertIsNotNone(c.guest)
+ self.assertIsNotNone(c.host)
self.assertEqual(c.setup_done, True)
def test_cyclictest_successful_no_sla(self, mock_ssh):
-
- options = {
- "affinity": 2,
- "interval": 100,
- "priority": 88,
- "loops": 10000,
- "threads": 2,
- "histogram": 80
- }
- args = {
- "options": options,
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.pop("sla", None)
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
-
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -65,29 +89,19 @@ class CyclictestTestCase(unittest.TestCase):
self.assertEqual(result, expected_result)
def test_cyclictest_successful_sla(self, mock_ssh):
-
- options = {
- "affinity": 2,
- "interval": 100,
- "priority": 88,
- "loops": 10000,
- "threads": 2,
- "histogram": 80
- }
- sla = {
- "max_min_latency": 100,
- "max_avg_latency": 500,
- "max_max_latency": 1000,
- }
- args = {
- "options": options,
- "sla": sla
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {
+ "action": "monitor",
+ "max_min_latency": 100,
+ "max_avg_latency": 500,
+ "max_max_latency": 1000
+ }
+ })
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
-
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -97,14 +111,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
- args = {
- "options": {},
- "sla": {"max_min_latency": 10}
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_min_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -112,14 +125,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
- args = {
- "options": {},
- "sla": {"max_avg_latency": 10}
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_avg_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -127,14 +139,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
- args = {
- "options": {},
- "sla": {"max_max_latency": 10}
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
@@ -142,27 +153,13 @@ class CyclictestTestCase(unittest.TestCase):
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
- options = {
- "affinity": 2,
- "interval": 100,
- "priority": 88,
- "loops": 10000,
- "threads": 2,
- "histogram": 80
- }
- sla = {
- "max_min_latency": 100,
- "max_avg_latency": 500,
- "max_max_latency": 1000,
- }
- args = {
- "options": options,
- "sla": sla
- }
- c = cyclictest.Cyclictest(args, self.ctx)
result = {}
+ self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
+ c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ c.setup()
- c.server = mock_ssh.SSH()
+ c.guest = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, c.run, result)
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/tests/unit/benchmark/scenarios/networking/test_ping6.py
new file mode 100644
index 000000000..662b85c30
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.ping6.Ping6
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import ping6
+
+
+class PingTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ctx = {
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'key_filename': "mykey.key",
+ 'password': "root"
+ },
+ }
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+    def test_ping_successful_setup(self, mock_ssh):
+
+ p = ping6.Ping6({}, self.ctx)
+ mock_ssh.SSH().execute.return_value = (0, '0', '')
+ p.setup()
+
+ self.assertEqual(p.setup_done, True)
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_successful_no_sla(self, mock_ssh):
+
+ result = {}
+
+ p = ping6.Ping6({}, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, '100', '')
+ p.run(result)
+ self.assertEqual(result, {'rtt': 100.0})
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_successful_sla(self, mock_ssh):
+
+ args = {
+ 'sla': {'max_rtt': 150}
+ }
+ result = {}
+
+ p = ping6.Ping6(args, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, '100', '')
+ p.run(result)
+ self.assertEqual(result, {'rtt': 100.0})
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_unsuccessful_sla(self, mock_ssh):
+
+ args = {
+ 'options': {'packetsize': 200},
+ 'sla': {'max_rtt': 50}
+ }
+ result = {}
+
+ p = ping6.Ping6(args, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (0, '100', '')
+ self.assertRaises(AssertionError, p.run, result)
+
+ @mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
+ def test_ping_unsuccessful_script_error(self, mock_ssh):
+
+ args = {
+ 'options': {'packetsize': 200},
+ 'sla': {'max_rtt': 50}
+ }
+ result = {}
+
+ p = ping6.Ping6(args, self.ctx)
+ p.client = mock_ssh.SSH()
+ mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+ self.assertRaises(RuntimeError, p.run, result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
new file mode 100644
index 000000000..418dd39e6
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation.VtcInstantiationValidation
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation
+
+
+class VtcInstantiationValidationTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+
+ self.vt = vtc_instantiation_validation.VtcInstantiationValidation(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
new file mode 100644
index 000000000..e0a46241c
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation_noisy
+
+
+class VtcInstantiationValidationNoisyTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+ scenario['options']['num_of_neighbours'] = '1'
+ scenario['options']['amount_of_ram'] = '1G'
+ scenario['options']['number_of_cores'] = '1'
+
+ self.vt = vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
new file mode 100644
index 000000000..ecdf555d2
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput.VtcThroughput
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput
+
+
+class VtcThroughputTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+
+ self.vt = vtc_throughput.VtcThroughput(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
new file mode 100644
index 000000000..98957b1de
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput_noisy.VtcThroughputNoisy
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vtc_throughput_noisy
+
+
+class VtcThroughputNoisyTestCase(unittest.TestCase):
+
+ def setUp(self):
+ scenario = dict()
+ scenario['options'] = dict()
+ scenario['options']['default_net_name'] = ''
+ scenario['options']['default_subnet_name'] = ''
+ scenario['options']['vlan_net_1_name'] = ''
+ scenario['options']['vlan_subnet_1_name'] = ''
+ scenario['options']['vlan_net_2_name'] = ''
+ scenario['options']['vlan_subnet_2_name'] = ''
+ scenario['options']['vnic_type'] = ''
+ scenario['options']['vtc_flavor'] = ''
+ scenario['options']['packet_size'] = ''
+ scenario['options']['vlan_sender'] = ''
+ scenario['options']['vlan_receiver'] = ''
+ scenario['options']['num_of_neighbours'] = '1'
+ scenario['options']['amount_of_ram'] = '1G'
+ scenario['options']['number_of_cores'] = '1'
+
+ self.vt = vtc_throughput_noisy.VtcThroughputNoisy(scenario, '')
+
+ def test_run_for_success(self):
+ result = {}
+ self.vt.run(result)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/scenarios/parser/__init__.py b/tests/unit/benchmark/scenarios/parser/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/__init__.py
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/tests/unit/benchmark/scenarios/parser/test_parser.py
new file mode 100644
index 000000000..d11a6d5c8
--- /dev/null
+++ b/tests/unit/benchmark/scenarios/parser/test_parser.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.parser.Parser
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.parser import parser
+
+@mock.patch('yardstick.benchmark.scenarios.parser.parser.subprocess')
+class ParserTestCase(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def test_parser_successful_setup(self, mock_subprocess):
+
+ p = parser.Parser({}, {})
+ mock_subprocess.call().return_value = 0
+ p.setup()
+ self.assertEqual(p.setup_done, True)
+
+ def test_parser_successful(self, mock_subprocess):
+ args = {
+ 'options': {'yangfile':'/root/yardstick/samples/yang.yaml',
+ 'toscafile':'/root/yardstick/samples/tosca.yaml'},
+ }
+ p = parser.Parser(args, {})
+ result = {}
+ mock_subprocess.call().return_value = 0
+ sample_output = '{"yangtotosca": "success"}'
+
+ p.run(result)
+ expected_result = json.loads(sample_output)
+
+ def test_parser_teardown_successful(self, mock_subprocess):
+
+ p = parser.Parser({}, {})
+ mock_subprocess.call().return_value = 0
+ p.teardown()
+ self.assertEqual(p.teardown_done, True)
+
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/dispatcher/__init__.py b/tests/unit/dispatcher/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/dispatcher/__init__.py
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
new file mode 100644
index 000000000..5553c86a9
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.dispatcher.influxdb
+
+import mock
+import unittest
+
+from yardstick.dispatcher.influxdb import InfluxdbDispatcher
+
+class InfluxdbDispatcherTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.data1 = {
+ "runner_id": 8921,
+ "context_cfg": {
+ "host": {
+ "ip": "10.229.43.154",
+ "key_filename": "/root/yardstick/yardstick/resources/files/yardstick_key",
+ "name": "kvm.LF",
+ "user": "root"
+ },
+ "target": {
+ "ipaddr": "10.229.44.134"
+ }
+ },
+ "scenario_cfg": {
+ "runner": {
+ "interval": 1,
+ "object": "yardstick.benchmark.scenarios.networking.ping.Ping",
+ "output_filename": "/tmp/yardstick.out",
+ "runner_id": 8921,
+ "duration": 10,
+ "type": "Duration"
+ },
+ "host": "kvm.LF",
+ "type": "Ping",
+ "target": "10.229.44.134",
+ "sla": {
+ "action": "monitor",
+ "max_rtt": 10
+ },
+ "tc": "ping",
+ "task_id": "ea958583-c91e-461a-af14-2a7f9d7f79e7"
+ }
+ }
+ self.data2 = {
+ "benchmark": {
+ "timestamp": "1451478117.883505",
+ "errors": "",
+ "data": {
+ "rtt": 0.613
+ },
+ "sequence": 1
+ },
+ "runner_id": 8921
+ }
+ self.data3 ={
+ "benchmark": {
+ "data": {
+ "mpstat": {
+ "cpu0": {
+ "%sys": "0.00",
+ "%idle": "99.00"
+ },
+ "loadavg": [
+ "1.09",
+ "0.29"
+ ]
+ },
+ "rtt": "1.03"
+ }
+ }
+ }
+
+ def test_record_result_data_no_target(self):
+ influxdb = InfluxdbDispatcher(None)
+ influxdb.target = ''
+ self.assertEqual(influxdb.record_result_data(self.data1), -1)
+
+ def test_record_result_data_no_case_name(self):
+ influxdb = InfluxdbDispatcher(None)
+ self.assertEqual(influxdb.record_result_data(self.data2), -1)
+
+ @mock.patch('yardstick.dispatcher.influxdb.requests')
+ def test_record_result_data(self, mock_requests):
+ type(mock_requests.post.return_value).status_code = 204
+ influxdb = InfluxdbDispatcher(None)
+ self.assertEqual(influxdb.record_result_data(self.data1), 0)
+ self.assertEqual(influxdb.record_result_data(self.data2), 0)
+ self.assertEqual(influxdb.flush_result_data(), 0)
+
+ def test__dict_key_flatten(self):
+ line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
+ influxdb = InfluxdbDispatcher(None)
+ flattened_data = influxdb._dict_key_flatten(self.data3['benchmark']['data'])
+ result = ",".join([k+"="+v for k, v in flattened_data.items()])
+ self.assertEqual(result, line)
+
+ def test__get_nano_timestamp(self):
+ influxdb = InfluxdbDispatcher(None)
+ results = {'benchmark': {'timestamp': '1451461248.925574'}}
+ self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+ @mock.patch('yardstick.dispatcher.influxdb.time')
+ def test__get_nano_timestamp_except(self, mock_time):
+ results = {}
+ influxdb = InfluxdbDispatcher(None)
+ mock_time.time.return_value = 1451461248.925574
+ self.assertEqual(influxdb._get_nano_timestamp(results), '1451461248925574144')
+
+def main():
+ unittest.main()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/dispatcher/test_influxdb_line_protocol.py b/tests/unit/dispatcher/test_influxdb_line_protocol.py
new file mode 100644
index 000000000..cb05bf4d2
--- /dev/null
+++ b/tests/unit/dispatcher/test_influxdb_line_protocol.py
@@ -0,0 +1,55 @@
+# Unittest for yardstick.dispatcher.influxdb_line_protocol
+
+# yardstick comment: this file is a modified copy of
+# influxdb-python/influxdb/tests/test_line_protocol.py
+
+import unittest
+from yardstick.dispatcher.influxdb_line_protocol import make_lines
+
+
+class TestLineProtocol(unittest.TestCase):
+
+ def test_make_lines(self):
+ data = {
+ "tags": {
+ "empty_tag": "",
+ "none_tag": None,
+ "integer_tag": 2,
+ "string_tag": "hello"
+ },
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "string_val": "hello!",
+ "int_val": 1,
+ "float_val": 1.1,
+ "none_field": None,
+ "bool_val": True,
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ make_lines(data),
+ 'test,integer_tag=2,string_tag=hello '
+ 'bool_val=True,float_val=1.1,int_val=1i,string_val="hello!"\n'
+ )
+
+ def test_string_val_newline(self):
+ data = {
+ "points": [
+ {
+ "measurement": "m1",
+ "fields": {
+ "multi_line": "line1\nline1\nline3"
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ make_lines(data),
+ 'm1 multi_line="line1\\nline1\\nline3"\n'
+ )
diff --git a/yardstick/__init__.py b/yardstick/__init__.py
index 0c25416bd..7114f8008 100644
--- a/yardstick/__init__.py
+++ b/yardstick/__init__.py
@@ -16,3 +16,10 @@ logging.basicConfig(
'%(levelname)s %(message)s', # noqa
datefmt='%m/%d/%y %H:%M:%S')
logging.getLogger(__name__).setLevel(logging.INFO)
+
+# Hack to be able to run apexlake unit tests
+# without having to install apexlake.
+import sys
+import os
+import yardstick.vTC.apexlake as apexlake
+sys.path.append(os.path.dirname(apexlake.__file__))
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 7bd430bc5..8c514d250 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -129,8 +129,27 @@ class HeatContext(Context):
scheduler_hints = {}
for pg in server.placement_groups:
update_scheduler_hints(scheduler_hints, added_servers, pg)
- server.add_to_template(template, self.networks, scheduler_hints)
- added_servers.append(server.stack_name)
+        # workaround for openstack nova bug, check JIRA: YARDSTICK-200
+ # for details
+ if len(availability_servers) == 2:
+ if len(scheduler_hints["different_host"]) == 0:
+ scheduler_hints.pop("different_host", None)
+ server.add_to_template(template,
+ self.networks,
+ scheduler_hints)
+ added_servers.append(server.stack_name)
+ else:
+ scheduler_hints["different_host"] = \
+ scheduler_hints["different_host"][0]
+ server.add_to_template(template,
+ self.networks,
+ scheduler_hints)
+ added_servers.append(server.stack_name)
+ else:
+ server.add_to_template(template,
+ self.networks,
+ scheduler_hints)
+ added_servers.append(server.stack_name)
# create list of servers with affinity policy
affinity_servers = []
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index 04c8e7ca3..54ee076f4 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -83,12 +83,5 @@ class NodeContext(Context):
sys.exit(-1)
node = nodes[0]
-
- server = {
- "name": attr_name,
- "ip": node["ip"],
- "user": node["user"],
- "key_filename": node["key_filename"]
- }
-
- return server
+ node["name"] = attr_name
+ return node
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index e38ed3749..c24957b1a 100755..100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -30,12 +30,15 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
interval = runner_cfg.get("interval", 1)
iterations = runner_cfg.get("iterations", 1)
+ run_step = runner_cfg.get("run_step", "setup,run,teardown")
LOG.info("worker START, iterations %d times, class %s", iterations, cls)
runner_cfg['runner_id'] = os.getpid()
benchmark = cls(scenario_cfg, context_cfg)
- benchmark.setup()
+ if "setup" in run_step:
+ benchmark.setup()
+
method = getattr(benchmark, method_name)
queue.put({'runner_id': runner_cfg['runner_id'],
@@ -45,53 +48,55 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
-
- while True:
-
- LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
-
- data = {}
- errors = ""
-
- try:
- method(data)
- except AssertionError as assertion:
- # SLA validation failed in scenario, determine what to do now
- if sla_action == "assert":
- raise
- elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s" % assertion.args)
- errors = assertion.args
- except Exception as e:
- errors = traceback.format_exc()
- LOG.exception(e)
-
- time.sleep(interval)
-
- benchmark_output = {
- 'timestamp': time.time(),
- 'sequence': sequence,
- 'data': data,
- 'errors': errors
- }
-
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
-
- LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
- {"runner": runner_cfg["runner_id"], "sequence": sequence})
-
- sequence += 1
-
- if (errors and sla_action is None) or \
- (sequence > iterations or aborted.is_set()):
- LOG.info("worker END")
- break
-
- benchmark.teardown()
+ if "run" in run_step:
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ method(data)
+ except AssertionError as assertion:
+ # SLA validation failed in scenario, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s" % assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ time.sleep(interval)
+
+ benchmark_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ record = {'runner_id': runner_cfg['runner_id'],
+ 'benchmark': benchmark_output}
+
+ queue.put(record)
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or \
+ (sequence > iterations or aborted.is_set()):
+ LOG.info("worker END")
+ break
+ if "teardown" in run_step:
+ benchmark.teardown()
class IterationRunner(base.Runner):
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
new file mode 100644
index 000000000..b35869d07
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
@@ -0,0 +1,129 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import traceback
+import subprocess
+import yardstick.ssh as ssh
+from baseattacker import BaseAttacker
+
+LOG = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command, stdin=None):
+ '''execute shell script with error handling'''
+ exitcode = 0
+ output = []
+ try:
+ output = subprocess.check_output(command, stdin=stdin, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ LOG.error("exec command '%s' error:\n " % command)
+ LOG.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+class BaremetalAttacker(BaseAttacker):
+
+ __attacker_type__ = 'bare-metal-down'
+
+ def setup(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+ host = self._context.get(self._config['host'], None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+ self.host_ip = ip
+
+ self.ipmi_ip = host.get("ipmi_ip", None)
+ self.ipmi_user = host.get("ipmi_user", "root")
+ self.ipmi_pwd = host.get("ipmi_pwd", None)
+
+ self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
+ self.check_script = self.get_script_fullpath(
+ self.fault_cfg['check_script'])
+ self.recovery_script = self.get_script_fullpath(
+ self.fault_cfg['recovery_script'])
+
+ if self.check():
+ self.setup_done = True
+
+ def check(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0} -W 10".format(self.host_ip),
+ stdin=open(self.check_script, "r"))
+
+ LOG.debug("check ret: %s out:%s err:%s" %
+ (exit_status, stdout, stderr))
+ if not stdout or "running" not in stdout:
+ LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+ return False
+
+ return True
+
+ def inject_fault(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "shutdown -h now")
+ LOG.debug("inject fault ret: %s out:%s err:%s" %
+ (exit_status, stdout, stderr))
+ if not exit_status:
+ LOG.info("inject fault success")
+
+ def recover(self):
+ jump_host_name = self._config.get("jump_host", None)
+ self.jump_connection = None
+ if jump_host_name is not None:
+ host = self._context.get(jump_host_name, None)
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ pwd = host.get("pwd", None)
+
+ LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+ self.jump_connection = ssh.SSH(user, ip, password=pwd)
+ self.jump_connection.wait(timeout=600)
+ LOG.debug("ssh jump host success!")
+
+ if self.jump_connection is not None:
+ exit_status, stdout, stderr = self.jump_connection.execute(
+ "/bin/bash -s {0} {1} {2} {3}".format(
+ self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
+ stdin=open(self.recovery_script, "r"))
+ else:
+ exit_status, stdout = _execute_shell_command(
+ "/bin/bash -s {0} {1} {2} {3}".format(
+ self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
+ stdin=open(self.recovery_script, "r"))
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ attacker_cfg = {
+ 'fault_type': 'bear-metal-down',
+ 'host': 'node1',
+ }
+ ins = BaremetalAttacker(attacker_cfg, context)
+ ins.setup()
+ ins.inject_fault()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml
deleted file mode 100644
index 44f06038b..000000000
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_conf.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# sample config file for ha test
-#
-schema: "yardstick:task:0.1"
-
-kill-process:
- inject_script: scripts/stop_service.bash
- recovery_script: scripts/start_service.bash
- check_script: scripts/check_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index ddaf09969..a1c6999e5 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -16,7 +16,7 @@ import yardstick.common.utils as utils
LOG = logging.getLogger(__name__)
attacker_conf_path = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.availability.attacker",
+ "yardstick.benchmark.scenarios.availability",
"attacker_conf.yaml")
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
new file mode 100644
index 000000000..3f6c2aa8f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -0,0 +1,13 @@
+---
+# sample config file for ha test
+#
+schema: "yardstick:task:0.1"
+
+kill-process:
+ check_script: ha_tools/check_process_python.bash
+ inject_script: ha_tools/fault_process_kill.bash
+ recovery_script: ha_tools/start_service.bash
+
+bare-metal-down:
+ check_script: ha_tools/check_host_ping.bash
+ recovery_script: ha_tools/ipmi_power.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash
new file mode 100755
index 000000000..0f160e2a8
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_host_ping.bash
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check whether the host is running
+
+set -e
+
+host_ip=$1
+shift
+options="$@"
+
+ping -c 1 $options $host_ip | grep ttl | wc -l
+EXIT_CODE=$?
+
+if [ $EXIT_CODE -ne 0 ]; then
+ exit 1
+else
+ echo "running"
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash
new file mode 100755
index 000000000..83d7e36c1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_openstack_cmd.bash
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of an OpenStack command
+
+set -e
+
+cmd=$1
+
+source /root/openrc
+
+exec $cmd
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash
new file mode 100755
index 000000000..88baed7d9
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_process_python.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check whether a python process with the given name is running
+
+set -e
+
+process_name=$1
+
+ps aux | grep -e .*python.*$process_name.* | grep -v grep | wc -l
diff --git a/yardstick/benchmark/scenarios/availability/attacker/scripts/check_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash
index cc898a859..cc898a859 100755
--- a/yardstick/benchmark/scenarios/availability/attacker/scripts/check_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
new file mode 100755
index 000000000..d0e2f1683
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop a process by its process name
+
+set -e
+
+process_name=$1
+
+killall -9 $process_name
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash b/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash
new file mode 100755
index 000000000..ea621facd
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/ipmi_power.bash
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Control the power state of a baremetal host via ipmitool
+
+set -e
+
+ipmi_ip=$1
+ipmi_user=$2
+ipmi_pwd=$3
+
+action=$4
+ipmitool -I lanplus -H $ipmi_ip -U $ipmi_user -P $ipmi_pwd power $action
diff --git a/yardstick/benchmark/scenarios/availability/attacker/scripts/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
index c1bf8b7eb..c1bf8b7eb 100755
--- a/yardstick/benchmark/scenarios/availability/attacker/scripts/start_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/attacker/scripts/stop_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash
index a8901784e..a8901784e 100755
--- a/yardstick/benchmark/scenarios/availability/attacker/scripts/stop_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/stop_service.bash
diff --git a/yardstick/benchmark/scenarios/availability/monitor.py b/yardstick/benchmark/scenarios/availability/monitor.py
deleted file mode 100755
index 3193d3304..000000000
--- a/yardstick/benchmark/scenarios/availability/monitor.py
+++ /dev/null
@@ -1,114 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import logging
-import multiprocessing
-import subprocess
-import traceback
-import time
-
-LOG = logging.getLogger(__name__)
-
-
-def _execute_shell_command(command):
- '''execute shell script with error handling'''
- exitcode = 0
- output = []
- try:
- output = subprocess.check_output(command, shell=True)
- except Exception:
- exitcode = -1
- output = traceback.format_exc()
- LOG.error("exec command '%s' error:\n " % command)
- LOG.error(traceback.format_exc())
-
- return exitcode, output
-
-
-def _monitor_process(config, queue, event):
-
- total_time = 0
- outage_time = 0
- total_count = 0
- outage_count = 0
- first_outage = 0
- last_outage = 0
-
- wait_time = config.get("duration", 0)
- cmd = config.get("monitor_cmd", None)
- if cmd is None:
- LOG.error("There are no monitor cmd!")
- return
-
- queue.put("started")
-
- begin_time = time.time()
- while True:
-
- total_count = total_count + 1
-
- one_check_begin_time = time.time()
- exit_status, stdout = _execute_shell_command(cmd)
- one_check_end_time = time.time()
-
- LOG.info("the exit_status:%s stdout:%s" % (exit_status, stdout))
- if exit_status:
- outage_count = outage_count + 1
-
- outage_time = outage_time + (
- one_check_end_time - one_check_begin_time)
-
- if not first_outage:
- first_outage = one_check_begin_time
-
- last_outage = one_check_end_time
-
- if event.is_set():
- LOG.debug("the monitor process stop")
- break
-
- if wait_time > 0:
- time.sleep(wait_time)
-
- end_time = time.time()
- total_time = end_time - begin_time
-
- queue.put({"total_time": total_time,
- "outage_time": last_outage-first_outage,
- "total_count": total_count,
- "outage_count": outage_count})
-
-
-class Monitor:
-
- def __init__(self):
- self._result = []
- self._monitor_process = []
-
- def setup(self, config):
- self._config = config
-
- def start(self):
- self._queue = multiprocessing.Queue()
- self._event = multiprocessing.Event()
- self._monitor_process = multiprocessing.Process(
- target=_monitor_process, name="Monitor",
- args=(self._config, self._queue, self._event))
-
- self._monitor_process.start()
- ret = self._queue.get()
- if ret == "started":
- LOG.debug("monitor process started!")
-
- def stop(self):
- self._event.set()
- self._result = self._queue.get()
- LOG.debug("stop the monitor process. the result:%s" % self._result)
-
- def get_result(self):
- return self._result
diff --git a/yardstick/benchmark/scenarios/availability/monitor/__init__.py b/yardstick/benchmark/scenarios/availability/monitor/__init__.py
new file mode 100755
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/__init__.py
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
new file mode 100644
index 000000000..983c3a3ac
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import multiprocessing
+import time
+import os
+import yardstick.common.utils as utils
+
+LOG = logging.getLogger(__name__)
+
+monitor_conf_path = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.availability",
+ "monitor_conf.yaml")
+
+
+class MonitorMgr(object):
+ """docstring for MonitorMgr"""
+ def __init__(self):
+ self._monitor_list = []
+
+ def init_monitors(self, monitor_cfgs, context):
+ LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+
+ for monitor_cfg in monitor_cfgs:
+ monitor_type = monitor_cfg["monitor_type"]
+ monitor_cls = BaseMonitor.get_monitor_cls(monitor_type)
+ monitor_ins = monitor_cls(monitor_cfg, context)
+
+ self._monitor_list.append(monitor_ins)
+
+ def start_monitors(self):
+ for _monotor_instace in self._monitor_list:
+ _monotor_instace.start_monitor()
+
+ def wait_monitors(self):
+ for monitor in self._monitor_list:
+ monitor.wait_monitor()
+
+ def verify_SLA(self):
+ sla_pass = True
+ for monitor in self._monitor_list:
+ sla_pass = sla_pass & monitor.verify_SLA()
+ return sla_pass
+
+
+class BaseMonitor(multiprocessing.Process):
+ """docstring for BaseMonitor"""
+
+ def __init__(self, config, context):
+ multiprocessing.Process.__init__(self)
+ self._config = config
+ self._context = context
+ self._queue = multiprocessing.Queue()
+ self._event = multiprocessing.Event()
+ self.setup_done = False
+
+ @staticmethod
+ def get_monitor_cls(monitor_type):
+ '''return monitor class of specified type'''
+
+ for monitor in utils.itersubclasses(BaseMonitor):
+ if monitor_type == monitor.__monitor_type__:
+ return monitor
+ raise RuntimeError("No such monitor_type %s" % monitor_type)
+
+ def get_script_fullpath(self, path):
+ base_path = os.path.dirname(monitor_conf_path)
+ return os.path.join(base_path, path)
+
+ def run(self):
+ LOG.debug("config:%s context:%s" % (self._config, self._context))
+
+ self.setup()
+ monitor_time = self._config.get("monitor_time", 0)
+
+ total_time = 0
+ outage_time = 0
+ total_count = 0
+ outage_count = 0
+ first_outage = 0
+ last_outage = 0
+
+ begin_time = time.time()
+ while True:
+ total_count = total_count + 1
+
+ one_check_begin_time = time.time()
+ exit_status = self.monitor_func()
+ one_check_end_time = time.time()
+
+ if exit_status is False:
+ outage_count = outage_count + 1
+
+ outage_time = outage_time + (
+ one_check_end_time - one_check_begin_time)
+
+ if not first_outage:
+ first_outage = one_check_begin_time
+
+ last_outage = one_check_end_time
+
+ if self._event.is_set():
+ LOG.debug("the monitor process stop")
+ break
+
+ if one_check_end_time - begin_time > monitor_time:
+ LOG.debug("the monitor max_time finished and exit!")
+ break
+
+ end_time = time.time()
+ total_time = end_time - begin_time
+
+ self._queue.put({"total_time": total_time,
+ "outage_time": last_outage-first_outage,
+ "total_count": total_count,
+ "outage_count": outage_count})
+
+ def start_monitor(self):
+ self.start()
+
+ def wait_monitor(self):
+ self.join()
+ self._result = self._queue.get()
+ LOG.debug("the monitor result:%s" % self._result)
+
+ def setup(self): # pragma: no cover
+ pass
+
+ def monitor_func(self): # pragma: no cover
+ pass
+
+ def verify_SLA(self):
+ pass
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
new file mode 100644
index 000000000..c285024e1
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -0,0 +1,108 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import subprocess
+import traceback
+import yardstick.ssh as ssh
+import basemonitor as basemonitor
+
+LOG = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command):
+ '''execute shell script with error handling'''
+ exitcode = 0
+ output = []
+ try:
+ output = subprocess.check_output(command, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ LOG.error("exec command '%s' error:\n " % command)
+ LOG.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+class MonitorOpenstackCmd(basemonitor.BaseMonitor):
+ """docstring for MonitorApi"""
+
+ __monitor_type__ = "openstack-cmd"
+
+ def setup(self):
+ self.connection = None
+ node_name = self._config.get("host", None)
+ if node_name:
+ host = self._context[node_name]
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+
+ self.check_script = self.get_script_fullpath(
+ "ha_tools/check_openstack_cmd.bash")
+
+ self.cmd = self._config["command_name"]
+
+ def monitor_func(self):
+ exit_status = 0
+ if self.connection:
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/bash -s '{0}'".format(self.cmd),
+ stdin=open(self.check_script, "r"))
+
+ LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
+ (exit_status, stdout, stderr))
+ else:
+ exit_status, stdout = _execute_shell_command(self.cmd)
+ if exit_status:
+ return False
+ return True
+
+ def verify_SLA(self):
+ outage_time = self._result.get('outage_time', None)
+ LOG.debug("the _result:%s" % self._result)
+ max_outage_time = self._config["sla"]["max_outage_time"]
+ if outage_time > max_outage_time:
+ LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ return False
+ else:
+ LOG.info("the sla is passed")
+ return True
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ip": "192.168.235.22",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ monitor_configs = []
+ config = {
+ 'monitor_type': 'openstack-cmd',
+ 'command_name': 'nova image-list',
+ 'monitor_time': 1,
+ 'host': 'node1',
+ 'sla': {'max_outage_time': 5}
+ }
+ monitor_configs.append(config)
+
+ p = basemonitor.MonitorMgr()
+ p.init_monitors(monitor_configs, context)
+ p.start_monitors()
+ p.wait_monitors()
+ p.verify_SLA()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
new file mode 100644
index 000000000..53a6d8e4d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -0,0 +1,81 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import logging
+import yardstick.ssh as ssh
+
+import basemonitor as basemonitor
+
+LOG = logging.getLogger(__name__)
+
+
+class MonitorProcess(basemonitor.BaseMonitor):
+ """docstring for MonitorApi"""
+
+ __monitor_type__ = "process"
+
+ def setup(self):
+ host = self._context[self._config["host"]]
+ ip = host.get("ip", None)
+ user = host.get("user", "root")
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ self.connection = ssh.SSH(user, ip, key_filename=key_filename)
+ self.connection.wait(timeout=600)
+ LOG.debug("ssh host success!")
+ self.check_script = self.get_script_fullpath(
+ "ha_tools/check_process_python.bash")
+ self.process_name = self._config["process_name"]
+
+ def monitor_func(self):
+ exit_status, stdout, stderr = self.connection.execute(
+ "/bin/sh -s {0}".format(self.process_name),
+ stdin=open(self.check_script, "r"))
+ if not stdout or int(stdout) <= 0:
+ LOG.info("the process (%s) is not running!" % self.process_name)
+ return False
+
+ return True
+
+ def verify_SLA(self):
+ LOG.debug("the _result:%s" % self._result)
+ outage_time = self._result.get('outage_time', None)
+ max_outage_time = self._config["sla"]["max_recover_time"]
+ if outage_time > max_outage_time:
+ LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+ return False
+ else:
+ return True
+
+
+def _test(): # pragma: no cover
+ host = {
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ context = {"node1": host}
+ monitor_configs = []
+ config = {
+ 'monitor_type': 'process',
+ 'process_name': 'nova-api',
+ 'host': "node1",
+ 'monitor_time': 1,
+ 'sla': {'max_recover_time': 5}
+ }
+ monitor_configs.append(config)
+
+ p = basemonitor.MonitorMgr()
+ p.init_monitors(monitor_configs, context)
+ p.start_monitors()
+ p.wait_monitors()
+ p.verify_SLA()
+
+
+if __name__ == '__main__': # pragma: no cover
+ _test()
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 10134ea6d..aee94ee09 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -7,9 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
-import time
from yardstick.benchmark.scenarios import base
-from yardstick.benchmark.scenarios.availability import monitor
+from yardstick.benchmark.scenarios.availability.monitor import basemonitor
from yardstick.benchmark.scenarios.availability.attacker import baseattacker
LOG = logging.getLogger(__name__)
@@ -34,6 +33,7 @@ class ServiceHA(base.Scenario):
if nodes is None:
LOG.error("the nodes info is none")
return
+
self.attackers = []
attacker_cfgs = self.scenario_cfg["options"]["attackers"]
for attacker_cfg in attacker_cfgs:
@@ -45,9 +45,9 @@ class ServiceHA(base.Scenario):
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
- self.monitor_ins = monitor.Monitor()
- self.monitor_ins.setup(monitor_cfgs[0])
- self.monitor_ins.monitor_time = monitor_cfgs[0]["monitor_time"]
+ self.monitorMgr = basemonitor.MonitorMgr()
+ self.monitorMgr.init_monitors(monitor_cfgs, nodes)
+
self.setup_done = True
def run(self, result):
@@ -56,35 +56,24 @@ class ServiceHA(base.Scenario):
LOG.error("The setup not finished!")
return
- self.monitor_ins.start()
+ self.monitorMgr.start_monitors()
LOG.info("monitor start!")
for attacker in self.attackers:
attacker.inject_fault()
- time.sleep(self.monitor_ins.monitor_time)
-
- self.monitor_ins.stop()
+ self.monitorMgr.wait_monitors()
LOG.info("monitor stop!")
- ret = self.monitor_ins.get_result()
- LOG.info("The monitor result:%s" % ret)
- outage_time = ret.get("outage_time")
- result["outage_time"] = outage_time
- LOG.info("the result:%s" % result)
-
- if "sla" in self.scenario_cfg:
- sla_outage_time = int(self.scenario_cfg["sla"]["outage_time"])
- assert outage_time <= sla_outage_time, "outage_time %f > sla:outage_time(%f)" % \
- (outage_time, sla_outage_time)
+ sla_pass = self.monitorMgr.verify_SLA()
+ assert sla_pass is True, "the test cases is not pass the SLA"
return
def teardown(self):
'''scenario teardown'''
for attacker in self.attackers:
- if not attacker.check():
- attacker.recover()
+ attacker.recover()
def _test(): # pragma: no cover
@@ -103,14 +92,14 @@ def _test(): # pragma: no cover
attacker_cfgs = []
attacker_cfgs.append(attacker_cfg)
monitor_cfg = {
- "monitor_cmd": "nova image-list",
- "monitor_tme": 10
+ "monitor_cmd": "nova image-list"
}
monitor_cfgs = []
monitor_cfgs.append(monitor_cfg)
options = {
"attackers": attacker_cfgs,
+ "wait_time": 10,
"monitors": monitor_cfgs
}
sla = {"outage_time": 5}
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index e8fc63cf7..478b0a1a2 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -9,6 +9,9 @@
import pkg_resources
import logging
import json
+import re
+import time
+import os
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base
@@ -53,30 +56,104 @@ class Cyclictest(base.Scenario):
__scenario_type__ = "Cyclictest"
TARGET_SCRIPT = "cyclictest_benchmark.bash"
+ WORKSPACE = "/root/workspace/"
+ REBOOT_CMD_PATTERN = r";\s*reboot\b"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.setup_done = False
- def setup(self):
- '''scenario setup'''
- self.target_script = pkg_resources.resource_filename(
- "yardstick.benchmark.scenarios.compute",
- Cyclictest.TARGET_SCRIPT)
+ def _put_files(self, client):
+ setup_options = self.scenario_cfg["setup_options"]
+ rpm_dir = setup_options["rpm_dir"]
+ script_dir = setup_options["script_dir"]
+ image_dir = setup_options["image_dir"]
+ LOG.debug("Send RPMs from %s to workspace %s" %
+ (rpm_dir, self.WORKSPACE))
+ client.put(rpm_dir, self.WORKSPACE, recursive=True)
+ LOG.debug("Send scripts from %s to workspace %s" %
+ (script_dir, self.WORKSPACE))
+ client.put(script_dir, self.WORKSPACE, recursive=True)
+ LOG.debug("Send guest image from %s to workspace %s" %
+ (image_dir, self.WORKSPACE))
+ client.put(image_dir, self.WORKSPACE, recursive=True)
+
+ def _connect_host(self):
+ host = self.context_cfg["host"]
+ user = host.get("user", "root")
+ ip = host.get("ip", None)
+ key_filename = host.get("key_filename", "~/.ssh/id_rsa")
+
+ LOG.debug("user:%s, host:%s", user, ip)
+ self.host = ssh.SSH(user, ip, key_filename=key_filename)
+ self.host.wait(timeout=600)
+
+ def _connect_guest(self):
host = self.context_cfg["host"]
user = host.get("user", "root")
ip = host.get("ip", None)
key_filename = host.get("key_filename", "~/.ssh/id_rsa")
LOG.debug("user:%s, host:%s", user, ip)
- print "key_filename:" + key_filename
- self.client = ssh.SSH(user, ip, key_filename=key_filename)
- self.client.wait(timeout=600)
+ self.guest = ssh.SSH(user, ip, port=5555, key_filename=key_filename)
+ self.guest.wait(timeout=600)
+
+ def _run_setup_cmd(self, client, cmd):
+ LOG.debug("Run cmd: %s" % cmd)
+ status, stdout, stderr = client.execute(cmd)
+ if status:
+ if re.search(self.REBOOT_CMD_PATTERN, cmd):
+ LOG.debug("Error on reboot")
+ else:
+ raise RuntimeError(stderr)
+
+ def _run_host_setup_scripts(self, scripts):
+ setup_options = self.scenario_cfg["setup_options"]
+ script_dir = os.path.basename(setup_options["script_dir"])
+
+ for script in scripts:
+ cmd = "cd %s/%s; export PATH=./:$PATH; %s" %\
+ (self.WORKSPACE, script_dir, script)
+ self._run_setup_cmd(self.host, cmd)
+
+ if re.search(self.REBOOT_CMD_PATTERN, cmd):
+ time.sleep(3)
+ self._connect_host()
+
+ def _run_guest_setup_scripts(self, scripts):
+ setup_options = self.scenario_cfg["setup_options"]
+ script_dir = os.path.basename(setup_options["script_dir"])
+
+ for script in scripts:
+ cmd = "cd %s/%s; export PATH=./:$PATH; %s" %\
+ (self.WORKSPACE, script_dir, script)
+ self._run_setup_cmd(self.guest, cmd)
+
+ if re.search(self.REBOOT_CMD_PATTERN, cmd):
+ time.sleep(3)
+ self._connect_guest()
+
+ def setup(self):
+ '''scenario setup'''
+ setup_options = self.scenario_cfg["setup_options"]
+ host_setup_seqs = setup_options["host_setup_seqs"]
+ guest_setup_seqs = setup_options["guest_setup_seqs"]
+
+ self._connect_host()
+ self._put_files(self.host)
+ self._run_host_setup_scripts(host_setup_seqs)
+
+ self._connect_guest()
+ self._put_files(self.guest)
+ self._run_guest_setup_scripts(guest_setup_seqs)
# copy script to host
- self.client.run("cat > ~/cyclictest_benchmark.sh",
- stdin=open(self.target_script, "rb"))
+ self.target_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.compute",
+ Cyclictest.TARGET_SCRIPT)
+ self.guest.run("cat > ~/cyclictest_benchmark.sh",
+ stdin=open(self.target_script, "rb"))
self.setup_done = True
@@ -98,9 +175,9 @@ class Cyclictest(base.Scenario):
cmd_args = "-a %s -i %s -p %s -l %s -t %s -h %s %s" \
% (affinity, interval, priority, loops,
threads, histogram, default_args)
- cmd = "sudo bash cyclictest_benchmark.sh %s" % (cmd_args)
+ cmd = "bash cyclictest_benchmark.sh %s" % (cmd_args)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, stdout, stderr = self.guest.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -121,7 +198,7 @@ class Cyclictest(base.Scenario):
assert sla_error == "", sla_error
-def _test():
+def _test(): # pragma: no cover
'''internal test function'''
key_filename = pkg_resources.resource_filename("yardstick.resources",
"files/yardstick_key")
@@ -159,5 +236,5 @@ def _test():
cyclictest.run(result)
print result
-if __name__ == '__main__':
+if __name__ == '__main__': # pragma: no cover
_test()
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
new file mode 100644
index 000000000..629f62be5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -0,0 +1,119 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import pkg_resources
+import logging
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Ping6(base.Scenario): # pragma: no cover
+ """Execute ping6 between two hosts
+
+ read link below for more ipv6 info description:
+ http://wiki.opnfv.org/ipv6_opnfv_project
+ """
+ __scenario_type__ = "Ping6"
+
+ TARGET_SCRIPT = 'ping6_benchmark.bash'
+ SETUP_SCRIPT = 'ping6_setup.bash'
+ TEARDOWN_SCRIPT = 'ping6_teardown.bash'
+ METADATA_SCRIPT = 'ping6_metadata.txt'
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+ self.run_done = False
+
+ def _ssh_host(self):
+ # ssh host1
+ host = self.context_cfg['host']
+ host_user = host.get('user', 'ubuntu')
+ host_ip = host.get('ip', None)
+ host_pwd = host.get('password', 'root')
+ LOG.info("user:%s, host:%s", host_user, host_ip)
+ self.client = ssh.SSH(host_user, host_ip, password=host_pwd)
+ self.client.wait(timeout=600)
+
+ def setup(self):
+ '''scenario setup'''
+ self.setup_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.SETUP_SCRIPT)
+
+ self.ping6_metadata_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.METADATA_SCRIPT)
+ # ssh host1
+ self._ssh_host()
+ # run script to setup ipv6
+ self.client.run("cat > ~/setup.sh",
+ stdin=open(self.setup_script, "rb"))
+ self.client.run("cat > ~/metadata.txt",
+ stdin=open(self.ping6_metadata_script, "rb"))
+ cmd = "sudo bash setup.sh"
+ status, stdout, stderr = self.client.execute(cmd)
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+ # ssh vm1
+ self.ping6_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.TARGET_SCRIPT)
+
+ if not self.setup_done:
+ self._ssh_host()
+
+ self.client.run("cat > ~/ping6.sh",
+ stdin=open(self.ping6_script, "rb"))
+ cmd = "sudo bash ping6.sh"
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ print stdout
+ if status:
+ raise RuntimeError(stderr)
+
+ if stdout:
+ result["rtt"] = float(stdout)
+
+ if "sla" in self.scenario_cfg:
+ sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
+ assert result["rtt"] <= sla_max_rtt, "rtt %f > sla:max_rtt(%f); " % \
+ (result["rtt"], sla_max_rtt)
+ else:
+ LOG.error("ping6 timeout")
+ self.run_done = True
+
+ def teardown(self):
+ """teardown the benchmark"""
+
+ if not self.run_done:
+ self._ssh_host()
+
+ self.teardown_script = pkg_resources.resource_filename(
+ 'yardstick.benchmark.scenarios.networking',
+ Ping6.TEARDOWN_SCRIPT)
+ self.client.run("cat > ~/teardown.sh",
+ stdin=open(self.teardown_script, "rb"))
+ cmd = "sudo bash teardown.sh"
+ status, stdout, stderr = self.client.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+
+ if stdout:
+ pass
+ else:
+ LOG.error("ping6 teardown failed")
diff --git a/yardstick/benchmark/scenarios/networking/ping6_benchmark.bash b/yardstick/benchmark/scenarios/networking/ping6_benchmark.bash
new file mode 100644
index 000000000..6df354a1b
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_benchmark.bash
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run a single ping6 command towards a ipv6 router
+
+set -e
+
+# TODO find host
+sudo ip netns exec qdhcp-$(neutron net-list | grep -w ipv4-int-network1 | awk '{print $2}') bash
+# TODO find VM ip
+ssh -i vRouterKey fedora@20.0.0.4
+ping6 -c 1 2001:db8:0:1::1 | grep ttl | awk -F [=\ ] '{printf $10}'
diff --git a/yardstick/benchmark/scenarios/networking/ping6_metadata.txt b/yardstick/benchmark/scenarios/networking/ping6_metadata.txt
new file mode 100644
index 000000000..5dc08d30f
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_metadata.txt
@@ -0,0 +1,82 @@
+#cloud-config
+bootcmd:
+ - /usr/sbin/ifdown eth0
+ - /usr/sbin/ifup eth0
+ - /usr/sbin/ifdown eth1
+ - ip link set dev eth0 mtu 1300
+ - ip link set dev eth1 mtu 1300
+ - /usr/sbin/ifup eth1
+ - ip link set dev eth0 mtu 1300
+ - ip link set dev eth1 mtu 1300
+ - setenforce 0
+ - /sbin/sysctl -w net.ipv6.conf.all.forwarding=1
+ - /sbin/sysctl -w net.ipv6.conf.eth0.accept_ra=2
+ - /sbin/sysctl -w net.ipv6.conf.eth0.accept_ra_defrtr=1
+ - /sbin/sysctl -w net.ipv6.conf.eth0.router_solicitations=1
+packages:
+ - radvd
+runcmd:
+ - /usr/sbin/ifdown eth1
+ - /usr/sbin/ifup eth1
+ - ip link set dev eth0 mtu 1300
+ - ip link set dev eth1 mtu 1300
+ - /usr/bin/systemctl disable NetworkManager
+ - /usr/bin/systemctl start radvd
+ - echo 'complete' >> /tmp/cloud-config.log
+write_files:
+ - content: |
+ TYPE="Ethernet"
+ BOOTPROTO="dhcp"
+ DEFROUTE="yes"
+ PEERDNS="yes"
+ PEERROUTES="yes"
+ IPV4_FAILURE_FATAL="no"
+ IPV6INIT="yes"
+ IPV6_AUTOCONF="yes"
+ IPV6_DEFROUTE="yes"
+ IPV6_PEERROUTES="yes"
+ IPV6_PEERDNS="yes"
+ IPV6_FAILURE_FATAL="no"
+ NAME="eth0"
+ DEVICE="eth0"
+ ONBOOT="yes"
+ path: /etc/sysconfig/network-scripts/ifcfg-eth0
+ permissions: '0755'
+ owner: root:root
+ - content: |
+ TYPE="Ethernet"
+ BOOTPROTO=static
+ IPV6INIT=yes
+ IPV6ADDR="2001:db8:0:2::1/64"
+ NAME=eth1
+ DEVICE=eth1
+ ONBOOT=yes
+ NM_CONTROLLED=no
+ path: /etc/sysconfig/network-scripts/ifcfg-eth1
+ permissions: '0755'
+ owner: root:root
+ - content: |
+ interface eth1
+ {
+ AdvSendAdvert on;
+ MinRtrAdvInterval 3;
+ MaxRtrAdvInterval 10;
+ AdvHomeAgentFlag off;
+ AdvManagedFlag on;
+ AdvOtherConfigFlag on;
+ prefix 2001:db8:0:2::/64
+ {
+ AdvOnLink on;
+ ### On link tells the host that the default router is on the same "link" as it is
+ AdvAutonomous on;
+ AdvRouterAddr off;
+ };
+ };
+ path: /etc/radvd.conf
+ permissions: '0644'
+ owner: root:root
+ - content: |
+ IPV6FORWARDING=yes
+ path: /etc/sysconfig/network
+ permissions: '0644'
+ owner: root:root \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/networking/ping6_setup.bash b/yardstick/benchmark/scenarios/networking/ping6_setup.bash
new file mode 100644
index 000000000..2a54da2ba
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_setup.bash
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# download and create image
+source /opt/admin-openrc.sh
+wget https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-22-20150521.x86_64.qcow2
+glance image-create --name 'Fedora22' --disk-format qcow2 \
+--container-format bare --file ./Fedora-Cloud-Base-22-20150521.x86_64.qcow2
+
+# create external network
+neutron net-create net04_ext --router:external --provider:physical_network physnet \
+--provider:network_type vlan --provider:segmentation_id 1411
+neutron subnet-create net04_ext 10.145.140.0/24 --name net04_ext__subnet \
+--allocation-pool start=10.145.140.13,end=10.145.140.20 --disable-dhcp --gateway 10.145.140.1
+
+# create router
+neutron router-create ipv4-router
+neutron router-create ipv6-router
+
+
+# create (ipv4,ipv6)router and net and subnet
+neutron net-create --port_security_enabled=False ipv4-int-network1
+neutron net-create --port_security_enabled=False ipv6-int-network2
+
+# Create IPv4 subnet and associate it to ipv4-router
+neutron subnet-create --name ipv4-int-subnet1 \
+--dns-nameserver 8.8.8.8 ipv4-int-network1 20.0.0.0/24
+neutron router-interface-add ipv4-router ipv4-int-subnet1
+
+# Associate the net04_ext to the Neutron routers
+neutron router-gateway-set ipv6-router net04_ext
+neutron router-gateway-set ipv4-router net04_ext
+
+# Create two subnets, one IPv4 subnet ipv4-int-subnet2 and
+# one IPv6 subnet ipv6-int-subnet2 in ipv6-int-network2, and associate both subnets to ipv6-router
+neutron subnet-create --name ipv4-int-subnet2 --dns-nameserver 8.8.8.8 ipv6-int-network2 10.0.0.0/24
+neutron subnet-create --name ipv6-int-subnet2 \
+ --ip-version 6 --ipv6-ra-mode slaac --ipv6-address-mode slaac ipv6-int-network2 2001:db8:0:1::/64
+
+
+neutron router-interface-add ipv6-router ipv4-int-subnet2
+neutron router-interface-add ipv6-router ipv6-int-subnet2
+
+
+# create key
+nova keypair-add vRouterKey > ~/vRouterKey
+
+# Create ports for vRouter
+neutron port-create --name eth0-vRouter --mac-address fa:16:3e:11:11:11 ipv6-int-network2
+neutron port-create --name eth1-vRouter --mac-address fa:16:3e:22:22:22 ipv4-int-network1
+
+# Create ports for VM1 and VM2.
+neutron port-create --name eth0-VM1 --mac-address fa:16:3e:33:33:33 ipv4-int-network1
+neutron port-create --name eth0-VM2 --mac-address fa:16:3e:44:44:44 ipv4-int-network1
+
+# Update ipv6-router with routing information to subnet 2001:db8:0:2::/64
+neutron router-update ipv6-router \
+ --routes type=dict list=true destination=2001:db8:0:2::/64,nexthop=2001:db8:0:1:f816:3eff:fe11:1111
+
+# vRouter boot
+nova boot --image Fedora22 --flavor m1.small \
+--user-data ./metadata.txt \
+--nic port-id=$(neutron port-list | grep -w eth0-vRouter | awk '{print $2}') \
+--nic port-id=$(neutron port-list | grep -w eth1-vRouter | awk '{print $2}') \
+--key-name vRouterKey vRouter
+
+# VM create
+nova boot --image Fedora22 --flavor m1.small \
+--nic port-id=$(neutron port-list | grep -w eth0-VM1 | awk '{print $2}') \
+--key-name vRouterKey VM1
+
+nova boot --image Fedora22 --flavor m1.small \
+--nic port-id=$(neutron port-list | grep -w eth0-VM2 | awk '{print $2}') \
+--key-name vRouterKey VM2
+
+nova list
diff --git a/yardstick/benchmark/scenarios/networking/ping6_teardown.bash b/yardstick/benchmark/scenarios/networking/ping6_teardown.bash
new file mode 100644
index 000000000..7ab145523
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_teardown.bash
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# delete VM
+nova delete VM1
+nova delete VM2
+nova delete vRouter
+#clear routes
+neutron router-update ipv6-router --routes action=clear
+
+#VM1,VM2 port delete
+neutron port-delete eth0-VM1
+neutron port-delete eth0-VM2
+
+#vRouter port delete
+neutron port-delete eth0-vRouter
+neutron port-delete eth1-vRouter
+
+#delete key
+nova keypair-delete vRouterKey
+
+#delete ipv6 router interface
+neutron router-interface-delete ipv6-router ipv6-int-subnet2
+neutron router-interface-delete ipv6-router ipv4-int-subnet2
+
+#delete subnet
+neutron subnet-delete ipv6-int-subnet2
+neutron subnet-delete ipv4-int-subnet2
+
+#clear gateway
+neutron router-gateway-clear ipv4-router net04_ext
+neutron router-gateway-clear ipv6-router net04_ext
+
+#delete ipv4 router interface
+neutron router-interface-delete ipv4-router ipv4-int-subnet1
+neutron subnet-delete ipv4-int-subnet1
+
+#delete network
+neutron net-delete ipv6-int-network2
+neutron net-delete ipv4-int-network1
+
+# delete router
+neutron router-delete ipv4-router
+neutron router-delete ipv6-router
+
+# delete ext net
+neutron subnet-delete net04_ext__subnet
+neutron net-delete net04_ext
+
+# delete glance image
+glance --os-image-api-version 1 image-delete Fedora22 \ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py
new file mode 100644
index 000000000..509fa847b
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation.py
@@ -0,0 +1,85 @@
+#############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import experimental_framework.api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcInstantiationValidation(base.Scenario):
+ """Execute Instantiation Validation TC on the vTC
+ """
+ __scenario_type__ = "vtc_instantiation_validation"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'instantiation_validation_benchmark.' \
+ 'InstantiationValidationBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['throughput'] = '1'
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py
new file mode 100644
index 000000000..4834a5fc7
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_instantiation_validation_noisy.py
@@ -0,0 +1,92 @@
+#############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import experimental_framework.api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcInstantiationValidationNoisy(base.Scenario):
+ """Execute Instantiation Validation TC on the vTC
+ """
+ __scenario_type__ = "vtc_instantiation_validation_noisy"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'instantiation_validation_noisy_neighbors_' \
+ 'benchmark.' \
+ 'InstantiationValidationNoisyNeighborsBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['throughput'] = '1'
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+ test_case['params']['num_of_neighbours'] = \
+ str(self.options['num_of_neighbours'])
+ test_case['params']['amount_of_ram'] = \
+ str(self.options['amount_of_ram'])
+ test_case['params']['number_of_cores'] = \
+ str(self.options['number_of_cores'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/networking/vtc_throughput.py b/yardstick/benchmark/scenarios/networking/vtc_throughput.py
new file mode 100644
index 000000000..fe7a88470
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_throughput.py
@@ -0,0 +1,85 @@
+#############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+from experimental_framework import api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcThroughput(base.Scenario):
+ """Execute Instantiation Validation TC on the vTC
+ """
+ __scenario_type__ = "vtc_throughput"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'rfc2544_throughput_benchmark.' \
+ 'RFC2544ThroughputBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['packet_size'] = str(self.options['packet_size'])
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py b/yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py
new file mode 100644
index 000000000..ad3832fb5
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/vtc_throughput_noisy.py
@@ -0,0 +1,91 @@
+#############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import experimental_framework.api as api
+
+LOG = logging.getLogger(__name__)
+
+
+class VtcThroughputNoisy(base.Scenario):
+ """Execute Instantiation Validation TC on the vTC
+ """
+ __scenario_type__ = "vtc_throughput_noisy"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.options = None
+ self.setup_done = False
+
+ def setup(self):
+ '''scenario setup'''
+
+ self.options = self.scenario_cfg['options']
+ self.setup_done = True
+
+ def run(self, result):
+ """execute test"""
+
+ if not self.setup_done:
+ self.setup()
+
+ heat_template = 'vTC.yaml'
+ iterations = 1
+
+ openstack_credentials = {
+ 'ip_controller': '0.0.0.0',
+ 'heat_url': '***',
+ 'auth_uri': os.environ.get('OS_AUTH_URL'),
+ 'user': os.environ.get('OS_USERNAME'),
+ 'password': os.environ.get('OS_PASSWORD'),
+ 'project': os.environ.get('OS_TENANT_NAME')
+ }
+ heat_template_parameters = {
+ 'default_net': self.options['default_net_name'],
+ 'default_subnet': self.options['default_subnet_name'],
+ 'source_net': self.options['vlan_net_1_name'],
+ 'source_subnet': self.options['vlan_subnet_1_name'],
+ 'destination_net': self.options['vlan_net_2_name'],
+ 'destination_subnet': self.options['vlan_subnet_2_name']
+ }
+ deployment_configuration = {
+ 'vnic_type': [self.options['vnic_type']],
+ 'vtc_flavor': [self.options['vtc_flavor']]
+ }
+
+ test_case = dict()
+ test_case['name'] = 'multi_tenancy_throughput_benchmark.' \
+ 'MultiTenancyThroughputBenchmark'
+ test_case['params'] = dict()
+ test_case['params']['packet_size'] = str(self.options['packet_size'])
+ test_case['params']['vlan_sender'] = str(self.options['vlan_sender'])
+ test_case['params']['vlan_receiver'] = \
+ str(self.options['vlan_receiver'])
+ test_case['params']['num_of_neighbours'] = \
+ str(self.options['num_of_neighbours'])
+ test_case['params']['amount_of_ram'] = \
+ str(self.options['amount_of_ram'])
+ test_case['params']['number_of_cores'] = \
+ str(self.options['number_of_cores'])
+
+ try:
+ result = api.FrameworkApi.execute_framework(
+ [test_case],
+ iterations,
+ heat_template,
+ heat_template_parameters,
+ deployment_configuration,
+ openstack_credentials)
+ except Exception as e:
+ LOG.info('Exception: {}'.format(e))
+ LOG.info('Got output: {}'.format(result))
diff --git a/yardstick/benchmark/scenarios/parser/__init__.py b/yardstick/benchmark/scenarios/parser/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/__init__.py
diff --git a/yardstick/benchmark/scenarios/parser/parser.py b/yardstick/benchmark/scenarios/parser/parser.py
new file mode 100644
index 000000000..006258d05
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser.py
@@ -0,0 +1,80 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import subprocess
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Parser(base.Scenario):
+ """running Parser Yang-to-Tosca module as a tool
+ validating output against expected outcome
+
+ more info https://wiki.opnfv.org/parser
+ """
+ __scenario_type__ = "Parser"
+
+ SETUP_SCRIPT = "parser_setup.sh"
+ TEARDOWN_SCRIPT = "parser_teardown.sh"
+ PARSER_SCRIPT = "parser.sh"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+ self.setup_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.parser",
+ Parser.SETUP_SCRIPT)
+ cmd = "%s" % (self.setup_script)
+
+ subprocess.call(cmd, shell=True)
+
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the translation"""
+ options = self.scenario_cfg['options']
+ yangfile = options.get("yangfile", '~/yardstick/samples/yang.yaml')
+ toscafile = options.get("toscafile", '~/yardstick/samples/tosca.yaml')
+
+ self.parser_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.parser",
+ Parser.PARSER_SCRIPT)
+
+ if not self.setup_done:
+ self.setup()
+
+ cmd1 = "%s %s %s" % (self.parser_script, yangfile, toscafile)
+ cmd2 = "chmod 777 %s" % (self.parser_script)
+ subprocess.call(cmd2, shell=True)
+ output = subprocess.call(cmd1, shell=True)
+ print "yangtotosca finished"
+
+ result['yangtotosca'] = "success" if output == 0 else "fail"
+
+ def teardown(self):
+ ''' for scenario teardown remove parser and pyang '''
+ self.teardown_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.parser",
+ Parser.TEARDOWN_SCRIPT)
+ subprocess.call(self.teardown_script, shell=True)
+ self.teardown_done = True
+
+
+def _test():
+ '''internal test function'''
+ pass
+
+if __name__ == '__main__':
+ _test()
diff --git a/yardstick/benchmark/scenarios/parser/parser.sh b/yardstick/benchmark/scenarios/parser/parser.sh
new file mode 100755
index 000000000..4408e637c
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+yangfile=$1
+base_dir=$(dirname $yangfile)
+shift
+toscafile=$1
+OUTPUT_FILE=/tmp/parser-out.log
+
+# run parser test
+run_parser()
+{
+ cd /tmp/parser/yang2tosca
+ python tosca_translator.py --filename $yangfile > $OUTPUT_FILE
+}
+
+# write the result to stdout in json format
+check_result()
+{
+
+ if (diff -q $toscafile ${yangfile%'.yaml'}"_tosca.yaml" >> $OUTPUT_FILE);
+ then
+ exit 0
+ else
+ exit 1
+ fi
+
+}
+
+# main entry
+main()
+{
+ # run the test
+ run_parser
+
+ # output result
+ check_result
+}
+
+main
diff --git a/yardstick/benchmark/scenarios/parser/parser_setup.sh b/yardstick/benchmark/scenarios/parser/parser_setup.sh
new file mode 100755
index 000000000..44356447d
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser_setup.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+git clone https://github.com/mbj4668/pyang.git /tmp/pyang
+cd /tmp/pyang
+python setup.py install
+git clone https://gerrit.opnfv.org/gerrit/parser /tmp/parser
+
diff --git a/yardstick/benchmark/scenarios/parser/parser_teardown.sh b/yardstick/benchmark/scenarios/parser/parser_teardown.sh
new file mode 100755
index 000000000..727e9decd
--- /dev/null
+++ b/yardstick/benchmark/scenarios/parser/parser_teardown.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+rm -rf /tmp/pyang
+rm -rf /tmp/parser
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index fbbca29ad..17e8f4c42 100755
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -16,7 +16,9 @@ import atexit
import ipaddress
import time
import logging
+import uuid
from itertools import ifilter
+
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.runners import base as base_runner
from yardstick.common.task_template import TaskTemplate
@@ -81,7 +83,9 @@ class TaskCommands(object):
for i in range(0, len(task_files)):
one_task_start_time = time.time()
parser.path = task_files[i]
- scenarios, run_in_parallel = parser.parse_task(task_args[i],
+ task_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+ scenarios, run_in_parallel = parser.parse_task(task_name,
+ task_args[i],
task_args_fnames[i])
self._run(scenarios, run_in_parallel, args.output_file)
@@ -199,7 +203,7 @@ class TaskParser(object):
return suite_params
- def parse_task(self, task_args=None, task_args_file=None):
+ def parse_task(self, task_name, task_args=None, task_args_file=None):
'''parses the task file and return an context and scenario instances'''
print "Parsing task config:", self.path
@@ -250,6 +254,12 @@ class TaskParser(object):
run_in_parallel = cfg.get("run_in_parallel", False)
+ # add tc and task id for influxdb extended tags
+ task_id = str(uuid.uuid4())
+ for scenario in cfg["scenarios"]:
+ scenario["tc"] = task_name
+ scenario["task_id"] = task_id
+
# TODO we need something better here, a class that represent the file
return cfg["scenarios"], run_in_parallel
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
new file mode 100644
index 000000000..2f3ff089f
--- /dev/null
+++ b/yardstick/dispatcher/influxdb.py
@@ -0,0 +1,149 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import json
+import logging
+import requests
+import time
+
+from oslo_config import cfg
+
+from yardstick.dispatcher.base import Base as DispatchBase
+from yardstick.dispatcher.influxdb_line_protocol import make_lines
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+influx_dispatcher_opts = [
+ cfg.StrOpt('target',
+ default='http://127.0.0.1:8086',
+ help='The target where the http request will be sent. '
+ 'If this is not set, no data will be posted. For '
+ 'example: target = http://hostname:1234/path'),
+ cfg.StrOpt('db_name',
+ default='yardstick',
+ help='The database name to store test results.'),
+ cfg.IntOpt('timeout',
+ default=5,
+ help='The max time in seconds to wait for a request to '
+ 'timeout.'),
+]
+
+CONF.register_opts(influx_dispatcher_opts, group="dispatcher_influxdb")
+
+
+class InfluxdbDispatcher(DispatchBase):
+ """Dispatcher class for posting data into an influxdb target.
+ """
+
+ __dispatcher_type__ = "Influxdb"
+
+ def __init__(self, conf):
+ super(InfluxdbDispatcher, self).__init__(conf)
+ self.timeout = CONF.dispatcher_influxdb.timeout
+ self.target = CONF.dispatcher_influxdb.target
+ self.db_name = CONF.dispatcher_influxdb.db_name
+ self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name)
+ self.raw_result = []
+ self.case_name = ""
+ self.tc = ""
+ self.task_id = -1
+ self.static_tags = {
+ "pod_name": os.environ.get('POD_NAME', 'unknown'),
+ "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
+ "version": os.environ.get('YARDSTICK_VERSION', 'unknown')
+ }
+
+ def _dict_key_flatten(self, data):
+ next_data = {}
+
+ if not [v for v in data.values()
+ if type(v) == dict or type(v) == list]:
+ return data
+
+ for k, v in data.iteritems():
+ if type(v) == dict:
+ for n_k, n_v in v.iteritems():
+ next_data["%s.%s" % (k, n_k)] = n_v
+ elif type(v) == list:
+ for index, item in enumerate(v):
+ next_data["%s%d" % (k, index)] = item
+ else:
+ next_data[k] = v
+
+ return self._dict_key_flatten(next_data)
+
+ def _get_nano_timestamp(self, results):
+ try:
+ timestamp = results["benchmark"]["timestamp"]
+ except Exception:
+ timestamp = time.time()
+
+ return str(int(float(timestamp) * 1000000000))
+
+ def _get_extended_tags(self, data):
+ tags = {
+ "runner_id": data["runner_id"],
+ "tc": self.tc,
+ "task_id": self.task_id
+ }
+
+ return tags
+
+ def _data_to_line_protocol(self, data):
+ msg = {}
+ point = {}
+ point["measurement"] = self.case_name
+ point["fields"] = self._dict_key_flatten(data["benchmark"]["data"])
+ point["time"] = self._get_nano_timestamp(data)
+ point["tags"] = self._get_extended_tags(data)
+ msg["points"] = [point]
+ msg["tags"] = self.static_tags
+
+ return make_lines(msg).encode('utf-8')
+
+ def record_result_data(self, data):
+ LOG.debug('Test result : %s' % json.dumps(data))
+ self.raw_result.append(data)
+ if self.target == '':
+ # if the target was not set, do not do anything
+ LOG.error('Dispatcher target was not set, no data will'
+ 'be posted.')
+ return -1
+
+ if isinstance(data, dict) and "scenario_cfg" in data:
+ self.case_name = data["scenario_cfg"]["type"]
+ self.tc = data["scenario_cfg"]["tc"]
+ self.task_id = data["scenario_cfg"]["task_id"]
+ return 0
+
+ if self.case_name == "":
+ LOG.error('Test result : %s' % json.dumps(data))
+ LOG.error('The case_name cannot be found, no data will be posted.')
+ return -1
+
+ try:
+ line = self._data_to_line_protocol(data)
+ LOG.debug('Test result line format : %s' % line)
+ res = requests.post(self.influxdb_url,
+ data=line,
+ timeout=self.timeout)
+ if res.status_code != 204:
+ LOG.error('Test result posting finished with status code'
+ ' %d.' % res.status_code)
+ except Exception as err:
+ LOG.exception('Failed to record result data: %s',
+ err)
+ return -1
+ return 0
+
+ def flush_result_data(self):
+ LOG.debug('Test result all : %s' % json.dumps(self.raw_result))
+ return 0
diff --git a/yardstick/dispatcher/influxdb_line_protocol.py b/yardstick/dispatcher/influxdb_line_protocol.py
new file mode 100644
index 000000000..3e830ed5e
--- /dev/null
+++ b/yardstick/dispatcher/influxdb_line_protocol.py
@@ -0,0 +1,114 @@
+# yardstick comment: this file is a modified copy of
+# influxdb-python/influxdb/line_protocol.py
+
+from __future__ import unicode_literals
+from copy import copy
+
+from six import binary_type, text_type, integer_types
+
+
+def _escape_tag(tag):
+ tag = _get_unicode(tag, force=True)
+ return tag.replace(
+ "\\", "\\\\"
+ ).replace(
+ " ", "\\ "
+ ).replace(
+ ",", "\\,"
+ ).replace(
+ "=", "\\="
+ )
+
+
+def _escape_value(value):
+ value = _get_unicode(value)
+ if isinstance(value, text_type) and value != '':
+ return "\"{}\"".format(
+ value.replace(
+ "\"", "\\\""
+ ).replace(
+ "\n", "\\n"
+ )
+ )
+ elif isinstance(value, integer_types) and not isinstance(value, bool):
+ return str(value) + 'i'
+ else:
+ return str(value)
+
+
+def _get_unicode(data, force=False):
+ """
+ Try to return a text aka unicode object from the given data.
+ """
+ if isinstance(data, binary_type):
+ return data.decode('utf-8')
+ elif data is None:
+ return ''
+ elif force:
+ return str(data)
+ else:
+ return data
+
+
+def make_lines(data):
+ """
+ Extracts the points from the given dict and returns a Unicode string
+ matching the line protocol introduced in InfluxDB 0.9.0.
+
+ line protocol format:
+ <measurement>[,<tag-key>=<tag-value>...] <field-key>=<field-value>\
+ [,<field2-key>=<field2-value>...] [unix-nano-timestamp]
+
+ Ref:
+ https://influxdb.com/docs/v0.9/write_protocols/write_syntax.html
+ https://influxdb.com/docs/v0.9/write_protocols/line.html
+ """
+ lines = []
+ static_tags = data.get('tags', None)
+ for point in data['points']:
+ elements = []
+
+ # add measurement name
+ measurement = _escape_tag(_get_unicode(
+ point.get('measurement', data.get('measurement'))
+ ))
+ key_values = [measurement]
+
+ # add tags
+ if static_tags is None:
+ tags = point.get('tags', {})
+ else:
+ tags = copy(static_tags)
+ tags.update(point.get('tags', {}))
+
+ # tags should be sorted client-side to take load off server
+ for tag_key in sorted(tags.keys()):
+ key = _escape_tag(tag_key)
+ value = _escape_tag(tags[tag_key])
+
+ if key != '' and value != '':
+ key_values.append("{key}={value}".format(key=key, value=value))
+ key_values = ','.join(key_values)
+ elements.append(key_values)
+
+ # add fields
+ field_values = []
+ for field_key in sorted(point['fields'].keys()):
+ key = _escape_tag(field_key)
+ value = _escape_value(point['fields'][field_key])
+ if key != '' and value != '':
+ field_values.append("{key}={value}".format(
+ key=key,
+ value=value
+ ))
+ field_values = ','.join(field_values)
+ elements.append(field_values)
+
+ # add timestamp
+ if 'time' in point:
+ elements.append(point['time'])
+
+ line = ' '.join(elements)
+ lines.append(line)
+ lines = '\n'.join(lines)
+ return lines + '\n'
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 253fd2e3d..339f834b7 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -63,6 +63,7 @@ import socket
import time
import paramiko
+from scp import SCPClient
import six
import logging
@@ -254,3 +255,9 @@ class SSH(object):
time.sleep(interval)
if time.time() > (start_time + timeout):
raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
diff --git a/yardstick/vTC/__init__.py b/yardstick/vTC/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/vTC/__init__.py
diff --git a/yardstick/vTC/apexlake/experimental_framework/api.py b/yardstick/vTC/apexlake/experimental_framework/api.py
index 635dcd2cf..1851f1b09 100644
--- a/yardstick/vTC/apexlake/experimental_framework/api.py
+++ b/yardstick/vTC/apexlake/experimental_framework/api.py
@@ -27,18 +27,18 @@ class FrameworkApi(object):
"""
common.init(api=True)
- @staticmethod
- def get_available_test_cases():
- """
- Returns a list of available test cases.
- This list include eventual modules developed by the user, if any.
- Each test case is returned as a string that represents the full name
- of the test case and that can be used to get more information
- calling get_test_case_features(test_case_name)
-
- :return: list of strings
- """
- return b_unit.BenchmarkingUnit.get_available_test_cases()
+ # @staticmethod
+ # def get_available_test_cases():
+ # """
+ # Returns a list of available test cases.
+ # This list include eventual modules developed by the user, if any.
+ # Each test case is returned as a string that represents the full name
+ # of the test case and that can be used to get more information
+ # calling get_test_case_features(test_case_name)
+ #
+ # :return: list of strings
+ # """
+ # return b_unit.BenchmarkingUnit.get_available_test_cases()
@staticmethod
def get_test_case_features(test_case):
diff --git a/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py b/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
index 1a19826ca..1963696f8 100644
--- a/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
+++ b/yardstick/vTC/apexlake/experimental_framework/benchmarking_unit.py
@@ -188,7 +188,7 @@ class BenchmarkingUnit:
for key in benchmark.get_params():
experiment[key] = benchmark.get_params()[key]
common.LOG.info('Benchmark Finished')
- self.data_manager.generate_result_csv_file()
+ # self.data_manager.generate_result_csv_file()
common.LOG.info('Benchmarking Unit: Experiments completed!')
return result
diff --git a/yardstick/vTC/apexlake/experimental_framework/common.py b/yardstick/vTC/apexlake/experimental_framework/common.py
index 6b5f932ce..afe70241a 100644
--- a/yardstick/vTC/apexlake/experimental_framework/common.py
+++ b/yardstick/vTC/apexlake/experimental_framework/common.py
@@ -76,10 +76,10 @@ def init_conf_file(api=False):
global CONF_FILE
if api:
CONF_FILE = ConfigurationFile(cf.get_sections_api(),
- '/etc/apexlake/apexlake.conf')
+ '/tmp/apexlake/apexlake.conf')
else:
CONF_FILE = ConfigurationFile(cf.get_sections(),
- '/etc/apexlake/apexlake.conf')
+ '/tmp/apexlake/apexlake.conf')
def init_general_vars(api=False):
@@ -99,10 +99,10 @@ def init_general_vars(api=False):
"is not present in configuration file")
TEMPLATE_DIR = '/tmp/apexlake/heat_templates/'
- if not os.path.exists(TEMPLATE_DIR):
- os.makedirs(TEMPLATE_DIR)
- cmd = "cp /etc/apexlake/heat_templates/*.yaml {}".format(TEMPLATE_DIR)
- run_command(cmd)
+ # if not os.path.exists(TEMPLATE_DIR):
+ # os.makedirs(TEMPLATE_DIR)
+ # cmd = "cp /tmp/apexlake/heat_templates/*.yaml {}".format(TEMPLATE_DIR)
+ # run_command(cmd)
if not api:
# Validate template name
diff --git a/yardstick/vTC/apexlake/setup.py b/yardstick/vTC/apexlake/setup.py
index e33b5bfc8..8ab3f4845 100644
--- a/yardstick/vTC/apexlake/setup.py
+++ b/yardstick/vTC/apexlake/setup.py
@@ -30,9 +30,9 @@ setup(name='apexlake',
]
},
data_files=[
- ('/etc/apexlake/', ['apexlake.conf']),
- ('/etc/apexlake/heat_templates/',
+ ('/tmp/apexlake/', ['apexlake.conf']),
+ ('/tmp/apexlake/heat_templates/',
['heat_templates/vTC.yaml']),
- ('/etc/apexlake/heat_templates/',
+ ('/tmp/apexlake/heat_templates/',
['heat_templates/stress_workload.yaml'])
])
diff --git a/yardstick/vTC/apexlake/tests/api_test.py b/yardstick/vTC/apexlake/tests/api_test.py
index e3d5a8b2c..4b70b9bd6 100644
--- a/yardstick/vTC/apexlake/tests/api_test.py
+++ b/yardstick/vTC/apexlake/tests/api_test.py
@@ -80,35 +80,34 @@ class TestGeneratesTemplate(unittest.TestCase):
# output = FrameworkApi.get_available_test_cases()
# self.assertEqual(expected, output)
- # @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
- # 'get_required_benchmarks',
- # side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
- # def test_get_test_case_features_for_success(self, mock_get_req_bench):
- #
- # expected = dict()
- # expected['description'] = 'Instantiation Validation Benchmark'
- # expected['parameters'] = [
- # iv.THROUGHPUT,
- # iv.VLAN_SENDER,
- # iv.VLAN_RECEIVER]
- # expected['allowed_values'] = dict()
- # expected['allowed_values'][iv.THROUGHPUT] = \
- # map(str, range(0, 100))
- # expected['allowed_values'][iv.VLAN_SENDER] = \
- # map(str, range(-1, 4096))
- # expected['allowed_values'][iv.VLAN_RECEIVER] = \
- # map(str, range(-1, 4096))
- # expected['default_values'] = dict()
- # expected['default_values'][iv.THROUGHPUT] = '1'
- # expected['default_values'][iv.VLAN_SENDER] = '-1'
- # expected['default_values'][iv.VLAN_RECEIVER] = '-1'
- #
- # test_case = 'instantiation_validation_benchmark.' \
- # 'InstantiationValidationBenchmark'
- # output = FrameworkApi.get_test_case_features(test_case)
- # self.assertEqual(expected, output)
-
- def test____for_failure(self):
+ @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
+ 'get_required_benchmarks',
+ side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
+ def test_get_test_case_features_for_success(self, mock_get_req_bench):
+ expected = dict()
+ expected['description'] = 'Instantiation Validation Benchmark'
+ expected['parameters'] = [
+ iv.THROUGHPUT,
+ iv.VLAN_SENDER,
+ iv.VLAN_RECEIVER]
+ expected['allowed_values'] = dict()
+ expected['allowed_values'][iv.THROUGHPUT] = \
+ map(str, range(0, 100))
+ expected['allowed_values'][iv.VLAN_SENDER] = \
+ map(str, range(-1, 4096))
+ expected['allowed_values'][iv.VLAN_RECEIVER] = \
+ map(str, range(-1, 4096))
+ expected['default_values'] = dict()
+ expected['default_values'][iv.THROUGHPUT] = '1'
+ expected['default_values'][iv.VLAN_SENDER] = '-1'
+ expected['default_values'][iv.VLAN_RECEIVER] = '-1'
+
+ test_case = 'instantiation_validation_benchmark.' \
+ 'InstantiationValidationBenchmark'
+ output = FrameworkApi.get_test_case_features(test_case)
+ self.assertEqual(expected, output)
+
+ def test__get_test_case_features__for_failure(self):
self.assertRaises(
ValueError, FrameworkApi.get_test_case_features, 111)
diff --git a/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py b/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
index b0f800a4d..ccf64066a 100644
--- a/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
+++ b/yardstick/vTC/apexlake/tests/benchmarking_unit_test.py
@@ -18,59 +18,59 @@ __author__ = 'vmriccox'
import unittest
import mock
from experimental_framework.benchmarking_unit import BenchmarkingUnit
-from experimental_framework.data_manager import DataManager
+# from experimental_framework.data_manager import DataManager
from experimental_framework.deployment_unit import DeploymentUnit
import experimental_framework.common as common
from experimental_framework.benchmarks.rfc2544_throughput_benchmark import \
RFC2544ThroughputBenchmark
-class DummyDataManager(DataManager):
-
- def __init__(self, experiment_directory):
- self.experiment_directory = experiment_directory
- self.experiments = dict()
- self.new_exp_counter = 0
- self.add_bench_counter = 0
- self.close_experiment_1_counter = 0
- self.close_experiment_2_counter = 0
- self.generate_csv_counter = 0
-
- def create_new_experiment(self, experiment_name, get_counter=None):
- if not get_counter:
- self.new_exp_counter += 1
- else:
- return self.new_exp_counter
-
- def add_benchmark(self, experiment_name, benchmark_name, get_counter=None):
- if not get_counter:
- self.add_bench_counter += 1
- else:
- return self.add_bench_counter
-
- def close_experiment(self, experiment, get_counter=None):
- if get_counter:
- return [self.close_experiment_1_counter,
- self.close_experiment_2_counter]
- if experiment == 'VTC_base_single_vm_wait_1':
- self.close_experiment_1_counter += 1
- if experiment == 'VTC_base_single_vm_wait_2':
- self.close_experiment_2_counter += 1
-
- def generate_result_csv_file(self, get_counter=None):
- if get_counter:
- return self.generate_csv_counter
- else:
- self.generate_csv_counter += 1
-
- def add_metadata(self, experiment_name, metadata):
- pass
-
- def add_configuration(self, experiment_name, configuration):
- pass
-
- def add_data_points(self, experiment_name, benchmark_name, result):
- pass
+# class DummyDataManager(DataManager):
+#
+# def __init__(self, experiment_directory):
+# self.experiment_directory = experiment_directory
+# self.experiments = dict()
+# self.new_exp_counter = 0
+# self.add_bench_counter = 0
+# self.close_experiment_1_counter = 0
+# self.close_experiment_2_counter = 0
+# self.generate_csv_counter = 0
+#
+# def create_new_experiment(self, experiment_name, get_counter=None):
+# if not get_counter:
+# self.new_exp_counter += 1
+# else:
+# return self.new_exp_counter
+#
+# def add_benchmark(self, experiment_name, benchmark_name, get_counter=None):
+# if not get_counter:
+# self.add_bench_counter += 1
+# else:
+# return self.add_bench_counter
+#
+# def close_experiment(self, experiment, get_counter=None):
+# if get_counter:
+# return [self.close_experiment_1_counter,
+# self.close_experiment_2_counter]
+# if experiment == 'VTC_base_single_vm_wait_1':
+# self.close_experiment_1_counter += 1
+# if experiment == 'VTC_base_single_vm_wait_2':
+# self.close_experiment_2_counter += 1
+#
+# def generate_result_csv_file(self, get_counter=None):
+# if get_counter:
+# return self.generate_csv_counter
+# else:
+# self.generate_csv_counter += 1
+#
+# def add_metadata(self, experiment_name, metadata):
+# pass
+#
+# def add_configuration(self, experiment_name, configuration):
+# pass
+#
+# def add_data_points(self, experiment_name, benchmark_name, result):
+# pass
class Dummy_2544(RFC2544ThroughputBenchmark):
@@ -122,12 +122,13 @@ class TestBenchmarkingUnit(unittest.TestCase):
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.heat.'
'get_all_heat_templates')
- def test___init__(self, mock_heat, mock_dep_unit, mock_data_manager,
+ def test___init__(self, mock_heat, mock_dep_unit,
+ # mock_data_manager,
mock_temp_dir, mock_time):
mock_heat.return_value = list()
mock_time.return_value = '12345'
@@ -152,7 +153,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
benchmarks)
self.assertEqual(bu.required_benchmarks, benchmarks)
bu.heat_template_parameters = heat_template_parameters
- mock_data_manager.assert_called_once_with('tests/data/results/12345')
+ # mock_data_manager.assert_called_once_with('tests/data/results/12345')
mock_dep_unit.assert_called_once_with(openstack_credentials)
mock_heat.assert_called_once_with('tests/data/results/', '.ext')
@@ -160,13 +161,14 @@ class TestBenchmarkingUnit(unittest.TestCase):
'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.'
'heat.get_all_heat_templates')
def test_initialize_for_success(self, mock_heat, mock_dep_unit,
- mock_data_manager, mock_temp_dir,
+ # mock_data_manager,
+ mock_temp_dir,
mock_time, mock_rfc2544):
mock_heat.return_value = list()
mock_time.return_value = '12345'
@@ -204,21 +206,22 @@ class TestBenchmarkingUnit(unittest.TestCase):
self.assertTrue(len(bu.benchmarks) == 1)
self.assertEqual(bu.benchmarks[0].__class__,
Dummy_2544)
- self.assertEqual(bu.data_manager.create_new_experiment('', True), 2)
- self.assertEqual(bu.data_manager.add_benchmark('', '', True), 2)
+ # self.assertEqual(bu.data_manager.create_new_experiment('', True), 2)
+ # self.assertEqual(bu.data_manager.add_benchmark('', '', True), 2)
@mock.patch('experimental_framework.benchmarks.'
'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.'
'heat.get_all_heat_templates')
def test_finalize_for_success(
- self, mock_heat, mock_dep_unit, mock_data_manager, mock_temp_dir,
- mock_time, mock_rfc2544):
+ self, mock_heat, mock_dep_unit,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544):
mock_heat.return_value = list()
mock_time.return_value = '12345'
mock_temp_dir.return_value = 'tests/data/test_templates/'
@@ -252,7 +255,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
'VTC_base_single_vm_wait_2.yaml']
bu.finalize()
# self.assertEqual(bu.data_manager.close_experiment('', True), [1, 1])
- self.assertEqual(bu.data_manager.generate_result_csv_file(True), 1)
+ # self.assertEqual(bu.data_manager.generate_result_csv_file(True), 1)
@mock.patch('experimental_framework.common.push_data_influxdb')
@mock.patch('experimental_framework.common.LOG')
@@ -260,14 +263,15 @@ class TestBenchmarkingUnit(unittest.TestCase):
'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.'
'heat.get_all_heat_templates')
def test_run_benchmarks_for_success(self, mock_heat, mock_common_dep_unit,
- mock_dep_unit, mock_data_manager,
+ mock_dep_unit,
+ # mock_data_manager,
mock_temp_dir, mock_time,
mock_rfc2544, mock_log, mock_influx):
mock_heat.return_value = list()
@@ -301,7 +305,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
heat_template_parameters,
iterations,
benchmarks)
- bu.data_manager = DummyDataManager('tests/data/results/12345')
+ # bu.data_manager = DummyDataManager('tests/data/results/12345')
bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
'VTC_base_single_vm_wait_2.yaml']
bu.benchmarks = [Dummy_2544('dummy', {'param1': 'val1'})]
@@ -320,15 +324,16 @@ class TestBenchmarkingUnit(unittest.TestCase):
'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.'
'heat.get_all_heat_templates')
def test_run_benchmarks_2_for_success(
self, mock_heat, mock_common_dep_unit, mock_dep_unit,
- mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544,
mock_log):
mock_heat.return_value = list()
mock_time.return_value = '12345'
@@ -358,7 +363,7 @@ class TestBenchmarkingUnit(unittest.TestCase):
heat_template_parameters,
iterations,
benchmarks)
- bu.data_manager = DummyDataManager('tests/data/results/12345')
+ # bu.data_manager = DummyDataManager('tests/data/results/12345')
bu.template_files = ['VTC_base_single_vm_wait_1.yaml',
'VTC_base_single_vm_wait_2.yaml']
bu.benchmarks = [Dummy_2544('dummy', dict())]
@@ -373,15 +378,16 @@ class TestBenchmarkingUnit(unittest.TestCase):
'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.'
'heat.get_all_heat_templates')
def test_get_benchmark_name_for_success(
self, mock_heat, mock_common_dep_unit, mock_dep_unit,
- mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544,
mock_log):
mock_heat.return_value = list()
mock_time.return_value = '12345'
@@ -427,15 +433,16 @@ class TestBenchmarkingUnit(unittest.TestCase):
'rfc2544_throughput_benchmark', side_effect=Dummy_2544)
@mock.patch('time.time')
@mock.patch('experimental_framework.common.get_template_dir')
- @mock.patch('experimental_framework.data_manager.DataManager',
- side_effect=DummyDataManager)
+ # @mock.patch('experimental_framework.data_manager.DataManager',
+ # side_effect=DummyDataManager)
@mock.patch('experimental_framework.common.DEPLOYMENT_UNIT')
@mock.patch('experimental_framework.deployment_unit.DeploymentUnit')
@mock.patch('experimental_framework.benchmarking_unit.'
'heat.get_all_heat_templates')
def test_get_required_benchmarks_for_success(
self, mock_heat, mock_common_dep_unit, mock_dep_unit,
- mock_data_manager, mock_temp_dir, mock_time, mock_rfc2544,
+ # mock_data_manager,
+ mock_temp_dir, mock_time, mock_rfc2544,
mock_log):
mock_heat.return_value = list()
mock_time.return_value = '12345'
diff --git a/yardstick/vTC/apexlake/tests/common_test.py b/yardstick/vTC/apexlake/tests/common_test.py
index 2ce6f7717..293754b16 100644
--- a/yardstick/vTC/apexlake/tests/common_test.py
+++ b/yardstick/vTC/apexlake/tests/common_test.py
@@ -137,7 +137,7 @@ class TestCommonInit(unittest.TestCase):
self.assertEqual(common.TEMPLATE_NAME, 'vTC.yaml')
self.assertEqual(common.RESULT_DIR, '/tmp/apexlake/results/')
self.assertEqual(common.ITERATIONS, 1)
- mock_makedirs.assert_called_once_with('/tmp/apexlake/heat_templates/')
+ # mock_makedirs.assert_called_once_with('/tmp/apexlake/heat_templates/')
class TestCommonInit2(unittest.TestCase):
diff --git a/yardstick/vTC/apexlake/tests/experiment_test.py b/yardstick/vTC/apexlake/tests/experiment_test.py
deleted file mode 100644
index 47d1fbb77..000000000
--- a/yardstick/vTC/apexlake/tests/experiment_test.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'gpetralx'
-
-import unittest
-from experimental_framework import data_manager
-
-
-class TestExperiment(unittest.TestCase):
- def setUp(self):
- self.exp = data_manager.Experiment('experiment_1')
-
- def tearDown(self):
- pass
-
- def test_add_experiment_metadata(self):
- with self.assertRaises(ValueError):
- self.exp.add_experiment_metadata('metadata')
-
- metadata = {
- 'item_1': 'value_1',
- 'item_2': 'value_2',
- 'item_3': 'value_3'
- }
- self.exp.add_experiment_metadata(metadata)
- self.assertDictEqual(metadata, self.exp._metadata)
- self.assertDictEqual(metadata, self.exp.get_metadata())
-
- def test_experiment_configuration(self):
- with self.assertRaises(ValueError):
- self.exp.add_experiment_configuration('configuration')
- configuration = {
- 'item_1': 'value_1',
- 'item_2': 'value_2',
- 'item_3': 'value_3'
- }
- self.exp.add_experiment_configuration(configuration)
- self.assertDictEqual(configuration, self.exp._configuration)
- self.assertDictEqual(configuration, self.exp.get_configuration())
-
- def test_add_benchmark(self):
- with self.assertRaises(ValueError):
- self.exp.add_benchmark(1)
- self.exp.add_benchmark('benchmark_1')
- self.assertListEqual(list(), self.exp._benchmarks['benchmark_1'])
-
- def test_add_datapoint(self):
- with self.assertRaises(ValueError):
- self.exp.add_data_point('benchmark_1', 'datapoint')
-
- data_point_1 = {
- 'point_1': 'value_1',
- 'point_2': 'value_2',
- 'point_3': 'value_3'
- }
-
- with self.assertRaises(ValueError):
- self.exp.add_data_point('benchmark_1', data_point_1)
-
- self.exp.add_benchmark('benchmark_1')
- self.exp.add_data_point('benchmark_1', data_point_1)
- self.assertListEqual([data_point_1],
- self.exp._benchmarks['benchmark_1'])
-
- def test_get_data_points(self):
- self.assertListEqual(list(), self.exp.get_data_points('benchmark_1'))
- data_point_1 = {
- 'point_1': 'value_1',
- 'point_2': 'value_2',
- 'point_3': 'value_3'
- }
- self.exp.add_benchmark('benchmark_1')
- self.exp.add_data_point('benchmark_1', data_point_1)
- self.assertListEqual([data_point_1],
- self.exp.get_data_points('benchmark_1'))
-
- def test_get_benchmarks(self):
- self.exp.add_benchmark('benchmark_1')
- self.exp.add_benchmark('benchmark_2')
- self.exp.add_benchmark('benchmark_3')
- expected = ['benchmark_3', 'benchmark_2', 'benchmark_1']
- self.assertListEqual(expected, self.exp.get_benchmarks())
diff --git a/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py b/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py
index cdcce37e3..463035743 100644
--- a/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py
+++ b/yardstick/vTC/apexlake/tests/instantiation_validation_noisy_bench_test.py
@@ -78,14 +78,14 @@ class InstantiationValidationInitTest(unittest.TestCase):
expected['allowed_values'][mut.NUMBER_OF_CORES] = \
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
expected['allowed_values'][mut.AMOUNT_OF_RAM] = \
- ['250M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
+ ['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
'10G']
expected['default_values']['throughput'] = '1'
expected['default_values']['vlan_sender'] = '-1'
expected['default_values']['vlan_receiver'] = '-1'
expected['default_values'][mut.NUM_OF_NEIGHBORS] = '1'
expected['default_values'][mut.NUMBER_OF_CORES] = '1'
- expected['default_values'][mut.AMOUNT_OF_RAM] = '250M'
+ expected['default_values'][mut.AMOUNT_OF_RAM] = '256M'
output = self.iv.get_features()
self.assertEqual(expected['description'], output['description'])