summaryrefslogtreecommitdiffstats
path: root/yardstick/vTC
diff options
context:
space:
mode:
authorVincenzo Riccobene <vincenzox.m.riccobene@intel.com>2015-12-18 16:03:29 +0000
committerJörgen Karlsson <jorgen.w.karlsson@ericsson.com>2015-12-23 14:14:00 +0000
commitfa490948dd370b69e821784e12f67299c7fb0f26 (patch)
tree94a180c30485bb88e71f2e9d02ff150374ebbe1a /yardstick/vTC
parent52d1b3111f37a6b8bc2b6a18e3443dc29fcb943b (diff)
Add API to configure and execute ApexLake
Add python module providing API for Yardstick to configure and execute ApexLake and the test cases for the virtual Traffic Classifier. Includes documentation. JIRA: YARDSTICK-215 Change-Id: I87af59c715d789ac78c836123e9ed3d8e0036eec Signed-off-by: Vincenzo Riccobene <vincenzo.m.riccobene@intel.com> Signed-off-by: Vincenzo Riccobene <vincenzox.m.riccobene@intel.com>
Diffstat (limited to 'yardstick/vTC')
-rw-r--r--yardstick/vTC/apexlake/docs/source/api.rst5
-rw-r--r--yardstick/vTC/apexlake/experimental_framework/api.py137
-rw-r--r--yardstick/vTC/apexlake/tests/api_test.py142
3 files changed, 284 insertions, 0 deletions
diff --git a/yardstick/vTC/apexlake/docs/source/api.rst b/yardstick/vTC/apexlake/docs/source/api.rst
new file mode 100644
index 000000000..38085900b
--- /dev/null
+++ b/yardstick/vTC/apexlake/docs/source/api.rst
@@ -0,0 +1,5 @@
+.. automodule:: experimental_framework.api
+ :members:
+ :undoc-members:
+ :inherited-members:
+ :show-inheritance:
diff --git a/yardstick/vTC/apexlake/experimental_framework/api.py b/yardstick/vTC/apexlake/experimental_framework/api.py
new file mode 100644
index 000000000..b9e806157
--- /dev/null
+++ b/yardstick/vTC/apexlake/experimental_framework/api.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import experimental_framework.benchmarking_unit as b_unit
+from experimental_framework import heat_template_generation, common
+
+
class FrameworkApi(object):
    """Facade exposed to Yardstick for configuring and running ApexLake.

    All methods are static; the class only namespaces the API entry
    points and delegates to the experimental_framework modules.
    """

    @staticmethod
    def init():
        """
        Initializes the Framework (delegates to common.init in API mode).

        :return: None
        """
        common.init(api=True)

    @staticmethod
    def get_available_test_cases():
        """
        Returns a list of available test cases.
        This list includes modules developed by the user, if any.
        Each test case is returned as a string that represents the full name
        of the test case and that can be used to get more information
        calling get_test_case_features(test_case_name)

        :return: list of strings
        """
        return b_unit.BenchmarkingUnit.get_available_test_cases()

    @staticmethod
    def get_test_case_features(test_case):
        """
        Returns the features (description, requested parameters,
        allowed values, etc.) for a specified test case.

        :param test_case: name of the test case (string)
                          The string represents the test case and can be
                          obtained calling "get_available_test_cases()"
                          method.

        :return: dict() containing the features of the test case

        :raise ValueError: if test_case is not a string
        """
        if not isinstance(test_case, str):
            raise ValueError('The provided test_case parameter has to be '
                             'a string')
        # Only one name is requested, so the single matching benchmark is
        # taken from the returned list and queried for its features.
        benchmark = b_unit.BenchmarkingUnit.get_required_benchmarks(
            [test_case])[0]
        return benchmark.get_features()

    @staticmethod
    def execute_framework(
            test_cases,
            iterations,
            heat_template,
            heat_template_parameters,
            deployment_configuration,
            openstack_credentials
    ):
        """
        Executes the framework according to the inputs

        :param test_cases: Test cases to be run on the workload
                           (dict() of dict())
                           Each string represents a test case and it is one
                           of the strings provided by the
                           "get_available_test_cases()" function output.

        :param iterations: Number of cycles to be executed (int)

        :param heat_template: (string) File name of the heat template of the
                              workload to be deployed. It contains the
                              parameters to be evaluated in the form of
                              #parameter_name. (See heat_templates/vTC.yaml
                              as example).

        :param heat_template_parameters: (dict) Parameters to be provided
                              as input to the heat template.
                              See http://docs.openstack.org/developer/heat/
                              template_guide/hot_guide.html - section
                              "Template input parameters" for further info.

        :param deployment_configuration: ( dict[string] = list(strings) ) )
                              Dictionary of parameters representing the
                              deployment configuration of the workload
                              The key is a string corresponding to the name
                              of the parameter, the value is a list of
                              strings representing the value to be assumed
                              by a specific param.
                              The parameters are user defined: they have to
                              correspond to the place holders
                              (#parameter_name) specified in the heat
                              template.

        :param openstack_credentials: (dict) Credentials of the OpenStack
                              deployment, validated via
                              common.InputValidation.validate_os_credentials.
                              NOTE(review): the required keys are defined in
                              the common module - confirm there.

        :return: None
        """

        # Input Validation
        common.InputValidation.validate_os_credentials(openstack_credentials)
        credentials = openstack_credentials
        msg = 'The provided heat_template does not exist'
        template = "{}{}".format(common.get_template_dir(), heat_template)
        common.InputValidation.validate_file_exist(template, msg)
        msg = 'The provided iterations variable must be an integer value'
        common.InputValidation.validate_integer(iterations, msg)
        msg = 'The provided heat_template_parameters variable must be a ' \
              'dictionary'
        common.InputValidation.validate_dictionary(heat_template_parameters,
                                                   msg)
        # Generate one concrete heat template per deployment configuration
        # (the #parameter_name place holders get substituted).
        log_msg = "Generation of all the heat templates " \
                  "required by the experiment"
        common.LOG.info(log_msg)
        heat_template_generation.generates_templates(heat_template,
                                                     deployment_configuration)
        benchmarking_unit = \
            b_unit.BenchmarkingUnit(
                heat_template, credentials, heat_template_parameters,
                iterations, test_cases)
        try:
            common.LOG.info("Benchmarking Unit initialization")
            benchmarking_unit.initialize()
            common.LOG.info("Benchmarking Unit Running")
            benchmarking_unit.run_benchmarks()
        finally:
            # Finalization always runs so deployed resources are cleaned up
            # even when a benchmark raises.
            common.LOG.info("Benchmarking Unit Finalization")
            benchmarking_unit.finalize()
diff --git a/yardstick/vTC/apexlake/tests/api_test.py b/yardstick/vTC/apexlake/tests/api_test.py
new file mode 100644
index 000000000..51762801b
--- /dev/null
+++ b/yardstick/vTC/apexlake/tests/api_test.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import unittest
+import mock
+import os
+import experimental_framework.common as common
+from experimental_framework.api import FrameworkApi
+from experimental_framework.benchmarking_unit import BenchmarkingUnit
+import experimental_framework.benchmarks.\
+ instantiation_validation_benchmark as iv
+
+
class DummyBenchmarkingUnit(BenchmarkingUnit):
    """Test double providing canned values for the BenchmarkingUnit
    static API (plugged in through mock.patch side_effect below)."""

    def __init__(self):
        BenchmarkingUnit.__init__(self)

    @staticmethod
    def get_available_test_cases():
        # Canned names checked by test_get_available_test_cases_for_success.
        return ['BenchA', 'BenchB']

    @staticmethod
    def get_required_benchmarks(required_benchmarks):
        # BASE_DIR is set before instantiating the benchmark
        # (NOTE(review): presumably required by the benchmark's internal
        # path handling - confirm against the benchmark module).
        common.BASE_DIR = "base_dir/"
        return [iv.InstantiationValidationBenchmark('benchmark', dict())]
+
+
class DummyBenchmarkingUnit2(BenchmarkingUnit):
    """Spy double that counts lifecycle calls made by
    FrameworkApi.execute_framework (initialize/run/finalize)."""

    # Class-level call counters inspected after the framework run.
    counter_init = 0
    counter_finalize = 0
    counter_run = 0

    def __init__(self, base_heat_template, credentials,
                 heat_template_parameters, iterations, test_cases):
        # Bug fix: the original reset DummyBenchmarkingUnit.counter_* (a
        # different class), so this spy's own counters were never cleared
        # on construction. Reset this class's counters instead.
        DummyBenchmarkingUnit2.counter_init = 0
        DummyBenchmarkingUnit2.counter_finalize = 0
        DummyBenchmarkingUnit2.counter_run = 0

    def initialize(self):
        # Record one initialize() call.
        DummyBenchmarkingUnit2.counter_init += 1

    def run_benchmarks(self):
        # Record one run_benchmarks() call.
        DummyBenchmarkingUnit2.counter_run += 1

    def finalize(self):
        # Record one finalize() call.
        DummyBenchmarkingUnit2.counter_finalize += 1
+
+
class TestGeneratesTemplate(unittest.TestCase):
    # NOTE(review): despite the name (apparently copied from the heat
    # template generation tests) this class exercises FrameworkApi.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @mock.patch('experimental_framework.common.init')
    def test_init_for_success(self, mock_init):
        # init() must delegate to common.init in API mode.
        FrameworkApi.init()
        mock_init.assert_called_once_with(api=True)

    @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
                'get_available_test_cases',
                side_effect=DummyBenchmarkingUnit.get_available_test_cases)
    def test_get_available_test_cases_for_success(self, mock_bench):
        # The API must pass through the BenchmarkingUnit result unchanged.
        expected = ['BenchA', 'BenchB']
        output = FrameworkApi.get_available_test_cases()
        self.assertEqual(expected, output)

    @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit.'
                'get_required_benchmarks',
                side_effect=DummyBenchmarkingUnit.get_required_benchmarks)
    def test_get_test_case_features_for_success(self, mock_get_req_bench):
        # Expected feature dict of InstantiationValidationBenchmark.
        # NOTE(review): map() yields lists only on Python 2; under Python 3
        # these would be map objects and the equality check would fail -
        # confirm the target interpreter.
        expected = dict()
        expected['description'] = 'Instantiation Validation Benchmark'
        expected['parameters'] = [
            iv.THROUGHPUT,
            iv.VLAN_SENDER,
            iv.VLAN_RECEIVER]
        expected['allowed_values'] = dict()
        expected['allowed_values'][iv.THROUGHPUT] = \
            map(str, range(0, 100))
        expected['allowed_values'][iv.VLAN_SENDER] = \
            map(str, range(-1, 4096))
        expected['allowed_values'][iv.VLAN_RECEIVER] = \
            map(str, range(-1, 4096))
        expected['default_values'] = dict()
        expected['default_values'][iv.THROUGHPUT] = '1'
        expected['default_values'][iv.VLAN_SENDER] = '-1'
        expected['default_values'][iv.VLAN_RECEIVER] = '-1'

        test_case = 'instantiation_validation_benchmark.' \
                    'InstantiationValidationBenchmark'
        output = FrameworkApi.get_test_case_features(test_case)
        self.assertEqual(expected, output)

    def test____for_failure(self):
        # NOTE(review): oddly named - a non-string test case name must
        # raise ValueError.
        self.assertRaises(
            ValueError, FrameworkApi.get_test_case_features, 111)

    @mock.patch('experimental_framework.common.LOG')
    @mock.patch('experimental_framework.common.get_credentials')
    @mock.patch('experimental_framework.heat_template_generation.'
                'generates_templates')
    @mock.patch('experimental_framework.benchmarking_unit.BenchmarkingUnit',
                side_effect=DummyBenchmarkingUnit2)
    def test_execute_framework_for_success(self, mock_b_unit, mock_heat,
                                           mock_credentials, mock_log):
        # Point the framework at the pre-generated template fixtures.
        common.TEMPLATE_DIR = "{}/{}/".format(
            os.getcwd(), 'tests/data/generated_templates'
        )

        test_cases = dict()
        iterations = 1
        heat_template = 'VTC_base_single_vm_wait.tmp'
        heat_template_parameters = dict()
        deployment_configuration = ''
        openstack_credentials = dict()
        openstack_credentials['ip_controller'] = ''
        openstack_credentials['heat_url'] = ''
        openstack_credentials['user'] = ''
        openstack_credentials['password'] = ''
        openstack_credentials['auth_uri'] = ''
        openstack_credentials['project'] = ''
        # Smoke test: with valid inputs and the BenchmarkingUnit replaced
        # by a spy, execute_framework must run to completion.
        FrameworkApi.execute_framework(
            test_cases, iterations, heat_template,
            heat_template_parameters, deployment_configuration,
            openstack_credentials)