Diffstat (limited to 'tests/unit/benchmark')
-rw-r--r--  tests/unit/benchmark/contexts/test_dummy.py | 1
-rw-r--r--  tests/unit/benchmark/contexts/test_heat.py | 33
-rw-r--r--  tests/unit/benchmark/contexts/test_model.py | 5
-rw-r--r--  tests/unit/benchmark/contexts/test_node.py | 2
-rw-r--r--  tests/unit/benchmark/core/__init__.py | 0
-rw-r--r--  tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml | 13
-rw-r--r--  tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml | 15
-rw-r--r--  tests/unit/benchmark/core/test_plugin.py | 102
-rw-r--r--  tests/unit/benchmark/core/test_task.py | 168
-rw-r--r--  tests/unit/benchmark/core/test_testcase.py | 45
-rw-r--r--  tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml | 16
-rw-r--r--  tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml | 18
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py | 57
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_general.py | 10
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_process.py | 8
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_basemonitor.py | 18
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_baseoperation.py | 19
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py | 17
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_director.py | 39
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_command.py | 28
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_general.py | 17
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_process.py | 9
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_operation_general.py | 11
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_result_checker_general.py | 30
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_scenario_general.py | 11
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_serviceha.py | 8
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_cachestat.py | 19
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_computecapacity.py | 13
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_cpuload.py | 3
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_cyclictest.py | 21
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_lmbench.py | 21
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_memload.py | 18
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_plugintest.py | 10
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_ramspeed.py | 27
-rw-r--r--  tests/unit/benchmark/scenarios/compute/test_unixbench.py | 16
-rw-r--r--  tests/unit/benchmark/scenarios/dummy/test_dummy.py | 1
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_iperf3.py | 23
-rwxr-xr-x  tests/unit/benchmark/scenarios/networking/test_netperf.py | 13
-rwxr-xr-x  tests/unit/benchmark/scenarios/networking/test_netperf_node.py | 13
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_netutilization.py | 4
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_networkcapacity.py | 27
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_ping.py | 5
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_ping6.py | 52
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_pktgen.py | 11
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 13
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_sfc.py | 6
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vsperf.py | 26
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py | 8
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py | 9
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py | 1
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py | 1
-rw-r--r--  tests/unit/benchmark/scenarios/parser/test_parser.py | 14
-rw-r--r--  tests/unit/benchmark/scenarios/storage/test_fio.py | 22
-rw-r--r--  tests/unit/benchmark/scenarios/storage/test_storagecapacity.py | 44
-rw-r--r--  tests/unit/benchmark/scenarios/storage/test_storperf.py | 48
55 files changed, 886 insertions, 303 deletions
diff --git a/tests/unit/benchmark/contexts/test_dummy.py b/tests/unit/benchmark/contexts/test_dummy.py
index 5214e6630..1a54035df 100644
--- a/tests/unit/benchmark/contexts/test_dummy.py
+++ b/tests/unit/benchmark/contexts/test_dummy.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.contexts.dummy
+from __future__ import absolute_import
import unittest
from yardstick.benchmark.contexts import dummy
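Note: nearly every file in this change gains `from __future__ import absolute_import`, which makes Python 2 resolve bare imports the way Python 3 does: absolute first, never a sibling module of the same name. A minimal sketch of the difference, assuming a hypothetical package that happens to contain its own json.py:

    # pkg/mymod.py -- hypothetical package layout for illustration
    from __future__ import absolute_import

    # With the future import, this always binds the stdlib json,
    # even if a sibling file pkg/json.py exists. Without it,
    # Python 2 would silently import pkg/json.py instead.
    import json

    print(json.dumps({"ok": True}))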
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/tests/unit/benchmark/contexts/test_heat.py
index f891b0a5f..f8f349205 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/tests/unit/benchmark/contexts/test_heat.py
@@ -11,13 +11,21 @@
# Unittest for yardstick.benchmark.contexts.heat
-import mock
+from __future__ import absolute_import
+
+import logging
+import os
import unittest
+import uuid
+
+import mock
-from yardstick.benchmark.contexts import model
from yardstick.benchmark.contexts import heat
+LOG = logging.getLogger(__name__)
+
+
class HeatContextTestCase(unittest.TestCase):
def setUp(self):
@@ -39,6 +47,8 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(self.test_context._user)
self.assertIsNone(self.test_context.template_file)
self.assertIsNone(self.test_context.heat_parameters)
+ self.assertIsNotNone(self.test_context.key_uuid)
+ self.assertIsNotNone(self.test_context.key_filename)
@mock.patch('yardstick.benchmark.contexts.heat.PlacementGroup')
@mock.patch('yardstick.benchmark.contexts.heat.Network')
@@ -55,6 +65,7 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.init(attrs)
+ self.assertEqual(self.test_context.name, "foo")
self.assertEqual(self.test_context.keypair_name, "foo-key")
self.assertEqual(self.test_context.secgroup_name, "foo-secgroup")
@@ -66,17 +77,29 @@ class HeatContextTestCase(unittest.TestCase):
'bar', self.test_context, networks['bar'])
self.assertTrue(len(self.test_context.networks) == 1)
- mock_server.assert_called_with('baz', self.test_context, servers['baz'])
+ mock_server.assert_called_with('baz', self.test_context,
+ servers['baz'])
self.assertTrue(len(self.test_context.servers) == 1)
+ if os.path.exists(self.test_context.key_filename):
+ try:
+ os.remove(self.test_context.key_filename)
+ os.remove(self.test_context.key_filename + ".pub")
+ except OSError:
+ LOG.exception("key_filename: %s",
+ self.test_context.key_filename)
+
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test__add_resources_to_template_no_servers(self, mock_template):
self.test_context.keypair_name = "foo-key"
self.test_context.secgroup_name = "foo-secgroup"
+ self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
self.test_context._add_resources_to_template(mock_template)
- mock_template.add_keypair.assert_called_with("foo-key")
+ mock_template.add_keypair.assert_called_with(
+ "foo-key",
+ "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b")
mock_template.add_security_group.assert_called_with("foo-secgroup")
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
@@ -105,6 +128,8 @@ class HeatContextTestCase(unittest.TestCase):
self.mock_context.name = 'bar'
self.mock_context.stack.outputs = {'public_ip': '127.0.0.1',
'private_ip': '10.0.0.1'}
+ self.mock_context.key_uuid = uuid.uuid4()
+
attr_name = {'name': 'foo.bar',
'public_ip_attr': 'public_ip',
'private_ip_attr': 'private_ip'}
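Note: the new assertions check that HeatContext now derives a per-context key pair (key_uuid, key_filename), and the test cleans up the generated key files afterwards. A rough sketch of that cleanup pattern; the paths below are illustrative assumptions, not Yardstick's actual defaults:

    import logging
    import os
    import uuid

    LOG = logging.getLogger(__name__)

    # Hypothetical stand-ins for what the context derives.
    key_uuid = uuid.uuid4()
    key_filename = "/tmp/yardstick/foo-%s" % key_uuid

    # Remove both the private key and its .pub counterpart;
    # log but do not fail if removal races with something else.
    if os.path.exists(key_filename):
        try:
            os.remove(key_filename)
            os.remove(key_filename + ".pub")
        except OSError:
            LOG.exception("key_filename: %s", key_filename)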
diff --git a/tests/unit/benchmark/contexts/test_model.py b/tests/unit/benchmark/contexts/test_model.py
index a1978e320..537a8c008 100644
--- a/tests/unit/benchmark/contexts/test_model.py
+++ b/tests/unit/benchmark/contexts/test_model.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.contexts.model
+from __future__ import absolute_import
import mock
import unittest
@@ -119,7 +120,8 @@ class NetworkTestCase(unittest.TestCase):
attrs = {'external_network': 'ext_net'}
test_network = model.Network('foo', self.mock_context, attrs)
- exp_router = model.Router('router', 'foo', self.mock_context, 'ext_net')
+ exp_router = model.Router('router', 'foo', self.mock_context,
+ 'ext_net')
self.assertEqual(test_network.router.stack_name, exp_router.stack_name)
self.assertEqual(test_network.router.stack_if_name,
@@ -219,4 +221,3 @@ class ServerTestCase(unittest.TestCase):
user=self.mock_context.user,
key_name=self.mock_context.keypair_name,
scheduler_hints='hints')
-
diff --git a/tests/unit/benchmark/contexts/test_node.py b/tests/unit/benchmark/contexts/test_node.py
index 6939b8551..de5ba7066 100644
--- a/tests/unit/benchmark/contexts/test_node.py
+++ b/tests/unit/benchmark/contexts/test_node.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.contexts.node
+from __future__ import absolute_import
import os
import unittest
@@ -21,6 +22,7 @@ class NodeContextTestCase(unittest.TestCase):
NODES_SAMPLE = "nodes_sample.yaml"
NODES_DUPLICATE_SAMPLE = "nodes_duplicate_sample.yaml"
+
def setUp(self):
self.test_context = node.NodeContext()
diff --git a/tests/unit/benchmark/core/__init__.py b/tests/unit/benchmark/core/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/tests/unit/benchmark/core/__init__.py
diff --git a/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml b/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
new file mode 100644
index 000000000..4933b93ae
--- /dev/null
+++ b/tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml
@@ -0,0 +1,13 @@
+---
+# Huawei US bare daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-nosdn-nofeature-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc037.yaml
+-
+ file_name: opnfv_yardstick_tc043.yaml
+
diff --git a/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml b/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
new file mode 100644
index 000000000..f39df7346
--- /dev/null
+++ b/tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml
@@ -0,0 +1,15 @@
+---
+# Huawei US bare daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-nosdn-nofeature-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc037.yaml
+-
+ file_name: opnfv_yardstick_tc043.yaml
+ task_args:
+ huawei-pod1: '{"host": "node1.LF","target": "node2.LF"}'
+
diff --git a/tests/unit/benchmark/core/test_plugin.py b/tests/unit/benchmark/core/test_plugin.py
new file mode 100644
index 000000000..edc103415
--- /dev/null
+++ b/tests/unit/benchmark/core/test_plugin.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.core.plugin
+from __future__ import absolute_import
+import os
+from os.path import dirname as dirname
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+import unittest
+
+from yardstick.benchmark.core import plugin
+
+
+class Arg(object):
+
+ def __init__(self):
+ # self.input_file = ('plugin/sample_config.yaml',)
+ self.input_file = [
+ os.path.join(os.path.abspath(
+ dirname(dirname(dirname(dirname(dirname(__file__)))))),
+ 'plugin/sample_config.yaml')]
+
+
+@mock.patch('yardstick.benchmark.core.plugin.ssh')
+class pluginTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.result = {}
+
+ def test_install(self, mock_ssh):
+ p = plugin.Plugin()
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ input_file = Arg()
+ p.install(input_file)
+ expected_result = {}
+ self.assertEqual(self.result, expected_result)
+
+ def test_remove(self, mock_ssh):
+ p = plugin.Plugin()
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ input_file = Arg()
+ p.remove(input_file)
+ expected_result = {}
+ self.assertEqual(self.result, expected_result)
+
+ def test_install_setup_run(self, mock_ssh):
+ p = plugin.Plugin()
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ plugins = {
+ "name": "sample"
+ }
+ deployment = {
+ "ip": "10.1.0.50",
+ "user": "root",
+ "password": "root"
+ }
+ plugin_name = plugins.get("name")
+ p._install_setup(plugin_name, deployment)
+ self.assertIsNotNone(p.client)
+
+ p._run(plugin_name)
+ expected_result = {}
+ self.assertEqual(self.result, expected_result)
+
+ def test_remove_setup_run(self, mock_ssh):
+ p = plugin.Plugin()
+ mock_ssh.SSH().execute.return_value = (0, '', '')
+ plugins = {
+ "name": "sample"
+ }
+ deployment = {
+ "ip": "10.1.0.50",
+ "user": "root",
+ "password": "root"
+ }
+ plugin_name = plugins.get("name")
+ p._remove_setup(plugin_name, deployment)
+ self.assertIsNotNone(p.client)
+
+ p._run(plugin_name)
+ expected_result = {}
+ self.assertEqual(self.result, expected_result)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
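Note: the Arg helper above climbs five directory levels with nested dirname() calls to reach the repository root. An equivalent, arguably easier-to-read sketch using a loop; the helper name is illustrative, not part of the patch:

    import os

    def repo_relative(relpath, levels=5):
        """Resolve relpath against the directory `levels` parents up."""
        base = os.path.abspath(__file__)
        for _ in range(levels):
            base = os.path.dirname(base)
        return os.path.join(base, relpath)

    # Same target as the nested-dirname version in the test:
    # <repo-root>/plugin/sample_config.yaml
    input_file = [repo_relative('plugin/sample_config.yaml')]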
diff --git a/tests/unit/benchmark/core/test_task.py b/tests/unit/benchmark/core/test_task.py
new file mode 100644
index 000000000..5dd32ea17
--- /dev/null
+++ b/tests/unit/benchmark/core/test_task.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.core.task
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+import os
+import unittest
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+from yardstick.benchmark.core import task
+
+
+class TaskTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.benchmark.core.task.Context')
+ def test_parse_nodes_host_target_same_context(self, mock_context):
+ nodes = {
+ "host": "node1.LF",
+ "target": "node2.LF"
+ }
+ scenario_cfg = {"nodes": nodes}
+ server_info = {
+ "ip": "10.20.0.3",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ mock_context.get_server.return_value = server_info
+ context_cfg = task.parse_nodes_with_context(scenario_cfg)
+
+ self.assertEqual(context_cfg["host"], server_info)
+ self.assertEqual(context_cfg["target"], server_info)
+
+ @mock.patch('yardstick.benchmark.core.task.Context')
+ @mock.patch('yardstick.benchmark.core.task.base_runner')
+ def test_run(self, mock_base_runner, mock_ctx):
+ scenario = {
+ 'host': 'athena.demo',
+ 'target': 'ares.demo',
+ 'runner': {
+ 'duration': 60,
+ 'interval': 1,
+ 'type': 'Duration'
+ },
+ 'type': 'Ping'
+ }
+
+ t = task.Task()
+ runner = mock.Mock()
+ runner.join.return_value = 0
+ mock_base_runner.Runner.get.return_value = runner
+ t._run([scenario], False, "yardstick.out")
+ self.assertTrue(runner.run.called)
+
+ @mock.patch('yardstick.benchmark.core.task.os')
+ def test_check_precondition(self, mock_os):
+ cfg = {
+ 'precondition': {
+ 'installer_type': 'compass',
+ 'deploy_scenarios': 'os-nosdn',
+ 'pod_name': 'huawei-pod1'
+ }
+ }
+
+ t = task.TaskParser('/opt')
+ mock_os.environ.get.side_effect = ['compass',
+ 'os-nosdn',
+ 'huawei-pod1']
+ result = t._check_precondition(cfg)
+ self.assertTrue(result)
+
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
+ def test_parse_suite_no_constraint_no_args(self, mock_environ):
+ SAMPLE_SCENARIO_PATH = "no_constraint_no_args_scenario_sample.yaml"
+ t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
+ mock_environ.get.side_effect = ['huawei-pod1', 'compass']
+ task_files, task_args, task_args_fnames = t.parse_suite()
+ print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
+ task_args_fnames))
+ self.assertEqual(task_files[0],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ self.assertEqual(task_files[1],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ self.assertEqual(task_args[0], None)
+ self.assertEqual(task_args[1], None)
+ self.assertEqual(task_args_fnames[0], None)
+ self.assertEqual(task_args_fnames[1], None)
+
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
+ def test_parse_suite_no_constraint_with_args(self, mock_environ):
+ SAMPLE_SCENARIO_PATH = "no_constraint_with_args_scenario_sample.yaml"
+ t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
+ mock_environ.get.side_effect = ['huawei-pod1', 'compass']
+ task_files, task_args, task_args_fnames = t.parse_suite()
+ print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
+ task_args_fnames))
+ self.assertEqual(task_files[0],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ self.assertEqual(task_files[1],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ self.assertEqual(task_args[0], None)
+ self.assertEqual(task_args[1],
+ '{"host": "node1.LF","target": "node2.LF"}')
+ self.assertEqual(task_args_fnames[0], None)
+ self.assertEqual(task_args_fnames[1], None)
+
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
+ def test_parse_suite_with_constraint_no_args(self, mock_environ):
+ SAMPLE_SCENARIO_PATH = "with_constraint_no_args_scenario_sample.yaml"
+ t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
+ mock_environ.get.side_effect = ['huawei-pod1', 'compass']
+ task_files, task_args, task_args_fnames = t.parse_suite()
+ print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
+ task_args_fnames))
+ self.assertEqual(task_files[0],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ self.assertEqual(task_files[1],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ self.assertEqual(task_args[0], None)
+ self.assertEqual(task_args[1], None)
+ self.assertEqual(task_args_fnames[0], None)
+ self.assertEqual(task_args_fnames[1], None)
+
+ @mock.patch('yardstick.benchmark.core.task.os.environ')
+ def test_parse_suite_with_constraint_with_args(self, mock_environ):
+ SAMPLE_SCENARIO_PATH = "with_constraint_with_args_scenario_sample.yaml"
+ t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
+ mock_environ.get.side_effect = ['huawei-pod1', 'compass']
+ task_files, task_args, task_args_fnames = t.parse_suite()
+ print("files=%s, args=%s, fnames=%s" % (task_files, task_args,
+ task_args_fnames))
+ self.assertEqual(task_files[0],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+ self.assertEqual(task_files[1],
+ 'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+ self.assertEqual(task_args[0], None)
+ self.assertEqual(task_args[1],
+ '{"host": "node1.LF","target": "node2.LF"}')
+ self.assertEqual(task_args_fnames[0], None)
+ self.assertEqual(task_args_fnames[1], None)
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
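Note: the suite-parsing tests above rely on mock's side_effect accepting a list: each call to the patched os.environ.get consumes the next value in order, so ['huawei-pod1', 'compass'] implies the parser asks for the pod name before the installer type. A standalone sketch of that behaviour:

    try:
        from unittest import mock  # Python 3
    except ImportError:
        import mock                # Python 2 backport

    env = mock.Mock()
    env.get.side_effect = ['huawei-pod1', 'compass']

    # Values come back strictly in call order, regardless of key.
    assert env.get('NODE_NAME') == 'huawei-pod1'
    assert env.get('INSTALLER_TYPE') == 'compass'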
diff --git a/tests/unit/benchmark/core/test_testcase.py b/tests/unit/benchmark/core/test_testcase.py
new file mode 100644
index 000000000..c7da2de7c
--- /dev/null
+++ b/tests/unit/benchmark/core/test_testcase.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.cmd.commands.testcase
+
+from __future__ import absolute_import
+import unittest
+
+from yardstick.benchmark.core import testcase
+
+
+class Arg(object):
+
+ def __init__(self):
+ self.casename = ('opnfv_yardstick_tc001',)
+
+
+class TestcaseUT(unittest.TestCase):
+
+ def test_list_all(self):
+ t = testcase.Testcase()
+ result = t.list_all("")
+ self.assertEqual(result, True)
+
+ def test_show(self):
+ t = testcase.Testcase()
+ casename = Arg()
+ result = t.show(casename)
+ self.assertEqual(result, True)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml b/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
new file mode 100644
index 000000000..8194a2361
--- /dev/null
+++ b/tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml
@@ -0,0 +1,16 @@
+---
+# Huawei US bare daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-nosdn-nofeature-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc037.yaml
+-
+ file_name: opnfv_yardstick_tc043.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+
diff --git a/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml b/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
new file mode 100644
index 000000000..86c9b2800
--- /dev/null
+++ b/tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml
@@ -0,0 +1,18 @@
+---
+# Huawei US bare daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-nosdn-nofeature-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+ file_name: opnfv_yardstick_tc037.yaml
+-
+ file_name: opnfv_yardstick_tc043.yaml
+ constraint:
+ installer: compass
+ pod: huawei-pod1
+ task_args:
+ huawei-pod1: '{"host": "node1.LF","target": "node2.LF"}'
+
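Note: taken together, the four sample suites cover the constraint/task_args matrix the parser must handle: a case with a constraint should only apply when the current installer and pod match, and task_args lets a pod supply scenario arguments. A hedged sketch of that gating logic, inferred from the samples rather than taken from Yardstick's implementation:

    def case_is_enabled(test_case, installer, pod):
        """Return True when the suite entry applies to this deployment."""
        constraint = test_case.get('constraint')
        if not constraint:
            return True  # unconstrained cases always run
        return (constraint.get('installer', installer) == installer and
                constraint.get('pod', pod) == pod)

    case = {'file_name': 'opnfv_yardstick_tc043.yaml',
            'constraint': {'installer': 'compass', 'pod': 'huawei-pod1'}}
    assert case_is_enabled(case, 'compass', 'huawei-pod1')
    assert not case_is_enabled(case, 'apex', 'huawei-pod1')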
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index 340f94cb0..9e2e8b172 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -9,15 +9,20 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+# Unittest for
+# yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal
+from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.attacker import baseattacker
-from yardstick.benchmark.scenarios.availability.attacker import attacker_baremetal
+from yardstick.benchmark.scenarios.availability.attacker import \
+ attacker_baremetal
-@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal'
+ '.subprocess')
class ExecuteShellTestCase(unittest.TestCase):
def test__fun_execute_shell_command_successful(self, mock_subprocess):
@@ -26,34 +31,37 @@ class ExecuteShellTestCase(unittest.TestCase):
exitcode, output = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ def test__fun_execute_shell_command_fail_cmd_exception(self,
+ mock_subprocess):
cmd = "env"
mock_subprocess.check_output.side_effect = RuntimeError
exitcode, output = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
-@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal'
+ '.ssh')
class AttackerBaremetalTestCase(unittest.TestCase):
def setUp(self):
- host = {
- "ipmi_ip": "10.20.0.5",
- "ipmi_user": "root",
- "ipmi_pwd": "123456",
- "ip": "10.20.0.5",
- "user": "root",
- "key_filename": "/root/.ssh/id_rsa"
- }
- self.context = {"node1": host}
- self.attacker_cfg = {
- 'fault_type': 'bear-metal-down',
- 'host': 'node1',
- }
+ host = {
+ "ipmi_ip": "10.20.0.5",
+ "ipmi_user": "root",
+ "ipmi_pwd": "123456",
+ "ip": "10.20.0.5",
+ "user": "root",
+ "key_filename": "/root/.ssh/id_rsa"
+ }
+ self.context = {"node1": host}
+ self.attacker_cfg = {
+ 'fault_type': 'bear-metal-down',
+ 'host': 'node1',
+ }
def test__attacker_baremetal_all_successful(self, mock_ssh):
-
- ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "running", '')
ins.setup()
@@ -61,8 +69,8 @@ class AttackerBaremetalTestCase(unittest.TestCase):
ins.recover()
def test__attacker_baremetal_check_failuer(self, mock_ssh):
-
- ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "error check", '')
ins.setup()
@@ -70,7 +78,8 @@ class AttackerBaremetalTestCase(unittest.TestCase):
self.attacker_cfg["jump_host"] = 'node1'
self.context["node1"]["pwd"] = "123456"
- ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg, self.context)
+ ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "running", '')
ins.setup()
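Note: several hunks above break an over-long @mock.patch target across lines. Python's implicit adjacent-string concatenation makes this safe: the decorator still receives one dotted path. A runnable sketch using a stdlib target instead of the Yardstick module path:

    import os
    import unittest
    try:
        from unittest import mock
    except ImportError:
        import mock

    # Adjacent string literals concatenate at compile time, so the
    # decorator receives the single dotted path 'os.path.exists'
    # despite the line break.
    @mock.patch('os.path'
                '.exists')
    class ExampleTestCase(unittest.TestCase):
        def test_patched(self, mock_exists):
            mock_exists.return_value = True
            self.assertTrue(os.path.exists('/no/such/file'))

    if __name__ == '__main__':
        unittest.main()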
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_general.py b/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
index aa2e0cc4d..322b58391 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_general.py
@@ -12,11 +12,13 @@
# Unittest for yardstick.benchmark.scenarios.availability.attacker
# .attacker_general
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
@mock.patch('yardstick.benchmark.scenarios.availability.attacker.'
'attacker_general.ssh')
class GeneralAttackerServiceTestCase(unittest.TestCase):
@@ -30,10 +32,10 @@ class GeneralAttackerServiceTestCase(unittest.TestCase):
self.context = {"node1": host}
self.attacker_cfg = {
'fault_type': 'general-attacker',
- 'action_parameter':{'process_name':'nova_api'},
- 'rollback_parameter':{'process_name':'nova_api'},
- 'key':'stop-service',
- 'attack_key':'stop-service',
+ 'action_parameter': {'process_name': 'nova_api'},
+ 'rollback_parameter': {'process_name': 'nova_api'},
+ 'key': 'stop-service',
+ 'attack_key': 'stop-service',
'host': 'node1',
}
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
index eb0cce70d..d7771bd33 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_process.py
@@ -9,14 +9,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.attacker.attacker_process
+# Unittest for
+# yardstick.benchmark.scenarios.availability.attacker.attacker_process
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.attacker import baseattacker
-@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_process.ssh')
+
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.attacker.attacker_process.ssh')
class AttackerServiceTestCase(unittest.TestCase):
def setUp(self):
diff --git a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index a20cf8187..7030c7849 100644
--- a/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -9,21 +9,25 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+# Unittest for
+# yardstick.benchmark.scenarios.availability.monitor.monitor_command
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import basemonitor
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.BaseMonitor')
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
+ '.BaseMonitor')
class MonitorMgrTestCase(unittest.TestCase):
def setUp(self):
config = {
'monitor_type': 'openstack-api',
- 'key' : 'service-status'
+ 'key': 'service-status'
}
self.monitor_configs = []
@@ -42,10 +46,12 @@ class MonitorMgrTestCase(unittest.TestCase):
monitorMgr.init_monitors(self.monitor_configs, None)
monitorIns = monitorMgr['service-status']
+
class BaseMonitorTestCase(unittest.TestCase):
class MonitorSimple(basemonitor.BaseMonitor):
__monitor_type__ = "MonitorForTest"
+
def setup(self):
self.monitor_result = False
@@ -65,14 +71,15 @@ class BaseMonitorTestCase(unittest.TestCase):
ins.start_monitor()
ins.wait_monitor()
-
def test__basemonitor_all_successful(self):
ins = self.MonitorSimple(self.monitor_cfg, None)
ins.setup()
ins.run()
ins.verify_SLA()
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.basemonitor.multiprocessing')
+ @mock.patch(
+ 'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
+ '.multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
ins = self.MonitorSimple(self.monitor_cfg, None)
ins.setup()
@@ -87,4 +94,3 @@ class BaseMonitorTestCase(unittest.TestCase):
except Exception:
pass
self.assertIsNone(cls)
-
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseoperation.py b/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
index d85f1e19f..03ec1492b 100644
--- a/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
+++ b/tests/unit/benchmark/scenarios/availability/test_baseoperation.py
@@ -9,26 +9,31 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.operation.baseoperation
+# Unittest for
+# yardstick.benchmark.scenarios.availability.operation.baseoperation
+from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.operation import baseoperation
+from yardstick.benchmark.scenarios.availability.operation import baseoperation
-@mock.patch('yardstick.benchmark.scenarios.availability.operation.baseoperation.BaseOperation')
+
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.operation.baseoperation'
+ '.BaseOperation')
class OperationMgrTestCase(unittest.TestCase):
def setUp(self):
config = {
'operation_type': 'general-operation',
- 'key' : 'service-status'
+ 'key': 'service-status'
}
self.operation_configs = []
self.operation_configs.append(config)
- def test_all_successful(self, mock_operation):
+ def test_all_successful(self, mock_operation):
mgr_ins = baseoperation.OperationMgr()
mgr_ins.init_operations(self.operation_configs, None)
operation_ins = mgr_ins["service-status"]
@@ -59,7 +64,7 @@ class BaseOperationTestCase(unittest.TestCase):
def setUp(self):
self.config = {
'operation_type': 'general-operation',
- 'key' : 'service-status'
+ 'key': 'service-status'
}
def test_all_successful(self):
@@ -70,7 +75,7 @@ class BaseOperationTestCase(unittest.TestCase):
def test_get_script_fullpath(self):
base_ins = baseoperation.BaseOperation(self.config, None)
- base_ins.get_script_fullpath("ha_tools/test.bash");
+ base_ins.get_script_fullpath("ha_tools/test.bash")
def test_get_operation_cls_successful(self):
base_ins = baseoperation.BaseOperation(self.config, None)
diff --git a/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py b/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
index 9972d6b1b..36ce900fb 100644
--- a/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
+++ b/tests/unit/benchmark/scenarios/availability/test_baseresultchecker.py
@@ -12,20 +12,22 @@
# Unittest for yardstick.benchmark.scenarios.availability.result_checker
# .baseresultchecker
+from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.result_checker import baseresultchecker
+from yardstick.benchmark.scenarios.availability.result_checker import \
+ baseresultchecker
@mock.patch('yardstick.benchmark.scenarios.availability.result_checker'
- '.baseresultchecker.BaseResultChecker')
+ '.baseresultchecker.BaseResultChecker')
class ResultCheckerMgrTestCase(unittest.TestCase):
def setUp(self):
config = {
'checker_type': 'general-result-checker',
- 'key' : 'process-checker'
+ 'key': 'process-checker'
}
self.checker_configs = []
@@ -52,6 +54,7 @@ class BaseResultCheckerTestCase(unittest.TestCase):
class ResultCheckeSimple(baseresultchecker.BaseResultChecker):
__result_checker__type__ = "ResultCheckeForTest"
+
def setup(self):
self.success = False
@@ -61,7 +64,7 @@ class BaseResultCheckerTestCase(unittest.TestCase):
def setUp(self):
self.checker_cfg = {
'checker_type': 'general-result-checker',
- 'key' : 'process-checker'
+ 'key': 'process-checker'
}
def test_baseresultchecker_setup_verify_successful(self):
@@ -81,8 +84,10 @@ class BaseResultCheckerTestCase(unittest.TestCase):
path = ins.get_script_fullpath("test.bash")
def test_get_resultchecker_cls_successful(self):
- baseresultchecker.BaseResultChecker.get_resultchecker_cls("ResultCheckeForTest")
+ baseresultchecker.BaseResultChecker.get_resultchecker_cls(
+ "ResultCheckeForTest")
def test_get_resultchecker_cls_fail(self):
with self.assertRaises(RuntimeError):
- baseresultchecker.BaseResultChecker.get_resultchecker_cls("ResultCheckeNotExist")
+ baseresultchecker.BaseResultChecker.get_resultchecker_cls(
+ "ResultCheckeNotExist")
diff --git a/tests/unit/benchmark/scenarios/availability/test_director.py b/tests/unit/benchmark/scenarios/availability/test_director.py
index 06116725d..d01a60e2d 100644
--- a/tests/unit/benchmark/scenarios/availability/test_director.py
+++ b/tests/unit/benchmark/scenarios/availability/test_director.py
@@ -11,24 +11,26 @@
# Unittest for yardstick.benchmark.scenarios.availability.director
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.director import Director
-from yardstick.benchmark.scenarios.availability import actionplayers
@mock.patch('yardstick.benchmark.scenarios.availability.director.basemonitor')
@mock.patch('yardstick.benchmark.scenarios.availability.director.baseattacker')
-@mock.patch('yardstick.benchmark.scenarios.availability.director.baseoperation')
-@mock.patch('yardstick.benchmark.scenarios.availability.director.baseresultchecker')
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.director.baseoperation')
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.director.baseresultchecker')
class DirectorTestCase(unittest.TestCase):
def setUp(self):
self.scenario_cfg = {
'type': "general_scenario",
'options': {
- 'attackers':[{
+ 'attackers': [{
'fault_type': "general-attacker",
'key': "kill-process"}],
'monitors': [{
@@ -36,11 +38,11 @@ class DirectorTestCase(unittest.TestCase):
'key': "service-status"}],
'operations': [{
'operation_type': 'general-operation',
- 'key' : 'service-status'}],
+ 'key': 'service-status'}],
'resultCheckers': [{
'checker_type': 'general-result-checker',
- 'key' : 'process-checker',}],
- 'steps':[
+ 'key': 'process-checker', }],
+ 'steps': [
{
'actionKey': "service-status",
'actionType': "operation",
@@ -57,7 +59,7 @@ class DirectorTestCase(unittest.TestCase):
'actionKey': "service-status",
'actionType': "monitor",
'index': 4},
- ]
+ ]
}
}
host = {
@@ -67,15 +69,19 @@ class DirectorTestCase(unittest.TestCase):
}
self.ctx = {"nodes": {"node1": host}}
- def test_director_all_successful(self, mock_checer, mock_opertion, mock_attacker, mock_monitor):
+ def test_director_all_successful(self, mock_checer, mock_opertion,
+ mock_attacker, mock_monitor):
ins = Director(self.scenario_cfg, self.ctx)
opertion_action = ins.createActionPlayer("operation", "service-status")
attacker_action = ins.createActionPlayer("attacker", "kill-process")
- checker_action = ins.createActionPlayer("resultchecker", "process-checker")
+ checker_action = ins.createActionPlayer("resultchecker",
+ "process-checker")
monitor_action = ins.createActionPlayer("monitor", "service-status")
- opertion_rollback = ins.createActionRollbacker("operation", "service-status")
- attacker_rollback = ins.createActionRollbacker("attacker", "kill-process")
+ opertion_rollback = ins.createActionRollbacker("operation",
+ "service-status")
+ attacker_rollback = ins.createActionRollbacker("attacker",
+ "kill-process")
ins.executionSteps.append(opertion_rollback)
ins.executionSteps.append(attacker_rollback)
@@ -91,13 +97,8 @@ class DirectorTestCase(unittest.TestCase):
ins.verify()
ins.knockoff()
- def test_director_get_wrong_item(self, mock_checer, mock_opertion, mock_attacker, mock_monitor):
+ def test_director_get_wrong_item(self, mock_checer, mock_opertion,
+ mock_attacker, mock_monitor):
ins = Director(self.scenario_cfg, self.ctx)
ins.createActionPlayer("wrong_type", "wrong_key")
ins.createActionRollbacker("wrong_type", "wrong_key")
-
-
-
-
-
-
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
index c8cda7dc7..a84bfd2c5 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -9,14 +9,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_command
+# Unittest for
+# yardstick.benchmark.scenarios.availability.monitor.monitor_command
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_command
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.monitor.monitor_command'
+ '.subprocess')
class ExecuteShellTestCase(unittest.TestCase):
def test__fun_execute_shell_command_successful(self, mock_subprocess):
@@ -25,13 +30,17 @@ class ExecuteShellTestCase(unittest.TestCase):
exitcode, output = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_subprocess):
+ def test__fun_execute_shell_command_fail_cmd_exception(self,
+ mock_subprocess):
cmd = "env"
mock_subprocess.check_output.side_effect = RuntimeError
exitcode, output = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.subprocess')
+
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.monitor.monitor_command'
+ '.subprocess')
class MonitorOpenstackCmdTestCase(unittest.TestCase):
def setUp(self):
@@ -48,7 +57,6 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
'sla': {'max_outage_time': 5}
}
-
def test__monitor_command_monitor_func_successful(self, mock_subprocess):
instance = monitor_command.MonitorOpenstackCmd(self.config, None)
@@ -69,11 +77,15 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
instance._result = {"outage_time": 10}
instance.verify_SLA()
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.ssh')
- def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess):
+ @mock.patch(
+ 'yardstick.benchmark.scenarios.availability.monitor.monitor_command'
+ '.ssh')
+ def test__monitor_command_ssh_monitor_successful(self, mock_ssh,
+ mock_subprocess):
self.config["host"] = "node1"
- instance = monitor_command.MonitorOpenstackCmd(self.config, self.context)
+ instance = monitor_command.MonitorOpenstackCmd(
+ self.config, self.context)
instance.setup()
mock_ssh.SSH().execute.return_value = (0, "0", '')
ret = instance.monitor_func()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
index de7d26cbf..369f6f4f7 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_general.py
@@ -12,6 +12,7 @@
# Unittest for yardstick.benchmark.scenarios.availability.monitor
# .monitor_general
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_general
@@ -22,6 +23,7 @@ from yardstick.benchmark.scenarios.availability.monitor import monitor_general
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
'monitor_general.open')
class GeneralMonitorServiceTestCase(unittest.TestCase):
+
def setUp(self):
host = {
"ip": "10.20.0.5",
@@ -53,23 +55,26 @@ class GeneralMonitorServiceTestCase(unittest.TestCase):
ins.setup()
mock_ssh.SSH().execute.return_value = (0, "running", '')
ins.monitor_func()
- ins._result = {'outage_time' : 0}
+ ins._result = {'outage_time': 0}
ins.verify_SLA()
- def test__monitor_general_all_successful_noparam(self, mock_open, mock_ssh):
- ins = monitor_general.GeneralMonitor(self.monitor_cfg_noparam, self.context)
+ def test__monitor_general_all_successful_noparam(self, mock_open,
+ mock_ssh):
+ ins = monitor_general.GeneralMonitor(
+ self.monitor_cfg_noparam, self.context)
ins.setup()
mock_ssh.SSH().execute.return_value = (0, "running", '')
ins.monitor_func()
- ins._result = {'outage_time' : 0}
+ ins._result = {'outage_time': 0}
ins.verify_SLA()
def test__monitor_general_failure(self, mock_open, mock_ssh):
- ins = monitor_general.GeneralMonitor(self.monitor_cfg_noparam, self.context)
+ ins = monitor_general.GeneralMonitor(
+ self.monitor_cfg_noparam, self.context)
ins.setup()
mock_ssh.SSH().execute.return_value = (1, "error", 'error')
ins.monitor_func()
- ins._result = {'outage_time' : 2}
+ ins._result = {'outage_time': 2}
ins.verify_SLA()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
index dda104b4e..8270405cd 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_process.py
@@ -9,14 +9,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.monitor.monitor_process
+# Unittest for
+# yardstick.benchmark.scenarios.availability.monitor.monitor_process
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_process
-@mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh')
+
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.monitor.monitor_process.ssh')
class MonitorProcessTestCase(unittest.TestCase):
def setUp(self):
@@ -53,4 +57,3 @@ class MonitorProcessTestCase(unittest.TestCase):
ins.monitor_func()
ins._result = {"outage_time": 10}
ins.verify_SLA()
-
diff --git a/tests/unit/benchmark/scenarios/availability/test_operation_general.py b/tests/unit/benchmark/scenarios/availability/test_operation_general.py
index 26cd3f7c4..2c6dc1617 100644
--- a/tests/unit/benchmark/scenarios/availability/test_operation_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_operation_general.py
@@ -12,9 +12,12 @@
# Unittest for yardstick.benchmark.scenarios.availability.operation
# .operation_general
+from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.operation import operation_general
+from yardstick.benchmark.scenarios.availability.operation import \
+ operation_general
+
@mock.patch('yardstick.benchmark.scenarios.availability.operation.'
'operation_general.ssh')
@@ -46,7 +49,7 @@ class GeneralOperaionTestCase(unittest.TestCase):
def test__operation_successful(self, mock_open, mock_ssh):
ins = operation_general.GeneralOperaion(self.operation_cfg,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "success", '')
ins.setup()
ins.run()
@@ -54,7 +57,7 @@ class GeneralOperaionTestCase(unittest.TestCase):
def test__operation_successful_noparam(self, mock_open, mock_ssh):
ins = operation_general.GeneralOperaion(self.operation_cfg_noparam,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "success", '')
ins.setup()
ins.run()
@@ -62,7 +65,7 @@ class GeneralOperaionTestCase(unittest.TestCase):
def test__operation_fail(self, mock_open, mock_ssh):
ins = operation_general.GeneralOperaion(self.operation_cfg,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (1, "failed", '')
ins.setup()
ins.run()
diff --git a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
index bbadf0ac3..c5451fabd 100644
--- a/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_result_checker_general.py
@@ -12,11 +12,13 @@
# Unittest for yardstick.benchmark.scenarios.availability.result_checker
# .result_checker_general
+from __future__ import absolute_import
import mock
import unittest
import copy
-from yardstick.benchmark.scenarios.availability.result_checker import result_checker_general
+from yardstick.benchmark.scenarios.availability.result_checker import \
+ result_checker_general
@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
@@ -35,16 +37,16 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
self.checker_cfg = {
'parameter': {'processname': 'process'},
'checker_type': 'general-result-checker',
- 'condition' : 'eq',
- 'expectedValue' : 1,
- 'key' : 'process-checker',
- 'checker_key' : 'process-checker',
+ 'condition': 'eq',
+ 'expectedValue': 1,
+ 'key': 'process-checker',
+ 'checker_key': 'process-checker',
'host': 'node1'
}
def test__result_checker_eq(self, mock_open, mock_ssh):
ins = result_checker_general.GeneralResultChecker(self.checker_cfg,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "1", '')
ins.setup()
self.assertTrue(ins.verify())
@@ -53,7 +55,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config = copy.deepcopy(self.checker_cfg)
config['condition'] = 'gt'
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "2", '')
ins.setup()
self.assertTrue(ins.verify())
@@ -62,7 +64,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config = copy.deepcopy(self.checker_cfg)
config['condition'] = 'gt_eq'
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "1", '')
ins.setup()
self.assertTrue(ins.verify())
@@ -71,7 +73,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config = copy.deepcopy(self.checker_cfg)
config['condition'] = 'lt'
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "0", '')
ins.setup()
self.assertTrue(ins.verify())
@@ -80,7 +82,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config = copy.deepcopy(self.checker_cfg)
config['condition'] = 'lt_eq'
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "1", '')
ins.setup()
self.assertTrue(ins.verify())
@@ -90,7 +92,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config['condition'] = 'in'
config['expectedValue'] = "value"
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "value return", '')
ins.setup()
self.assertTrue(ins.verify())
@@ -99,7 +101,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config = copy.deepcopy(self.checker_cfg)
config['condition'] = 'wrong'
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (0, "1", '')
ins.setup()
self.assertFalse(ins.verify())
@@ -108,7 +110,7 @@ class GeneralResultCheckerTestCase(unittest.TestCase):
config = copy.deepcopy(self.checker_cfg)
config.pop('parameter')
ins = result_checker_general.GeneralResultChecker(config,
- self.context);
+ self.context)
mock_ssh.SSH().execute.return_value = (1, "fail", '')
ins.setup()
- ins.verify()
\ No newline at end of file
+ ins.verify()
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index bab9d62f1..593fc77b3 100644
--- a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -11,26 +11,29 @@
# Unittest for yardstick.benchmark.scenarios.availability.scenario_general
+from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.scenario_general import ScenarioGeneral
+from yardstick.benchmark.scenarios.availability.scenario_general import \
+ ScenarioGeneral
-@mock.patch('yardstick.benchmark.scenarios.availability.scenario_general.Director')
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.scenario_general.Director')
class ScenarioGeneralTestCase(unittest.TestCase):
def setUp(self):
self.scenario_cfg = {
'type': "general_scenario",
'options': {
- 'attackers':[{
+ 'attackers': [{
'fault_type': "general-attacker",
'key': "kill-process"}],
'monitors': [{
'monitor_type': "general-monitor",
'key': "service-status"}],
- 'steps':[
+ 'steps': [
{
'actionKey': "kill-process",
'actionType': "attacker",
diff --git a/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 6e58b6e7a..4ae508958 100644
--- a/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -11,13 +11,16 @@
# Unittest for yardstick.benchmark.scenarios.availability.serviceha
+from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability import serviceha
+
@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
-@mock.patch('yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
+@mock.patch(
+ 'yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
class ServicehaTestCase(unittest.TestCase):
def setUp(self):
@@ -48,7 +51,8 @@ class ServicehaTestCase(unittest.TestCase):
sla = {"outage_time": 5}
self.args = {"options": options, "sla": sla}
- def test__serviceha_setup_run_successful(self, mock_attacker, mock_monitor):
+ def test__serviceha_setup_run_successful(self, mock_attacker,
+ mock_monitor):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
diff --git a/tests/unit/benchmark/scenarios/compute/test_cachestat.py b/tests/unit/benchmark/scenarios/compute/test_cachestat.py
index f5a6b5ff9..8a06c754b 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cachestat.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cachestat.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.compute.cachestat.CACHEstat
+from __future__ import absolute_import
import mock
import unittest
import os
@@ -72,11 +73,19 @@ class CACHEstatTestCase(unittest.TestCase):
output = self._read_file("cachestat_sample_output.txt")
mock_ssh.SSH().execute.return_value = (0, output, '')
result = c._get_cache_usage()
- expected_result = {"cachestat": {"cache0": {"HITS": "6462",\
- "DIRTIES": "29", "RATIO": "100.0%", "MISSES": "0", "BUFFERS_MB": "1157",\
- "CACHE_MB": "66782"}}, "average": {"HITS": 6462, "DIRTIES": 29, "RATIO": "100.0%",\
- "MISSES": 0, "BUFFERS_MB":1157, "CACHE_MB": 66782}, "max": {"HITS": 6462,\
- "DIRTIES": 29, "RATIO": 100.0, "MISSES": 0, "BUFFERS_MB": 1157, "CACHE_MB": 66782}}
+ expected_result = {"cachestat": {"cache0": {"HITS": "6462",
+ "DIRTIES": "29",
+ "RATIO": "100.0%",
+ "MISSES": "0",
+ "BUFFERS_MB": "1157",
+ "CACHE_MB": "66782"}},
+ "average": {"HITS": 6462, "DIRTIES": 29,
+ "RATIO": "100.0%",
+ "MISSES": 0, "BUFFERS_MB": 1157,
+ "CACHE_MB": 66782},
+ "max": {"HITS": 6462,
+ "DIRTIES": 29, "RATIO": 100.0, "MISSES": 0,
+ "BUFFERS_MB": 1157, "CACHE_MB": 66782}}
self.assertEqual(result, expected_result)
diff --git a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py b/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
index da06b5dbb..4efa66932 100644
--- a/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
+++ b/tests/unit/benchmark/scenarios/compute/test_computecapacity.py
@@ -9,12 +9,15 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.compute.computecapacity.ComputeCapacity
+# Unittest for
+# yardstick.benchmark.scenarios.compute.computecapacity.ComputeCapacity
+
+from __future__ import absolute_import
-import mock
import unittest
-import os
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import computecapacity
@@ -53,7 +56,7 @@ class ComputeCapacityTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, SAMPLE_OUTPUT, '')
c.run(self.result)
- expected_result = json.loads(SAMPLE_OUTPUT)
+ expected_result = jsonutils.loads(SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_unsuccessful_script_error(self, mock_ssh):
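Note: the compute tests swap the stdlib json module for oslo_serialization.jsonutils. For plain loads/dumps the two are interchangeable; jsonutils mainly adds serialization for extra types (dates and similar) used across OpenStack projects. A minimal sketch, assuming oslo.serialization is installed; the sample keys are illustrative:

    from oslo_serialization import jsonutils

    sample_output = '{"Cpu_number": 2, "Core_number": 2}'

    # Drop-in replacement for json.loads on plain JSON text.
    expected_result = jsonutils.loads(sample_output)
    assert expected_result['Cpu_number'] == 2

    # dumps works the same way, with broader type support than json.
    print(jsonutils.dumps(expected_result))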
diff --git a/tests/unit/benchmark/scenarios/compute/test_cpuload.py b/tests/unit/benchmark/scenarios/compute/test_cpuload.py
index 77f2a02d8..ffa781215 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cpuload.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cpuload.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
+from __future__ import absolute_import
import mock
import unittest
import os
@@ -208,7 +209,7 @@ class CPULoadTestCase(unittest.TestCase):
'%nice': '0.03'}}}
self.assertDictEqual(result, expected_result)
-
+
def test_run_proc_stat(self, mock_ssh):
options = {
"interval": 1,
diff --git a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index 807429025..04ca2abf9 100644
--- a/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -11,9 +11,12 @@
# Unittest for yardstick.benchmark.scenarios.compute.cyclictest.Cyclictest
-import mock
+from __future__ import absolute_import
+
import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import cyclictest
@@ -85,17 +88,17 @@ class CyclictestTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
c.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
def test_cyclictest_successful_sla(self, mock_ssh):
result = {}
self.scenario_cfg.update({"sla": {
- "action": "monitor",
- "max_min_latency": 100,
- "max_avg_latency": 500,
- "max_max_latency": 1000
- }
+ "action": "monitor",
+ "max_min_latency": 100,
+ "max_avg_latency": 500,
+ "max_max_latency": 1000
+ }
})
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
@@ -106,7 +109,7 @@ class CyclictestTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
c.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 6be116371..5b72ef75d 100644
--- a/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -11,9 +11,12 @@
# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
-import mock
+from __future__ import absolute_import
+
import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import lmbench
@@ -65,7 +68,8 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 4.944, "size": 0.00049}]'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = json.loads('{"latencies": ' + sample_output + "}")
+ expected_result = jsonutils.loads(
+ '{"latencies": ' + sample_output + "}")
self.assertEqual(self.result, expected_result)
def test_successful_bandwidth_run_no_sla(self, mock_ssh):
@@ -82,7 +86,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_successful_latency_run_sla(self, mock_ssh):
@@ -101,7 +105,8 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 4.944, "size": 0.00049}]'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = json.loads('{"latencies": ' + sample_output + "}")
+ expected_result = jsonutils.loads(
+ '{"latencies": ' + sample_output + "}")
self.assertEqual(self.result, expected_result)
def test_successful_bandwidth_run_sla(self, mock_ssh):
@@ -121,7 +126,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_unsuccessful_latency_run_sla(self, mock_ssh):
@@ -163,7 +168,7 @@ class LmbenchTestCase(unittest.TestCase):
options = {
"test_type": "latency_for_cache",
- "repetition":1,
+ "repetition": 1,
"warmup": 0
}
args = {
@@ -175,7 +180,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = "{\"L1cache\": 1.6}"
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
l.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_unsuccessful_script_error(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_memload.py b/tests/unit/benchmark/scenarios/compute/test_memload.py
index cdf518d82..76625ef11 100644
--- a/tests/unit/benchmark/scenarios/compute/test_memload.py
+++ b/tests/unit/benchmark/scenarios/compute/test_memload.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.compute.memload.MEMLoad
+from __future__ import absolute_import
import mock
import unittest
import os
@@ -74,15 +75,17 @@ class MEMLoadTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, output, '')
result = m._get_mem_usage()
expected_result = {"max": {"used": 76737332, "cached": 67252400,
- "free": 187016644, "shared": 2844,
- "total": 263753976, "buffers": 853528},
+ "free": 187016644, "shared": 2844,
+ "total": 263753976, "buffers": 853528},
"average": {"used": 76737332, "cached": 67252400,
- "free": 187016644, "shared": 2844,
- "total": 263753976, "buffers": 853528},
+ "free": 187016644, "shared": 2844,
+ "total": 263753976, "buffers": 853528},
"free": {"memory0": {"used": "76737332",
- "cached": "67252400", "free": "187016644",
- "shared": "2844", "total": "263753976",
- "buffers": "853528"}}}
+ "cached": "67252400",
+ "free": "187016644",
+ "shared": "2844",
+ "total": "263753976",
+ "buffers": "853528"}}}
self.assertEqual(result, expected_result)
def _read_file(self, filename):
@@ -91,4 +94,3 @@ class MEMLoadTestCase(unittest.TestCase):
with open(output) as f:
sample_output = f.read()
return sample_output
-
diff --git a/tests/unit/benchmark/scenarios/compute/test_plugintest.py b/tests/unit/benchmark/scenarios/compute/test_plugintest.py
index 94f52738c..a5331caf7 100644
--- a/tests/unit/benchmark/scenarios/compute/test_plugintest.py
+++ b/tests/unit/benchmark/scenarios/compute/test_plugintest.py
@@ -11,10 +11,12 @@
# Unittest for yardstick.benchmark.scenarios.compute.plugintest.PluginTest
-import mock
-import json
+from __future__ import absolute_import
+
import unittest
-import os
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import plugintest
@@ -50,7 +52,7 @@ class PluginTestTestCase(unittest.TestCase):
sample_output = '{"Test Output": "Hello world!"}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
s.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_sample_unsuccessful_script_error(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index 100102d19..82cc93870 100644
--- a/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -11,9 +11,12 @@
# Unittest for yardstick.benchmark.scenarios.compute.ramspeed.Ramspeed
-import mock
+from __future__ import absolute_import
+
import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import ramspeed
@@ -69,12 +72,12 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
"Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
"INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
- {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384, "Bandwidth(MBps)":\
- 14128.94}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 32768,\
- "Bandwidth(MBps)": 8340.85}]}'
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
+ "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_ramspeed_successful_run_sla(self, mock_ssh):
@@ -105,12 +108,12 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 14756.45}, {"Test_type": "INTEGER & WRITING",\
"Block_size(kb)": 4096, "Bandwidth(MBps)": 14604.44}, {"Test_type":\
"INTEGER & WRITING", "Block_size(kb)": 8192, "Bandwidth(MBps)": 14159.86},\
- {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384, "Bandwidth(MBps)":\
- 14128.94}, {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 32768,\
- "Bandwidth(MBps)": 8340.85}]}'
+ {"Test_type": "INTEGER & WRITING", "Block_size(kb)": 16384,\
+ "Bandwidth(MBps)": 14128.94}, {"Test_type": "INTEGER & WRITING",\
+ "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_ramspeed_unsuccessful_run_sla(self, mock_ssh):
@@ -176,7 +179,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 9401.58}]}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_ramspeed_mem_successful_run_sla(self, mock_ssh):
@@ -197,7 +200,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 9401.58}]}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
r.run(self.result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(self.result, expected_result)
def test_ramspeed_mem_unsuccessful_run_sla(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
index 0935bcad2..747bda1ed 100644
--- a/tests/unit/benchmark/scenarios/compute/test_unixbench.py
+++ b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -11,9 +11,12 @@
# Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench
-import mock
+from __future__ import absolute_import
+
import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import unixbench
@@ -57,7 +60,7 @@ class UnixbenchTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
u.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
def test_unixbench_successful_in_quiet_mode(self, mock_ssh):
@@ -65,7 +68,7 @@ class UnixbenchTestCase(unittest.TestCase):
options = {
"test_type": 'dhry2reg',
"run_mode": 'quiet',
- "copies":1
+ "copies": 1
}
args = {
"options": options,
@@ -79,10 +82,9 @@ class UnixbenchTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
u.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
-
def test_unixbench_successful_sla(self, mock_ssh):
options = {
@@ -106,7 +108,7 @@ class UnixbenchTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
u.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/dummy/test_dummy.py b/tests/unit/benchmark/scenarios/dummy/test_dummy.py
index 1f9b729a9..560675d09 100644
--- a/tests/unit/benchmark/scenarios/dummy/test_dummy.py
+++ b/tests/unit/benchmark/scenarios/dummy/test_dummy.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.dummy.dummy
+from __future__ import absolute_import
import unittest
from yardstick.benchmark.scenarios.dummy import dummy
diff --git a/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 91f800b60..ea53cb9ab 100644
--- a/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -11,10 +11,13 @@
# Unittest for yardstick.benchmark.scenarios.networking.iperf3.Iperf
-import mock
-import unittest
+from __future__ import absolute_import
+
import os
-import json
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import iperf3
@@ -78,7 +81,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
@@ -97,7 +100,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
@@ -119,8 +122,7 @@ class IperfTestCase(unittest.TestCase):
self.assertRaises(AssertionError, p.run, result)
def test_iperf_successful_sla_jitter(self, mock_ssh):
-
- options = {"udp":"udp","bandwidth":"20m"}
+ options = {"udp": "udp", "bandwidth": "20m"}
args = {
'options': options,
'sla': {'jitter': 10}
@@ -133,13 +135,12 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
def test_iperf_unsuccessful_sla_jitter(self, mock_ssh):
-
- options = {"udp":"udp","bandwidth":"20m"}
+ options = {"udp": "udp", "bandwidth": "20m"}
args = {
'options': options,
'sla': {'jitter': 0.0001}
@@ -167,7 +168,7 @@ class IperfTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, p.run, result)
- def _read_sample_output(self,filename):
+ def _read_sample_output(self, filename):
curr_path = os.path.dirname(os.path.abspath(__file__))
output = os.path.join(curr_path, filename)
with open(output) as f:
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf.py b/tests/unit/benchmark/scenarios/networking/test_netperf.py
index 3f224733c..1b5dd6472 100755
--- a/tests/unit/benchmark/scenarios/networking/test_netperf.py
+++ b/tests/unit/benchmark/scenarios/networking/test_netperf.py
@@ -11,10 +11,13 @@
# Unittest for yardstick.benchmark.scenarios.networking.netperf.Netperf
-import mock
-import unittest
+from __future__ import absolute_import
+
import os
-import json
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf
@@ -59,7 +62,7 @@ class NetperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
@@ -78,7 +81,7 @@ class NetperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf_node.py b/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
index 1c39b292b..29a7edf67 100755
--- a/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
+++ b/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
@@ -12,10 +12,13 @@
# Unittest for
# yardstick.benchmark.scenarios.networking.netperf_node.NetperfNode
-import mock
-import unittest
+from __future__ import absolute_import
+
import os
-import json
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf_node
@@ -59,7 +62,7 @@ class NetperfNodeTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
@@ -78,7 +81,7 @@ class NetperfNodeTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
diff --git a/tests/unit/benchmark/scenarios/networking/test_netutilization.py b/tests/unit/benchmark/scenarios/networking/test_netutilization.py
index eb6626fea..7c04f5e9a 100644
--- a/tests/unit/benchmark/scenarios/networking/test_netutilization.py
+++ b/tests/unit/benchmark/scenarios/networking/test_netutilization.py
@@ -9,8 +9,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.netutilization.NetUtilization
+# Unittest for
+# yardstick.benchmark.scenarios.networking.netutilization.NetUtilization
+from __future__ import absolute_import
import mock
import unittest
import os
diff --git a/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py b/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py
index e42832f1b..3f8d84e54 100644
--- a/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py
+++ b/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py
@@ -9,27 +9,32 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.networkcapacity.NetworkCapacity
+# Unittest for
+# yardstick.benchmark.scenarios.networking.networkcapacity.NetworkCapacity
+
+from __future__ import absolute_import
-import mock
import unittest
-import os
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import networkcapacity
-SAMPLE_OUTPUT = '{"Number of connections":"308","Number of frames received": "166503"}'
+SAMPLE_OUTPUT = \
+ '{"Number of connections":"308","Number of frames received": "166503"}'
+
@mock.patch('yardstick.benchmark.scenarios.networking.networkcapacity.ssh')
class NetworkCapacityTestCase(unittest.TestCase):
def setUp(self):
self.ctx = {
- 'host': {
- 'ip': '172.16.0.137',
- 'user': 'cirros',
- 'password': "root"
- },
+ 'host': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'password': "root"
+ },
}
self.result = {}
@@ -46,7 +51,7 @@ class NetworkCapacityTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, SAMPLE_OUTPUT, '')
c.run(self.result)
- expected_result = json.loads(SAMPLE_OUTPUT)
+ expected_result = jsonutils.loads(SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_unsuccessful_script_error(self, mock_ssh):
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping.py b/tests/unit/benchmark/scenarios/networking/test_ping.py
index 8d35b8490..5535a79a9 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.networking.ping.Ping
+from __future__ import absolute_import
import mock
import unittest
@@ -37,7 +38,7 @@ class PingTestCase(unittest.TestCase):
args = {
'options': {'packetsize': 200},
'target': 'ares.demo'
- }
+ }
result = {}
p = ping.Ping(args, self.ctx)
@@ -53,7 +54,7 @@ class PingTestCase(unittest.TestCase):
'options': {'packetsize': 200},
'sla': {'max_rtt': 150},
'target': 'ares.demo'
- }
+ }
result = {}
p = ping.Ping(args, self.ctx)
diff --git a/tests/unit/benchmark/scenarios/networking/test_ping6.py b/tests/unit/benchmark/scenarios/networking/test_ping6.py
index 0b8fba268..e22cacb36 100644
--- a/tests/unit/benchmark/scenarios/networking/test_ping6.py
+++ b/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.networking.ping6.Ping6
+from __future__ import absolute_import
import mock
import unittest
@@ -21,37 +22,37 @@ class PingTestCase(unittest.TestCase):
def setUp(self):
self.ctx = {
- 'nodes':{
- 'host1': {
- 'ip': '172.16.0.137',
- 'user': 'cirros',
- 'role': "Controller",
- 'key_filename': "mykey.key",
- 'password': "root"
+ 'nodes': {
+ 'host1': {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'role': "Controller",
+ 'key_filename': "mykey.key",
+ 'password': "root"
},
- 'host2': {
- "ip": "172.16.0.138",
- "key_filename": "/root/.ssh/id_rsa",
- "role": "Compute",
- "name": "node3.IPV6",
- "user": "root"
+ 'host2': {
+ "ip": "172.16.0.138",
+ "key_filename": "/root/.ssh/id_rsa",
+ "role": "Compute",
+ "name": "node3.IPV6",
+ "user": "root"
},
}
}
def test_get_controller_node(self):
args = {
- 'options': {'host': 'host1','packetsize': 200, 'ping_count': 5},
+ 'options': {'host': 'host1', 'packetsize': 200, 'ping_count': 5},
'sla': {'max_rtt': 50}
}
p = ping6.Ping6(args, self.ctx)
- controller_node = p._get_controller_node(['host1','host2'])
+ controller_node = p._get_controller_node(['host1', 'host2'])
self.assertEqual(controller_node, 'host1')
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_successful_setup(self, mock_ssh):
args = {
- 'options': {'host': 'host1','packetsize': 200, 'ping_count': 5},
+ 'options': {'host': 'host1', 'packetsize': 200, 'ping_count': 5},
'sla': {'max_rtt': 50}
}
p = ping6.Ping6(args, self.ctx)
@@ -63,58 +64,57 @@ class PingTestCase(unittest.TestCase):
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_successful_no_sla(self, mock_ssh):
args = {
- 'options': {'host': 'host1','packetsize': 200, 'ping_count': 5},
+ 'options': {'host': 'host1', 'packetsize': 200, 'ping_count': 5},
}
result = {}
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH()
- mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''),(0, 100, '')]
+ mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
p.run(result)
self.assertEqual(result, {'rtt': 100.0})
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_successful_sla(self, mock_ssh):
-
args = {
- 'options': {'host': 'host1','packetsize': 200, 'ping_count': 5},
+ 'options': {'host': 'host1', 'packetsize': 200, 'ping_count': 5},
'sla': {'max_rtt': 150}
}
result = {}
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH()
- mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''),(0, 100, '')]
+ mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
p.run(result)
self.assertEqual(result, {'rtt': 100.0})
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_unsuccessful_sla(self, mock_ssh):
-
args = {
- 'options': {'host': 'host1','packetsize': 200, 'ping_count': 5},
+ 'options': {'host': 'host1', 'packetsize': 200, 'ping_count': 5},
'sla': {'max_rtt': 50}
}
result = {}
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH()
- mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''),(0, 100, '')]
+ mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
self.assertRaises(AssertionError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
args = {
- 'options': {'host': 'host1','packetsize': 200, 'ping_count': 5},
+ 'options': {'host': 'host1', 'packetsize': 200, 'ping_count': 5},
'sla': {'max_rtt': 150}
}
result = {}
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH()
- mock_ssh.SSH().execute.side_effect = [(0, 'host1', ''),(1, '', 'FOOBAR')]
+ mock_ssh.SSH().execute.side_effect = [
+ (0, 'host1', ''), (1, '', 'FOOBAR')]
self.assertRaises(RuntimeError, p.run, result)
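The ping6 tests above queue two return values on the mocked SSH client: side_effect consumes its list one call at a time, so the first execute() answers the controller-node lookup and the second answers the ping itself. A self-contained sketch of the pattern (the tuples are illustrative):

try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # Python 2 backport

ssh_client = mock.Mock()
# each execute() call pops the next (status, stdout, stderr) tuple
ssh_client.execute.side_effect = [(0, 'host1', ''), (0, 100, '')]

assert ssh_client.execute() == (0, 'host1', '')
assert ssh_client.execute() == (0, 100, '')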
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 13a4c1bd4..f50fa108c 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -11,9 +11,12 @@
# Unittest for yardstick.benchmark.scenarios.networking.pktgen.Pktgen
-import mock
+from __future__ import absolute_import
+
import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
@@ -133,7 +136,7 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
p.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
self.assertEqual(result, expected_result)
@@ -159,7 +162,7 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
p.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
self.assertEqual(result, expected_result)
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index afc87abfb..7ba4db9d9 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -11,12 +11,14 @@
# Unittest for yardstick.benchmark.scenarios.networking.pktgen_dpdk.PktgenDPDKLatency
-import mock
+from __future__ import absolute_import
import unittest
-import json
+
+import mock
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk.ssh')
class PktgenDPDKLatencyTestCase(unittest.TestCase):
@@ -116,7 +118,11 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
p.run(result)
- self.assertEqual(result, {"avg_latency": 132})
+ # with Python 3 we get a float, likely due to the change to true division
+ # AssertionError: {'avg_latency': 132.33333333333334} != {
+ # 'avg_latency': 132}
+ delta = result['avg_latency'] - 132
+ self.assertLessEqual(delta, 1)
def test_pktgen_dpdk_successful_sla(self, mock_ssh):
@@ -169,5 +175,6 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
def main():
unittest.main()
+
if __name__ == '__main__':
main()
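The relaxed latency assertion above is needed because Python 3's true division turns the averaged result into a float: 132.333... where Python 2's integer division gave 132. A short illustration with made-up samples that reproduce the quoted value; note that wrapping the delta in abs(), as below, is slightly stricter than the patched code, which would also accept an average far below 132:

latencies = [132, 132, 133]  # hypothetical samples summing to 397

# Python 2: 397 / 3 == 132; Python 3: 397 / 3 == 132.333...
avg_latency = sum(latencies) / len(latencies)

assert abs(avg_latency - 132) <= 1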
diff --git a/tests/unit/benchmark/scenarios/networking/test_sfc.py b/tests/unit/benchmark/scenarios/networking/test_sfc.py
index 618efc32e..224a43bd8 100644
--- a/tests/unit/benchmark/scenarios/networking/test_sfc.py
+++ b/tests/unit/benchmark/scenarios/networking/test_sfc.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.networking.sfc
+from __future__ import absolute_import
import mock
import unittest
@@ -27,7 +28,7 @@ class SfcTestCase(unittest.TestCase):
context_cfg['target'] = dict()
context_cfg['target']['user'] = 'root'
context_cfg['target']['password'] = 'opnfv'
- context_cfg['target']['ip'] = '127.0.0.1'
+ context_cfg['target']['ip'] = '127.0.0.1'
# Used in Sfc.run()
context_cfg['host'] = dict()
@@ -58,7 +59,8 @@ class SfcTestCase(unittest.TestCase):
@mock.patch('yardstick.benchmark.scenarios.networking.sfc.subprocess')
def test2_run_for_success(self, mock_subprocess, mock_openstack, mock_ssh):
# Mock a successful SSH in Sfc.setup() and Sfc.run()
- mock_ssh.SSH().execute.return_value = (0, 'vxlan_tool.py', 'succeeded timed out')
+ mock_ssh.SSH().execute.return_value = (
+ 0, 'vxlan_tool.py', 'succeeded timed out')
mock_openstack.get_an_IP.return_value = "127.0.0.1"
mock_subprocess.call.return_value = 'mocked!'
diff --git a/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index 25d52212b..76d2afdc0 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -16,17 +16,20 @@
# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
-import mock
+from __future__ import absolute_import
+try:
+ from unittest import mock
+except ImportError:
+ import mock
import unittest
-import os
-import subprocess
from yardstick.benchmark.scenarios.networking import vsperf
@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
-@mock.patch("__builtin__.open", return_value=None)
+@mock.patch("yardstick.benchmark.scenarios.networking.vsperf.open",
+ mock.mock_open())
class VsperfTestCase(unittest.TestCase):
def setUp(self):
@@ -58,7 +61,7 @@ class VsperfTestCase(unittest.TestCase):
}
}
- def test_vsperf_setup(self, mock_open, mock_ssh, mock_subprocess):
+ def test_vsperf_setup(self, mock_ssh, mock_subprocess):
p = vsperf.Vsperf(self.args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
mock_subprocess.call().execute.return_value = None
@@ -67,7 +70,7 @@ class VsperfTestCase(unittest.TestCase):
self.assertIsNotNone(p.client)
self.assertEqual(p.setup_done, True)
- def test_vsperf_teardown(self, mock_open, mock_ssh, mock_subprocess):
+ def test_vsperf_teardown(self, mock_ssh, mock_subprocess):
p = vsperf.Vsperf(self.args, self.ctx)
# setup() specific mocks
@@ -81,7 +84,7 @@ class VsperfTestCase(unittest.TestCase):
p.teardown()
self.assertEqual(p.setup_done, False)
- def test_vsperf_run_ok(self, mock_open, mock_ssh, mock_subprocess):
+ def test_vsperf_run_ok(self, mock_ssh, mock_subprocess):
p = vsperf.Vsperf(self.args, self.ctx)
# setup() specific mocks
@@ -90,14 +93,16 @@ class VsperfTestCase(unittest.TestCase):
# run() specific mocks
mock_ssh.SSH().execute.return_value = (0, '', '')
- mock_ssh.SSH().execute.return_value = (0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+ mock_ssh.SSH().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
result = {}
p.run(result)
self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_vsperf_run_falied_vsperf_execution(self, mock_open, mock_ssh, mock_subprocess):
+ def test_vsperf_run_failed_vsperf_execution(self, mock_ssh,
+ mock_subprocess):
p = vsperf.Vsperf(self.args, self.ctx)
# setup() specific mocks
@@ -110,7 +115,7 @@ class VsperfTestCase(unittest.TestCase):
result = {}
self.assertRaises(RuntimeError, p.run, result)
- def test_vsperf_run_falied_csv_report(self, mock_open, mock_ssh, mock_subprocess):
+ def test_vsperf_run_failed_csv_report(self, mock_ssh, mock_subprocess):
p = vsperf.Vsperf(self.args, self.ctx)
# setup() specific mocks
@@ -128,5 +133,6 @@ class VsperfTestCase(unittest.TestCase):
def main():
unittest.main()
+
if __name__ == '__main__':
main()
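The vsperf changes replace the Python 2-only patch of __builtin__.open with a patch of the module-level open, using an explicit mock.mock_open() replacement. When mock.patch is handed the replacement object directly it injects no extra argument into the decorated test, which is why every test method dropped its mock_open parameter. A minimal Python 3 sketch of the same mechanics (patching builtins.open and the file name are illustrative; the real patch targets vsperf's own module):

from unittest import mock


@mock.patch('builtins.open', mock.mock_open(read_data='throughput_rx_fps'))
def check():  # no injected mock argument, because the replacement was given
    with open('report.csv') as f:  # hypothetical file name
        assert f.read() == 'throughput_rx_fps'


check()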
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
index 418dd39e6..07b3da992 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation.py
@@ -11,10 +11,11 @@
# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation.VtcInstantiationValidation
-import mock
+from __future__ import absolute_import
import unittest
-from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation
+from yardstick.benchmark.scenarios.networking import \
+ vtc_instantiation_validation
class VtcInstantiationValidationTestCase(unittest.TestCase):
@@ -34,7 +35,8 @@ class VtcInstantiationValidationTestCase(unittest.TestCase):
scenario['options']['vlan_sender'] = ''
scenario['options']['vlan_receiver'] = ''
- self.vt = vtc_instantiation_validation.VtcInstantiationValidation(scenario, '')
+ self.vt = vtc_instantiation_validation.VtcInstantiationValidation(
+ scenario, '')
def test_run_for_success(self):
result = {}
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
index e0a46241c..34f3610b1 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_instantiation_validation_noisy.py
@@ -11,10 +11,11 @@
# Unittest for yardstick.benchmark.scenarios.networking.vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy
-import mock
+from __future__ import absolute_import
import unittest
-from yardstick.benchmark.scenarios.networking import vtc_instantiation_validation_noisy
+from yardstick.benchmark.scenarios.networking import \
+ vtc_instantiation_validation_noisy
class VtcInstantiationValidationiNoisyTestCase(unittest.TestCase):
@@ -37,7 +38,9 @@ class VtcInstantiationValidationiNoisyTestCase(unittest.TestCase):
scenario['options']['amount_of_ram'] = '1G'
scenario['options']['number_of_cores'] = '1'
- self.vt = vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy(scenario, '')
+ self.vt = \
+ vtc_instantiation_validation_noisy.VtcInstantiationValidationNoisy(
+ scenario, '')
def test_run_for_success(self):
result = {}
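The two vtc hunks above wrap over-long import and constructor lines with backslash continuations to satisfy the line-length check. A parenthesized import is the usual alternative and avoids the trailing backslash; equivalent to the wrapped import above, assuming yardstick is importable:

from yardstick.benchmark.scenarios.networking import (
    vtc_instantiation_validation_noisy,
)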
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
index ecdf555d2..a73fad5a8 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput
+from __future__ import absolute_import
import mock
import unittest
diff --git a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
index 98957b1de..e1b162c79 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vtc_throughput_noisy_test.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.scenarios.networking.vtc_throughput_noisy_test
+from __future__ import absolute_import
import mock
import unittest
diff --git a/tests/unit/benchmark/scenarios/parser/test_parser.py b/tests/unit/benchmark/scenarios/parser/test_parser.py
index d11a6d5c8..59b98a092 100644
--- a/tests/unit/benchmark/scenarios/parser/test_parser.py
+++ b/tests/unit/benchmark/scenarios/parser/test_parser.py
@@ -11,12 +11,16 @@
# Unittest for yardstick.benchmark.scenarios.parser.Parser
-import mock
+from __future__ import absolute_import
+
import unittest
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.parser import parser
+
@mock.patch('yardstick.benchmark.scenarios.parser.parser.subprocess')
class ParserTestCase(unittest.TestCase):
@@ -32,8 +36,8 @@ class ParserTestCase(unittest.TestCase):
def test_parser_successful(self, mock_subprocess):
args = {
- 'options': {'yangfile':'/root/yardstick/samples/yang.yaml',
- 'toscafile':'/root/yardstick/samples/tosca.yaml'},
+ 'options': {'yangfile': '/root/yardstick/samples/yang.yaml',
+ 'toscafile': '/root/yardstick/samples/tosca.yaml'},
}
p = parser.Parser(args, {})
result = {}
@@ -41,7 +45,7 @@ class ParserTestCase(unittest.TestCase):
sample_output = '{"yangtotosca": "success"}'
p.run(result)
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
def test_parser_teardown_successful(self, mock_subprocess):
diff --git a/tests/unit/benchmark/scenarios/storage/test_fio.py b/tests/unit/benchmark/scenarios/storage/test_fio.py
index 153d15052..603ff389e 100644
--- a/tests/unit/benchmark/scenarios/storage/test_fio.py
+++ b/tests/unit/benchmark/scenarios/storage/test_fio.py
@@ -11,10 +11,13 @@
# Unittest for yardstick.benchmark.scenarios.storage.fio.Fio
-import mock
-import unittest
-import json
+from __future__ import absolute_import
+
import os
+import unittest
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import fio
@@ -74,7 +77,7 @@ class FioTestCase(unittest.TestCase):
expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
'"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
'"write_lat": 233.55}'
- expected_result = json.loads(expected_result)
+ expected_result = jsonutils.loads(expected_result)
self.assertEqual(result, expected_result)
def test_fio_successful_read_no_sla(self, mock_ssh):
@@ -98,7 +101,7 @@ class FioTestCase(unittest.TestCase):
expected_result = '{"read_bw": 36113, "read_iops": 9028,' \
'"read_lat": 108.7}'
- expected_result = json.loads(expected_result)
+ expected_result = jsonutils.loads(expected_result)
self.assertEqual(result, expected_result)
def test_fio_successful_write_no_sla(self, mock_ssh):
@@ -122,7 +125,7 @@ class FioTestCase(unittest.TestCase):
expected_result = '{"write_bw": 35107, "write_iops": 8776,'\
'"write_lat": 111.74}'
- expected_result = json.loads(expected_result)
+ expected_result = jsonutils.loads(expected_result)
self.assertEqual(result, expected_result)
def test_fio_successful_lat_sla(self, mock_ssh):
@@ -150,10 +153,9 @@ class FioTestCase(unittest.TestCase):
expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
'"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
'"write_lat": 233.55}'
- expected_result = json.loads(expected_result)
+ expected_result = jsonutils.loads(expected_result)
self.assertEqual(result, expected_result)
-
def test_fio_unsuccessful_lat_sla(self, mock_ssh):
options = {
@@ -200,7 +202,7 @@ class FioTestCase(unittest.TestCase):
expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
'"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
'"write_lat": 233.55}'
- expected_result = json.loads(expected_result)
+ expected_result = jsonutils.loads(expected_result)
self.assertEqual(result, expected_result)
def test_fio_unsuccessful_bw_iops_sla(self, mock_ssh):
@@ -248,8 +250,10 @@ class FioTestCase(unittest.TestCase):
sample_output = f.read()
return sample_output
+
def main():
unittest.main()
+
if __name__ == '__main__':
main()
diff --git a/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py b/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py
index ace0ca374..6fb5f5686 100644
--- a/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py
+++ b/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py
@@ -9,35 +9,41 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.storage.storagecapacity.StorageCapacity
+# Unittest for
+# yardstick.benchmark.scenarios.storage.storagecapacity.StorageCapacity
+
+from __future__ import absolute_import
-import mock
import unittest
-import os
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import storagecapacity
-DISK_SIZE_SAMPLE_OUTPUT = '{"Numberf of devides": "2", "Total disk size in bytes": "1024000000"}'
+DISK_SIZE_SAMPLE_OUTPUT = \
+ '{"Numberf of devides": "2", "Total disk size in bytes": "1024000000"}'
BLOCK_SIZE_SAMPLE_OUTPUT = '{"/dev/sda": 1024, "/dev/sdb": 4096}'
DISK_UTIL_RAW_OUTPUT = "vda 10.00\nvda 0.00"
-DISK_UTIL_SAMPLE_OUTPUT = '{"vda": {"avg_util": 5.0, "max_util": 10.0, "min_util": 0.0}}'
+DISK_UTIL_SAMPLE_OUTPUT = \
+ '{"vda": {"avg_util": 5.0, "max_util": 10.0, "min_util": 0.0}}'
+
@mock.patch('yardstick.benchmark.scenarios.storage.storagecapacity.ssh')
class StorageCapacityTestCase(unittest.TestCase):
def setUp(self):
self.scn = {
- "options": {
- 'test_type': 'disk_size'
- }
+ "options": {
+ 'test_type': 'disk_size'
+ }
}
self.ctx = {
- "host": {
- 'ip': '172.16.0.137',
- 'user': 'cirros',
- 'password': "root"
- }
+ "host": {
+ 'ip': '172.16.0.137',
+ 'user': 'cirros',
+ 'password': "root"
+ }
}
self.result = {}
@@ -54,7 +60,8 @@ class StorageCapacityTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, DISK_SIZE_SAMPLE_OUTPUT, '')
c.run(self.result)
- expected_result = json.loads(DISK_SIZE_SAMPLE_OUTPUT)
+ expected_result = jsonutils.loads(
+ DISK_SIZE_SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_block_size_successful(self, mock_ssh):
@@ -67,7 +74,8 @@ class StorageCapacityTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, BLOCK_SIZE_SAMPLE_OUTPUT, '')
c.run(self.result)
- expected_result = json.loads(BLOCK_SIZE_SAMPLE_OUTPUT)
+ expected_result = jsonutils.loads(
+ BLOCK_SIZE_SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_disk_utilization_successful(self, mock_ssh):
@@ -82,7 +90,8 @@ class StorageCapacityTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (0, DISK_UTIL_RAW_OUTPUT, '')
c.run(self.result)
- expected_result = json.loads(DISK_UTIL_SAMPLE_OUTPUT)
+ expected_result = jsonutils.loads(
+ DISK_UTIL_SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_unsuccessful_script_error(self, mock_ssh):
@@ -91,6 +100,7 @@ class StorageCapacityTestCase(unittest.TestCase):
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, c.run, self.result)
+
def main():
unittest.main()
diff --git a/tests/unit/benchmark/scenarios/storage/test_storperf.py b/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 8fc97d2ed..adc9d47c6 100644
--- a/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -11,43 +11,58 @@
# Unittest for yardstick.benchmark.scenarios.storage.storperf.StorPerf
-import mock
+from __future__ import absolute_import
+
import unittest
-import requests
-import json
+
+import mock
+from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import storperf
def mocked_requests_config_post(*args, **kwargs):
class MockResponseConfigPost:
+
def __init__(self, json_data, status_code):
self.content = json_data
self.status_code = status_code
- return MockResponseConfigPost('{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622","stack_created": "false"}', 200)
+ return MockResponseConfigPost(
+ '{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
+ '"stack_created": "false"}',
+ 200)
def mocked_requests_config_get(*args, **kwargs):
class MockResponseConfigGet:
+
def __init__(self, json_data, status_code):
self.content = json_data
self.status_code = status_code
- return MockResponseConfigGet('{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622","stack_created": "true"}', 200)
+ return MockResponseConfigGet(
+ '{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
+ '"stack_created": "true"}',
+ 200)
def mocked_requests_job_get(*args, **kwargs):
class MockResponseJobGet:
+
def __init__(self, json_data, status_code):
self.content = json_data
self.status_code = status_code
- return MockResponseJobGet('{"status": "completed", "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}', 200)
+ return MockResponseJobGet(
+ '{"status": "completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}',
+ 200)
def mocked_requests_job_post(*args, **kwargs):
class MockResponseJobPost:
+
def __init__(self, json_data, status_code):
self.content = json_data
self.status_code = status_code
@@ -58,6 +73,7 @@ def mocked_requests_job_post(*args, **kwargs):
def mocked_requests_job_delete(*args, **kwargs):
class MockResponseJobDelete:
+
def __init__(self, json_data, status_code):
self.content = json_data
self.status_code = status_code
@@ -67,6 +83,7 @@ def mocked_requests_job_delete(*args, **kwargs):
def mocked_requests_delete(*args, **kwargs):
class MockResponseDelete:
+
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
@@ -76,6 +93,7 @@ def mocked_requests_delete(*args, **kwargs):
def mocked_requests_delete_failed(*args, **kwargs):
class MockResponseDeleteFailed:
+
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
@@ -130,8 +148,9 @@ class StorPerfTestCase(unittest.TestCase):
side_effect=mocked_requests_job_post)
@mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
side_effect=mocked_requests_job_get)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.delete',
- side_effect=mocked_requests_job_delete)
+ @mock.patch(
+ 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
+ side_effect=mocked_requests_job_delete)
def test_successful_run(self, mock_post, mock_get, mock_delete):
options = {
"agent_count": 8,
@@ -152,15 +171,18 @@ class StorPerfTestCase(unittest.TestCase):
s = storperf.StorPerf(args, self.ctx)
s.setup_done = True
- sample_output = '{"status": "completed", "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+ sample_output = '{"status": "completed",\
+ "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
- expected_result = json.loads(sample_output)
+ expected_result = jsonutils.loads(sample_output)
s.run(self.result)
self.assertEqual(self.result, expected_result)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.delete', side_effect=mocked_requests_delete)
+ @mock.patch(
+ 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
+ side_effect=mocked_requests_delete)
def test_successful_teardown(self, mock_delete):
options = {
"agent_count": 8,
@@ -184,7 +206,9 @@ class StorPerfTestCase(unittest.TestCase):
self.assertFalse(s.setup_done)
- @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.delete', side_effect=mocked_requests_delete_failed)
+ @mock.patch(
+ 'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
+ side_effect=mocked_requests_delete_failed)
def test_failed_teardown(self, mock_delete):
options = {
"agent_count": 8,