diff options
author | Emma Foley <emma.l.foley@intel.com> | 2018-01-17 11:23:27 +0000 |
---|---|---|
committer | Emma Foley <emma.l.foley@intel.com> | 2018-01-17 11:26:37 +0000 |
commit | 87edac9472000016ee51cff1f50eeb82377e2d0e (patch) | |
tree | 703493bead487913527dc33e694799038d903399 | |
parent | a75bc1531caa213cdd0a335b2b3332f844c88ef1 (diff) |
Move tests: unit/orchestrator/
* Fix pylint errors
* Add notes where additional action is needed.
Some errors are ignored locally, as they were a symptom of other problems.
These issues have been flagged for follow-up, and should be fixed later.
JIRA: YARDSTICK-837
Signed-off-by: Emma Foley <emma.l.foley@intel.com>
Change-Id: Idbf56d0cd3b29b70691acd6af4d5ac3c400d2ccc
-rw-r--r-- | yardstick/tests/unit/orchestrator/__init__.py (renamed from tests/unit/orchestrator/__init__.py) | 0 | ||||
-rw-r--r-- | yardstick/tests/unit/orchestrator/test_heat.py (renamed from tests/unit/orchestrator/test_heat.py) | 311 | ||||
-rw-r--r-- | yardstick/tests/unit/orchestrator/test_kubernetes.py (renamed from tests/unit/orchestrator/test_kubernetes.py) | 4 |
3 files changed, 231 insertions(+), 84 deletions(-)
diff --git a/tests/unit/orchestrator/__init__.py b/yardstick/tests/unit/orchestrator/__init__.py index e69de29bb..e69de29bb 100644 --- a/tests/unit/orchestrator/__init__.py +++ b/yardstick/tests/unit/orchestrator/__init__.py diff --git a/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py index c34ea53fc..faf70cdbc 100644 --- a/tests/unit/orchestrator/test_heat.py +++ b/yardstick/tests/unit/orchestrator/test_heat.py @@ -13,10 +13,11 @@ from contextlib import contextmanager from itertools import count from tempfile import NamedTemporaryFile -import unittest -import uuid import time +import uuid + import mock +import unittest from yardstick.benchmark.contexts import node from yardstick.orchestrator import heat @@ -65,6 +66,7 @@ class HeatContextTestCase(unittest.TestCase): self.assertEqual(heat.HEAT_KEY_UUID_LENGTH, len(k)) self.assertIn(k, str(u)) + class HeatTemplateTestCase(unittest.TestCase): def setUp(self): @@ -73,38 +75,63 @@ class HeatTemplateTestCase(unittest.TestCase): def test_add_tenant_network(self): self.template.add_network('some-network') - self.assertEqual(self.template.resources['some-network']['type'], 'OS::Neutron::Net') + self.assertEqual( + self.template.resources['some-network']['type'], + 'OS::Neutron::Net') def test_add_provider_network(self): self.template.add_network('some-network', 'physnet2', 'sriov') - self.assertEqual(self.template.resources['some-network']['type'], 'OS::Neutron::ProviderNet') - self.assertEqual(self.template.resources['some-network']['properties']['physical_network'], 'physnet2') + self.assertEqual( + self.template.resources['some-network']['type'], + 'OS::Neutron::ProviderNet') + self.assertEqual( + self.template.resources['some-network']['properties']['physical_network'], + 'physnet2') def test_add_subnet(self): - netattrs = {'cidr': '10.0.0.0/24', 'provider': None, 'external_network': 'ext_net'} - self.template.add_subnet('some-subnet', "some-network", netattrs['cidr']) - - 
self.assertEqual(self.template.resources['some-subnet']['type'], 'OS::Neutron::Subnet') - self.assertEqual(self.template.resources['some-subnet']['properties']['cidr'], '10.0.0.0/24') + netattrs = {'cidr': '10.0.0.0/24', + 'provider': None, 'external_network': 'ext_net'} + self.template.add_subnet( + 'some-subnet', "some-network", netattrs['cidr']) + + self.assertEqual( + self.template.resources['some-subnet']['type'], + 'OS::Neutron::Subnet') + self.assertEqual( + self.template.resources['some-subnet']['properties']['cidr'], + '10.0.0.0/24') def test_add_router(self): self.template.add_router('some-router', 'ext-net', 'some-subnet') - self.assertEqual(self.template.resources['some-router']['type'], 'OS::Neutron::Router') - self.assertIn('some-subnet', self.template.resources['some-router']['depends_on']) + self.assertEqual( + self.template.resources['some-router']['type'], + 'OS::Neutron::Router') + self.assertIn( + 'some-subnet', + self.template.resources['some-router']['depends_on']) def test_add_router_interface(self): - self.template.add_router_interface('some-router-if', 'some-router', 'some-subnet') + self.template.add_router_interface( + 'some-router-if', 'some-router', 'some-subnet') - self.assertEqual(self.template.resources['some-router-if']['type'], 'OS::Neutron::RouterInterface') - self.assertIn('some-subnet', self.template.resources['some-router-if']['depends_on']) + self.assertEqual( + self.template.resources['some-router-if']['type'], + 'OS::Neutron::RouterInterface') + self.assertIn( + 'some-subnet', + self.template.resources['some-router-if']['depends_on']) def test_add_servergroup(self): self.template.add_servergroup('some-server-group', 'anti-affinity') - self.assertEqual(self.template.resources['some-server-group']['type'], 'OS::Nova::ServerGroup') - self.assertEqual(self.template.resources['some-server-group']['properties']['policies'], ['anti-affinity']) + self.assertEqual( + self.template.resources['some-server-group']['type'], + 
'OS::Nova::ServerGroup') + self.assertEqual( + self.template.resources['some-server-group']['properties']['policies'], + ['anti-affinity']) def test__add_resources_to_template_raw(self): test_context = node.NodeContext() @@ -136,49 +163,136 @@ class HeatTemplateTestCase(unittest.TestCase): heat_template.add_router("router1", "gw1", "subnet1") heat_template.add_router_interface("router_if1", "router1", "subnet1") heat_template.add_port("port1", "network1", "subnet1", "normal") - heat_template.add_port("port2", "network2", "subnet2", "normal", sec_group_id="sec_group1",provider="not-sriov") - heat_template.add_port("port3", "network2", "subnet2", "normal", sec_group_id="sec_group1",provider="sriov") - heat_template.add_floating_ip("floating_ip1", "network1", "port1", "router_if1") - heat_template.add_floating_ip("floating_ip2", "network2", "port2", "router_if2", "foo-secgroup") - heat_template.add_floating_ip_association("floating_ip1_association", "floating_ip1", "port1") + heat_template.add_port( + "port2", + "network2", + "subnet2", + "normal", + sec_group_id="sec_group1", + provider="not-sriov") + heat_template.add_port( + "port3", + "network2", + "subnet2", + "normal", + sec_group_id="sec_group1", + provider="sriov") + heat_template.add_floating_ip( + "floating_ip1", "network1", "port1", "router_if1") + heat_template.add_floating_ip( + "floating_ip2", "network2", "port2", "router_if2", "foo-secgroup") + heat_template.add_floating_ip_association( + "floating_ip1_association", "floating_ip1", "port1") heat_template.add_servergroup("server_grp2", "affinity") heat_template.add_servergroup("server_grp3", "anti-affinity") heat_template.add_security_group("security_group") - heat_template.add_server(name="server1", image="image1", flavor="flavor1", flavors=[]) - heat_template.add_server_group(name="servergroup", policies=["policy1","policy2"]) + heat_template.add_server( + name="server1", image="image1", flavor="flavor1", flavors=[]) + heat_template.add_server_group( + 
name="servergroup", policies=["policy1", "policy2"]) heat_template.add_server_group(name="servergroup", policies="policy1") - heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=[], ports=["port1", "port2"], - networks=["network1", "network2"], scheduler_hints="hints1", user="user1", - key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2}, - additional_properties={"prop1": 1, "prop2": 2}) - heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor1", "flavor2"], - ports=["port1", "port2"], - networks=["network1", "network2"], scheduler_hints="hints1", user="user1", - key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2}, - additional_properties={"prop1": 1, "prop2": 2} ) - heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor3", "flavor4"], - ports=["port1", "port2"], - networks=["network1", "network2"], scheduler_hints="hints1", user="user1", - key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2}, - additional_properties={"prop1": 1, "prop2": 2}) - heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,extra_specs={"cat": 1, "dog": 2}) + heat_template.add_server( + name="server2", + image="image1", + flavor="flavor1", + flavors=[], + ports=[ + "port1", + "port2"], + networks=[ + "network1", + "network2"], + scheduler_hints="hints1", + user="user1", + key_name="foo-key", + user_data="user", + metadata={ + "cat": 1, + "doc": 2}, + additional_properties={ + "prop1": 1, + "prop2": 2}) + heat_template.add_server( + name="server2", + image="image1", + flavor="flavor1", + flavors=[ + "flavor1", + "flavor2"], + ports=[ + "port1", + "port2"], + networks=[ + "network1", + "network2"], + scheduler_hints="hints1", + user="user1", + key_name="foo-key", + user_data="user", + metadata={ + "cat": 1, + "doc": 2}, + additional_properties={ + "prop1": 1, + "prop2": 2}) + heat_template.add_server( + name="server2", + 
image="image1", + flavor="flavor1", + flavors=[ + "flavor3", + "flavor4"], + ports=[ + "port1", + "port2"], + networks=[ + "network1", + "network2"], + scheduler_hints="hints1", + user="user1", + key_name="foo-key", + user_data="user", + metadata={ + "cat": 1, + "doc": 2}, + additional_properties={ + "prop1": 1, + "prop2": 2}) + heat_template.add_flavor( + name="flavor1", + vcpus=1, + ram=2048, + disk=1, + extra_specs={ + "cat": 1, + "dog": 2}) heat_template.add_flavor(name=None, vcpus=1, ram=2048) - heat_template.add_server(name="server1", - image="image1", - flavor="flavor1", - flavors=[], - ports=["port1", "port2"], - networks=["network1", "network2"], - scheduler_hints="hints1", - user="user1", - key_name="foo-key", - user_data="user", - metadata={"cat": 1, "doc": 2}, - additional_properties= {"prop1": 1, "prop2": 2} ) + heat_template.add_server( + name="server1", + image="image1", + flavor="flavor1", + flavors=[], + ports=[ + "port1", + "port2"], + networks=[ + "network1", + "network2"], + scheduler_hints="hints1", + user="user1", + key_name="foo-key", + user_data="user", + metadata={ + "cat": 1, + "doc": 2}, + additional_properties={ + "prop1": 1, + "prop2": 2}) heat_template.add_network("network1") heat_template.add_flavor("test") - self.assertEqual(heat_template.resources['test']['type'], 'OS::Nova::Flavor') + self.assertEqual( + heat_template.resources['test']['type'], 'OS::Nova::Flavor') @mock_patch_target_module('op_utils') @mock_patch_target_module('heatclient') @@ -197,18 +311,25 @@ class HeatTemplateTestCase(unittest.TestCase): with mock.patch.object(self.template, 'status', return_value=None) as mock_status: # block with timeout hit timeout = 0 - with self.assertRaises(RuntimeError) as raised, timer() as time_data: + with self.assertRaises(RuntimeError) as raised, timer(): self.template.create(block=True, timeout=timeout) # ensure op_utils was used expected_op_utils_usage += 1 - self.assertEqual(mock_op_utils.get_session.call_count, 
expected_op_utils_usage) - self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) - self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_session.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_heat_api_version.call_count, + expected_op_utils_usage) # ensure the constructor and instance were used - self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls) - self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls) + self.assertEqual(mock_heat_client_class.call_count, + expected_constructor_calls) + self.assertEqual( + mock_heat_client.stacks.create.call_count, + expected_create_calls) # ensure that the status was used self.assertGreater(mock_status.call_count, expected_status_calls) @@ -222,22 +343,33 @@ class HeatTemplateTestCase(unittest.TestCase): # block with create failed timeout = 10 mock_status.side_effect = iter([None, None, u'CREATE_FAILED']) - with self.assertRaises(RuntimeError) as raised, timer() as time_data: + with self.assertRaises(RuntimeError) as raised, timer(): self.template.create(block=True, timeout=timeout) - # ensure the existing heat_client was used and op_utils was used again - self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage) - self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) - self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage) + # ensure the existing heat_client was used and op_utils was used + # again + self.assertEqual( + mock_op_utils.get_session.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_heat_api_version.call_count, + expected_op_utils_usage) # ensure the 
constructor was not used but the instance was used - self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls) - self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls) + self.assertEqual(mock_heat_client_class.call_count, + expected_constructor_calls) + self.assertEqual( + mock_heat_client.stacks.create.call_count, + expected_create_calls) # ensure that the status was used three times expected_status_calls += 3 self.assertEqual(mock_status.call_count, expected_status_calls) + # NOTE(elfoley): This needs to be split into multiple tests. + # The lines where the template is reset should serve as a guide for where + # to split. @mock_patch_target_module('op_utils') @mock_patch_target_module('heatclient') def test_create(self, mock_heat_client_class, mock_op_utils): @@ -250,7 +382,7 @@ class HeatTemplateTestCase(unittest.TestCase): {'output_key': 'key2', 'output_value': 'value2'}, {'output_key': 'key3', 'output_value': 'value3'}, ] - expected_outputs = { + expected_outputs = { # pylint: disable=unused-variable 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', @@ -266,17 +398,25 @@ class HeatTemplateTestCase(unittest.TestCase): mock_status.return_value = None # no block - self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack) + self.assertIsInstance(self.template.create( + block=False, timeout=2), heat.HeatStack) # ensure op_utils was used expected_op_utils_usage += 1 - self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage) - self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) - self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_session.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) + self.assertEqual( + mock_op_utils.get_heat_api_version.call_count, + expected_op_utils_usage) # ensure 
the constructor and instance were used - self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls) - self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls) + self.assertEqual(mock_heat_client_class.call_count, + expected_constructor_calls) + self.assertEqual( + mock_heat_client.stacks.create.call_count, + expected_create_calls) # ensure that the status was not used self.assertEqual(mock_status.call_count, expected_status_calls) @@ -288,11 +428,15 @@ class HeatTemplateTestCase(unittest.TestCase): self.template.name = 'block, immediate complete test' mock_status.return_value = self.template.HEAT_CREATE_COMPLETE_STATUS - self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack) + self.assertIsInstance(self.template.create( + block=True, timeout=2), heat.HeatStack) # ensure existing instance was re-used and op_utils was not used - self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls) - self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls) + self.assertEqual(mock_heat_client_class.call_count, + expected_constructor_calls) + self.assertEqual( + mock_heat_client.stacks.create.call_count, + expected_create_calls) # ensure status was checked once expected_status_calls += 1 @@ -305,13 +449,17 @@ class HeatTemplateTestCase(unittest.TestCase): self.template.name = 'block, delayed complete test' success_index = 2 - mock_status.side_effect = index_value_iter(success_index, - self.template.HEAT_CREATE_COMPLETE_STATUS) - self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack) + mock_status.side_effect = index_value_iter( + success_index, self.template.HEAT_CREATE_COMPLETE_STATUS) + self.assertIsInstance(self.template.create( + block=True, timeout=2), heat.HeatStack) # ensure existing instance was re-used and op_utils was not used - self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls) - 
self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls) + self.assertEqual(mock_heat_client_class.call_count, + expected_constructor_calls) + self.assertEqual( + mock_heat_client.stacks.create.call_count, + expected_create_calls) # ensure status was checked three more times expected_status_calls += 1 + success_index @@ -328,8 +476,7 @@ class HeatStackTestCase(unittest.TestCase): # call once and then call again if uuid is not none self.assertGreater(delete_mock.call_count, 1) - @mock.patch('yardstick.orchestrator.heat.op_utils') - def test_delete_all_calls_delete(self, mock_op): + def test_delete_all_calls_delete(self): # we must patch the object before we create an instance # so we can override delete() in all the instances with mock.patch.object(heat.HeatStack, "delete") as delete_mock: diff --git a/tests/unit/orchestrator/test_kubernetes.py b/yardstick/tests/unit/orchestrator/test_kubernetes.py index 1a3291c89..33fa1dca6 100644 --- a/tests/unit/orchestrator/test_kubernetes.py +++ b/yardstick/tests/unit/orchestrator/test_kubernetes.py @@ -64,7 +64,7 @@ service ssh restart;while true ; do sleep 10000; done" } ], "nodeSelector": { - "kubernetes.io/hostname": "node-01" + "kubernetes.io/hostname": "node-01" } } } @@ -75,7 +75,7 @@ service ssh restart;while true ; do sleep 10000; done" 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \ service ssh restart;while true ; do sleep 10000; done'], 'ssh_key': 'k8s-86096c30-key', - 'nodeSelector': { 'kubernetes.io/hostname': 'node-01'} + 'nodeSelector': {'kubernetes.io/hostname': 'node-01'} } name = 'host-k8s-86096c30' output_r = KubernetesObject(name, **input_s).get_template() |