Diffstat (limited to 'tests/unit'):
 -rw-r--r--  tests/unit/benchmark/contexts/test_heat.py                              | 111
 -rw-r--r--  tests/unit/benchmark/contexts/test_kubernetes.py                        | 165
 -rw-r--r--  tests/unit/benchmark/contexts/test_node.py                              |  44
 -rw-r--r--  tests/unit/benchmark/contexts/test_standalone.py                        |  45
 -rw-r--r--  tests/unit/benchmark/core/test_task.py                                  |  67
 -rw-r--r--  tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py |  26
 -rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_command.py    |   8
 -rw-r--r--  tests/unit/benchmark/scenarios/availability/test_monitor_multi.py      |   2
 -rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vnf_generic.py          | 497
 -rw-r--r--  tests/unit/benchmark/scenarios/storage/test_storperf.py                |   4
 -rw-r--r--  tests/unit/common/test_utils.py                                        |  32
 -rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py            |  44
 -rw-r--r--  tests/unit/orchestrator/test_heat.py                                   |  46
 -rw-r--r--  tests/unit/orchestrator/test_kubernetes.py                             | 110
 14 files changed, 949 insertions(+), 252 deletions(-)
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/tests/unit/benchmark/contexts/test_heat.py
index 3dadd48eb..c739f33ff 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/tests/unit/benchmark/contexts/test_heat.py
@@ -13,6 +13,7 @@
from __future__ import absolute_import
+import ipaddress
import logging
import os
import unittest
@@ -120,7 +121,8 @@ class HeatContextTestCase(unittest.TestCase):
mock_template.add_router_interface.assert_called_with("bar-fool-network-router-if0", "bar-fool-network-router", "bar-fool-network-subnet")
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
- def test_deploy(self, mock_template):
+ @mock.patch('yardstick.benchmark.contexts.heat.get_neutron_client')
+ def test_deploy(self, mock_neutron, mock_template):
self.test_context.name = 'foo'
self.test_context.template_file = '/bar/baz/some-heat-file'
@@ -133,6 +135,59 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.heat_parameters)
self.assertIsNotNone(self.test_context.stack)
+ def test_add_server_port(self):
+ network1 = mock.MagicMock()
+ network1.vld_id = 'vld111'
+ network2 = mock.MagicMock()
+ network2.vld_id = 'vld777'
+ self.test_context.name = 'foo'
+ self.test_context.stack = mock.MagicMock()
+ self.test_context.networks = {
+ 'a': network1,
+ 'c': network2,
+ }
+ self.test_context.stack.outputs = {
+ 'b': '10.20.30.45',
+ 'b-subnet_id': 1,
+ 'foo-a-subnet-cidr': '10.20.0.0/15',
+ 'foo-a-subnet-gateway_ip': '10.20.30.1',
+ 'b-mac_address': '00:01',
+ 'b-device_id': 'dev21',
+ 'b-network_id': 'net789',
+ 'd': '40.30.20.15',
+ 'd-subnet_id': 2,
+ 'foo-c-subnet-cidr': '40.30.0.0/18',
+ 'foo-c-subnet-gateway_ip': '40.30.20.254',
+ 'd-mac_address': '00:10',
+ 'd-device_id': 'dev43',
+ 'd-network_id': 'net987',
+ }
+ server = mock.MagicMock()
+ server.ports = OrderedDict([
+ ('a', {'stack_name': 'b'}),
+ ('c', {'stack_name': 'd'}),
+ ])
+
+ expected = {
+ "private_ip": '10.20.30.45',
+ "subnet_id": 1,
+ "subnet_cidr": '10.20.0.0/15',
+ "network": '10.20.0.0',
+ "netmask": '255.254.0.0',
+ "gateway_ip": '10.20.30.1',
+ "mac_address": '00:01',
+ "device_id": 'dev21',
+ "network_id": 'net789',
+ "network_name": 'a',
+ "local_mac": '00:01',
+ "local_ip": '10.20.30.45',
+ "vld_id": 'vld111',
+ }
+ self.test_context.add_server_port(server)
+ self.assertEqual(server.private_ip, '10.20.30.45')
+ self.assertEqual(len(server.interfaces), 2)
+ self.assertDictEqual(server.interfaces['a'], expected)
+
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test_undeploy(self, mock_template):
@@ -155,3 +210,57 @@ class HeatContextTestCase(unittest.TestCase):
self.assertEqual(result['ip'], '127.0.0.1')
self.assertEqual(result['private_ip'], '10.0.0.1')
+
+ def test__get_network(self):
+ network1 = mock.MagicMock()
+ network1.name = 'net_1'
+ network1.vld_id = 'vld111'
+ network1.segmentation_id = 'seg54'
+ network1.network_type = 'type_a'
+ network1.physical_network = 'phys'
+
+ network2 = mock.MagicMock()
+ network2.name = 'net_2'
+ network2.vld_id = 'vld999'
+ network2.segmentation_id = 'seg45'
+ network2.network_type = 'type_b'
+ network2.physical_network = 'virt'
+
+ self.test_context.networks = {
+ 'a': network1,
+ 'b': network2,
+ }
+
+ attr_name = None
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {}
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {'vld_id': 'vld777'}
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = 'vld777'
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {'vld_id': 'vld999'}
+ expected = {
+ "name": 'net_2',
+ "vld_id": 'vld999',
+ "segmentation_id": 'seg45',
+ "network_type": 'type_b',
+ "physical_network": 'virt',
+ }
+ result = self.test_context._get_network(attr_name)
+ self.assertDictEqual(result, expected)
+
+ attr_name = 'a'
+ expected = {
+ "name": 'net_1',
+ "vld_id": 'vld111',
+ "segmentation_id": 'seg54',
+ "network_type": 'type_a',
+ "physical_network": 'phys',
+ }
+ result = self.test_context._get_network(attr_name)
+ self.assertDictEqual(result, expected)
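
The values test_add_server_port expects for network '10.20.0.0' and netmask '255.254.0.0' from the subnet CIDR '10.20.0.0/15' are exactly what the newly imported ipaddress module computes; a minimal sketch of that derivation, not the HeatContext implementation itself:

import ipaddress

# Derive the network address and netmask the test expects from a CIDR.
subnet = ipaddress.ip_network(u'10.20.0.0/15')
print(subnet.network_address)  # 10.20.0.0
print(subnet.netmask)          # 255.254.0.0
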
diff --git a/tests/unit/benchmark/contexts/test_kubernetes.py b/tests/unit/benchmark/contexts/test_kubernetes.py
new file mode 100644
index 000000000..f47c07a67
--- /dev/null
+++ b/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.contexts.kubernetes
+
+from __future__ import absolute_import
+import unittest
+import mock
+
+from yardstick.benchmark.contexts.kubernetes import KubernetesContext
+
+
+context_cfg = {
+ 'type': 'Kubernetes',
+ 'name': 'k8s',
+ 'servers': {
+ 'host': {
+ 'image': 'openretriever/yardstick',
+ 'command': '/bin/bash',
+ 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+ },
+ 'target': {
+ 'image': 'openretriever/yardstick',
+ 'command': '/bin/bash',
+ 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+ }
+ }
+}
+
+prefix = 'yardstick.benchmark.contexts.kubernetes'
+
+
+class UndeployTestCase(unittest.TestCase):
+
+ @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
+ @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
+ @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
+ def test_undeploy(self,
+ mock_delete_pods,
+ mock_delete_rcs,
+ mock_delete_ssh):
+
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context.undeploy()
+ self.assertTrue(mock_delete_ssh.called)
+ self.assertTrue(mock_delete_rcs.called)
+ self.assertTrue(mock_delete_pods.called)
+
+
+class DeployTestCase(unittest.TestCase):
+
+ @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
+ @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
+ @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
+ @mock.patch('{}.KubernetesContext._set_ssh_key'.format(prefix))
+ def test_deploy(self,
+ mock_set_ssh_key,
+ mock_create_rcs,
+ mock_get_rc_pods,
+ mock_wait_until_running):
+
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context.deploy()
+ self.assertTrue(mock_set_ssh_key.called)
+ self.assertTrue(mock_create_rcs.called)
+ self.assertTrue(mock_get_rc_pods.called)
+ self.assertTrue(mock_wait_until_running.called)
+
+
+class SSHKeyTestCase(unittest.TestCase):
+
+ @mock.patch('{}.k8s_utils.delete_config_map'.format(prefix))
+ @mock.patch('{}.k8s_utils.create_config_map'.format(prefix))
+ def test_ssh_key(self, mock_create, mock_delete):
+
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context._set_ssh_key()
+ k8s_context._delete_ssh_key()
+ self.assertTrue(mock_create.called)
+ self.assertTrue(mock_delete.called)
+
+
+class WaitUntilRunningTestCase(unittest.TestCase):
+
+ @mock.patch('{}.k8s_utils.read_pod_status'.format(prefix))
+ def test_wait_until_running(self, mock_read_pod_status):
+
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context.template.pods = ['server']
+ mock_read_pod_status.return_value = 'Running'
+ k8s_context._wait_until_running()
+
+
+class GetServerTestCase(unittest.TestCase):
+
+ @mock.patch('{}.k8s_utils.get_pod_list'.format(prefix))
+ def test_get_server(self, mock_get_pod_list):
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+
+ mock_get_pod_list.return_value.items = []
+ server = k8s_context._get_server('server')
+ self.assertIsNone(server)
+
+
+class CreateRcsTestCase(unittest.TestCase):
+
+ @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
+ def test_create_rcs(self, mock_create_rc):
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context._create_rcs()
+ self.assertTrue(mock_create_rc.called)
+
+
+class CreateRcTestCase(unittest.TestCase):
+
+ @mock.patch('{}.k8s_utils.create_replication_controller'.format(prefix))
+ def test_create_rc(self, mock_create_replication_controller):
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context._create_rc({})
+ self.assertTrue(mock_create_replication_controller.called)
+
+
+class DeleteRcsTestCases(unittest.TestCase):
+
+ @mock.patch('{}.KubernetesContext._delete_rc'.format(prefix))
+ def test_delete_rcs(self, mock_delete_rc):
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context._delete_rcs()
+ self.assertTrue(mock_delete_rc.called)
+
+
+class DeleteRcTestCase(unittest.TestCase):
+
+ @mock.patch('{}.k8s_utils.delete_replication_controller'.format(prefix))
+ def test_delete_rc(self, mock_delete_replication_controller):
+ k8s_context = KubernetesContext()
+ k8s_context.init(context_cfg)
+ k8s_context._delete_rc({})
+ self.assertTrue(mock_delete_replication_controller.called)
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/unit/benchmark/contexts/test_node.py b/tests/unit/benchmark/contexts/test_node.py
index 4b35ca421..d5ce8c5cb 100644
--- a/tests/unit/benchmark/contexts/test_node.py
+++ b/tests/unit/benchmark/contexts/test_node.py
@@ -208,6 +208,50 @@ class NodeContextTestCase(unittest.TestCase):
obj._get_client(node_name_args)
self.assertTrue(wait_mock.called)
+ def test__get_network(self):
+ network1 = {
+ 'name': 'net_1',
+ 'vld_id': 'vld111',
+ 'segmentation_id': 'seg54',
+ 'network_type': 'type_a',
+ 'physical_network': 'phys',
+ }
+ network2 = {
+ 'name': 'net_2',
+ 'vld_id': 'vld999',
+ }
+ self.test_context.networks = {
+ 'a': network1,
+ 'b': network2,
+ }
+
+ attr_name = {}
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {'vld_id': 'vld777'}
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ self.assertIsNone(self.test_context._get_network(None))
+
+ attr_name = 'vld777'
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {'vld_id': 'vld999'}
+ expected = {
+ "name": 'net_2',
+ "vld_id": 'vld999',
+ "segmentation_id": None,
+ "network_type": None,
+ "physical_network": None,
+ }
+ result = self.test_context._get_network(attr_name)
+ self.assertDictEqual(result, expected)
+
+ attr_name = 'a'
+ expected = network1
+ result = self.test_context._get_network(attr_name)
+ self.assertDictEqual(result, expected)
+
def main():
unittest.main()
diff --git a/tests/unit/benchmark/contexts/test_standalone.py b/tests/unit/benchmark/contexts/test_standalone.py
index 687ef7305..a6fd776e8 100644
--- a/tests/unit/benchmark/contexts/test_standalone.py
+++ b/tests/unit/benchmark/contexts/test_standalone.py
@@ -129,3 +129,48 @@ class StandaloneContextTestCase(unittest.TestCase):
curr_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(curr_path, filename)
return file_path
+
+ def test__get_network(self):
+ network1 = {
+ 'name': 'net_1',
+ 'vld_id': 'vld111',
+ 'segmentation_id': 'seg54',
+ 'network_type': 'type_a',
+ 'physical_network': 'phys',
+ }
+ network2 = {
+ 'name': 'net_2',
+ 'vld_id': 'vld999',
+ }
+ self.test_context.networks = {
+ 'a': network1,
+ 'b': network2,
+ }
+
+ attr_name = None
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {}
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {'vld_id': 'vld777'}
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = 'vld777'
+ self.assertIsNone(self.test_context._get_network(attr_name))
+
+ attr_name = {'vld_id': 'vld999'}
+ expected = {
+ "name": 'net_2',
+ "vld_id": 'vld999',
+ "segmentation_id": None,
+ "network_type": None,
+ "physical_network": None,
+ }
+ result = self.test_context._get_network(attr_name)
+ self.assertDictEqual(result, expected)
+
+ attr_name = 'a'
+ expected = network1
+ result = self.test_context._get_network(attr_name)
+ self.assertDictEqual(result, expected)
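
Both the NodeContext and StandaloneContext tests above exercise the same _get_network lookup contract. A minimal sketch of that contract under the dict-backed networks used in these fixtures (the real context classes may differ in details):

# Sketch of the lookup behaviour the tests exercise: falsy attr_name gives
# None, a dict is matched by vld_id, a string is a direct name lookup, and
# missing keys default to None in the returned mapping.
def lookup_network(networks, attr_name):
    if not attr_name:
        return None
    if isinstance(attr_name, dict):
        wanted = attr_name.get('vld_id')
        found = next((net for net in networks.values()
                      if net.get('vld_id') == wanted), None)
    else:
        found = networks.get(attr_name)
    if found is None:
        return None
    return {
        'name': found.get('name'),
        'vld_id': found.get('vld_id'),
        'segmentation_id': found.get('segmentation_id'),
        'network_type': found.get('network_type'),
        'physical_network': found.get('physical_network'),
    }
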
diff --git a/tests/unit/benchmark/core/test_task.py b/tests/unit/benchmark/core/test_task.py
index b64bb8eed..8d6d963c3 100644
--- a/tests/unit/benchmark/core/test_task.py
+++ b/tests/unit/benchmark/core/test_task.py
@@ -48,6 +48,73 @@ class TaskTestCase(unittest.TestCase):
self.assertEqual(context_cfg["target"], server_info)
@mock.patch('yardstick.benchmark.core.task.Context')
+ def test_parse_networks_from_nodes(self, mock_context):
+ nodes = {
+ 'node1': {
+ 'interfaces': {
+ 'eth0': {
+ 'name': 'mgmt',
+ },
+ 'eth1': {
+ 'name': 'external',
+ 'vld_id': '23',
+ },
+ 'eth10': {
+ 'name': 'internal',
+ 'vld_id': '55',
+ },
+ },
+ },
+ 'node2': {
+ 'interfaces': {
+ 'eth4': {
+ 'name': 'mgmt',
+ },
+ 'eth2': {
+ 'name': 'external',
+ 'vld_id': '32',
+ },
+ 'eth11': {
+ 'name': 'internal',
+ 'vld_id': '55',
+ },
+ },
+ },
+ }
+
+ mock_context.get_network.side_effect = iter([
+ None,
+ {
+ 'name': 'a',
+ 'network_type': 'private',
+ },
+ {},
+ {
+ 'name': 'b',
+ 'vld_id': 'y',
+ 'subnet_cidr': '10.20.0.0/16',
+ },
+ {
+ 'name': 'c',
+ 'vld_id': 'x',
+ },
+ {
+ 'name': 'd',
+ 'vld_id': 'w',
+ },
+ ])
+
+ expected_get_network_calls = 4  # once for each interface that has a vld_id
+ expected = {
+ 'a': {'name': 'a', 'network_type': 'private'},
+ 'b': {'name': 'b', 'vld_id': 'y', 'subnet_cidr': '10.20.0.0/16'},
+ }
+
+ networks = task.get_networks_from_nodes(nodes)
+ self.assertEqual(mock_context.get_network.call_count, expected_get_network_calls)
+ self.assertDictEqual(networks, expected)
+
+ @mock.patch('yardstick.benchmark.core.task.Context')
@mock.patch('yardstick.benchmark.core.task.base_runner')
def test_run(self, mock_base_runner, mock_ctx):
scenario = {
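
test_parse_networks_from_nodes asserts one Context.get_network call per interface that carries a vld_id (four in the fixture) and that falsy results are discarded. The following is a sketch of that asserted behaviour; the argument shape passed to get_network and the helper's internals are assumptions, not the actual implementation:

from yardstick.benchmark.core.task import Context  # mirrors the name the test patches

def get_networks_from_nodes(nodes):
    """Collect networks referenced by node interfaces (sketch only)."""
    networks = {}
    for node in (nodes or {}).values():
        for interface in node.get('interfaces', {}).values():
            vld_id = interface.get('vld_id')
            if not vld_id:
                continue
            network = Context.get_network({'vld_id': vld_id})
            if network:
                networks[network['name']] = network
    return networks
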
diff --git a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
index 28b27c78a..cc179602e 100644
--- a/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
+++ b/tests/unit/benchmark/scenarios/availability/test_attacker_baremetal.py
@@ -20,9 +20,7 @@ from yardstick.benchmark.scenarios.availability.attacker import \
attacker_baremetal
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal'
- '.subprocess')
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
class ExecuteShellTestCase(unittest.TestCase):
def test__fun_execute_shell_command_successful(self, mock_subprocess):
@@ -31,17 +29,17 @@ class ExecuteShellTestCase(unittest.TestCase):
exitcode, output = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
- def test__fun_execute_shell_command_fail_cmd_exception(self,
- mock_subprocess):
+ @mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.LOG')
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log, mock_subprocess):
cmd = "env"
mock_subprocess.check_output.side_effect = RuntimeError
exitcode, output = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
+ mock_log.error.assert_called_once()
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal'
- '.ssh')
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
+@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.ssh')
class AttackerBaremetalTestCase(unittest.TestCase):
def setUp(self):
@@ -59,28 +57,28 @@ class AttackerBaremetalTestCase(unittest.TestCase):
'host': 'node1',
}
- def test__attacker_baremetal_all_successful(self, mock_ssh):
+ def test__attacker_baremetal_all_successful(self, mock_ssh, mock_subprocess):
+ mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
- mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins.setup()
ins.inject_fault()
ins.recover()
- def test__attacker_baremetal_check_failuer(self, mock_ssh):
+ def test__attacker_baremetal_check_failuer(self, mock_ssh, mock_subprocess):
+ mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
- mock_ssh.SSH.from_node().execute.return_value = (0, "error check", '')
ins.setup()
- def test__attacker_baremetal_recover_successful(self, mock_ssh):
+ def test__attacker_baremetal_recover_successful(self, mock_ssh, mock_subprocess):
self.attacker_cfg["jump_host"] = 'node1'
self.context["node1"]["pwd"] = "123456"
+ mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins = attacker_baremetal.BaremetalAttacker(self.attacker_cfg,
self.context)
- mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins.setup()
ins.recover()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
index 2ed4be731..6a9b3b157 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_command.py
@@ -30,12 +30,14 @@ class ExecuteShellTestCase(unittest.TestCase):
exitcode, output = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
- def test__fun_execute_shell_command_fail_cmd_exception(self,
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log,
mock_subprocess):
cmd = "env"
mock_subprocess.check_output.side_effect = RuntimeError
exitcode, output = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
+ mock_log.error.assert_called_once()
@mock.patch(
@@ -67,13 +69,15 @@ class MonitorOpenstackCmdTestCase(unittest.TestCase):
instance._result = {"outage_time": 0}
instance.verify_SLA()
- def test__monitor_command_monitor_func_failure(self, mock_subprocess):
+ @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
+ def test__monitor_command_monitor_func_failure(self, mock_log, mock_subprocess):
mock_subprocess.check_output.return_value = (1, 'unittest')
instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
mock_subprocess.check_output.side_effect = RuntimeError
ret = instance.monitor_func()
self.assertEqual(ret, False)
+ mock_log.error.assert_called_once()
instance._result = {"outage_time": 10}
instance.verify_SLA()
diff --git a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
index f8d12bd29..b59ec6cf1 100644
--- a/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
+++ b/tests/unit/benchmark/scenarios/availability/test_monitor_multi.py
@@ -36,7 +36,7 @@ class MultiMonitorServiceTestCase(unittest.TestCase):
'key': 'service-status',
'monitor_key': 'service-status',
'host': 'node1',
- 'monitor_time': 3,
+ 'monitor_time': 0.1,
'parameter': {'serviceName': 'haproxy'},
'sla': {'max_outage_time': 1}
}
diff --git a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 111e7812e..c9cd7fed5 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -91,68 +91,97 @@ STL_MOCKS = {
'stl.trex_stl_lib.zmq': mock.MagicMock(),
}
-COMPLETE_TREX_VNFD = \
- {'vnfd:vnfd-catalog':
- {'vnfd':
- [{'benchmark':
- {'kpi':
- ['rx_throughput_fps',
- 'tx_throughput_fps',
- 'tx_throughput_mbps',
- 'rx_throughput_mbps',
- 'tx_throughput_pc_linerate',
- 'rx_throughput_pc_linerate',
- 'min_latency',
- 'max_latency',
- 'avg_latency']},
- 'connection-point': [{'name': 'xe0',
- 'type': 'VPORT'},
- {'name': 'xe1',
- 'type': 'VPORT'}],
- 'description': 'TRex stateless traffic generator for RFC2544',
- 'id': 'TrexTrafficGen',
- 'mgmt-interface': {'ip': '1.1.1.1',
- 'password': 'berta',
- 'user': 'berta',
- 'vdu-id': 'trexgen-baremetal'},
- 'name': 'trexgen',
- 'short-name': 'trexgen',
- 'vdu': [{'description': 'TRex stateless traffic generator for RFC2544',
- 'external-interface':
- [{'name': 'xe0',
- 'virtual-interface': {'bandwidth': '10 Gbps',
- 'dst_ip': '1.1.1.1',
- 'dst_mac': '00:01:02:03:04:05',
- 'local_ip': '1.1.1.2',
- 'local_mac': '00:01:02:03:05:05',
- 'type': 'PCI-PASSTHROUGH',
- 'netmask': "255.255.255.0",
- 'driver': 'i40',
- 'vpci': '0000:00:10.2'},
- 'vnfd-connection-point-ref': 'xe0'},
- {'name': 'xe1',
- 'virtual-interface': {'bandwidth': '10 Gbps',
- 'dst_ip': '2.1.1.1',
- 'dst_mac': '00:01:02:03:04:06',
- 'local_ip': '2.1.1.2',
- 'local_mac': '00:01:02:03:05:06',
- 'type': 'PCI-PASSTHROUGH',
- 'netmask': "255.255.255.0",
- 'driver': 'i40',
- 'vpci': '0000:00:10.1'},
- 'vnfd-connection-point-ref': 'xe1'}],
- 'id': 'trexgen-baremetal',
- 'name': 'trexgen-baremetal'}]}]}}
+COMPLETE_TREX_VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ {
+ 'benchmark': {
+ 'kpi': [
+ 'rx_throughput_fps',
+ 'tx_throughput_fps',
+ 'tx_throughput_mbps',
+ 'rx_throughput_mbps',
+ 'tx_throughput_pc_linerate',
+ 'rx_throughput_pc_linerate',
+ 'min_latency',
+ 'max_latency',
+ 'avg_latency',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'name': 'xe0',
+ 'type': 'VPORT',
+ },
+ {
+ 'name': 'xe1',
+ 'type': 'VPORT',
+ },
+ ],
+ 'description': 'TRex stateless traffic generator for RFC2544',
+ 'id': 'TrexTrafficGen',
+ 'mgmt-interface': {
+ 'ip': '1.1.1.1',
+ 'password': 'berta',
+ 'user': 'berta',
+ 'vdu-id': 'trexgen-baremetal',
+ },
+ 'name': 'trexgen',
+ 'short-name': 'trexgen',
+ 'class-name': 'TrexTrafficGen',
+ 'vdu': [
+ {
+ 'description': 'TRex stateless traffic generator for RFC2544',
+ 'external-interface': [
+ {
+ 'name': 'xe0',
+ 'virtual-interface': {
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '1.1.1.1',
+ 'dst_mac': '00:01:02:03:04:05',
+ 'local_ip': '1.1.1.2',
+ 'local_mac': '00:01:02:03:05:05',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': "255.255.255.0",
+ 'driver': 'i40',
+ 'vpci': '0000:00:10.2',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ },
+ {
+ 'name': 'xe1',
+ 'virtual-interface': {
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '2.1.1.1',
+ 'dst_mac': '00:01:02:03:04:06',
+ 'local_ip': '2.1.1.2',
+ 'local_mac': '00:01:02:03:05:06',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': "255.255.255.0",
+ 'driver': 'i40',
+ 'vpci': '0000:00:10.1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ },
+ ],
+ 'id': 'trexgen-baremetal',
+ 'name': 'trexgen-baremetal',
+ },
+ ],
+ },
+ ],
+ },
+}
IP_ADDR_SHOW = """
-28: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP """
-"""group default qlen 1000
+28: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP \
+group default qlen 1000
link/ether 90:e2:ba:a7:6a:c8 brd ff:ff:ff:ff:ff:ff
inet 1.1.1.1/8 brd 1.255.255.255 scope global eth1
inet6 fe80::92e2:baff:fea7:6ac8/64 scope link
valid_lft forever preferred_lft forever
-29: eth5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP """
-"""group default qlen 1000
+29: eth5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP \
+group default qlen 1000
link/ether 90:e2:ba:a7:6a:c9 brd ff:ff:ff:ff:ff:ff
inet 2.1.1.1/8 brd 2.255.255.255 scope global eth5
inet6 fe80::92e2:baff:fea7:6ac9/64 scope link tentative
@@ -160,10 +189,10 @@ IP_ADDR_SHOW = """
"""
SYS_CLASS_NET = """
-lrwxrwxrwx 1 root root 0 sie 10 14:16 eth1 -> """
-"""../../devices/pci0000:80/0000:80:02.2/0000:84:00.1/net/eth1
-lrwxrwxrwx 1 root root 0 sie 3 10:37 eth2 -> """
-"""../../devices/pci0000:00/0000:00:01.1/0000:84:00.2/net/eth5
+lrwxrwxrwx 1 root root 0 sie 10 14:16 eth1 -> \
+../../devices/pci0000:80/0000:80:02.2/0000:84:00.1/net/eth1
+lrwxrwxrwx 1 root root 0 sie 3 10:37 eth2 -> \
+../../devices/pci0000:00/0000:00:01.1/0000:84:00.2/net/eth5
"""
TRAFFIC_PROFILE = {
@@ -174,137 +203,195 @@ TRAFFIC_PROFILE = {
"traffic_type": "FixedTraffic",
"frame_rate": 100, # pps
"flow_number": 10,
- "frame_size": 64}}
+ "frame_size": 64,
+ },
+}
class TestNetworkServiceTestCase(unittest.TestCase):
def setUp(self):
- self.context_cfg = \
- {'nodes':
- {'trexgen__1': {'role': 'TrafficGen',
- 'name': 'trafficgen_1.yardstick',
- 'ip': '10.10.10.11',
- 'interfaces':
- {'xe0':
- {'netmask': '255.255.255.0',
- 'local_ip': '152.16.100.20',
- 'local_mac': '00:00:00:00:00:01',
- 'driver': 'i40e',
- 'vpci': '0000:07:00.0',
- 'dpdk_port_num': 0},
- 'xe1':
- {'netmask': '255.255.255.0',
- 'local_ip': '152.16.40.20',
- 'local_mac': '00:00:00:00:00:02',
- 'driver': 'i40e',
- 'vpci': '0000:07:00.1',
- 'dpdk_port_num': 1}},
- 'password': 'r00t',
- 'user': 'root'},
- 'trexvnf__1': {'name': 'vnf.yardstick',
- 'ip': '10.10.10.12',
- 'interfaces':
- {'xe0':
- {'netmask': '255.255.255.0',
- 'local_ip': '152.16.100.19',
- 'local_mac': '00:00:00:00:00:03',
- 'driver': 'i40e',
- 'vpci': '0000:07:00.0',
- 'dpdk_port_num': 0},
- 'xe1': {'netmask': '255.255.255.0',
- 'local_ip': '152.16.40.19',
- 'local_mac': '00:00:00:00:00:04',
- 'driver': 'i40e',
- 'vpci': '0000:07:00.1',
- 'dpdk_port_num': 1}},
- 'routing_table': [{'netmask': '255.255.255.0',
- 'gateway': '152.16.100.20',
- 'network': '152.16.100.20',
- 'if': 'xe0'},
- {'netmask': '255.255.255.0',
- 'gateway': '152.16.40.20',
- 'network': '152.16.40.20',
- 'if': 'xe1'}],
- 'host': '10.223.197.164',
- 'role': 'vnf',
- 'user': 'root',
- 'nd_route_tbl':
- [{'netmask': '112',
- 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
- 'network': '0064:ff9b:0:0:0:0:9810:6414',
- 'if': 'xe0'},
- {'netmask': '112',
- 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
- 'network': '0064:ff9b:0:0:0:0:9810:2814',
- 'if': 'xe1'}],
- 'password': 'r00t'}}}
+ self.trexgen__1 = {
+ 'name': 'trafficgen_1.yardstick',
+ 'ip': '10.10.10.11',
+ 'role': 'TrafficGen',
+ 'user': 'root',
+ 'password': 'r00t',
+ 'interfaces': {
+ 'xe0': {
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:07:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02',
+ 'driver': 'i40e',
+ 'vpci': '0000:07:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ }
+
+ self.trexvnf__1 = {
+ 'name': 'vnf.yardstick',
+ 'ip': '10.10.10.12',
+ 'host': '10.223.197.164',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'password': 'r00t',
+ 'interfaces': {
+ 'xe0': {
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'local_mac': '00:00:00:00:00:03',
+ 'driver': 'i40e',
+ 'vpci': '0000:07:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'local_mac': '00:00:00:00:00:04',
+ 'driver': 'i40e',
+ 'vpci': '0000:07:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ }
+
+ self.context_cfg = {
+ 'nodes': {
+ 'trexgen__1': self.trexgen__1,
+ 'trexvnf__1': self.trexvnf__1,
+ },
+ 'networks': {
+ 'private': {
+ 'vld_id': 'private',
+ },
+ 'public': {
+ 'vld_id': 'public',
+ },
+ },
+ }
+
+ self.vld0 = {
+ 'vnfd-connection-point-ref': [
+ {
+ 'vnfd-connection-point-ref': 'xe0',
+ 'member-vnf-index-ref': '1',
+ 'vnfd-id-ref': 'trexgen'
+ },
+ {
+ 'vnfd-connection-point-ref': 'xe0',
+ 'member-vnf-index-ref': '2',
+ 'vnfd-id-ref': 'trexgen'
+ }
+ ],
+ 'type': 'ELAN',
+ 'id': 'private',
+ 'name': 'trexgen__1 to trexvnf__1 link 1'
+ }
+
+ self.vld1 = {
+ 'vnfd-connection-point-ref': [
+ {
+ 'vnfd-connection-point-ref': 'xe1',
+ 'member-vnf-index-ref': '1',
+ 'vnfd-id-ref': 'trexgen'
+ },
+ {
+ 'vnfd-connection-point-ref': 'xe1',
+ 'member-vnf-index-ref': '2',
+ 'vnfd-id-ref': 'trexgen'
+ }
+ ],
+ 'type': 'ELAN',
+ 'id': 'public',
+ 'name': 'trexvnf__1 to trexgen__1 link 2'
+ }
self.topology = {
+ 'id': 'trex-tg-topology',
'short-name': 'trex-tg-topology',
- 'constituent-vnfd':
- [{'member-vnf-index': '1',
- 'VNF model': 'tg_trex_tpl.yaml',
- 'vnfd-id-ref': 'trexgen__1'},
- {'member-vnf-index': '2',
- 'VNF model': 'tg_trex_tpl.yaml',
- 'vnfd-id-ref': 'trexvnf__1'}],
- 'description': 'trex-tg-topology',
'name': 'trex-tg-topology',
- 'vld': [
+ 'description': 'trex-tg-topology',
+ 'constituent-vnfd': [
{
- 'vnfd-connection-point-ref': [
- {
- 'vnfd-connection-point-ref': 'xe0',
- 'member-vnf-index-ref': '1',
- 'vnfd-id-ref': 'trexgen'
- },
- {
- 'vnfd-connection-point-ref': 'xe0',
- 'member-vnf-index-ref': '2',
- 'vnfd-id-ref': 'trexgen'
- }
- ],
- 'type': 'ELAN',
- 'id': 'private',
- 'name': 'trexgen__1 to trexvnf__1 link 1'
+ 'member-vnf-index': '1',
+ 'VNF model': 'tg_trex_tpl.yaml',
+ 'vnfd-id-ref': 'trexgen__1',
},
{
- 'vnfd-connection-point-ref': [
- {
- 'vnfd-connection-point-ref': 'xe1',
- 'member-vnf-index-ref': '1',
- 'vnfd-id-ref': 'trexgen'
- },
- {
- 'vnfd-connection-point-ref': 'xe1',
- 'member-vnf-index-ref': '2',
- 'vnfd-id-ref': 'trexgen'
- }
- ],
- 'type': 'ELAN',
- 'id': 'public',
- 'name': 'trexvnf__1 to trexgen__1 link 2'
- }],
- 'id': 'trex-tg-topology',
+ 'member-vnf-index': '2',
+ 'VNF model': 'tg_trex_tpl.yaml',
+ 'vnfd-id-ref': 'trexvnf__1',
+ },
+ ],
+ 'vld': [self.vld0, self.vld1],
}
self.scenario_cfg = {
'task_path': "",
- 'tc_options': {'rfc2544': {'allowed_drop_rate': '0.8 - 1'}},
+ "topology": self._get_file_abspath("vpe_vnf_topology.yaml"),
'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
'tc': 'tc_ipv4_1Mflow_64B_packetsize',
- 'runner': {'object': 'NetworkServiceTestCase',
- 'interval': 35,
- 'output_filename': 'yardstick.out',
- 'runner_id': 74476,
- 'duration': 400, 'type': 'Duration'},
'traffic_profile': 'ipv4_throughput_vpe.yaml',
- 'traffic_options': {'flow': 'ipv4_1flow_Packets_vpe.yaml',
- 'imix': 'imix_voice.yaml'}, 'type': 'ISB',
- 'nodes': {'tg__2': 'trafficgen_2.yardstick',
- 'tg__1': 'trafficgen_1.yardstick',
- 'vnf__1': 'vnf.yardstick'},
- "topology": self._get_file_abspath("vpe_vnf_topology.yaml")}
+ 'type': 'ISB',
+ 'tc_options': {
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ },
+ },
+ 'runner': {
+ 'object': 'NetworkServiceTestCase',
+ 'interval': 35,
+ 'output_filename': 'yardstick.out',
+ 'runner_id': 74476,
+ 'duration': 400,
+ 'type': 'Duration',
+ },
+ 'traffic_options': {
+ 'flow': 'ipv4_1flow_Packets_vpe.yaml',
+ 'imix': 'imix_voice.yaml'
+ },
+ 'nodes': {
+ 'tg__2': 'trafficgen_2.yardstick',
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick',
+ },
+ }
self.s = NetworkServiceTestCase(self.scenario_cfg, self.context_cfg)
@@ -339,10 +426,18 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.assertEqual({}, self.s._get_traffic_flow(self.scenario_cfg))
def test_get_vnf_imp(self):
- vnfd = COMPLETE_TREX_VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vnfd = COMPLETE_TREX_VNFD['vnfd:vnfd-catalog']['vnfd'][0]['class-name']
with mock.patch.dict("sys.modules", STL_MOCKS):
self.assertIsNotNone(self.s.get_vnf_impl(vnfd))
+ with self.assertRaises(IncorrectConfig) as raised:
+ self.s.get_vnf_impl('NonExistentClass')
+
+ exc_str = str(raised.exception)
+ print(exc_str)
+ self.assertIn('No implementation', exc_str)
+ self.assertIn('found in', exc_str)
+
def test_load_vnf_models_invalid(self):
self.context_cfg["nodes"]['trexgen__1']['VNF model'] = \
self._get_file_abspath("tg_trex_tpl.yaml")
@@ -363,10 +458,10 @@ class TestNetworkServiceTestCase(unittest.TestCase):
ssh.from_node.return_value = ssh_mock
self.s.map_topology_to_infrastructure(self.context_cfg,
self.topology)
- self.assertEqual("tg_trex_tpl.yaml",
- self.context_cfg["nodes"]['trexgen__1']['VNF model'])
- self.assertEqual("tg_trex_tpl.yaml",
- self.context_cfg["nodes"]['trexvnf__1']['VNF model'])
+
+ nodes = self.context_cfg["nodes"]
+ self.assertEqual("tg_trex_tpl.yaml", nodes['trexgen__1']['VNF model'])
+ self.assertEqual("tg_trex_tpl.yaml", nodes['trexvnf__1']['VNF model'])
def test_map_topology_to_infrastructure_insufficient_nodes(self):
del self.context_cfg['nodes']['trexvnf__1']
@@ -376,9 +471,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
mock.Mock(return_value=(1, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
ssh.from_node.return_value = ssh_mock
- self.assertRaises(IncorrectSetup,
- self.s.map_topology_to_infrastructure,
- self.context_cfg, self.topology)
+ with self.assertRaises(IncorrectSetup):
+ self.s.map_topology_to_infrastructure(self.context_cfg, self.topology)
def test_map_topology_to_infrastructure_config_invalid(self):
cfg = dict(self.context_cfg)
@@ -389,9 +483,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
ssh.from_node.return_value = ssh_mock
- self.assertRaises(IncorrectConfig,
- self.s.map_topology_to_infrastructure,
- self.context_cfg, self.topology)
+ with self.assertRaises(IncorrectConfig):
+ self.s.map_topology_to_infrastructure(self.context_cfg, self.topology)
def test__resolve_topology_invalid_config(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -400,14 +493,32 @@ class TestNetworkServiceTestCase(unittest.TestCase):
mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
ssh.from_node.return_value = ssh_mock
- del self.context_cfg['nodes']
- self.assertRaises(IncorrectConfig, self.s._resolve_topology,
- self.context_cfg, self.topology)
+ # purge an important key from the data structure
+ for interface in self.trexgen__1['interfaces'].values():
+ del interface['local_mac']
+
+ with self.assertRaises(IncorrectConfig) as raised:
+ self.s._resolve_topology(self.context_cfg, self.topology)
+
+ self.assertIn('not found', str(raised.exception))
+
+ # make a connection point ref with 3 points
+ self.vld0['vnfd-connection-point-ref'].append(
+ self.vld0['vnfd-connection-point-ref'][0])
+
+ with self.assertRaises(IncorrectConfig) as raised:
+ self.s._resolve_topology(self.context_cfg, self.topology)
+
+ self.assertIn('wrong number of endpoints', str(raised.exception))
+
+ # make a connection point ref with 1 point
+ self.vld0['vnfd-connection-point-ref'] = \
+ self.vld0['vnfd-connection-point-ref'][:1]
+
+ with self.assertRaises(IncorrectConfig) as raised:
+ self.s._resolve_topology(self.context_cfg, self.topology)
- self.topology['vld'][0]['vnfd-connection-point-ref'].append(
- self.topology['vld'][0]['vnfd-connection-point-ref'])
- self.assertRaises(IncorrectConfig, self.s._resolve_topology,
- self.context_cfg, self.topology)
+ self.assertIn('wrong number of endpoints', str(raised.exception))
def test_run(self):
tgen = mock.Mock(autospec=GenericTrafficGen)
@@ -462,8 +573,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
def test__get_traffic_profile_exception(self):
cfg = dict(self.scenario_cfg)
cfg["traffic_profile"] = ""
- self.assertRaises(IOError, self.s._get_traffic_profile, cfg,
- self.context_cfg)
+ with self.assertRaises(IOError):
+ self.s._get_traffic_profile(cfg, self.context_cfg)
def test___get_traffic_imix_exception(self):
cfg = dict(self.scenario_cfg)
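
Several hunks above replace assertRaises(Exc, callable, args) with the context-manager form, which also exposes the raised exception so its message can be checked. A small self-contained illustration of the pattern:

import unittest

class AssertRaisesExampleTestCase(unittest.TestCase):
    def test_message_is_checkable(self):
        with self.assertRaises(ValueError) as raised:
            int('not a number')
        # the caught exception is available on the context manager
        self.assertIn('invalid literal', str(raised.exception))

if __name__ == '__main__':
    unittest.main()
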
diff --git a/tests/unit/benchmark/scenarios/storage/test_storperf.py b/tests/unit/benchmark/scenarios/storage/test_storperf.py
index 00054d531..7b16bb37d 100644
--- a/tests/unit/benchmark/scenarios/storage/test_storperf.py
+++ b/tests/unit/benchmark/scenarios/storage/test_storperf.py
@@ -130,7 +130,7 @@ class StorPerfTestCase(unittest.TestCase):
"queue_depths": 4,
"workload": "rs",
"StorPerf_ip": "192.168.23.2",
- "query_interval": 10,
+ "query_interval": 0,
"timeout": 60
}
@@ -160,7 +160,7 @@ class StorPerfTestCase(unittest.TestCase):
"queue_depths": 4,
"workload": "rs",
"StorPerf_ip": "192.168.23.2",
- "query_interval": 10,
+ "query_interval": 0,
"timeout": 60
}
diff --git a/tests/unit/common/test_utils.py b/tests/unit/common/test_utils.py
index 7f260cfe6..e21e5fa3a 100644
--- a/tests/unit/common/test_utils.py
+++ b/tests/unit/common/test_utils.py
@@ -163,6 +163,38 @@ class TranslateToStrTestCase(unittest.TestCase):
self.assertEqual(result, output_str)
+class ChangeObjToDictTestCase(unittest.TestCase):
+
+ def test_change_obj_to_dict(self):
+ class A(object):
+ def __init__(self):
+ self.name = 'yardstick'
+
+ obj = A()
+ obj_r = utils.change_obj_to_dict(obj)
+ obj_s = {'name': 'yardstick'}
+ self.assertEqual(obj_r, obj_s)
+
+
+class SetDictValueTestCase(unittest.TestCase):
+
+ def test_set_dict_value(self):
+ input_dic = {
+ 'hello': 'world'
+ }
+ output_dic = utils.set_dict_value(input_dic, 'welcome.to', 'yardstick')
+ self.assertEqual(output_dic.get('welcome', {}).get('to'), 'yardstick')
+
+
+class RemoveFileTestCase(unittest.TestCase):
+
+ def test_remove_file(self):
+ try:
+ utils.remove_file('notexistfile.txt')
+ except Exception as e:
+ self.assertTrue(isinstance(e, OSError))
+
+
def main():
unittest.main()
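
SetDictValueTestCase only checks that a dotted key creates nested dictionaries. A minimal sketch of the semantics that assertion implies (assumed behaviour, not the code of utils.set_dict_value):

def set_dict_value(dic, keys, value):
    # 'welcome.to' -> dic['welcome']['to'] = value, creating levels as needed
    current = dic
    parts = keys.split('.')
    for key in parts[:-1]:
        current = current.setdefault(key, {})
    current[parts[-1]] = value
    return dic

print(set_dict_value({'hello': 'world'}, 'welcome.to', 'yardstick'))
# {'hello': 'world', 'welcome': {'to': 'yardstick'}}
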
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
index b69e537aa..54934c2fe 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -16,17 +16,20 @@
#
from __future__ import absolute_import
+
+import os
import unittest
+
import mock
-import os
-from yardstick.network_services.vnf_generic.vnf.vpe_vnf import VpeApproxVnf
-from yardstick.network_services.vnf_generic.vnf import vpe_vnf
from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.vnf_generic.vnf import vpe_vnf
from yardstick.network_services.vnf_generic.vnf.base import \
QueueFileWrapper
+from yardstick.network_services.vnf_generic.vnf.vpe_vnf import VpeApproxVnf
+@mock.patch('yardstick.network_services.vnf_generic.vnf.vpe_vnf.time')
class TestVpeApproxVnf(unittest.TestCase):
VNFD = {'vnfd:vnfd-catalog':
{'vnfd':
@@ -218,12 +221,12 @@ class TestVpeApproxVnf(unittest.TestCase):
'password': 'r00t',
'VNF model': 'vpe_vnf.yaml'}}}
- def test___init__(self):
+ def test___init__(self, mock_time):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
vpe_approx_vnf = VpeApproxVnf(vnfd)
self.assertIsNone(vpe_approx_vnf._vnf_process)
- def test_collect_kpi(self):
+ def test_collect_kpi(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -235,15 +238,17 @@ class TestVpeApproxVnf(unittest.TestCase):
vpe_approx_vnf.resource = mock.Mock(autospec=ResourceProfile)
vpe_approx_vnf.resource.check_if_sa_running = \
mock.Mock(return_value=[0, 1])
- vpe_approx_vnf.resource.amqp_collect_nfvi_kpi= \
+ vpe_approx_vnf.resource.amqp_collect_nfvi_kpi = \
mock.Mock(return_value={})
result = {'pkt_in_down_stream': 0,
'pkt_in_up_stream': 0,
'collect_stats': {'core': {}},
'pkt_drop_down_stream': 0, 'pkt_drop_up_stream': 0}
- self.assertEqual(result, vpe_approx_vnf.collect_kpi())
+ # mock execute_command because it sleeps for 3 seconds.
+ with mock.patch.object(vpe_approx_vnf, "execute_command", return_value=""):
+ self.assertEqual(result, vpe_approx_vnf.collect_kpi())
- def test_execute_command(self):
+ def test_execute_command(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -255,7 +260,7 @@ class TestVpeApproxVnf(unittest.TestCase):
cmd = "quit"
self.assertEqual("", vpe_approx_vnf.execute_command(cmd))
- def test_get_stats_vpe(self):
+ def test_get_stats_vpe(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -270,7 +275,7 @@ class TestVpeApproxVnf(unittest.TestCase):
'pkt_drop_down_stream': 400, 'pkt_drop_up_stream': 600}
self.assertEqual(result, vpe_approx_vnf.get_stats_vpe())
- def test_run_vpe(self):
+ def test_run_vpe(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
@@ -288,7 +293,7 @@ class TestVpeApproxVnf(unittest.TestCase):
self.assertEqual(None,
vpe_approx_vnf._run_vpe(queue_wrapper, vpe_vnf))
- def test_instantiate(self):
+ def test_instantiate(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -301,11 +306,12 @@ class TestVpeApproxVnf(unittest.TestCase):
vpe_approx_vnf._run_vpe = mock.Mock(return_value=0)
vpe_approx_vnf._resource_collect_start = mock.Mock(return_value=0)
vpe_approx_vnf.q_out.put("pipeline>")
- vpe_vnf.WAIT_TIME = 3
- self.assertEqual(0, vpe_approx_vnf.instantiate(self.scenario_cfg,
- self.context_cfg))
+ vpe_vnf.WAIT_TIME = 0.1
+ # if the process is still running, exitcode will be None
+ self.assertIn(vpe_approx_vnf.instantiate(self.scenario_cfg, self.context_cfg),
+ {0, None})
- def test_instantiate_panic(self):
+ def test_instantiate_panic(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -316,17 +322,17 @@ class TestVpeApproxVnf(unittest.TestCase):
vpe_approx_vnf = VpeApproxVnf(vnfd)
self.scenario_cfg['vnf_options'] = {'vpe': {'cfg': ""}}
vpe_approx_vnf._run_vpe = mock.Mock(return_value=0)
- vpe_vnf.WAIT_TIME = 1
+ vpe_vnf.WAIT_TIME = 0.1
self.assertRaises(RuntimeError, vpe_approx_vnf.instantiate,
self.scenario_cfg, self.context_cfg)
- def test_scale(self):
+ def test_scale(self, mock_time):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
vpe_approx_vnf = VpeApproxVnf(vnfd)
flavor = ""
self.assertRaises(NotImplementedError, vpe_approx_vnf.scale, flavor)
- def test_setup_vnf_environment(self):
+ def test_setup_vnf_environment(self, mock_time):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@@ -338,7 +344,7 @@ class TestVpeApproxVnf(unittest.TestCase):
self.assertEqual(None,
vpe_approx_vnf.setup_vnf_environment(ssh_mock))
- def test_terminate(self):
+ def test_terminate(self, mock_time):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
vpe_approx_vnf = VpeApproxVnf(vnfd)
self.assertEqual(None, vpe_approx_vnf.terminate())
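
The class-level @mock.patch('...vpe_vnf.time') added above hands the patched module to every test method as an extra argument (hence the mock_time parameter added throughout), so sleeps in the code under test cost nothing. An illustration of the same pattern using the standard time module rather than vpe_vnf:

import time
import unittest

import mock

@mock.patch('time.sleep')
class NoSleepTestCase(unittest.TestCase):
    def test_sleep_is_mocked(self, mock_sleep):
        time.sleep(1000)  # returns immediately; the call is only recorded
        mock_sleep.assert_called_once_with(1000)

if __name__ == '__main__':
    unittest.main()
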
diff --git a/tests/unit/orchestrator/test_heat.py b/tests/unit/orchestrator/test_heat.py
index 3b3873301..3dc8ad7e7 100644
--- a/tests/unit/orchestrator/test_heat.py
+++ b/tests/unit/orchestrator/test_heat.py
@@ -11,6 +11,7 @@
# Unittest for yardstick.benchmark.orchestrator.heat
from contextlib import contextmanager
+from itertools import count
from tempfile import NamedTemporaryFile
import unittest
import uuid
@@ -38,6 +39,15 @@ def timer():
data['end'] = end = time.time()
data['delta'] = end - start
+
+def index_value_iter(index, index_value, base_value=None):
+ for current_index in count():
+ if current_index == index:
+ yield index_value
+ else:
+ yield base_value
+
+
def get_error_message(error):
try:
# py2
@@ -249,7 +259,7 @@ class HeatTemplateTestCase(unittest.TestCase):
@mock_patch_target_module('op_utils')
@mock_patch_target_module('heatclient.client.Client')
def test_create(self, mock_heat_client_class, mock_op_utils):
- self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+ self.template.HEAT_WAIT_LOOP_INTERVAL = 0.2
mock_heat_client = mock_heat_client_class()
# populate attributes of the constructed mock
@@ -270,12 +280,11 @@ class HeatTemplateTestCase(unittest.TestCase):
expected_op_utils_usage = 0
with mock.patch.object(self.template, 'status') as mock_status:
- # no block
- with timer() as time_data:
- self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack)
+ self.template.name = 'no block test'
+ mock_status.return_value = None
- # ensure runtime is much less than one interval
- self.assertLess(time_data['delta'], interval * 0.2)
+ # no block
+ self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack)
# ensure op_utils was used
expected_op_utils_usage += 1
@@ -296,12 +305,10 @@ class HeatTemplateTestCase(unittest.TestCase):
self.assertEqual(self.template.outputs, {})
# block with immediate complete
- mock_status.return_value = u'CREATE_COMPLETE'
- with timer() as time_data:
- self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+ self.template.name = 'block, immediate complete test'
- # ensure runtime is less than one interval
- self.assertLess(time_data['delta'], interval * 0.2)
+ mock_status.return_value = self.template.HEAT_CREATE_COMPLETE_STATUS
+ self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
# ensure existing instance was re-used and op_utils was not used
expected_create_calls += 1
@@ -319,14 +326,12 @@ class HeatTemplateTestCase(unittest.TestCase):
self.template.outputs = None
# block with delayed complete
- mock_status.side_effect = iter([None, None, u'CREATE_COMPLETE'])
- with timer() as time_data:
- self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+ self.template.name = 'block, delayed complete test'
- # ensure runtime is approximately two intervals
- expected_time_low = interval * 1.8
- expected_time_high = interval * 2.2
- self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+ success_index = 2
+ mock_status.side_effect = index_value_iter(success_index,
+ self.template.HEAT_CREATE_COMPLETE_STATUS)
+ self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
# ensure existing instance was re-used and op_utils was not used
expected_create_calls += 1
@@ -334,7 +339,7 @@ class HeatTemplateTestCase(unittest.TestCase):
self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
# ensure status was checked three more times
- expected_status_calls += 3
+ expected_status_calls += 1 + success_index
self.assertEqual(mock_status.call_count, expected_status_calls)
@@ -348,7 +353,8 @@ class HeatStackTestCase(unittest.TestCase):
# call once and then call again if uuid is not none
self.assertGreater(delete_mock.call_count, 1)
- def test_delete_all_calls_delete(self):
+ @mock.patch('yardstick.orchestrator.heat.op_utils')
+ def test_delete_all_calls_delete(self, mock_op):
stack = heat.HeatStack('test')
stack.uuid = 1
with mock.patch.object(stack, "delete") as delete_mock:
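
The index_value_iter helper added near the top of this file's diff replaces the old wall-clock timing assertions; fed to a mock's side_effect it makes status polling succeed on a chosen iteration. A short usage illustration:

from itertools import count

import mock

def index_value_iter(index, index_value, base_value=None):
    for current_index in count():
        yield index_value if current_index == index else base_value

status = mock.Mock(side_effect=index_value_iter(2, u'CREATE_COMPLETE'))
print([status() for _ in range(4)])
# [None, None, u'CREATE_COMPLETE', None]
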
diff --git a/tests/unit/orchestrator/test_kubernetes.py b/tests/unit/orchestrator/test_kubernetes.py
new file mode 100644
index 000000000..51718ab86
--- /dev/null
+++ b/tests/unit/orchestrator/test_kubernetes.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Intel Corporation
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.orchestrator.kubernetes
+import unittest
+import mock
+
+from yardstick.orchestrator.kubernetes import KubernetesObject
+from yardstick.orchestrator.kubernetes import KubernetesTemplate
+
+
+class GetTemplateTestCase(unittest.TestCase):
+
+ def test_get_template(self):
+ output_t = {
+ "apiVersion": "v1",
+ "kind": "ReplicationController",
+ "metadata": {
+ "name": "host-k8s-86096c30"
+ },
+ "spec": {
+ "replicas": 1,
+ "template": {
+ "metadata": {
+ "labels": {
+ "app": "host-k8s-86096c30"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "args": [
+ "-c",
+ "chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done"
+ ],
+ "command": [
+ "/bin/bash"
+ ],
+ "image": "openretriever/yardstick",
+ "name": "host-k8s-86096c30-container",
+ "volumeMounts": [
+ {
+ "mountPath": "/root/.ssh/",
+ "name": "k8s-86096c30-key"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "name": "k8s-86096c30-key"
+ },
+ "name": "k8s-86096c30-key"
+ }
+ ]
+ }
+ }
+ }
+ }
+ input_s = {
+ 'command': '/bin/bash',
+ 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done'],
+ 'ssh_key': 'k8s-86096c30-key'
+ }
+ name = 'host-k8s-86096c30'
+ output_r = KubernetesObject(name, **input_s).get_template()
+ self.assertEqual(output_r, output_t)
+
+
+class GetRcPodsTestCase(unittest.TestCase):
+
+ @mock.patch('yardstick.orchestrator.kubernetes.k8s_utils.get_pod_list')
+ def test_get_rc_pods(self, mock_get_pod_list):
+ servers = {
+ 'host': {
+ 'image': 'openretriever/yardstick',
+ 'command': '/bin/bash',
+ 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+ },
+ 'target': {
+ 'image': 'openretriever/yardstick',
+ 'command': '/bin/bash',
+ 'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
+service ssh restart;while true ; do sleep 10000; done']
+ }
+ }
+ k8s_template = KubernetesTemplate('k8s-86096c30', servers)
+ mock_get_pod_list.return_value.items = []
+ pods = k8s_template.get_rc_pods()
+ self.assertEqual(pods, [])
+
+
+def main():
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()