-rw-r--r--  tests/unit/network_services/helpers/test_dpdkbindnic_helper.py  402
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py  19
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py  27
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py  2
-rw-r--r--  yardstick/benchmark/contexts/base.py  23
-rw-r--r--  yardstick/benchmark/contexts/dummy.py  24
-rw-r--r--  yardstick/benchmark/contexts/heat.py  80
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py  3
-rw-r--r--  yardstick/benchmark/contexts/node.py  8
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py  3
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py  3
-rw-r--r--  yardstick/benchmark/core/__init__.py  1
-rw-r--r--  yardstick/benchmark/core/task.py  236
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py  183
-rw-r--r--  yardstick/cmd/commands/task.py  9
-rw-r--r--  yardstick/common/exceptions.py  20
-rw-r--r--  yardstick/common/utils.py  15
-rw-r--r--  yardstick/error.py  48
-rw-r--r--  yardstick/network_services/constants.py  17
-rw-r--r--  yardstick/network_services/helpers/dpdkbindnic_helper.py  313
-rw-r--r--  yardstick/network_services/utils.py  1
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py  3
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py  82
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py  4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py  61
-rw-r--r--  yardstick/orchestrator/heat.py  37
-rw-r--r--  yardstick/ssh.py  68
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py  70
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py  58
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_dummy.py  52
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_heat.py  275
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_kubernetes.py  9
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_node.py  191
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py  277
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py  189
-rw-r--r--  yardstick/tests/unit/common/test_utils.py  62
-rw-r--r--  yardstick/tests/unit/orchestrator/test_heat.py  90
-rw-r--r--  yardstick/tests/unit/test_ssh.py  54
38 files changed, 2098 insertions, 921 deletions
diff --git a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py b/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
index e30aee854..cc980640c 100644
--- a/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
+++ b/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
@@ -16,6 +16,14 @@
import mock
import unittest
+
+import os
+
+from yardstick.error import IncorrectConfig, SSHError
+from yardstick.error import IncorrectNodeSetup
+from yardstick.error import IncorrectSetup
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkInterface
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkNode
from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper
from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelperException
from yardstick.network_services.helpers.dpdkbindnic_helper import NETWORK_KERNEL
@@ -26,7 +34,269 @@ from yardstick.network_services.helpers.dpdkbindnic_helper import NETWORK_OTHER
from yardstick.network_services.helpers.dpdkbindnic_helper import CRYPTO_OTHER
+NAME = "tg_0"
+
+
+class TestDpdkInterface(unittest.TestCase):
+
+ SAMPLE_NETDEVS = {
+ 'enp11s0': {
+ 'address': '0a:de:ad:be:ef:f5',
+ 'device': '0x1533',
+ 'driver': 'igb',
+ 'ifindex': '2',
+ 'interface_name': 'enp11s0',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:0b:00.0',
+ 'subsystem_device': '0x1533',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ },
+ 'lan': {
+ 'address': '0a:de:ad:be:ef:f4',
+ 'device': '0x153a',
+ 'driver': 'e1000e',
+ 'ifindex': '3',
+ 'interface_name': 'lan',
+ 'operstate': 'up',
+ 'pci_bus_id': '0000:00:19.0',
+ 'subsystem_device': '0x153a',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ }
+ }
+
+ SAMPLE_VM_NETDEVS = {
+ 'eth1': {
+ 'address': 'fa:de:ad:be:ef:5b',
+ 'device': '0x0001',
+ 'driver': 'virtio_net',
+ 'ifindex': '3',
+ 'interface_name': 'eth1',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:00:04.0',
+ 'vendor': '0x1af4'
+ }
+ }
+
+ def test_parse_netdev_info(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/ifindex:2
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/address:0a:de:ad:be:ef:f5
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/operstate:down
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/driver:igb
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/pci_bus_id:0000:0b:00.0
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/ifindex:3
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/address:0a:de:ad:be:ef:f4
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/operstate:up
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/driver:e1000e
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/pci_bus_id:0000:00:19.0
+"""
+ res = DpdkBindHelper.parse_netdev_info(output)
+ self.assertDictEqual(res, self.SAMPLE_NETDEVS)
+
+ def test_parse_netdev_info_virtio(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/ifindex:3
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/address:fa:de:ad:be:ef:5b
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/operstate:down
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/vendor:0x1af4
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/device:0x0001
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/driver:virtio_net
+"""
+ res = DpdkBindHelper.parse_netdev_info(output)
+ self.assertDictEqual(res, self.SAMPLE_VM_NETDEVS)
+
+ def test_probe_missing_values(self):
+ mock_dpdk_node = mock.Mock()
+ mock_dpdk_node.netdevs = self.SAMPLE_NETDEVS.copy()
+
+ interface = {'local_mac': '0a:de:ad:be:ef:f5'}
+ dpdk_intf = DpdkInterface(mock_dpdk_node, interface)
+
+ dpdk_intf.probe_missing_values()
+ self.assertEqual(interface['vpci'], '0000:0b:00.0')
+
+ interface['local_mac'] = '0a:de:ad:be:ef:f4'
+ dpdk_intf.probe_missing_values()
+ self.assertEqual(interface['vpci'], '0000:00:19.0')
+
+ def test_probe_missing_values_no_update(self):
+ mock_dpdk_node = mock.Mock()
+ mock_dpdk_node.netdevs = self.SAMPLE_NETDEVS.copy()
+ del mock_dpdk_node.netdevs['enp11s0']['driver']
+ del mock_dpdk_node.netdevs['lan']['driver']
+
+ interface = {'local_mac': '0a:de:ad:be:ef:f5'}
+ dpdk_intf = DpdkInterface(mock_dpdk_node, interface)
+
+ dpdk_intf.probe_missing_values()
+ self.assertNotIn('vpci', interface)
+ self.assertNotIn('driver', interface)
+
+ def test_probe_missing_values_negative(self):
+ mock_dpdk_node = mock.Mock()
+ mock_dpdk_node.netdevs.values.side_effect = IncorrectNodeSetup
+
+ interface = {'local_mac': '0a:de:ad:be:ef:f5'}
+ dpdk_intf = DpdkInterface(mock_dpdk_node, interface)
+
+ with self.assertRaises(IncorrectConfig):
+ dpdk_intf.probe_missing_values()
+
+
+class TestDpdkNode(unittest.TestCase):
+
+ INTERFACES = [
+ {'name': 'name1',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci10',
+ }},
+ {'name': 'name2',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci2',
+ }},
+ {'name': 'name3',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'some-pci1',
+ }},
+ ]
+
+ def test_probe_dpdk_drivers(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ interfaces = [
+ {'name': 'name1',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci10',
+ }},
+ {'name': 'name2',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci2',
+ }},
+ {'name': 'name3',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'some-pci1',
+ }},
+ ]
+
+ dpdk_node = DpdkNode(NAME, interfaces, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+
+ dpdk_helper.probe_real_kernel_drivers = mock.Mock()
+ dpdk_helper.real_kernel_interface_driver_map = {
+ 'pci1': 'driver1',
+ 'pci2': 'driver2',
+ 'pci3': 'driver3',
+ 'pci4': 'driver1',
+ 'pci6': 'driver3',
+ }
+
+ dpdk_node._probe_dpdk_drivers()
+ self.assertNotIn('driver', interfaces[0]['virtual-interface'])
+ self.assertEqual(interfaces[1]['virtual-interface']['driver'], 'driver2')
+ self.assertEqual(interfaces[2]['virtual-interface']['driver'], 'driver1')
+
+ def test_check(self):
+ def update():
+ if not mock_force_rebind.called:
+ raise IncorrectConfig
+
+ interfaces[0]['virtual-interface'].update({
+ 'vpci': '0000:01:02.1',
+ 'local_ip': '10.20.30.40',
+ 'netmask': '255.255.0.0',
+ 'driver': 'ixgbe',
+ })
+
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ interfaces = [
+ {'name': 'name1',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ }},
+ ]
+
+ dpdk_node = DpdkNode(NAME, interfaces, mock_ssh_helper)
+ dpdk_node._probe_missing_values = mock_probe_missing = mock.Mock(side_effect=update)
+ dpdk_node._force_rebind = mock_force_rebind = mock.Mock()
+
+ self.assertIsNone(dpdk_node.check())
+ self.assertEqual(mock_probe_missing.call_count, 2)
+
+ @mock.patch('yardstick.network_services.helpers.dpdkbindnic_helper.DpdkInterface')
+ def test_check_negative(self, mock_intf_type):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ mock_intf_type().check.side_effect = SSHError
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+
+ with self.assertRaises(IncorrectSetup):
+ dpdk_node.check()
+
+ def test_probe_netdevs(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ expected = {'key1': 500, 'key2': 'hello world'}
+ update = {'key1': 1000, 'key3': []}
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+ dpdk_helper.find_net_devices = mock.Mock(side_effect=[expected, update])
+
+ self.assertDictEqual(dpdk_node.netdevs, {})
+ dpdk_node._probe_netdevs()
+ self.assertDictEqual(dpdk_node.netdevs, expected)
+
+ expected = {'key1': 1000, 'key2': 'hello world', 'key3': []}
+ dpdk_node._probe_netdevs()
+ self.assertDictEqual(dpdk_node.netdevs, expected)
+
+ def test_probe_netdevs_setup_negative(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+ dpdk_helper.find_net_devices = mock.Mock(side_effect=DpdkBindHelperException)
+
+ with self.assertRaises(DpdkBindHelperException):
+ dpdk_node._probe_netdevs()
+
+ def test_force_rebind(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+ dpdk_helper.force_dpdk_rebind = mock_helper_func = mock.Mock()
+
+ dpdk_node._force_rebind()
+ self.assertEqual(mock_helper_func.call_count, 1)
+
+
class TestDpdkBindHelper(unittest.TestCase):
+ bin_path = "/opt/nsb_bin"
EXAMPLE_OUTPUT = """
Network devices using DPDK-compatible driver
@@ -111,13 +381,15 @@ Other crypto devices
def test___init__(self):
conn = mock.Mock()
conn.provision_tool = mock.Mock(return_value='path_to_tool')
+ conn.join_bin_path.return_value = os.path.join(self.bin_path, DpdkBindHelper.DPDK_DEVBIND)
dpdk_bind_helper = DpdkBindHelper(conn)
self.assertEqual(conn, dpdk_bind_helper.ssh_helper)
self.assertEqual(self.CLEAN_STATUS, dpdk_bind_helper.dpdk_status)
self.assertIsNone(dpdk_bind_helper.status_nic_row_re)
- self.assertIsNone(dpdk_bind_helper._dpdk_devbind)
+ self.assertEqual(dpdk_bind_helper.dpdk_devbind,
+ os.path.join(self.bin_path, dpdk_bind_helper.DPDK_DEVBIND))
self.assertIsNone(dpdk_bind_helper._status_cmd_attr)
def test__dpdk_execute(self):
@@ -125,8 +397,7 @@ Other crypto devices
conn.execute = mock.Mock(return_value=(0, 'output', 'error'))
conn.provision_tool = mock.Mock(return_value='tool_path')
dpdk_bind_helper = DpdkBindHelper(conn)
- self.assertEqual((0, 'output', 'error'),
- dpdk_bind_helper._dpdk_execute('command'))
+ self.assertEqual((0, 'output', 'error'), dpdk_bind_helper._dpdk_execute('command'))
def test__dpdk_execute_failure(self):
conn = mock.Mock()
@@ -141,7 +412,7 @@ Other crypto devices
dpdk_bind_helper = DpdkBindHelper(conn)
- dpdk_bind_helper._addline(NETWORK_KERNEL, self.ONE_INPUT_LINE)
+ dpdk_bind_helper._add_line(NETWORK_KERNEL, self.ONE_INPUT_LINE)
self.assertIsNotNone(dpdk_bind_helper.dpdk_status)
self.assertEqual(self.ONE_INPUT_LINE_PARSED, dpdk_bind_helper.dpdk_status[NETWORK_KERNEL])
@@ -161,11 +432,35 @@ Other crypto devices
dpdk_bind_helper = DpdkBindHelper(conn)
- dpdk_bind_helper.parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
+ dpdk_bind_helper._parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
self.maxDiff = None
self.assertEqual(self.PARSED_EXAMPLE, dpdk_bind_helper.dpdk_status)
+ def test_kernel_bound_pci_addresses(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ expected = ['a', 'b', 3]
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+ dpdk_helper.dpdk_status = {
+ NETWORK_DPDK: [{'vpci': 4}, {'vpci': 5}, {'vpci': 'g'}],
+ NETWORK_KERNEL: [{'vpci': 'a'}, {'vpci': 'b'}, {'vpci': 3}],
+ CRYPTO_DPDK: [],
+ }
+
+ result = dpdk_helper.kernel_bound_pci_addresses
+ self.assertEqual(result, expected)
+
+ def test_find_net_devices_negative(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 1, 'error', 'debug'
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+
+ self.assertDictEqual(dpdk_helper.find_net_devices(), {})
+
def test_read_status(self):
conn = mock.Mock()
conn.execute = mock.Mock(return_value=(0, self.EXAMPLE_OUTPUT, ''))
@@ -180,7 +475,7 @@ Other crypto devices
dpdk_bind_helper = DpdkBindHelper(conn)
- dpdk_bind_helper.parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
+ dpdk_bind_helper._parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
self.assertEqual(['0000:00:04.0', '0000:00:05.0'],
dpdk_bind_helper._get_bound_pci_addresses(NETWORK_DPDK))
@@ -192,18 +487,18 @@ Other crypto devices
dpdk_bind_helper = DpdkBindHelper(conn)
- dpdk_bind_helper.parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
+ dpdk_bind_helper._parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
self.assertEqual({'0000:00:04.0': 'igb_uio',
- '0000:00:03.0': 'virtio-pci',
- '0000:00:05.0': 'igb_uio',
- },
- dpdk_bind_helper.interface_driver_map)
+ '0000:00:03.0': 'virtio-pci',
+ '0000:00:05.0': 'igb_uio',
+ },
+ dpdk_bind_helper.interface_driver_map)
def test_bind(self):
conn = mock.Mock()
conn.execute = mock.Mock(return_value=(0, '', ''))
- conn.provision_tool = mock.Mock(return_value='/opt/nsb_bin/dpdk-devbind.py')
+ conn.join_bin_path.return_value = os.path.join(self.bin_path, DpdkBindHelper.DPDK_DEVBIND)
dpdk_bind_helper = DpdkBindHelper(conn)
dpdk_bind_helper.read_status = mock.Mock()
@@ -217,7 +512,7 @@ Other crypto devices
def test_bind_single_pci(self):
conn = mock.Mock()
conn.execute = mock.Mock(return_value=(0, '', ''))
- conn.provision_tool = mock.Mock(return_value='/opt/nsb_bin/dpdk-devbind.py')
+ conn.join_bin_path.return_value = os.path.join(self.bin_path, DpdkBindHelper.DPDK_DEVBIND)
dpdk_bind_helper = DpdkBindHelper(conn)
dpdk_bind_helper.read_status = mock.Mock()
@@ -257,3 +552,84 @@ Other crypto devices
}
self.assertDictEqual(expected, dpdk_bind_helper.used_drivers)
+
+ def test_force_dpdk_rebind(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper, 'driver2')
+ dpdk_helper.dpdk_status = {
+ NETWORK_DPDK: [
+ {
+ 'vpci': 'pci1',
+ },
+ {
+ 'vpci': 'pci3',
+ },
+ {
+ 'vpci': 'pci6',
+ },
+ {
+ 'vpci': 'pci3',
+ },
+ ]
+ }
+ dpdk_helper.real_kernel_interface_driver_map = {
+ 'pci1': 'real_driver1',
+ 'pci2': 'real_driver2',
+ 'pci3': 'real_driver1',
+ 'pci4': 'real_driver4',
+ 'pci6': 'real_driver6',
+ }
+ dpdk_helper.load_dpdk_driver = mock.Mock()
+ dpdk_helper.read_status = mock.Mock()
+ dpdk_helper.save_real_kernel_interface_driver_map = mock.Mock()
+ dpdk_helper.save_used_drivers = mock.Mock()
+ dpdk_helper.bind = mock_bind = mock.Mock()
+
+ dpdk_helper.force_dpdk_rebind()
+ self.assertEqual(mock_bind.call_count, 2)
+
+ def test_save_real_kernel_drivers(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+ dpdk_helper.real_kernel_drivers = {
+ 'abc': '123',
+ }
+ dpdk_helper.real_kernel_interface_driver_map = {
+ 'abc': 'AAA',
+ 'def': 'DDD',
+ 'abs': 'AAA',
+ 'ghi': 'GGG',
+ }
+
+ # save_used_drivers must be called before save_real_kernel_drivers can be
+ with self.assertRaises(AttributeError):
+ dpdk_helper.save_real_kernel_drivers()
+
+ dpdk_helper.save_used_drivers()
+
+ expected_used_drivers = {
+ 'AAA': ['abc', 'abs'],
+ 'DDD': ['def'],
+ 'GGG': ['ghi'],
+ }
+ dpdk_helper.save_real_kernel_drivers()
+ self.assertDictEqual(dpdk_helper.used_drivers, expected_used_drivers)
+ self.assertDictEqual(dpdk_helper.real_kernel_drivers, {})
+
+ def test_get_real_kernel_driver(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.side_effect = [
+ (0, 'non-matching text', ''),
+ (0, 'pre Kernel modules: real_driver1', ''),
+ (0, 'before Ethernet middle Virtio network device after', ''),
+ ]
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+
+ self.assertIsNone(dpdk_helper.get_real_kernel_driver('abc'))
+ self.assertEqual(dpdk_helper.get_real_kernel_driver('abc'), 'real_driver1')
+ self.assertEqual(dpdk_helper.get_real_kernel_driver('abc'), DpdkBindHelper.VIRTIO_DRIVER)
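
For illustration only: the sysfs "path:value" lines these new tests feed to DpdkBindHelper.parse_netdev_info() are parsed by the same logic this patch removes from vnf_generic.py further down (the BASE_ADAPTER_RE block). A standalone sketch of that parsing, kept here to make the test fixtures easier to read:

    import os
    import re
    from collections import defaultdict

    # Input lines look like: /sys/devices/<bus path>/net/<iface>/<attr>:<value>
    BASE_ADAPTER_RE = re.compile(
        r'^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    def parse_netdev_info(stdout):
        netdevs = defaultdict(dict)
        for bus_path, ifname, name, value in BASE_ADAPTER_RE.findall(stdout):
            dirname, bus_id = os.path.split(bus_path)
            if 'virtio' in bus_id:
                # VMs expose virtio1/ inside the PCI device path
                bus_id = os.path.basename(dirname)
            if 'device/' in name:
                # strip 'device/' from device/vendor, device/subsystem_vendor, ...
                name = name.split('/')[1]
            netdevs[ifname][name] = value
            netdevs[ifname]['interface_name'] = ifname
            netdevs[ifname]['pci_bus_id'] = bus_id
        return dict(netdevs)
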
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
index 08be4865b..46786a304 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -379,6 +379,25 @@ class TestProxApproxVnf(unittest.TestCase):
file_path = os.path.join(curr_path, filename)
return file_path
+ @mock.patch('yardstick.common.utils.open', create=True)
+ @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', create=True)
+ @mock.patch('yardstick.network_services.helpers.iniparser.open', create=True)
+ @mock.patch(SSH_HELPER)
+ def test_run_prox(self, ssh, *_):
+ mock_ssh(ssh)
+
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
+ prox_approx_vnf.ssh_helper.join_bin_path.return_value = '/tool_path12/tool_file34'
+ prox_approx_vnf.setup_helper.remote_path = 'configs/file56.cfg'
+
+ expected = "sudo bash -c 'cd /tool_path12; " \
+ "/tool_path12/tool_file34 -o cli -t -f /tmp/l3-swap-2.cfg '"
+
+ prox_approx_vnf._run()
+ result = prox_approx_vnf.ssh_helper.run.call_args[0][0]
+ self.assertEqual(result, expected)
+
@mock.patch(SSH_HELPER)
def bad_test_instantiate(self, *args):
prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index f8f8eb604..b2e3fd85b 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-# Copyright (c) 2017 Intel Corporation
+# Copyright (c) 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,7 +40,7 @@ stl_patch.start()
if stl_patch:
from yardstick.network_services.vnf_generic.vnf import sample_vnf
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import VnfSshHelper
+ from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFDeployHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ResourceHelper
@@ -616,7 +616,9 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
ssh_helper = mock.Mock()
ssh_helper.execute = execute
- dpdk_vnf_setup_env_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, mock.Mock())
+ scenario_helper = mock.Mock()
+ scenario_helper.nodes = [None, None]
+ dpdk_vnf_setup_env_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
dpdk_vnf_setup_env_helper._validate_cpu_cfg = mock.Mock(return_value=[])
with mock.patch.object(dpdk_vnf_setup_env_helper, '_setup_dpdk'):
@@ -638,21 +640,6 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
mock.call('lsmod | grep -i igb_uio')
])
- def test__setup_dpdk_igb_uio_not_loaded(self):
- ssh_helper = mock.Mock()
- ssh_helper.execute = mock.Mock()
- ssh_helper.execute.side_effect = [(0, 0, 0), (1, 0, 0)]
- dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY)
- with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \
- mock_setup_hp:
- with self.assertRaises(y_exceptions.DPDKSetupDriverError):
- dpdk_setup_helper._setup_dpdk()
- mock_setup_hp.assert_called_once()
- ssh_helper.execute.assert_has_calls([
- mock.call('sudo modprobe uio && sudo modprobe igb_uio'),
- mock.call('lsmod | grep -i igb_uio')
- ])
-
@mock.patch('yardstick.ssh.SSH')
def test__setup_resources(self, _):
vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0))
@@ -690,6 +677,7 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
# ssh_helper.execute = mock.Mock(return_value = (0, 'text', ''))
# ssh_helper.execute.return_value = 0, 'output', ''
scenario_helper = mock.Mock()
+ scenario_helper.nodes = [None, None]
rv = ['0000:05:00.1', '0000:05:00.0']
dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
@@ -708,6 +696,7 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
vnfd_helper = VnfdHelper(self.VNFD_0)
ssh_helper = mock.Mock()
scenario_helper = mock.Mock()
+ scenario_helper.nodes = [None, None]
dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock()
dpdk_setup_helper.dpdk_bind_helper.used_drivers = {
@@ -1386,7 +1375,7 @@ class TestSampleVNFDeployHelper(unittest.TestCase):
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
@mock.patch('subprocess.check_output')
- def test_deploy_vnfs_disabled(self, *args):
+ def test_deploy_vnfs_disabled(self, *_):
vnfd_helper = mock.Mock()
ssh_helper = mock.Mock()
ssh_helper.join_bin_path.return_value = 'joined_path'
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
index fb26f20b5..ed2274e79 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
@@ -36,7 +36,7 @@ if stl_patch:
from yardstick.network_services.vnf_generic.vnf.tg_ping import PingTrafficGen
from yardstick.network_services.vnf_generic.vnf.tg_ping import PingResourceHelper
from yardstick.network_services.vnf_generic.vnf.tg_ping import PingSetupEnvHelper
- from yardstick.network_services.vnf_generic.vnf.sample_vnf import VnfSshHelper
+ from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
class TestPingResourceHelper(unittest.TestCase):
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index 20c160cfb..ae8319e37 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -40,6 +40,7 @@ class Flags(object):
class Context(object):
"""Class that represents a context in the logical model"""
list = []
+ SHORT_TASK_ID_LEN = 8
@staticmethod
def split_name(name, sep='.'):
@@ -52,10 +53,28 @@ class Context(object):
def __init__(self):
Context.list.append(self)
+ self._flags = Flags()
+ self._name = None
+ self._task_id = None
- @abc.abstractmethod
def init(self, attrs):
- """Initiate context."""
+ """Initiate context"""
+ self._name = attrs['name']
+ self._task_id = attrs['task_id']
+ self._flags.parse(**attrs.get('flags', {}))
+ self._name_task_id = '{}-{}'.format(
+ self._name, self._task_id[:self.SHORT_TASK_ID_LEN])
+
+ @property
+ def name(self):
+ if self._flags.no_setup or self._flags.no_teardown:
+ return self._name
+ else:
+ return self._name_task_id
+
+ @property
+ def assigned_name(self):
+ return self._name
@staticmethod
def get_cls(context_type):
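
A minimal illustration of the naming rule added above (values invented): Context.name carries a short task-ID suffix only when the context is created and torn down by the same run, while Context.assigned_name always returns the plain name from the task file.

    # Illustrative sketch of Context.init()/Context.name; the task ID is made up.
    SHORT_TASK_ID_LEN = 8

    def context_name(name, task_id, no_setup=False, no_teardown=False):
        name_task_id = '{}-{}'.format(name, task_id[:SHORT_TASK_ID_LEN])
        # contexts meant to be reused keep the stable, unsuffixed name
        return name if (no_setup or no_teardown) else name_task_id

    context_name('demo', '70f09b5cdeadbeef')                    # 'demo-70f09b5c'
    context_name('demo', '70f09b5cdeadbeef', no_teardown=True)  # 'demo'
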
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index 8ae4b65b8..a9e4564fe 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -7,33 +7,25 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-import logging
-
from yardstick.benchmark.contexts.base import Context
-LOG = logging.getLogger(__name__)
-
-
class DummyContext(Context):
- """Class that handle dummy info"""
-
- __context_type__ = "Dummy"
+ """Class that handle dummy info.
- def __init__(self):
- super(DummyContext, self).__init__()
+ This class is also used to test the abstract class Context because it
+ provides a minimal concrete implementation of a subclass.
+ """
- def init(self, attrs):
- pass
+ __context_type__ = "Dummy"
def deploy(self):
- """don't need to deploy"""
+ """Don't need to deploy"""
pass
def undeploy(self):
- """don't need to undeploy"""
- super(DummyContext, self).undeploy()
+ """Don't need to undeploy"""
+ pass
def _get_server(self, attr_name):
return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 7b7f1be32..75e26e06f 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -13,7 +13,6 @@ from __future__ import print_function
import collections
import logging
import os
-import uuid
import errno
from collections import OrderedDict
@@ -25,8 +24,10 @@ from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
+from yardstick.common import exceptions as y_exc
from yardstick.common.openstack_utils import get_neutron_client
-from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
+from yardstick.orchestrator.heat import HeatStack
+from yardstick.orchestrator.heat import HeatTemplate
from yardstick.common import constants as consts
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
@@ -50,7 +51,6 @@ class HeatContext(Context):
__context_type__ = "Heat"
def __init__(self):
- self.name = None
self.stack = None
self.networks = OrderedDict()
self.heat_timeout = None
@@ -68,13 +68,8 @@ class HeatContext(Context):
self.template_file = None
self.heat_parameters = None
self.neutron_client = None
- # generate an uuid to identify yardstick_key
- # the first 8 digits of the uuid will be used
- self.key_uuid = uuid.uuid4()
self.heat_timeout = None
- self.key_filename = ''.join(
- [consts.YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
- get_short_key_uuid(self.key_uuid)])
+ self.key_filename = None
super(HeatContext, self).__init__()
@staticmethod
@@ -95,10 +90,10 @@ class HeatContext(Context):
return sorted_networks
def init(self, attrs):
- """initializes itself from the supplied arguments"""
- self.check_environment()
- self.name = attrs["name"]
+ """Initializes itself from the supplied arguments"""
+ super(HeatContext, self).init(attrs)
+ self.check_environment()
self._user = attrs.get("user")
self.template_file = attrs.get("heat_template")
@@ -137,7 +132,16 @@ class HeatContext(Context):
self._server_map[server.dn] = server
self.attrs = attrs
- SSH.gen_keys(self.key_filename)
+
+ self.key_filename = ''.join(
+ [consts.YARDSTICK_ROOT_PATH,
+ 'yardstick/resources/files/yardstick_key-',
+ self.name])
+ # Permissions may have changed since creation; this can be fixed. If we
+ # overwrite the file, we lose future access to VMs using this key.
+ # As long as the file exists, even if it is unreadable, keep it intact
+ if not os.path.exists(self.key_filename):
+ SSH.gen_keys(self.key_filename)
def check_environment(self):
try:
@@ -176,7 +180,7 @@ class HeatContext(Context):
template.add_flavor(**self.flavor)
self.flavors.add(flavor)
- template.add_keypair(self.keypair_name, self.key_uuid)
+ template.add_keypair(self.keypair_name, self.name)
template.add_security_group(self.secgroup_name)
for network in self.networks.values():
@@ -298,6 +302,25 @@ class HeatContext(Context):
network.network_type = neutron_net.get('provider:network_type')
network.neutron_info = neutron_net
+ def _create_new_stack(self, heat_template):
+ try:
+ return heat_template.create(block=True,
+ timeout=self.heat_timeout)
+ except KeyboardInterrupt:
+ raise y_exc.StackCreationInterrupt
+ except:
+ LOG.exception("stack failed")
+ # let the other failures happen, we want stack trace
+ raise
+
+ def _retrieve_existing_stack(self, stack_name):
+ stack = HeatStack(stack_name)
+ if stack.get():
+ return stack
+ else:
+ LOG.warning("Stack %s does not exist", self.name)
+ return None
+
def deploy(self):
"""deploys template into a stack using cloud"""
LOG.info("Deploying context '%s' START", self.name)
@@ -308,15 +331,14 @@ class HeatContext(Context):
if self.template_file is None:
self._add_resources_to_template(heat_template)
- try:
- self.stack = heat_template.create(block=True,
- timeout=self.heat_timeout)
- except KeyboardInterrupt:
- raise SystemExit("\nStack create interrupted")
- except:
- LOG.exception("stack failed")
- # let the other failures happen, we want stack trace
- raise
+ if self._flags.no_setup:
+ # Try to get an existing stack, returns a stack or None
+ self.stack = self._retrieve_existing_stack(self.name)
+ if not self.stack:
+ self.stack = self._create_new_stack(heat_template)
+
+ else:
+ self.stack = self._create_new_stack(heat_template)
# TODO: use Neutron to get segmentation-id
self.get_neutron_info()
@@ -382,6 +404,10 @@ class HeatContext(Context):
def undeploy(self):
"""undeploys stack from cloud"""
+ if self._flags.no_teardown:
+ LOG.info("Undeploying context '%s' SKIP", self.name)
+ return
+
if self.stack:
LOG.info("Undeploying context '%s' START", self.name)
self.stack.delete()
@@ -429,13 +455,17 @@ class HeatContext(Context):
server.private_ip = self.stack.outputs.get(
attr_name.get("private_ip_attr", object()), None)
else:
- server = self._server_map.get(attr_name, None)
+ try:
+ server = self._server_map[attr_name]
+ except KeyError:
+ attr_name_no_suffix = attr_name.split("-")[0]
+ server = self._server_map.get(attr_name_no_suffix, None)
if server is None:
return None
pkey = pkg_resources.resource_string(
'yardstick.resources',
- h_join('files/yardstick_key', get_short_key_uuid(self.key_uuid))).decode('utf-8')
+ h_join('files/yardstick_key', self.name)).decode('utf-8')
result = {
"user": server.context.user,
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 2334e5076..4bea991ea 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -29,7 +29,6 @@ class KubernetesContext(Context):
__context_type__ = "Kubernetes"
def __init__(self):
- self.name = ''
self.ssh_key = ''
self.key_path = ''
self.public_key_path = ''
@@ -38,7 +37,7 @@ class KubernetesContext(Context):
super(KubernetesContext, self).__init__()
def init(self, attrs):
- self.name = attrs.get('name', '')
+ super(KubernetesContext, self).init(attrs)
template_cfg = attrs.get('servers', {})
self.template = KubernetesTemplate(self.name, template_cfg)
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index ffc82c8ed..fa619a9aa 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -35,7 +35,6 @@ class NodeContext(Context):
__context_type__ = "Node"
def __init__(self):
- self.name = None
self.file_path = None
self.nodes = []
self.networks = {}
@@ -60,7 +59,8 @@ class NodeContext(Context):
def init(self, attrs):
"""initializes itself from the supplied arguments"""
- self.name = attrs["name"]
+ super(NodeContext, self).init(attrs)
+
self.file_path = file_path = attrs.get("file", "pod.yaml")
try:
@@ -157,7 +157,7 @@ class NodeContext(Context):
except StopIteration:
pass
else:
- raise ValueError("Duplicate nodes!!! Nodes: %s %s",
+ raise ValueError("Duplicate nodes!!! Nodes: %s %s" %
(node, duplicate))
node["name"] = attr_name
@@ -204,7 +204,7 @@ class NodeContext(Context):
self.client._put_file_shell(script_file, '~/{}'.format(script))
cmd = 'sudo bash {} {}'.format(script, options)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index c931d85d0..a18b42ea5 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -59,7 +59,6 @@ class OvsDpdkContext(Context):
self.first_run = True
self.dpdk_devbind = ''
self.vm_names = []
- self.name = None
self.nfvi_host = []
self.nodes = []
self.networks = {}
@@ -74,8 +73,8 @@ class OvsDpdkContext(Context):
def init(self, attrs):
"""initializes itself from the supplied arguments"""
+ super(OvsDpdkContext, self).init(attrs)
- self.name = attrs["name"]
self.file_path = attrs.get("file", "pod.yaml")
self.nodes, self.nfvi_host, self.host_mgmt = \
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 9cca3e15c..d7620552b 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -43,7 +43,6 @@ class SriovContext(Context):
self.first_run = True
self.dpdk_devbind = ''
self.vm_names = []
- self.name = None
self.nfvi_host = []
self.nodes = []
self.networks = {}
@@ -57,8 +56,8 @@ class SriovContext(Context):
def init(self, attrs):
"""initializes itself from the supplied arguments"""
+ super(SriovContext, self).init(attrs)
- self.name = attrs["name"]
self.file_path = attrs.get("file", "pod.yaml")
self.nodes, self.nfvi_host, self.host_mgmt = \
diff --git a/yardstick/benchmark/core/__init__.py b/yardstick/benchmark/core/__init__.py
index 3e3aa99a1..3914e3237 100644
--- a/yardstick/benchmark/core/__init__.py
+++ b/yardstick/benchmark/core/__init__.py
@@ -23,6 +23,7 @@ class Param(object):
self.task_args_file = kwargs.get('task-args-file')
self.keep_deploy = kwargs.get('keep-deploy')
self.parse_only = kwargs.get('parse-only')
+ self.render_only = kwargs.get('render-only')
self.output_file = kwargs.get('output-file', '/tmp/yardstick.out')
self.suite = kwargs.get('suite')
self.task_id = kwargs.get('task_id')
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index f5d2b18ac..7f6309a7e 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -7,10 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-""" Handler for yardstick command 'task' """
-
-from __future__ import absolute_import
-from __future__ import print_function
import sys
import os
from collections import OrderedDict
@@ -31,9 +27,10 @@ from yardstick.benchmark.runners import base as base_runner
from yardstick.common.constants import CONF_FILE
from yardstick.common.yaml_loader import yaml_load
from yardstick.dispatcher.base import Base as DispatcherBase
-from yardstick.common.task_template import TaskTemplate
-from yardstick.common import utils
from yardstick.common import constants
+from yardstick.common import exceptions as y_exc
+from yardstick.common import task_template
+from yardstick.common import utils
from yardstick.common.html_template import report_template
output_file_default = "/tmp/yardstick.out"
@@ -57,7 +54,7 @@ class Task(object): # pragma: no cover
out_types = [s.strip() for s in dispatchers.split(',')]
output_config['DEFAULT']['dispatcher'] = out_types
- def start(self, args):
+ def start(self, args, **kwargs): # pylint: disable=unused-argument
"""Start a benchmark scenario."""
atexit.register(self.atexit_handler)
@@ -89,8 +86,7 @@ class Task(object): # pragma: no cover
if args.suite:
# 1.parse suite, return suite_params info
- task_files, task_args, task_args_fnames = \
- parser.parse_suite()
+ task_files, task_args, task_args_fnames = parser.parse_suite()
else:
task_files = [parser.path]
task_args = [args.task_args]
@@ -103,32 +99,33 @@ class Task(object): # pragma: no cover
sys.exit(0)
testcases = {}
- # parse task_files
- for i in range(0, len(task_files)):
- one_task_start_time = time.time()
- parser.path = task_files[i]
- scenarios, run_in_parallel, meet_precondition, contexts = \
- parser.parse_task(self.task_id, task_args[i],
- task_args_fnames[i])
-
- self.contexts.extend(contexts)
+ tasks = self._parse_tasks(parser, task_files, args, task_args,
+ task_args_fnames)
- if not meet_precondition:
- LOG.info("meet_precondition is %s, please check envrionment",
- meet_precondition)
+ # Execute task files.
+ for i, _ in enumerate(task_files):
+ one_task_start_time = time.time()
+ self.contexts.extend(tasks[i]['contexts'])
+ if not tasks[i]['meet_precondition']:
+ LOG.info('"meet_precondition" is %s, please check environment',
+ tasks[i]['meet_precondition'])
continue
- case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
try:
- data = self._run(scenarios, run_in_parallel, output_config)
+ data = self._run(tasks[i]['scenarios'],
+ tasks[i]['run_in_parallel'],
+ output_config)
except KeyboardInterrupt:
raise
except Exception: # pylint: disable=broad-except
- LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
- testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+ LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+ exc_info=True)
+ testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+ 'tc_data': []}
else:
- LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
- testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
+ LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+ testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+ 'tc_data': data}
if args.keep_deploy:
# keep deployment, forget about stack
@@ -151,9 +148,8 @@ class Task(object): # pragma: no cover
LOG.info("Total finished in %d secs",
total_end_time - total_start_time)
- scenario = scenarios[0]
- LOG.info("To generate report, execute command "
- "'yardstick report generate %(task_id)s %(tc)s'", scenario)
+ LOG.info('To generate report, execute command "yardstick report '
+ 'generate %(task_id)s <yaml_name>s"', self.task_id)
LOG.info("Task ALL DONE, exiting")
return result
@@ -314,6 +310,30 @@ class Task(object): # pragma: no cover
else:
return op
+ def _parse_tasks(self, parser, task_files, args, task_args,
+ task_args_fnames):
+ tasks = []
+
+ # Parse task_files.
+ for i, _ in enumerate(task_files):
+ parser.path = task_files[i]
+ tasks.append(parser.parse_task(self.task_id, task_args[i],
+ task_args_fnames[i]))
+ tasks[i]['case_name'] = os.path.splitext(
+ os.path.basename(task_files[i]))[0]
+
+ if args.render_only:
+ utils.makedirs(args.render_only)
+ for idx, task in enumerate(tasks):
+ output_file_name = os.path.abspath(os.path.join(
+ args.render_only,
+ '{0:03d}-{1}.yml'.format(idx, task['case_name'])))
+ utils.write_file(output_file_name, task['rendered'])
+
+ sys.exit(0)
+
+ return tasks
+
def run_one_scenario(self, scenario_cfg, output_config):
"""run one scenario using context"""
runner_cfg = scenario_cfg["runner"]
@@ -345,7 +365,7 @@ class Task(object): # pragma: no cover
try:
config_context_target(item)
except KeyError:
- pass
+ LOG.debug("Got a KeyError in config_context_target(%s)", item)
else:
break
@@ -479,33 +499,42 @@ class TaskParser(object): # pragma: no cover
return valid_task_files, valid_task_args, valid_task_args_fnames
- def parse_task(self, task_id, task_args=None, task_args_file=None):
- """parses the task file and return an context and scenario instances"""
- LOG.info("Parsing task config: %s", self.path)
+ def _render_task(self, task_args, task_args_file):
+ """Render the input task with the given arguments
+ :param task_args: (dict) arguments to render the task
+ :param task_args_file: (str) file containing the arguments to render
+ the task
+ :return: (str) task file rendered
+ """
try:
kw = {}
if task_args_file:
with open(task_args_file) as f:
- kw.update(parse_task_args("task_args_file", f.read()))
- kw.update(parse_task_args("task_args", task_args))
+ kw.update(parse_task_args('task_args_file', f.read()))
+ kw.update(parse_task_args('task_args', task_args))
except TypeError:
- raise TypeError()
+ raise y_exc.TaskRenderArgumentError()
+ input_task = None
try:
with open(self.path) as f:
- try:
- input_task = f.read()
- rendered_task = TaskTemplate.render(input_task, **kw)
- except Exception as e:
- LOG.exception('Failed to render template:\n%s\n', input_task)
- raise e
- LOG.debug("Input task is:\n%s\n", rendered_task)
-
- cfg = yaml_load(rendered_task)
- except IOError as ioerror:
- sys.exit(ioerror)
+ input_task = f.read()
+ rendered_task = task_template.TaskTemplate.render(input_task, **kw)
+ LOG.debug('Input task is:\n%s', rendered_task)
+ parsed_task = yaml_load(rendered_task)
+ except (IOError, OSError):
+ raise y_exc.TaskReadError(task_file=self.path)
+ except Exception:
+ raise y_exc.TaskRenderError(input_task=input_task)
+
+ return parsed_task, rendered_task
+ def parse_task(self, task_id, task_args=None, task_args_file=None):
+ """parses the task file and return an context and scenario instances"""
+ LOG.info("Parsing task config: %s", self.path)
+
+ cfg, rendered = self._render_task(task_args, task_args_file)
self._check_schema(cfg["schema"], "task")
meet_precondition = self._check_precondition(cfg)
@@ -519,17 +548,15 @@ class TaskParser(object): # pragma: no cover
context_cfgs = [{"type": "Dummy"}]
contexts = []
- name_suffix = '-{}'.format(task_id[:8])
for cfg_attrs in context_cfgs:
- try:
- cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'],
- name_suffix)
- except KeyError:
- pass
+
+ cfg_attrs['task_id'] = task_id
# default to Heat context because we are testing OpenStack
context_type = cfg_attrs.get("type", "Heat")
context = Context.get(context_type)
context.init(cfg_attrs)
+ # Update the name in case the context has used the name_suffix
+ cfg_attrs['name'] = context.name
contexts.append(context)
run_in_parallel = cfg.get("run_in_parallel", False)
@@ -543,16 +570,74 @@ class TaskParser(object): # pragma: no cover
# relative to task path
scenario["task_path"] = os.path.dirname(self.path)
- change_server_name(scenario, name_suffix)
-
- try:
- for node in scenario['nodes']:
- scenario['nodes'][node] += name_suffix
- except KeyError:
- pass
+ self._change_node_names(scenario, contexts)
# TODO we need something better here, a class that represent the file
- return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
+ return {'scenarios': cfg['scenarios'],
+ 'run_in_parallel': run_in_parallel,
+ 'meet_precondition': meet_precondition,
+ 'contexts': contexts,
+ 'rendered': rendered}
+
+ @staticmethod
+ def _change_node_names(scenario, contexts):
+ """Change the node names in a scenario, depending on the context config
+
+ The nodes (VMs or physical servers) are referred in the context section
+ with the name of the server and the name of the context:
+ <server name>.<context name>
+
+ If the context is going to be undeployed at the end of the test, the
+ task ID is suffixed to the name to avoid interferences with previous
+ deployments. If the context needs to be deployed at the end of the
+ test, the name assigned is kept.
+
+ There are several places where a node name could appear in the scenario
+ configuration:
+ scenario:
+ host: athena.demo
+ target: kratos.demo
+ targets:
+ - athena.demo
+ - kratos.demo
+
+ scenario:
+ options:
+ server_name: # JIRA: YARDSTICK-810
+ host: athena.demo
+ target: kratos.demo
+
+ scenario:
+ nodes:
+ tg__0: tg_0.yardstick
+ vnf__0: vnf_0.yardstick
+ """
+ def qualified_name(name):
+ node_name, context_name = name.split('.')
+ try:
+ ctx = next((context for context in contexts
+ if context.assigned_name == context_name))
+ except StopIteration:
+ raise y_exc.ScenarioConfigContextNameNotFound(
+ context_name=context_name)
+
+ return '{}.{}'.format(node_name, ctx.name)
+
+ if 'host' in scenario:
+ scenario['host'] = qualified_name(scenario['host'])
+ if 'target' in scenario:
+ scenario['target'] = qualified_name(scenario['target'])
+ server_name = scenario.get('options', {}).get('server_name', {})
+ if 'host' in server_name:
+ server_name['host'] = qualified_name(server_name['host'])
+ if 'target' in server_name:
+ server_name['target'] = qualified_name(server_name['target'])
+ if 'targets' in scenario:
+ for idx, target in enumerate(scenario['targets']):
+ scenario['targets'][idx] = qualified_name(target)
+ if 'nodes' in scenario:
+ for scenario_node, target in scenario['nodes'].items():
+ scenario['nodes'][scenario_node] = qualified_name(target)
def _check_schema(self, cfg_schema, schema_type):
"""Check if config file is using the correct schema type"""
@@ -686,30 +771,3 @@ def parse_task_args(src_name, args):
% {"src": src_name, "src_type": type(kw)})
raise TypeError()
return kw
-
-
-def change_server_name(scenario, suffix):
-
- def add_suffix(cfg, key):
- try:
- value = cfg[key]
- except KeyError:
- pass
- else:
- try:
- value['name'] += suffix
- except TypeError:
- cfg[key] += suffix
-
- server_name = scenario.get('options', {}).get('server_name', {})
-
- add_suffix(scenario, 'host')
- add_suffix(scenario, 'target')
- add_suffix(server_name, 'host')
- add_suffix(server_name, 'target')
-
- try:
- key = 'targets'
- scenario[key] = ['{}{}'.format(a, suffix) for a in scenario[key]]
- except KeyError:
- pass
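
A worked example of the node-name rewriting described in the _change_node_names() docstring, assuming a context whose assigned_name is 'demo', whose task ID starts with '70f09b5c', and which sets neither no_setup nor no_teardown (so context.name is 'demo-70f09b5c'):

    # Hypothetical input for TaskParser._change_node_names():
    scenario = {
        'host': 'athena.demo',
        'target': 'kratos.demo',
        'nodes': {'tg__0': 'tg_0.demo', 'vnf__0': 'vnf_0.demo'},
    }
    # After _change_node_names(scenario, contexts) the references become:
    # {'host': 'athena.demo-70f09b5c',
    #  'target': 'kratos.demo-70f09b5c',
    #  'nodes': {'tg__0': 'tg_0.demo-70f09b5c',
    #            'vnf__0': 'vnf_0.demo-70f09b5c'}}
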
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 45c151b59..0e4785294 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -12,19 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from collections import defaultdict
import copy
import logging
+
import ipaddress
from itertools import chain
import os
-import re
import sys
import six
import yaml
from yardstick.benchmark.scenarios import base as scenario_base
+from yardstick.error import IncorrectConfig
from yardstick.common.constants import LOG_DIR
from yardstick.common.process import terminate_children
from yardstick.common import utils
@@ -36,58 +36,12 @@ from yardstick.network_services.traffic_profile import base as tprofile_base
from yardstick.network_services.utils import get_nsb_option
from yardstick import ssh
-
traffic_profile.register_modules()
LOG = logging.getLogger(__name__)
-class SSHError(Exception):
- """Class handles ssh connection error exception"""
- pass
-
-
-class SSHTimeout(SSHError):
- """Class handles ssh connection timeout exception"""
- pass
-
-
-class IncorrectConfig(Exception):
- """Class handles incorrect configuration during setup"""
- pass
-
-
-class IncorrectSetup(Exception):
- """Class handles incorrect setup during setup"""
- pass
-
-
-class SshManager(object):
- def __init__(self, node, timeout=120):
- super(SshManager, self).__init__()
- self.node = node
- self.conn = None
- self.timeout = timeout
-
- def __enter__(self):
- """
- args -> network device mappings
- returns -> ssh connection ready to be used
- """
- try:
- self.conn = ssh.SSH.from_node(self.node)
- self.conn.wait(timeout=self.timeout)
- except SSHError as error:
- LOG.info("connect failed to %s, due to %s", self.node["ip"], error)
- # self.conn defaults to None
- return self.conn
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- if self.conn:
- self.conn.close()
-
-
class NetworkServiceTestCase(scenario_base.Scenario):
"""Class handles Generic framework to do pre-deployment VNF &
Network service testing """
@@ -104,6 +58,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
self.collector = None
self.traffic_profile = None
self.node_netdevs = {}
+ self.bin_path = get_nsb_option('bin_path', '')
def _get_ip_flow_range(self, ip_start_range):
@@ -212,16 +167,10 @@ class NetworkServiceTestCase(scenario_base.Scenario):
for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
- @staticmethod
- def get_vld_networks(networks):
- # network name is vld_id
- vld_map = {}
- for name, n in networks.items():
- try:
- vld_map[n['vld_id']] = n
- except KeyError:
- vld_map[name] = n
- return vld_map
+ def _find_vnfd_from_vnf_idx(self, vnf_id):
+ return next((vnfd
+ for vnfd in self.topology["constituent-vnfd"]
+ if vnf_id == vnfd["member-vnf-index"]), None)
@staticmethod
def find_node_if(nodes, name, if_name, vld_id):
@@ -273,7 +222,9 @@ class NetworkServiceTestCase(scenario_base.Scenario):
node1_if["peer_ifname"] = node0_if_name
# just load the network
- vld_networks = self.get_vld_networks(self.context_cfg["networks"])
+ vld_networks = {n.get('vld_id', name): n for name, n in
+ self.context_cfg["networks"].items()}
+
node0_if["network"] = vld_networks.get(vld["id"], {})
node1_if["network"] = vld_networks.get(vld["id"], {})
@@ -312,10 +263,6 @@ class NetworkServiceTestCase(scenario_base.Scenario):
node0_if["peer_intf"] = node1_copy
node1_if["peer_intf"] = node0_copy
- def _find_vnfd_from_vnf_idx(self, vnf_idx):
- return next((vnfd for vnfd in self.topology["constituent-vnfd"]
- if vnf_idx == vnfd["member-vnf-index"]), None)
-
def _update_context_with_topology(self):
for vnfd in self.topology["constituent-vnfd"]:
vnf_idx = vnfd["member-vnf-index"]
@@ -323,43 +270,6 @@ class NetworkServiceTestCase(scenario_base.Scenario):
vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
self.context_cfg["nodes"][vnf_name].update(vnfd)
- def _probe_netdevs(self, node, node_dict, timeout=120):
- try:
- return self.node_netdevs[node]
- except KeyError:
- pass
-
- netdevs = {}
- cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
-
- with SshManager(node_dict, timeout=timeout) as conn:
- if conn:
- exit_status = conn.execute(cmd)[0]
- if exit_status != 0:
- raise IncorrectSetup("Node's %s lacks ip tool." % node)
- exit_status, stdout, _ = conn.execute(
- self.FIND_NETDEVICE_STRING)
- if exit_status != 0:
- raise IncorrectSetup(
- "Cannot find netdev info in sysfs" % node)
- netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
-
- self.node_netdevs[node] = netdevs
- return netdevs
-
- @classmethod
- def _probe_missing_values(cls, netdevs, network):
-
- mac_lower = network['local_mac'].lower()
- for netdev in netdevs.values():
- if netdev['address'].lower() != mac_lower:
- continue
- network.update({
- 'driver': netdev['driver'],
- 'vpci': netdev['pci_bus_id'],
- 'ifindex': netdev['ifindex'],
- })
-
def _generate_pod_yaml(self):
context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
# convert OrderedDict to a list
@@ -385,84 +295,16 @@ class NetworkServiceTestCase(scenario_base.Scenario):
pass
return new_node
- TOPOLOGY_REQUIRED_KEYS = frozenset({
- "vpci", "local_ip", "netmask", "local_mac", "driver"})
-
def map_topology_to_infrastructure(self):
""" This method should verify if the available resources defined in pod.yaml
match the topology.yaml file.
:return: None. Side effect: context_cfg is updated
"""
- num_nodes = len(self.context_cfg["nodes"])
- # OpenStack instance creation time is probably proportional to the number
- # of instances
- timeout = 120 * num_nodes
- for node, node_dict in self.context_cfg["nodes"].items():
-
- for network in node_dict["interfaces"].values():
- missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
- if not missing:
- continue
-
- # only ssh probe if there are missing values
- # ssh probe won't work on Ixia, so we had better define all our values
- try:
- netdevs = self._probe_netdevs(node, node_dict, timeout=timeout)
- except (SSHError, SSHTimeout):
- raise IncorrectConfig(
- "Unable to probe missing interface fields '%s', on node %s "
- "SSH Error" % (', '.join(missing), node))
- try:
- self._probe_missing_values(netdevs, network)
- except KeyError:
- pass
- else:
- missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
- network)
- if missing:
- raise IncorrectConfig(
- "Require interface fields '%s' not found, topology file "
- "corrupted" % ', '.join(missing))
-
- # we have to generate pod.yaml here so we have vpci and driver
- self._generate_pod_yaml()
# 3. Use topology file to find connections & resolve dest address
self._resolve_topology()
self._update_context_with_topology()
- FIND_NETDEVICE_STRING = (
- r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
- $1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
- $1/device/subsystem_vendor $1/device/subsystem_device ; \
- printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
- ' sh \{\}/* \;
- """)
-
- BASE_ADAPTER_RE = re.compile(
- '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
-
- @classmethod
- def parse_netdev_info(cls, stdout):
- network_devices = defaultdict(dict)
- matches = cls.BASE_ADAPTER_RE.findall(stdout)
- for bus_path, interface_name, name, value in matches:
- dirname, bus_id = os.path.split(bus_path)
- if 'virtio' in bus_id:
- # for some stupid reason VMs include virtio1/
- # in PCI device path
- bus_id = os.path.basename(dirname)
- # remove extra 'device/' from 'device/vendor,
- # device/subsystem_vendor', etc.
- if 'device/' in name:
- name = name.split('/')[1]
- network_devices[interface_name][name] = value
- network_devices[interface_name][
- 'interface_name'] = interface_name
- network_devices[interface_name]['pci_bus_id'] = bus_id
- # convert back to regular dict
- return dict(network_devices)
-
@classmethod
def get_vnf_impl(cls, vnf_model_id):
""" Find the implementing class from vnf_model["vnf"]["name"] field
@@ -530,7 +372,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
context_cfg = self.context_cfg
vnfs = []
- # we assume OrderedDict for consistenct in instantiation
+ # we assume OrderedDict for consistency in instantiation
for node_name, node in context_cfg["nodes"].items():
LOG.debug(node)
try:
@@ -589,6 +431,9 @@ class NetworkServiceTestCase(scenario_base.Scenario):
vnf.terminate()
raise
+ # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
+ self._generate_pod_yaml()
+
# 3. Run experiment
# Start listeners first to avoid losing packets
for traffic_gen in traffic_runners:
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index e2e8bf67d..a3488a23d 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -7,10 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-""" Handler for yardstick command 'task' """
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.core.task import Task
@@ -42,6 +38,8 @@ class TaskCommands(object): # pragma: no cover
action="store_true")
@cliargs("--parse-only", help="parse the config file and exit",
action="store_true")
+ @cliargs("--render-only", help="Render the tasks files, store the result "
+ "in the directory given and exit", type=str, dest="render_only")
@cliargs("--output-file", help="file where output is stored, default %s" %
output_file_default, default=output_file_default)
@cliargs("--suite", help="process test suite file instead of a task file",
@@ -54,9 +52,8 @@ class TaskCommands(object): # pragma: no cover
LOG.info('Task START')
try:
result = Task().start(param, **kwargs)
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
self._write_error_data(e)
- LOG.exception("")
if result.get('result', {}).get('criteria') == 'PASS':
LOG.info('Task SUCCESS')
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 4562c4825..68f9995a2 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -75,3 +75,23 @@ class TrafficProfileNotImplemented(YardstickException):
class DPDKSetupDriverError(YardstickException):
message = '"igb_uio" driver is not loaded'
+
+
+class ScenarioConfigContextNameNotFound(YardstickException):
+ message = 'Context name "%(context_name)s" not found'
+
+
+class StackCreationInterrupt(YardstickException):
+ message = 'Stack create interrupted.'
+
+
+class TaskRenderArgumentError(YardstickException):
+ message = 'Error reading the task input arguments'
+
+
+class TaskReadError(YardstickException):
+ message = 'Failed to read task %(task_file)s'
+
+
+class TaskRenderError(YardstickException):
+ message = 'Failed to render template:\n%(input_task)s'
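The new exception classes follow the existing convention of %(name)s placeholders in `message`. A minimal sketch of how they are meant to be raised, assuming YardstickException interpolates constructor keyword arguments into that template (which the placeholders imply); `load_task` is a made-up helper, not part of this change:

from yardstick.common import exceptions

def load_task(task_file):
    # made-up helper, only to show the raising convention
    try:
        with open(task_file) as f:
            return f.read()
    except IOError:
        # message renders as "Failed to read task samples/ping.yaml"
        raise exceptions.TaskReadError(task_file=task_file)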
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 3f2d546fc..357f66be8 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -93,6 +93,21 @@ def import_modules_from_package(package, raise_exception=False):
logger.exception('Unable to import module %s', module_name)
+NON_NONE_DEFAULT = object()
+
+
+def get_key_with_default(data, key, default=NON_NONE_DEFAULT):
+ value = data.get(key, default)
+ if value is NON_NONE_DEFAULT:
+ raise KeyError(key)
+ return value
+
+
+def make_dict_from_map(data, key_map):
+ return {dest_key: get_key_with_default(data, src_key, default)
+ for dest_key, (src_key, default) in key_map.items()}
+
+
def makedirs(d):
try:
os.makedirs(d)
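get_key_with_default() and make_dict_from_map() are later used by SSH.get_arg_key_map() to turn node dictionaries into constructor arguments. A minimal sketch with an invented node dict; NON_NONE_DEFAULT is a sentinel object, so a key can be marked required even where None would be a legitimate stored value:

from yardstick.common.utils import make_dict_from_map, NON_NONE_DEFAULT

node = {'user': 'root', 'ip': '10.0.0.5', 'password': None}
key_map = {
    'user': ('user', NON_NONE_DEFAULT),   # required: raises KeyError if missing
    'host': ('ip', NON_NONE_DEFAULT),     # required
    'port': ('ssh_port', 22),             # optional, with a default
    'password': ('password', None),       # optional, None is acceptable
}
make_dict_from_map(node, key_map)
# -> {'user': 'root', 'host': '10.0.0.5', 'port': 22, 'password': None}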
diff --git a/yardstick/error.py b/yardstick/error.py
new file mode 100644
index 000000000..9b84de1af
--- /dev/null
+++ b/yardstick/error.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class SSHError(Exception):
+ """Class handles ssh connection error exception"""
+ pass
+
+
+class SSHTimeout(SSHError):
+ """Class handles ssh connection timeout exception"""
+ pass
+
+
+class IncorrectConfig(Exception):
+ """Class handles incorrect configuration during setup"""
+ pass
+
+
+class IncorrectSetup(Exception):
+ """Class handles incorrect setup during setup"""
+ pass
+
+
+class IncorrectNodeSetup(IncorrectSetup):
+ """Class handles incorrect setup during setup"""
+ pass
+
+
+class ErrorClass(object):
+
+ def __init__(self, *args, **kwargs):
+ if 'test' not in kwargs:
+ raise RuntimeError
+
+ def __getattr__(self, item):
+ raise AttributeError
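ErrorClass is the import-failure placeholder used later in this change for IxNextgen: binding it in an except ImportError branch keeps the importing module loadable while making any accidental use fail loudly. A minimal sketch; `some_vendor_lib` is a placeholder module name, not a real dependency:

from yardstick import error

try:
    from some_vendor_lib import VendorApi   # placeholder import
except ImportError:
    VendorApi = error.ErrorClass

# VendorApi() now raises RuntimeError (no 'test' keyword), and
# VendorApi(test=True).connect raises AttributeError, so a missing optional
# library surfaces as an explicit error rather than a silent no-op.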
diff --git a/yardstick/network_services/constants.py b/yardstick/network_services/constants.py
new file mode 100644
index 000000000..79951e353
--- /dev/null
+++ b/yardstick/network_services/constants.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+REMOTE_TMP = "/tmp"
+DEFAULT_VNF_TIMEOUT = 3600
+PROCESS_JOIN_TIMEOUT = 3
diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py
index 8c44b26c2..05b822c2e 100644
--- a/yardstick/network_services/helpers/dpdkbindnic_helper.py
+++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,11 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import os
import re
-import itertools
+from collections import defaultdict
+from itertools import chain
-import six
+from yardstick.common.utils import validate_non_string_sequence
+from yardstick.error import IncorrectConfig
+from yardstick.error import IncorrectSetup
+from yardstick.error import IncorrectNodeSetup
+from yardstick.error import SSHTimeout
+from yardstick.error import SSHError
NETWORK_KERNEL = 'network_kernel'
NETWORK_DPDK = 'network_dpdk'
@@ -25,7 +32,6 @@ CRYPTO_KERNEL = 'crypto_kernel'
CRYPTO_DPDK = 'crypto_dpdk'
CRYPTO_OTHER = 'crypto_other'
-
LOG = logging.getLogger(__name__)
@@ -33,6 +39,166 @@ class DpdkBindHelperException(Exception):
pass
+class DpdkInterface(object):
+ TOPOLOGY_REQUIRED_KEYS = frozenset({
+ "vpci", "local_ip", "netmask", "local_mac", "driver"})
+
+ def __init__(self, dpdk_node, interface):
+ super(DpdkInterface, self).__init__()
+ self.dpdk_node = dpdk_node
+ self.interface = interface
+
+ try:
+ assert self.local_mac
+ except (AssertionError, KeyError):
+ raise IncorrectConfig
+
+ @property
+ def local_mac(self):
+ return self.interface['local_mac']
+
+ @property
+ def mac_lower(self):
+ return self.local_mac.lower()
+
+ @property
+ def missing_fields(self):
+ return self.TOPOLOGY_REQUIRED_KEYS.difference(self.interface)
+
+ @staticmethod
+ def _detect_socket(netdev):
+ try:
+ socket = netdev['numa_node']
+ except KeyError:
+ # Where is this documented?
+ # It seems for dual-sockets systems the second socket PCI bridge
+ # will have an address > 0x0f, e.g.
+ # Bridge PCI->PCI (P#524320 busid=0000:80:02.0 id=8086:6f04
+ if netdev['pci_bus_id'][5] == "0":
+ socket = 0
+ else:
+ # this doesn't handle quad-sockets
+ # TODO: fix this for quad-socket
+ socket = 1
+ return socket
+
+ def probe_missing_values(self):
+ try:
+ for netdev in self.dpdk_node.netdevs.values():
+ if netdev['address'].lower() == self.mac_lower:
+ socket = self._detect_socket(netdev)
+ self.interface.update({
+ 'vpci': netdev['pci_bus_id'],
+ 'driver': netdev['driver'],
+ 'socket': socket,
+ # don't need ifindex
+ })
+
+ except KeyError:
+ # if we don't find all the keys then don't update
+ pass
+
+ except (IncorrectNodeSetup, SSHError, SSHTimeout):
+ raise IncorrectConfig(
+ "Unable to probe missing interface fields '%s', on node %s "
+ "SSH Error" % (', '.join(self.missing_fields), self.dpdk_node.node_key))
+
+
+class DpdkNode(object):
+
+ def __init__(self, node_name, interfaces, ssh_helper, timeout=120):
+ super(DpdkNode, self).__init__()
+ self.interfaces = interfaces
+ self.ssh_helper = ssh_helper
+ self.node_key = node_name
+ self.timeout = timeout
+ self._dpdk_helper = None
+ self.netdevs = {}
+
+ try:
+ self.dpdk_interfaces = {intf['name']: DpdkInterface(self, intf['virtual-interface'])
+ for intf in self.interfaces}
+ except IncorrectConfig:
+ template = "MAC address is required for all interfaces, missing on: {}"
+ errors = (intf['name'] for intf in self.interfaces if
+ 'local_mac' not in intf['virtual-interface'])
+ raise IncorrectSetup(template.format(", ".join(errors)))
+
+ @property
+ def dpdk_helper(self):
+ if not isinstance(self._dpdk_helper, DpdkBindHelper):
+ self._dpdk_helper = DpdkBindHelper(self.ssh_helper)
+ return self._dpdk_helper
+
+ @property
+ def _interface_missing_iter(self):
+ return chain.from_iterable(self._interface_missing_map.values())
+
+ @property
+ def _interface_missing_map(self):
+ return {name: intf.missing_fields for name, intf in self.dpdk_interfaces.items()}
+
+ def _probe_netdevs(self):
+ self.netdevs.update(self.dpdk_helper.find_net_devices())
+
+ def _force_rebind(self):
+ return self.dpdk_helper.force_dpdk_rebind()
+
+ def _probe_dpdk_drivers(self):
+ self.dpdk_helper.probe_real_kernel_drivers()
+ for pci, driver in self.dpdk_helper.real_kernel_interface_driver_map.items():
+ for intf in self.interfaces:
+ vintf = intf['virtual-interface']
+ # stupid substring matches
+ # don't use netdevs here, update the interface dict
+ if vintf['vpci'].endswith(pci):
+ vintf['driver'] = driver
+ # we can't update netdevs because we may not have netdev info
+
+ def _probe_missing_values(self):
+ for intf in self.dpdk_interfaces.values():
+ intf.probe_missing_values()
+
+ def check(self):
+ # only ssh probe if there are missing values
+ # ssh probe won't work on Ixia, so we had better define all our values
+ try:
+ missing_fields_set = set(self._interface_missing_iter)
+
+ # if we are only missing driver then maybe we can get kernel module
+ # this requires vpci
+ if missing_fields_set == {'driver'}:
+ self._probe_dpdk_drivers()
+ # we can't reprobe missing values because we may not have netdev info
+
+ # if there are any other missing then we have to netdev probe
+ if missing_fields_set.difference({'driver'}):
+ self._probe_netdevs()
+ try:
+ self._probe_missing_values()
+ except IncorrectConfig:
+ # ignore for now
+ pass
+
+ # check again and verify we have all the fields
+ if set(self._interface_missing_iter):
+ # last chance fallback, rebind everything and probe
+ # this probably won't work
+ self._force_rebind()
+ self._probe_netdevs()
+ self._probe_missing_values()
+
+ errors = ("{} missing: {}".format(name, ", ".join(missing_fields)) for
+ name, missing_fields in self._interface_missing_map.items() if
+ missing_fields)
+ errors = "\n".join(errors)
+ if errors:
+ raise IncorrectSetup(errors)
+
+ finally:
+ self._dpdk_helper = None
+
+
class DpdkBindHelper(object):
DPDK_STATUS_CMD = "{dpdk_devbind} --status"
DPDK_BIND_CMD = "sudo {dpdk_devbind} {force} -b {driver} {vpci}"
@@ -42,6 +208,8 @@ class DpdkBindHelper(object):
SKIP_RE = re.compile('(====|<none>|^$)')
NIC_ROW_FIELDS = ['vpci', 'dev_type', 'iface', 'driver', 'unused', 'active']
+ UIO_DRIVER = "uio"
+
HEADER_DICT_PAIRS = [
(re.compile('^Network.*DPDK.*$'), NETWORK_DPDK),
(re.compile('^Network.*kernel.*$'), NETWORK_KERNEL),
@@ -51,6 +219,42 @@ class DpdkBindHelper(object):
(re.compile('^Other crypto.*$'), CRYPTO_OTHER),
]
+ FIND_NETDEVICE_STRING = r"""\
+find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
+$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
+$1/device/subsystem_vendor $1/device/subsystem_device $1/device/numa_node ; \
+printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
+' sh \{\}/* \;
+"""
+
+ BASE_ADAPTER_RE = re.compile('^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
+ DPDK_DEVBIND = "dpdk-devbind.py"
+
+ @classmethod
+ def parse_netdev_info(cls, stdout):
+ network_devices = defaultdict(dict)
+ match_iter = (match.groups() for match in cls.BASE_ADAPTER_RE.finditer(stdout))
+ for bus_path, interface_name, name, value in match_iter:
+ dir_name, bus_id = os.path.split(bus_path)
+ if 'virtio' in bus_id:
+ # for some stupid reason VMs include virtio1/
+ # in PCI device path
+ bus_id = os.path.basename(dir_name)
+
+ # remove extra 'device/' from 'device/vendor,
+ # device/subsystem_vendor', etc.
+ if 'device' in name:
+ name = name.split('/')[1]
+
+ network_devices[interface_name].update({
+ name: value,
+ 'interface_name': interface_name,
+ 'pci_bus_id': bus_id,
+ })
+
+ # convert back to regular dict
+ return dict(network_devices)
+
def clean_status(self):
self.dpdk_status = {
NETWORK_KERNEL: [],
@@ -61,11 +265,17 @@ class DpdkBindHelper(object):
CRYPTO_OTHER: [],
}
- def __init__(self, ssh_helper):
+ # TODO: add support for driver other than igb_uio
+ def __init__(self, ssh_helper, dpdk_driver="igb_uio"):
+ self.ssh_helper = ssh_helper
+ self.real_kernel_interface_driver_map = {}
+ self.dpdk_driver = dpdk_driver
self.dpdk_status = None
self.status_nic_row_re = None
- self._dpdk_devbind = None
+ self.dpdk_devbind = self.ssh_helper.join_bin_path(self.DPDK_DEVBIND)
self._status_cmd_attr = None
+ self.used_drivers = None
+ self.real_kernel_drivers = {}
self.ssh_helper = ssh_helper
self.clean_status()
@@ -73,15 +283,16 @@ class DpdkBindHelper(object):
def _dpdk_execute(self, *args, **kwargs):
res = self.ssh_helper.execute(*args, **kwargs)
if res[0] != 0:
- raise DpdkBindHelperException('{} command failed with rc={}'.format(
- self.dpdk_devbind, res[0]))
+ template = '{} command failed with rc={}'
+ raise DpdkBindHelperException(template.format(self.dpdk_devbind, res[0]))
return res
- @property
- def dpdk_devbind(self):
- if self._dpdk_devbind is None:
- self._dpdk_devbind = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
- return self._dpdk_devbind
+ def load_dpdk_driver(self):
+ cmd_template = "sudo modprobe {} && sudo modprobe {}"
+ self.ssh_helper.execute(cmd_template.format(self.UIO_DRIVER, self.dpdk_driver))
+
+ def check_dpdk_driver(self):
+ return self.ssh_helper.execute("lsmod | grep -i {}".format(self.dpdk_driver))[0]
@property
def _status_cmd(self):
@@ -89,12 +300,14 @@ class DpdkBindHelper(object):
self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_devbind=self.dpdk_devbind)
return self._status_cmd_attr
- def _addline(self, active_list, line):
+ def _add_line(self, active_list, line):
if active_list is None:
return
+
res = self.NIC_ROW_RE.match(line)
if res is None:
return
+
new_data = {k: v for k, v in zip(self.NIC_ROW_FIELDS, res.groups())}
new_data['active'] = bool(new_data['active'])
self.dpdk_status[active_list].append(new_data)
@@ -106,14 +319,14 @@ class DpdkBindHelper(object):
return a_dict
return active_dict
- def parse_dpdk_status_output(self, input):
+ def _parse_dpdk_status_output(self, output):
active_dict = None
self.clean_status()
- for a_row in input.splitlines():
+ for a_row in output.splitlines():
if self.SKIP_RE.match(a_row):
continue
active_dict = self._switch_active_dict(a_row, active_dict)
- self._addline(active_dict, a_row)
+ self._add_line(active_dict, a_row)
return self.dpdk_status
def _get_bound_pci_addresses(self, active_dict):
@@ -130,31 +343,85 @@ class DpdkBindHelper(object):
@property
def interface_driver_map(self):
return {interface['vpci']: interface['driver']
- for interface in itertools.chain.from_iterable(self.dpdk_status.values())}
+ for interface in chain.from_iterable(self.dpdk_status.values())}
def read_status(self):
- return self.parse_dpdk_status_output(self._dpdk_execute(self._status_cmd)[1])
+ return self._parse_dpdk_status_output(self._dpdk_execute(self._status_cmd)[1])
+
+ def find_net_devices(self):
+ exit_status, stdout, _ = self.ssh_helper.execute(self.FIND_NETDEVICE_STRING)
+ if exit_status != 0:
+ return {}
+
+ return self.parse_netdev_info(stdout)
def bind(self, pci_addresses, driver, force=True):
- # accept single PCI or list of PCI
- if isinstance(pci_addresses, six.string_types):
- pci_addresses = [pci_addresses]
+ # accept single PCI or sequence of PCI
+ pci_addresses = validate_non_string_sequence(pci_addresses, [pci_addresses])
+
cmd = self.DPDK_BIND_CMD.format(dpdk_devbind=self.dpdk_devbind,
driver=driver,
vpci=' '.join(list(pci_addresses)),
force='--force' if force else '')
LOG.debug(cmd)
self._dpdk_execute(cmd)
+
# update the inner status dict
self.read_status()
+ def probe_real_kernel_drivers(self):
+ self.read_status()
+ self.save_real_kernel_interface_driver_map()
+
+ def force_dpdk_rebind(self):
+ self.load_dpdk_driver()
+ self.read_status()
+ self.save_real_kernel_interface_driver_map()
+ self.save_used_drivers()
+
+ real_driver_map = {}
+ # only rebind devices that are bound to DPDK
+ for pci in self.dpdk_bound_pci_addresses:
+ # messy
+ real_driver = self.real_kernel_interface_driver_map[pci]
+ real_driver_map.setdefault(real_driver, []).append(pci)
+ for real_driver, pcis in real_driver_map.items():
+ self.bind(pcis, real_driver, force=True)
+
def save_used_drivers(self):
# invert the map, so we can bind by driver type
self.used_drivers = {}
- # sort for stabililty
+ # sort for stability
for vpci, driver in sorted(self.interface_driver_map.items()):
self.used_drivers.setdefault(driver, []).append(vpci)
+ KERNEL_DRIVER_RE = re.compile(r"Kernel modules: (\S+)", re.M)
+ VIRTIO_DRIVER_RE = re.compile(r"Ethernet.*Virtio network device", re.M)
+ VIRTIO_DRIVER = "virtio-pci"
+
+ def save_real_kernel_drivers(self):
+ # invert the map, so we can bind by driver type
+ self.real_kernel_drivers = {}
+ # sort for stability
+ for vpci, driver in sorted(self.real_kernel_interface_driver_map.items()):
+ self.real_kernel_drivers.setdefault(driver, []).append(vpci)
+
+ def get_real_kernel_driver(self, pci):
+ out = self.ssh_helper.execute('lspci -k -s %s' % pci)[1]
+ match = self.KERNEL_DRIVER_RE.search(out)
+ if match:
+ return match.group(1)
+
+ match = self.VIRTIO_DRIVER_RE.search(out)
+ if match:
+ return self.VIRTIO_DRIVER
+
+ return None
+
+ def save_real_kernel_interface_driver_map(self):
+ iter1 = ((pci, self.get_real_kernel_driver(pci)) for pci in self.interface_driver_map)
+ self.real_kernel_interface_driver_map = {pci: driver for pci, driver in iter1 if driver}
+
def rebind_drivers(self, force=True):
for driver, vpcis in self.used_drivers.items():
self.bind(vpcis, driver, force)
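FIND_NETDEVICE_STRING emits grep -sH style "path:value" lines plus one "<netdev>/driver:<module>" line per interface, and parse_netdev_info() folds them into a per-interface dict keyed by sysfs attribute. A minimal sketch with invented sysfs output (key order in the result may vary); DpdkNode.probe_missing_values() later uses this map to back-fill vpci, driver and NUMA socket from a MAC address:

from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper

SAMPLE_STDOUT = (
    "/sys/devices/pci0000:00/0000:00:05.0/net/ens5/address:0a:de:ad:be:ef:f5\n"
    "/sys/devices/pci0000:00/0000:00:05.0/net/ens5/device/vendor:0x8086\n"
    "/sys/devices/pci0000:00/0000:00:05.0/net/ens5/driver:ixgbe\n"
)

DpdkBindHelper.parse_netdev_info(SAMPLE_STDOUT)
# -> {'ens5': {'address': '0a:de:ad:be:ef:f5', 'vendor': '0x8086',
#              'driver': 'ixgbe', 'interface_name': 'ens5',
#              'pci_bus_id': '0000:00:05.0'}}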
diff --git a/yardstick/network_services/utils.py b/yardstick/network_services/utils.py
index 7a1815eb9..4b987fafe 100644
--- a/yardstick/network_services/utils.py
+++ b/yardstick/network_services/utils.py
@@ -121,7 +121,6 @@ def provision_tool(connection, tool_path, tool_file=None):
tool_path = get_nsb_option('tool_path')
if tool_file:
tool_path = os.path.join(tool_path, tool_file)
- bin_path = get_nsb_option("bin_path")
exit_status = connection.execute("which %s > /dev/null 2>&1" % tool_path)[0]
if exit_status == 0:
return encodeutils.safe_decode(tool_path, incoming='utf-8').rstrip()
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index ee7735972..2cdb3f904 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -21,7 +21,8 @@ import time
from yardstick.common.process import check_if_process_failed
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, PROCESS_JOIN_TIMEOUT
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.constants import PROCESS_JOIN_TIMEOUT
LOG = logging.getLogger(__name__)
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index d6249a874..f16b4142d 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
from collections import Mapping
import logging
from multiprocessing import Queue, Value, Process
+
import os
import posixpath
import re
@@ -23,7 +24,6 @@ import subprocess
import time
import six
-from six.moves import cStringIO
from trex_stl_lib.trex_stl_client import LoggerApi
from trex_stl_lib.trex_stl_client import STLClient
@@ -32,64 +32,23 @@ from yardstick.benchmark.contexts.base import Context
from yardstick.common import exceptions as y_exceptions
from yardstick.common.process import check_if_process_failed
from yardstick.common import utils
-from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper
-from yardstick.network_services.helpers.samplevnf_helper import PortPairs
+from yardstick.network_services.constants import DEFAULT_VNF_TIMEOUT
+from yardstick.network_services.constants import PROCESS_JOIN_TIMEOUT
+from yardstick.network_services.constants import REMOTE_TMP
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
+from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.utils import get_nsb_option
-from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
+from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
-from yardstick.ssh import AutoConnectSSH
-
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
-DPDK_VERSION = "dpdk-16.07"
LOG = logging.getLogger(__name__)
-REMOTE_TMP = "/tmp"
-DEFAULT_VNF_TIMEOUT = 3600
-PROCESS_JOIN_TIMEOUT = 3
-
-
-class VnfSshHelper(AutoConnectSSH):
-
- def __init__(self, node, bin_path, wait=None):
- self.node = node
- kwargs = self.args_from_node(self.node)
- if wait:
- kwargs.setdefault('wait', wait)
-
- super(VnfSshHelper, self).__init__(**kwargs)
- self.bin_path = bin_path
-
- @staticmethod
- def get_class():
- # must return static class name, anything else refers to the calling class
- # i.e. the subclass, not the superclass
- return VnfSshHelper
-
- def copy(self):
- # this copy constructor is different from SSH classes, since it uses node
- return self.get_class()(self.node, self.bin_path)
-
- def upload_config_file(self, prefix, content):
- cfg_file = os.path.join(REMOTE_TMP, prefix)
- LOG.debug(content)
- file_obj = cStringIO(content)
- self.put_file_obj(file_obj, cfg_file)
- return cfg_file
-
- def join_bin_path(self, *args):
- return os.path.join(self.bin_path, *args)
-
- def provision_tool(self, tool_path=None, tool_file=None):
- if tool_path is None:
- tool_path = self.bin_path
- return super(VnfSshHelper, self).provision_tool(tool_path, tool_file)
-
-
class SetupEnvHelper(object):
CFG_CONFIG = os.path.join(REMOTE_TMP, "sample_config")
@@ -245,7 +204,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def setup_vnf_environment(self):
self._setup_dpdk()
- self.bound_pci = [v['virtual-interface']["vpci"] for v in self.vnfd_helper.interfaces]
self.kill_vnf()
# bind before _setup_resources so we can use dpdk_port_num
self._detect_and_bind_drivers()
@@ -263,10 +221,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
def _setup_dpdk(self):
"""Setup DPDK environment needed for VNF to run"""
self._setup_hugepages()
- self.ssh_helper.execute('sudo modprobe uio && sudo modprobe igb_uio')
- exit_status = self.ssh_helper.execute('lsmod | grep -i igb_uio')[0]
- if exit_status:
- raise y_exceptions.DPDKSetupDriverError()
+ self.dpdk_bind_helper.load_dpdk_driver()
+
+ exit_status = self.dpdk_bind_helper.check_dpdk_driver()
+ if exit_status != 0:
+ raise y_exceptions.DPDKSetupDriverError()
def get_collectd_options(self):
options = self.scenario_helper.all_options.get("collectd", {})
@@ -293,9 +252,22 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
plugins=plugins, interval=collectd_options.get("interval"),
timeout=self.scenario_helper.timeout)
+ def _check_interface_fields(self):
+ num_nodes = len(self.scenario_helper.nodes)
+ # OpenStack instance creation time is probably proportional to the number
+ # of instances
+ timeout = 120 * num_nodes
+ dpdk_node = DpdkNode(self.scenario_helper.name, self.vnfd_helper.interfaces,
+ self.ssh_helper, timeout)
+ dpdk_node.check()
+
def _detect_and_bind_drivers(self):
interfaces = self.vnfd_helper.interfaces
+ self._check_interface_fields()
+ # check for bound after probe
+ self.bound_pci = [v['virtual-interface']["vpci"] for v in interfaces]
+
self.dpdk_bind_helper.read_status()
self.dpdk_bind_helper.save_used_drivers()
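_check_interface_fields() wraps the new DpdkNode probing: it scales the SSH wait budget with the node count and lets DpdkNode.check() fill in vpci, driver and socket before bound_pci is computed. A minimal construction sketch with a hand-written interface list and a stub in place of the real VnfSshHelper:

from unittest import mock

from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkNode

interfaces = [
    {'name': 'xe0',
     'virtual-interface': {'local_mac': '00:00:00:00:00:01',
                           'local_ip': '152.16.100.19',
                           'netmask': '255.255.255.0'}},   # vpci/driver intentionally missing
]
timeout = 120 * 3   # as in _check_interface_fields(): 120 s per node in the scenario

# mock.Mock() stands in for the ssh_helper; on a live node, check() would run
# the sysfs probe over SSH, back-fill vpci/driver/socket from the MAC address,
# and raise IncorrectSetup if required fields were still missing afterwards.
node = DpdkNode('vnf__0', interfaces, mock.Mock(), timeout)
# node.check()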
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 068b19d8d..265d0b7a9 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -20,11 +20,11 @@ import logging
import sys
from yardstick.common import utils
+from yardstick import error
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
-
LOG = logging.getLogger(__name__)
WAIT_AFTER_CFG_LOAD = 10
@@ -36,7 +36,7 @@ sys.path.append(IXNET_LIB)
try:
from IxNet import IxNextgen
except ImportError:
- IxNextgen = utils.ErrorClass
+ IxNextgen = error.ErrorClass
class IxiaRfc2544Helper(Rfc2544ResourceHelper):
diff --git a/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py b/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
new file mode 100644
index 000000000..8e02cf3ac
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vnf_ssh_helper.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+
+from six.moves import StringIO
+
+from yardstick.network_services.constants import REMOTE_TMP
+from yardstick.ssh import AutoConnectSSH
+
+LOG = logging.getLogger(__name__)
+
+
+class VnfSshHelper(AutoConnectSSH):
+
+ def __init__(self, node, bin_path, wait=None):
+ self.node = node
+ kwargs = self.args_from_node(self.node)
+ if wait:
+ # if wait is defined here we want to override
+ kwargs['wait'] = wait
+
+ super(VnfSshHelper, self).__init__(**kwargs)
+ self.bin_path = bin_path
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return VnfSshHelper
+
+ def copy(self):
+ # this copy constructor is different from SSH classes, since it uses node
+ return self.get_class()(self.node, self.bin_path)
+
+ def upload_config_file(self, prefix, content):
+ cfg_file = os.path.join(REMOTE_TMP, prefix)
+ LOG.debug(content)
+ file_obj = StringIO(content)
+ self.put_file_obj(file_obj, cfg_file)
+ return cfg_file
+
+ def join_bin_path(self, *args):
+ return os.path.join(self.bin_path, *args)
+
+ def provision_tool(self, tool_path=None, tool_file=None):
+ if tool_path is None:
+ tool_path = self.bin_path
+ return super(VnfSshHelper, self).provision_tool(tool_path, tool_file)
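A minimal usage sketch of the relocated helper; the node dict, config content and bin path (the conventional NSB location is an assumption here) are invented, and a reachable host is assumed for the upload step:

from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper

node = {'user': 'root', 'ip': '10.0.0.5', 'password': 'r00t', 'name': 'vnf__0'}
ssh = VnfSshHelper(node, bin_path='/opt/nsb_bin', wait=600)   # numeric wait overrides wait_timeout

# Wraps the content in a StringIO object, copies it to /tmp/prox_config.cfg
# on the target and returns that remote path.
remote_cfg = ssh.upload_config_file('prox_config.cfg', '[global]\nname=example\n')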
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 3c3d28146..20be89f57 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -30,17 +30,11 @@ from yardstick.common import template_format
log = logging.getLogger(__name__)
-HEAT_KEY_UUID_LENGTH = 8
-
PROVIDER_SRIOV = "sriov"
_DEPLOYED_STACKS = {}
-def get_short_key_uuid(uuid):
- return str(uuid)[:HEAT_KEY_UUID_LENGTH]
-
-
class HeatStack(object):
"""Represents a Heat stack (deployed template) """
@@ -50,6 +44,13 @@ class HeatStack(object):
self._cloud = shade.openstack_cloud()
self._stack = None
+ def _update_stack_tracking(self):
+ outputs = self._stack.outputs
+ self.outputs = {output['output_key']: output['output_value'] for output
+ in outputs}
+ if self.uuid:
+ _DEPLOYED_STACKS[self.uuid] = self._stack
+
def create(self, template, heat_parameters, wait, timeout):
"""Creates an OpenStack stack from a template"""
with tempfile.NamedTemporaryFile('wb', delete=False) as template_file:
@@ -58,11 +59,21 @@ class HeatStack(object):
self._stack = self._cloud.create_stack(
self.name, template_file=template_file.name, wait=wait,
timeout=timeout, **heat_parameters)
- outputs = self._stack.outputs
- self.outputs = {output['output_key']: output['output_value'] for output
- in outputs}
- if self.uuid:
- _DEPLOYED_STACKS[self.uuid] = self._stack
+
+ self._update_stack_tracking()
+
+ def get(self):
+ """Retrieves an existing stack from the target cloud
+
+ Returns a bool indicating whether the stack exists in the target cloud
+ If the stack exists, it will be stored as self._stack
+ """
+ self._stack = self._cloud.get_stack(self.name)
+ if not self._stack:
+ return False
+
+ self._update_stack_tracking()
+ return True
@staticmethod
def stacks_exist():
@@ -413,7 +424,7 @@ name (i.e. %s).
}
}
- def add_keypair(self, name, key_uuid):
+ def add_keypair(self, name, key_id):
"""add to the template a Nova KeyPair"""
log.debug("adding Nova::KeyPair '%s'", name)
self.resources[name] = {
@@ -425,7 +436,7 @@ name (i.e. %s).
pkg_resources.resource_string(
'yardstick.resources',
'files/yardstick_key-' +
- get_short_key_uuid(key_uuid) + '.pub'),
+ key_id + '.pub'),
'utf-8')
}
}
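HeatStack.get() makes stack reuse possible for the new no_setup/no_teardown flags: look up the stack by name first, and only create it when nothing is deployed. A minimal sketch, assuming HeatStack is constructed with the stack name (as create()/get() imply), that shade can find cloud credentials, and using a throwaway template dict:

from yardstick.orchestrator.heat import HeatStack

template = {'heat_template_version': '2013-05-23', 'resources': {}}   # placeholder template
stack = HeatStack('yardstick-demo-12345678')                          # illustrative stack name

if not stack.get():
    # nothing deployed under that name yet, so create it from scratch
    stack.create(template, heat_parameters={}, wait=True, timeout=3600)

stack.outputs   # {'output_key': 'output_value', ...} in both the reuse and create paths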
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index 6ddf327f2..d7adc0d05 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -78,7 +78,7 @@ from oslo_utils import encodeutils
from scp import SCPClient
import six
-from yardstick.common.utils import try_int
+from yardstick.common.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
from yardstick.network_services.utils import provision_tool
@@ -102,6 +102,7 @@ class SSH(object):
"""Represent ssh connection."""
SSH_PORT = paramiko.config.SSH_PORT
+ DEFAULT_WAIT_TIMEOUT = 120
@staticmethod
def gen_keys(key_filename, bit_count=2048):
@@ -120,6 +121,18 @@ class SSH(object):
# i.e. the subclass, not the superclass
return SSH
+ @classmethod
+ def get_arg_key_map(cls):
+ return {
+ 'user': ('user', NON_NONE_DEFAULT),
+ 'host': ('ip', NON_NONE_DEFAULT),
+ 'port': ('ssh_port', cls.SSH_PORT),
+ 'pkey': ('pkey', None),
+ 'key_filename': ('key_filename', None),
+ 'password': ('password', None),
+ 'name': ('name', None),
+ }
+
def __init__(self, user, host, port=None, pkey=None,
key_filename=None, password=None, name=None):
"""Initialize SSH client.
@@ -137,6 +150,7 @@ class SSH(object):
else:
self.log = logging.getLogger(__name__)
+ self.wait_timeout = self.DEFAULT_WAIT_TIMEOUT
self.user = user
self.host = host
# everybody wants to debug this in the caller, do it here instead
@@ -162,16 +176,9 @@ class SSH(object):
overrides = {}
if defaults is None:
defaults = {}
+
params = ChainMap(overrides, node, defaults)
- return {
- 'user': params['user'],
- 'host': params['ip'],
- 'port': params.get('ssh_port', cls.SSH_PORT),
- 'pkey': params.get('pkey'),
- 'key_filename': params.get('key_filename'),
- 'password': params.get('password'),
- 'name': params.get('name'),
- }
+ return make_dict_from_map(params, cls.get_arg_key_map())
@classmethod
def from_node(cls, node, overrides=None, defaults=None):
@@ -186,7 +193,7 @@ class SSH(object):
return key_class.from_private_key(key)
except paramiko.SSHException as e:
errors.append(e)
- raise SSHError("Invalid pkey: %s" % (errors))
+ raise SSHError("Invalid pkey: %s" % errors)
@property
def is_connected(self):
@@ -287,7 +294,7 @@ class SSH(object):
while True:
# Block until data can be read/write.
- r, w, e = select.select([session], writes, [session], 1)
+ e = select.select([session], writes, [session], 1)[2]
if session.recv_ready():
data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
@@ -361,17 +368,20 @@ class SSH(object):
stderr.seek(0)
return exit_status, stdout.read(), stderr.read()
- def wait(self, timeout=120, interval=1):
+ def wait(self, timeout=None, interval=1):
"""Wait for the host will be available via ssh."""
- start_time = time.time()
+ if timeout is None:
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
while True:
try:
return self.execute("uname")
except (socket.error, SSHError) as e:
self.log.debug("Ssh is still unavailable: %r", e)
time.sleep(interval)
- if time.time() > (start_time + timeout):
- raise SSHTimeout("Timeout waiting for '%s'", self.host)
+ if time.time() > end_time:
+ raise SSHTimeout("Timeout waiting for '%s'" % self.host)
def put(self, files, remote_path=b'.', recursive=False):
client = self._get_client()
@@ -447,24 +457,40 @@ class SSH(object):
class AutoConnectSSH(SSH):
+ @classmethod
+ def get_arg_key_map(cls):
+ arg_key_map = super(AutoConnectSSH, cls).get_arg_key_map()
+ arg_key_map['wait'] = ('wait', True)
+ return arg_key_map
+
# always wait or we will get OpenStack SSH errors
def __init__(self, user, host, port=None, pkey=None,
key_filename=None, password=None, name=None, wait=True):
super(AutoConnectSSH, self).__init__(user, host, port, pkey, key_filename, password, name)
- self._wait = wait
+ if wait and wait is not True:
+ self.wait_timeout = int(wait)
def _make_dict(self):
data = super(AutoConnectSSH, self)._make_dict()
data.update({
- 'wait': self._wait
+ 'wait': self.wait_timeout
})
return data
def _connect(self):
if not self.is_connected:
- self._get_client()
- if self._wait:
- self.wait()
+ interval = 1
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self._get_client()
+ except (socket.error, SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise SSHTimeout("Timeout waiting for '%s'" % self.host)
def drop_connection(self):
""" Don't close anything, just force creation of a new client """
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index 02a85525a..0223fd3ff 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -26,14 +26,6 @@ class OvsDpdkContextTestCase(unittest.TestCase):
NODES_ovs_dpdk_SAMPLE = "nodes_ovs_dpdk_sample.yaml"
NODES_DUPLICATE_SAMPLE = "nodes_duplicate_sample.yaml"
- ATTRS = {
- 'name': 'StandaloneOvsDpdk',
- 'file': 'pod',
- 'flavor': {},
- 'servers': {},
- 'networks': {},
- }
-
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
@@ -55,7 +47,17 @@ class OvsDpdkContextTestCase(unittest.TestCase):
}
def setUp(self):
+ self.attrs = {
+ 'name': 'foo',
+ 'task_id': '1234567890',
+ 'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
+ }
self.ovs_dpdk = ovs_dpdk.OvsDpdkContext()
+ self.addCleanup(self._remove_contexts)
+
+ def _remove_contexts(self):
+ if self.ovs_dpdk in self.ovs_dpdk.list:
+ self.ovs_dpdk._delete_context()
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
@mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@@ -66,9 +68,18 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.assertTrue(self.ovs_dpdk.first_run)
def test_init(self):
+ ATTRS = {
+ 'name': 'StandaloneOvsDpdk',
+ 'task_id': '1234567890',
+ 'file': 'pod',
+ 'flavor': {},
+ 'servers': {},
+ 'networks': {},
+ }
+
self.ovs_dpdk.helper.parse_pod_file = mock.Mock(
return_value=[{}, {}, {}])
- self.assertIsNone(self.ovs_dpdk.init(self.ATTRS))
+ self.assertIsNone(self.ovs_dpdk.init(ATTRS))
def test_setup_ovs(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -186,12 +197,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test__get_server_with_dic_attr_name(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
- }
-
- self.ovs_dpdk.init(attrs)
+ self.ovs_dpdk.init(self.attrs)
attr_name = {'name': 'foo.bar'}
result = self.ovs_dpdk._get_server(attr_name)
@@ -200,14 +206,9 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test__get_server_not_found(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
- }
-
self.ovs_dpdk.helper.parse_pod_file = mock.Mock(
return_value=[{}, {}, {}])
- self.ovs_dpdk.init(attrs)
+ self.ovs_dpdk.init(self.attrs)
attr_name = 'bar.foo'
result = self.ovs_dpdk._get_server(attr_name)
@@ -216,12 +217,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test__get_server_mismatch(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
- }
-
- self.ovs_dpdk.init(attrs)
+ self.ovs_dpdk.init(self.attrs)
attr_name = 'bar.foo1'
result = self.ovs_dpdk._get_server(attr_name)
@@ -230,31 +226,23 @@ class OvsDpdkContextTestCase(unittest.TestCase):
def test__get_server_duplicate(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
- }
+ self.attrs['file'] = self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
- self.ovs_dpdk.init(attrs)
+ self.ovs_dpdk.init(self.attrs)
- attr_name = 'node1.foo'
+ attr_name = 'node1.foo-12345678'
with self.assertRaises(ValueError):
self.ovs_dpdk._get_server(attr_name)
def test__get_server_found(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
- }
-
- self.ovs_dpdk.init(attrs)
+ self.ovs_dpdk.init(self.attrs)
- attr_name = 'node1.foo'
+ attr_name = 'node1.foo-12345678'
result = self.ovs_dpdk._get_server(attr_name)
self.assertEqual(result['ip'], '10.229.47.137')
- self.assertEqual(result['name'], 'node1.foo')
+ self.assertEqual(result['name'], 'node1.foo-12345678')
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index f323fcd3c..f0953ef55 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -29,6 +29,7 @@ class SriovContextTestCase(unittest.TestCase):
ATTRS = {
'name': 'StandaloneSriov',
+ 'task_id': '1234567890',
'file': 'pod',
'flavor': {},
'servers': {},
@@ -56,7 +57,17 @@ class SriovContextTestCase(unittest.TestCase):
}
def setUp(self):
+ self.attrs = {
+ 'name': 'foo',
+ 'task_id': '1234567890',
+ 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE)
+ }
self.sriov = sriov.SriovContext()
+ self.addCleanup(self._remove_contexts)
+
+ def _remove_contexts(self):
+ if self.sriov in self.sriov.list:
+ self.sriov._delete_context()
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
@mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@@ -105,12 +116,7 @@ class SriovContextTestCase(unittest.TestCase):
def test__get_server_with_dic_attr_name(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE)
- }
-
- self.sriov.init(attrs)
+ self.sriov.init(self.attrs)
attr_name = {'name': 'foo.bar'}
result = self.sriov._get_server(attr_name)
@@ -119,13 +125,8 @@ class SriovContextTestCase(unittest.TestCase):
def test__get_server_not_found(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE)
- }
-
self.sriov.helper.parse_pod_file = mock.Mock(return_value=[{}, {}, {}])
- self.sriov.init(attrs)
+ self.sriov.init(self.attrs)
attr_name = 'bar.foo'
result = self.sriov._get_server(attr_name)
@@ -134,12 +135,7 @@ class SriovContextTestCase(unittest.TestCase):
def test__get_server_mismatch(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE)
- }
-
- self.sriov.init(attrs)
+ self.sriov.init(self.attrs)
attr_name = 'bar.foo1'
result = self.sriov._get_server(attr_name)
@@ -148,25 +144,29 @@ class SriovContextTestCase(unittest.TestCase):
def test__get_server_duplicate(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
- }
+ self.attrs['file'] = self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
- self.sriov.init(attrs)
+ self.sriov.init(self.attrs)
- attr_name = 'node1.foo'
+ attr_name = 'node1.foo-12345678'
with self.assertRaises(ValueError):
self.sriov._get_server(attr_name)
def test__get_server_found(self):
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SRIOV_SAMPLE)
- }
+ self.sriov.init(self.attrs)
+
+ attr_name = 'node1.foo-12345678'
+ result = self.sriov._get_server(attr_name)
+
+ self.assertEqual(result['ip'], '10.229.47.137')
+ self.assertEqual(result['name'], 'node1.foo-12345678')
+ self.assertEqual(result['user'], 'root')
+ self.assertEqual(result['key_filename'], '/root/.yardstick_key')
- self.sriov.init(attrs)
+ def test__get_server_no_task_id(self):
+ self.attrs['flags'] = {'no_setup': True}
+ self.sriov.init(self.attrs)
attr_name = 'node1.foo'
result = self.sriov._get_server(attr_name)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_dummy.py b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
index 1a54035df..e393001a1 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_dummy.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
@@ -9,9 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.contexts.dummy
-
-from __future__ import absolute_import
import unittest
from yardstick.benchmark.contexts import dummy
@@ -20,10 +15,55 @@ from yardstick.benchmark.contexts import dummy
class DummyContextTestCase(unittest.TestCase):
def setUp(self):
+ self.attrs = {
+ 'name': 'foo',
+ 'task_id': '1234567890',
+ }
self.test_context = dummy.DummyContext()
+ self.addCleanup(self.test_context._delete_context)
+
+ def test___init__(self):
+ self.assertFalse(self.test_context._flags.no_setup)
+ self.assertFalse(self.test_context._flags.no_teardown)
+ self.assertIsNone(self.test_context._name)
+ self.assertIsNone(self.test_context._task_id)
+
+ def test_init(self):
+ self.test_context.init(self.attrs)
+ self.assertEqual(self.test_context._name, 'foo')
+ self.assertEqual(self.test_context._task_id, '1234567890')
+ self.assertFalse(self.test_context._flags.no_setup)
+ self.assertFalse(self.test_context._flags.no_teardown)
+
+ self.assertEqual(self.test_context.name, 'foo-12345678')
+ self.assertEqual(self.test_context.assigned_name, 'foo')
+
+ def test_init_flags_no_setup(self):
+ self.attrs['flags'] = {'no_setup': True, 'no_teardown': False}
+
+ self.test_context.init(self.attrs)
+
+ self.assertEqual(self.test_context._name, 'foo')
+ self.assertEqual(self.test_context._task_id, '1234567890')
+ self.assertTrue(self.test_context._flags.no_setup)
+ self.assertFalse(self.test_context._flags.no_teardown)
+
+ self.assertEqual(self.test_context.name, 'foo')
+ self.assertEqual(self.test_context.assigned_name, 'foo')
+
+ def test_init_flags_no_teardown(self):
+ self.attrs['flags'] = {'no_setup': False, 'no_teardown': True}
+
+ self.test_context.init(self.attrs)
+
+ self.assertFalse(self.test_context._flags.no_setup)
+ self.assertTrue(self.test_context._flags.no_teardown)
+
+ self.assertEqual(self.test_context.name, 'foo')
+ self.assertEqual(self.test_context.assigned_name, 'foo')
def test__get_server(self):
- self.test_context.init(None)
+ self.test_context.init(self.attrs)
self.test_context.deploy()
result = self.test_context._get_server(None)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index 4348bb052..c449b2a28 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -12,14 +12,18 @@
from collections import OrderedDict
from itertools import count
import logging
-import os
-import uuid
import mock
import unittest
+import shade
+
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import heat
from yardstick.benchmark.contexts import model
+from yardstick.common import exceptions as y_exc
+from yardstick.orchestrator import heat as orch_heat
+from yardstick import ssh
LOG = logging.getLogger(__name__)
@@ -33,10 +37,18 @@ class HeatContextTestCase(unittest.TestCase):
def setUp(self):
self.test_context = heat.HeatContext()
+ self.addCleanup(self._remove_contexts)
self.mock_context = mock.Mock(spec=heat.HeatContext())
+ def _remove_contexts(self):
+ if self.test_context in self.test_context.list:
+ self.test_context._delete_context()
+
def test___init__(self):
- self.assertIsNone(self.test_context.name)
+ self.assertIsNone(self.test_context._name)
+ self.assertIsNone(self.test_context._task_id)
+ self.assertFalse(self.test_context._flags.no_setup)
+ self.assertFalse(self.test_context._flags.no_teardown)
self.assertIsNone(self.test_context.stack)
self.assertEqual(self.test_context.networks, OrderedDict())
self.assertEqual(self.test_context.servers, [])
@@ -50,20 +62,21 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(self.test_context._user)
self.assertIsNone(self.test_context.template_file)
self.assertIsNone(self.test_context.heat_parameters)
- self.assertIsNotNone(self.test_context.key_uuid)
- self.assertIsNotNone(self.test_context.key_filename)
+ self.assertIsNone(self.test_context.key_filename)
+ @mock.patch.object(ssh.SSH, 'gen_keys')
@mock.patch('yardstick.benchmark.contexts.heat.PlacementGroup')
@mock.patch('yardstick.benchmark.contexts.heat.ServerGroup')
@mock.patch('yardstick.benchmark.contexts.heat.Network')
@mock.patch('yardstick.benchmark.contexts.heat.Server')
- def test_init(self, mock_server, mock_network, mock_sg, mock_pg):
+ def test_init(self, mock_server, mock_network, mock_sg, mock_pg, mock_ssh_gen_keys):
pgs = {'pgrp1': {'policy': 'availability'}}
sgs = {'servergroup1': {'policy': 'affinity'}}
networks = {'bar': {'cidr': '10.0.1.0/24'}}
servers = {'baz': {'floating_ip': True, 'placement': 'pgrp1'}}
attrs = {'name': 'foo',
+ 'task_id': '1234567890',
'placement_groups': pgs,
'server_groups': sgs,
'networks': networks,
@@ -71,9 +84,13 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.init(attrs)
- self.assertEqual(self.test_context.name, "foo")
- self.assertEqual(self.test_context.keypair_name, "foo-key")
- self.assertEqual(self.test_context.secgroup_name, "foo-secgroup")
+ self.assertFalse(self.test_context._flags.no_setup)
+ self.assertFalse(self.test_context._flags.no_teardown)
+ self.assertEqual(self.test_context._name, "foo")
+ self.assertEqual(self.test_context._task_id, '1234567890')
+ self.assertEqual(self.test_context.name, "foo-12345678")
+ self.assertEqual(self.test_context.keypair_name, "foo-12345678-key")
+ self.assertEqual(self.test_context.secgroup_name, "foo-12345678-secgroup")
mock_pg.assert_called_with('pgrp1', self.test_context,
pgs['pgrp1']['policy'])
@@ -90,40 +107,79 @@ class HeatContextTestCase(unittest.TestCase):
servers['baz'])
self.assertEqual(len(self.test_context.servers), 1)
- if os.path.exists(self.test_context.key_filename):
- try:
- os.remove(self.test_context.key_filename)
- os.remove(self.test_context.key_filename + ".pub")
- except OSError:
- LOG.exception("key_filename: %s",
- self.test_context.key_filename)
+ mock_ssh_gen_keys.assert_called()
+
+ def test_init_no_name_or_task_id(self):
+ attrs = {}
+ self.assertRaises(KeyError, self.test_context.init, attrs)
+
+ def test_name(self):
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
+ self.assertEqual(self.test_context.name, 'foo-12345678')
+ self.assertEqual(self.test_context.assigned_name, 'foo')
+
+ def test_name_flags(self):
+ self.test_context._flags = base.Flags(
+ **{"no_setup": True, "no_teardown": True})
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+
+ self.assertEqual(self.test_context.name, 'foo')
+ self.assertEqual(self.test_context.assigned_name, 'foo')
+
+ @mock.patch('yardstick.ssh.SSH.gen_keys')
+ def test_init_no_setup_no_teardown(self, *args):
+
+ attrs = {'name': 'foo',
+ 'task_id': '1234567890',
+ 'placement_groups': {},
+ 'server_groups': {},
+ 'networks': {},
+ 'servers': {},
+ 'flags': {
+ 'no_setup': True,
+ 'no_teardown': True,
+ },
+ }
+
+ self.test_context.init(attrs)
+ self.assertTrue(self.test_context._flags.no_setup)
+ self.assertTrue(self.test_context._flags.no_teardown)
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test__add_resources_to_template_no_servers(self, mock_template):
-
- self.test_context.keypair_name = "foo-key"
- self.test_context.secgroup_name = "foo-secgroup"
+ self.test_context._name = 'ctx'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
+ self.test_context.keypair_name = "ctx-key"
+ self.test_context.secgroup_name = "ctx-secgroup"
self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
- netattrs = {'cidr': '10.0.0.0/24', 'provider': None, 'external_network': 'ext_net'}
- self.mock_context.name = 'bar'
+ netattrs = {'cidr': '10.0.0.0/24', 'provider': None,
+ 'external_network': 'ext_net'}
+
self.test_context.networks = OrderedDict(
- {"fool-network": model.Network("fool-network", self.mock_context,
+ {"mynet": model.Network("mynet", self.test_context,
netattrs)})
self.test_context._add_resources_to_template(mock_template)
mock_template.add_keypair.assert_called_with(
- "foo-key",
- "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b")
- mock_template.add_security_group.assert_called_with("foo-secgroup")
-# mock_template.add_network.assert_called_with("bar-fool-network", 'physnet1', None)
+ "ctx-key",
+ "ctx-12345678")
+ mock_template.add_security_group.assert_called_with("ctx-secgroup")
+ mock_template.add_network.assert_called_with(
+ "ctx-12345678-mynet", 'physnet1', None, None, None, None)
mock_template.add_router.assert_called_with(
- "bar-fool-network-router",
+ "ctx-12345678-mynet-router",
netattrs["external_network"],
- "bar-fool-network-subnet")
+ "ctx-12345678-mynet-subnet")
mock_template.add_router_interface.assert_called_with(
- "bar-fool-network-router-if0",
- "bar-fool-network-router",
- "bar-fool-network-subnet")
+ "ctx-12345678-mynet-router-if0",
+ "ctx-12345678-mynet-router",
+ "ctx-12345678-mynet-subnet")
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test_attrs_get(self, *args):
@@ -148,23 +204,103 @@ class HeatContextTestCase(unittest.TestCase):
with self.assertRaises(AttributeError):
self.test_context.user = 'foo'
+ def test__create_new_stack(self):
+ template = mock.Mock()
+ self.test_context._create_new_stack(template)
+ template.create.assert_called_once()
+
+ def test__create_new_stack_stack_create_failed(self):
+ template = mock.Mock()
+ template.create.side_effect = y_exc.HeatTemplateError
+
+ self.assertRaises(y_exc.HeatTemplateError,
+ self.test_context._create_new_stack,
+ template)
+
+ def test__create_new_stack_keyboard_interrupt(self):
+ template = mock.Mock()
+ template.create.side_effect = KeyboardInterrupt
+ self.assertRaises(y_exc.StackCreationInterrupt,
+ self.test_context._create_new_stack,
+ template)
+
+ @mock.patch.object(orch_heat.HeatTemplate, 'add_keypair')
+ @mock.patch.object(heat.HeatContext, '_create_new_stack')
+ def test_deploy_stack_creation_failed(self, mock_create, *args):
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = 'foo-12345678'
+ mock_create.side_effect = y_exc.HeatTemplateError
+ self.assertRaises(y_exc.HeatTemplateError,
+ self.test_context.deploy)
+
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
def test_deploy(self, mock_template):
- self.test_context.name = 'foo'
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
self.test_context.template_file = '/bar/baz/some-heat-file'
self.test_context.heat_parameters = {'image': 'cirros'}
self.test_context.get_neutron_info = mock.MagicMock()
self.test_context.deploy()
- mock_template.assert_called_with('foo',
+ mock_template.assert_called_with('foo-12345678',
'/bar/baz/some-heat-file',
{'image': 'cirros'})
self.assertIsNotNone(self.test_context.stack)
+ # TODO: patch objects
+ @mock.patch.object(heat, 'HeatTemplate')
+ @mock.patch.object(heat.HeatContext, '_retrieve_existing_stack')
+ @mock.patch.object(heat.HeatContext, '_create_new_stack')
+ def test_deploy_no_setup(self, mock_create_new_stack, mock_retrieve_existing_stack, *args):
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ # Might be able to get rid of these
+ self.test_context.template_file = '/bar/baz/some-heat-file'
+ self.test_context.heat_parameters = {'image': 'cirros'}
+ self.test_context.get_neutron_info = mock.MagicMock()
+ self.test_context._flags.no_setup = True
+ self.test_context.deploy()
+
+ # check that heat client is called...
+ mock_create_new_stack.assert_not_called()
+ mock_retrieve_existing_stack.assert_called_with(self.test_context.name)
+ self.assertIsNotNone(self.test_context.stack)
+
+ @mock.patch.object(shade, 'openstack_cloud')
+ @mock.patch.object(heat.HeatTemplate, 'add_keypair')
+ @mock.patch.object(heat.HeatContext, '_create_new_stack')
+ @mock.patch.object(heat.HeatStack, 'get')
+ def test_deploy_try_retrieve_context_does_not_exist(self,
+ mock_get_stack,
+ mock_create_new_stack,
+ *args):
+ self.test_context._name = 'demo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._flags.no_setup = True
+ self.test_context.get_neutron_info = mock.MagicMock()
+
+ # TODO: check whether this is the right value to return; should it be None instead?
+ mock_get_stack.return_value = []
+
+ self.test_context.deploy()
+
+ mock_get_stack.assert_called()
+ mock_create_new_stack.assert_called()
+
+ def test_check_for_context(self):
+ pass
+ # check that the context exists
+
def test_add_server_port(self):
network1 = mock.MagicMock()
network2 = mock.MagicMock()
- self.test_context.name = 'foo'
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
self.test_context.stack = mock.MagicMock()
self.test_context.networks = {
'a': network1,
@@ -173,15 +309,15 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.stack.outputs = {
u'b': u'10.20.30.45',
u'b-subnet_id': 1,
- u'foo-a-subnet-cidr': u'10.20.0.0/15',
- u'foo-a-subnet-gateway_ip': u'10.20.30.1',
+ u'foo-12345678-a-subnet-cidr': u'10.20.0.0/15',
+ u'foo-12345678-a-subnet-gateway_ip': u'10.20.30.1',
u'b-mac_address': u'00:01',
u'b-device_id': u'dev21',
u'b-network_id': u'net789',
u'd': u'40.30.20.15',
u'd-subnet_id': 2,
- u'foo-c-subnet-cidr': u'40.30.0.0/18',
- u'foo-c-subnet-gateway_ip': u'40.30.20.254',
+ u'foo-12345678-c-subnet-cidr': u'40.30.0.0/18',
+ u'foo-12345678-c-subnet-gateway_ip': u'40.30.20.254',
u'd-mac_address': u'00:10',
u'd-device_id': u'dev43',
u'd-network_id': u'net987',
@@ -218,17 +354,39 @@ class HeatContextTestCase(unittest.TestCase):
self.assertEqual(len(server.interfaces), 3)
self.assertDictEqual(server.interfaces['port_a'], expected)
+ @mock.patch('yardstick.benchmark.contexts.heat.os')
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
- def test_undeploy(self, mock_template):
+ def test_undeploy(self, mock_template, *args):
self.test_context.stack = mock_template
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
+ # mock_os.path.exists.return_value = True
+ self.test_context.key_filename = 'foo/bar/foobar'
self.test_context.undeploy()
self.assertTrue(mock_template.delete.called)
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
+ def test_undeploy_no_teardown(self, mock_template):
+ self.test_context.stack = mock_template
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._flags.no_teardown = True
+ self.test_context.undeploy()
+
+ mock_template.delete.assert_not_called()
+
+ @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
@mock.patch('yardstick.benchmark.contexts.heat.os')
def test_undeploy_key_filename(self, mock_os, mock_template):
self.test_context.stack = mock_template
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id)
mock_os.path.exists.return_value = True
+ self.test_context.key_filename = 'foo/bar/foobar'
self.assertIsNone(self.test_context.undeploy())
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
@@ -249,24 +407,27 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = '127.0.0.3'
baz3_server.context.user = 'zab'
- self.test_context.name = 'bar'
+ self.test_context._name = 'bar'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
self.test_context._user = 'bot'
self.test_context.stack = mock.Mock()
self.test_context.stack.outputs = {
'private_ip': '10.0.0.1',
'public_ip': '127.0.0.1',
}
- self.test_context.key_uuid = uuid.uuid4()
self.test_context._server_map = {
'baz3': baz3_server,
'foo2': foo2_server,
}
attr_name = {
- 'name': 'foo.bar',
+ 'name': 'foo.bar-12345678',
'private_ip_attr': 'private_ip',
'public_ip_attr': 'public_ip',
}
+ self.test_context.key_uuid = 'foo-42'
result = self.test_context._get_server(attr_name)
self.assertEqual(result['user'], 'bot')
self.assertEqual(result['ip'], '127.0.0.1')
@@ -288,22 +449,26 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = '127.0.0.3'
baz3_server.context.user = 'zab'
- self.test_context.name = 'bar'
+ self.test_context._name = 'bar'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
self.test_context._user = 'bot'
self.test_context.stack = mock.Mock()
self.test_context.stack.outputs = {
'private_ip': '10.0.0.1',
'public_ip': '127.0.0.1',
}
- self.test_context.key_uuid = uuid.uuid4()
self.test_context._server_map = {
'baz3': baz3_server,
'foo2': foo2_server,
}
attr_name = {
- 'name': 'foo.bar',
+ 'name': 'foo.bar-12345678',
}
+
+ self.test_context.key_uuid = 'foo-42'
result = self.test_context._get_server(attr_name)
self.assertEqual(result['user'], 'bot')
# no private ip attr mapping in the map results in None value in the result
@@ -327,13 +492,14 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = None
baz3_server.context.user = 'zab'
- self.test_context.name = 'bar1'
+ self.test_context._name = 'bar1'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = 'bar1-12345678'
self.test_context.stack = mock.Mock()
self.test_context.stack.outputs = {
'private_ip': '10.0.0.1',
'public_ip': '127.0.0.1',
}
- self.test_context.key_uuid = uuid.uuid4()
self.test_context.generate_routing_table = mock.MagicMock(return_value=[])
self.test_context._server_map = {
@@ -365,19 +531,19 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = None
baz3_server.context.user = 'zab'
- self.test_context.name = 'bar1'
+ self.test_context._name = 'bar1'
self.test_context.stack = mock.Mock()
self.test_context.stack.outputs = {
'private_ip': '10.0.0.1',
'public_ip': '127.0.0.1',
}
- self.test_context.key_uuid = uuid.uuid4()
self.test_context._server_map = {
'baz3': baz3_server,
'foo2': foo2_server,
'wow4': None,
}
+ self.test_context.key_uuid = 'foo-42'
attr_name = 'wow4'
result = self.test_context._get_server(attr_name)
self.assertIsNone(result)
@@ -398,18 +564,21 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = None
baz3_server.context.user = 'zab'
- self.test_context.name = 'bar1'
+ self.test_context._name = 'bar1'
+ self.test_context._task_id = '1235467890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
self.test_context.stack = mock.Mock()
self.test_context.stack.outputs = {
'private_ip': '10.0.0.1',
'public_ip': '127.0.0.1',
}
- self.test_context.key_uuid = uuid.uuid4()
self.test_context._server_map = {
'baz3': baz3_server,
'foo2': foo2_server,
}
+ self.test_context.key_uuid = 'foo-42'
attr_name = {
'name': 'foo.wow4',
'private_ip_attr': 'private_ip',
@@ -434,18 +603,18 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = None
baz3_server.context.user = 'zab'
- self.mock_context.name = 'bar1'
+ self.mock_context._name = 'bar1'
self.test_context.stack = mock.Mock()
self.mock_context.stack.outputs = {
'private_ip': '10.0.0.1',
'public_ip': '127.0.0.1',
}
- self.mock_context.key_uuid = uuid.uuid4()
self.mock_context._server_map = {
'baz3': baz3_server,
'foo2': foo2_server,
}
+ self.test_context.key_uuid = 'foo-42'
attr_name = 'foo.wow4'
result = self.test_context._get_server(attr_name)
self.assertIsNone(result)
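
The heat context tests above repeatedly build the deployed context name as the base name joined with the first eight characters of the task id (for example 'foo' and '1234567890' become 'foo-12345678'). A minimal sketch of that naming convention; the helper name is illustrative, not part of the Yardstick API:

def name_with_task_id(name, task_id):
    # deployed name is "<name>-<first 8 chars of task_id>", e.g. "foo-12345678"
    return '{}-{}'.format(name, task_id[:8])

assert name_with_task_id('foo', '1234567890') == 'foo-12345678'
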
diff --git a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
index e149e0d18..73c59c1b8 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -10,13 +10,13 @@
import mock
import unittest
-from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts import kubernetes
context_cfg = {
'type': 'Kubernetes',
'name': 'k8s',
+ 'task_id': '1234567890',
'servers': {
'host': {
'image': 'openretriever/yardstick',
@@ -40,11 +40,12 @@ class KubernetesTestCase(unittest.TestCase):
def setUp(self):
self.k8s_context = kubernetes.KubernetesContext()
+ self.addCleanup(self._remove_contexts)
self.k8s_context.init(context_cfg)
- def tearDown(self):
- # clear kubernetes contexts from global list so we don't break other tests
- Context.list = []
+ def _remove_contexts(self):
+ if self.k8s_context in self.k8s_context.list:
+ self.k8s_context._delete_context()
@mock.patch.object(kubernetes.KubernetesContext, '_delete_services')
@mock.patch.object(kubernetes.KubernetesContext, '_delete_ssh_key')
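
The kubernetes (and node) context tests replace a tearDown that cleared the global context list with a per-test cleanup registered via addCleanup, so the context is removed even when setUp or the test body fails part-way through. A hedged sketch of the pattern using a stand-in context class (FakeContext is illustrative, not the real KubernetesContext):

import unittest


class FakeContext(object):
    # stand-in for a Yardstick context that registers itself in a class-level list
    list = []

    def init(self, attrs):
        self.list.append(self)

    def _delete_context(self):
        self.list.remove(self)


class ExampleTestCase(unittest.TestCase):
    def setUp(self):
        self.ctx = FakeContext()
        # registered before init(): runs even if init() or the test fails,
        # unlike tearDown, so later tests never see a stale context
        self.addCleanup(self._remove_contexts)
        self.ctx.init({})

    def _remove_contexts(self):
        if self.ctx in self.ctx.list:
            self.ctx._delete_context()

    def test_registered(self):
        self.assertIn(self.ctx, FakeContext.list)
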
diff --git a/yardstick/tests/unit/benchmark/contexts/test_node.py b/yardstick/tests/unit/benchmark/contexts/test_node.py
index 5329d30f4..ae7c5d999 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_node.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_node.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015-2017 Huawei Technologies Co.,Ltd and others.
#
@@ -9,9 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.contexts.node
-
-from __future__ import absolute_import
import os
import unittest
import errno
@@ -21,10 +16,6 @@ from yardstick.common import constants as consts
from yardstick.benchmark.contexts import node
-# pylint: disable=unused-argument
-# disable this for now because I keep forgetting mock patch arg ordering
-
-
class NodeContextTestCase(unittest.TestCase):
PREFIX = 'yardstick.benchmark.contexts.node'
@@ -34,7 +25,17 @@ class NodeContextTestCase(unittest.TestCase):
def setUp(self):
self.test_context = node.NodeContext()
+ self.addCleanup(self._remove_contexts)
self.os_path_join = os.path.join
+ self.attrs = {
+ 'name': 'foo',
+ 'task_id': '1234567890',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE)
+ }
+
+ def _remove_contexts(self):
+ if self.test_context in self.test_context.list:
+ self.test_context._delete_context()
def _get_file_abspath(self, filename):
curr_path = os.path.dirname(os.path.abspath(__file__))
@@ -42,7 +43,7 @@ class NodeContextTestCase(unittest.TestCase):
return file_path
def test___init__(self):
- self.assertIsNone(self.test_context.name)
+ self.assertIsNone(self.test_context._name)
self.assertIsNone(self.test_context.file_path)
self.assertEqual(self.test_context.nodes, [])
self.assertEqual(self.test_context.controllers, [])
@@ -74,6 +75,7 @@ class NodeContextTestCase(unittest.TestCase):
attrs = {
'name': 'foo',
+ 'task_id': '1234567890',
'file': error_path,
}
read_mock.side_effect = IOError(errno.EBUSY, 'busy')
@@ -97,37 +99,19 @@ class NodeContextTestCase(unittest.TestCase):
self.assertEqual(str(raised.exception), str(read_mock.side_effect))
def test_read_config_file(self):
-
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
+ self.test_context.init(self.attrs)
self.assertIsNotNone(self.test_context.read_config_file())
def test__dispatch_script(self):
-
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
+ self.test_context.init(self.attrs)
self.test_context.env = {'bash': [{'script': 'dummy'}]}
self.test_context._execute_script = mock.Mock()
self.assertEqual(self.test_context._dispatch_script('bash'), None)
def test__dispatch_ansible(self):
-
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
+ self.test_context.init(self.attrs)
self.test_context.env = {'ansible': [{'script': 'dummy'}]}
self.test_context._do_ansible_job = mock.Mock()
@@ -136,19 +120,13 @@ class NodeContextTestCase(unittest.TestCase):
self.assertEqual(self.test_context._dispatch_ansible('ansible'), None)
@mock.patch("{}.AnsibleCommon".format(PREFIX))
- def test__do_ansible_job(self, mock_ansible):
+ def test__do_ansible_job(self, *args):
self.assertEqual(None, self.test_context._do_ansible_job('dummy'))
- def test_successful_init(self):
-
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
+ def test_init(self):
+ self.test_context.init(self.attrs)
- self.assertEqual(self.test_context.name, "foo")
+ self.assertEqual(self.test_context.name, "foo-12345678")
self.assertEqual(len(self.test_context.nodes), 4)
self.assertEqual(len(self.test_context.controllers), 2)
self.assertEqual(len(self.test_context.computes), 1)
@@ -156,81 +134,44 @@ class NodeContextTestCase(unittest.TestCase):
self.assertEqual(len(self.test_context.baremetals), 1)
self.assertEqual(self.test_context.baremetals[0]["name"], "node4")
- def test__get_server_with_dic_attr_name(self):
-
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
-
- attr_name = {'name': 'foo.bar'}
- result = self.test_context._get_server(attr_name)
+ def test__get_server_with_dict_attr_name(self):
+ self.test_context.init(self.attrs)
+ result = self.test_context._get_server({'name': 'node1.foo-12345678'})
- self.assertEqual(result, None)
+        self.assertIsNone(result)
def test__get_server_not_found(self):
+ self.test_context.init(self.attrs)
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
-
- attr_name = 'bar.foo'
- result = self.test_context._get_server(attr_name)
-
- self.assertEqual(result, None)
+ self.assertIsNone(self.test_context._get_server('bar.foo-12345678'))
def test__get_server_mismatch(self):
+ self.test_context.init(self.attrs)
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
-
- attr_name = 'bar.foo1'
- result = self.test_context._get_server(attr_name)
-
- self.assertEqual(result, None)
+ self.assertIsNone(self.test_context._get_server('bar.foo1'))
def test__get_server_duplicate(self):
+ self.attrs['file'] = self._get_file_abspath(
+ self.NODES_DUPLICATE_SAMPLE)
+ self.test_context.init(self.attrs)
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
- }
-
- self.test_context.init(attrs)
-
- attr_name = 'node1.foo'
with self.assertRaises(ValueError):
- self.test_context._get_server(attr_name)
+ self.test_context._get_server('node1.foo-12345678')
def test__get_server_found(self):
+ self.test_context.init(self.attrs)
- attrs = {
- 'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
- }
-
- self.test_context.init(attrs)
-
- attr_name = 'node1.foo'
- result = self.test_context._get_server(attr_name)
+ result = self.test_context._get_server('node1.foo-12345678')
self.assertEqual(result['ip'], '10.229.47.137')
- self.assertEqual(result['name'], 'node1.foo')
+ self.assertEqual(result['name'], 'node1.foo-12345678')
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
@mock.patch('{}.NodeContext._dispatch_script'.format(PREFIX))
def test_deploy(self, dispatch_script_mock):
obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
obj.env = {
'type': 'script'
}
@@ -240,6 +181,7 @@ class NodeContextTestCase(unittest.TestCase):
@mock.patch('{}.NodeContext._dispatch_ansible'.format(PREFIX))
def test_deploy_anisible(self, dispatch_ansible_mock):
obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
obj.env = {
'type': 'ansible'
}
@@ -268,6 +210,7 @@ class NodeContextTestCase(unittest.TestCase):
@mock.patch('{}.ssh.SSH.execute'.format(PREFIX))
def test_execute_remote_script(self, execute_mock, put_file_mock):
obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
obj.env = {'prefix': 'yardstick.benchmark.scenarios.compute'}
node_name_args = 'node5'
obj.nodes = [{
@@ -288,14 +231,18 @@ class NodeContextTestCase(unittest.TestCase):
def test_execute_script_local(self, local_execute_mock):
node_name = 'local'
info = {}
- node.NodeContext()._execute_script(node_name, info)
+ obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
+ obj._execute_script(node_name, info)
self.assertTrue(local_execute_mock.called)
@mock.patch('{}.NodeContext._execute_remote_script'.format(PREFIX))
def test_execute_script_remote(self, remote_execute_mock):
node_name = 'node5'
info = {}
- node.NodeContext()._execute_script(node_name, info)
+ obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
+ obj._execute_script(node_name, info)
self.assertTrue(remote_execute_mock.called)
def test_get_script(self):
@@ -303,13 +250,16 @@ class NodeContextTestCase(unittest.TestCase):
info_args = {
'script': script_args
}
- script, options = node.NodeContext()._get_script(info_args)
+ obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
+ script, options = obj._get_script(info_args)
self.assertEqual(script_args, script)
self.assertEqual('', options)
def test_node_info(self):
node_name_args = 'node5'
obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
obj.nodes = [{'name': node_name_args, 'check': node_name_args}]
node_info = obj._get_node_info(node_name_args)
self.assertEqual(node_info.get('check'), node_name_args)
@@ -318,6 +268,7 @@ class NodeContextTestCase(unittest.TestCase):
def test_get_client(self, wait_mock):
node_name_args = 'node5'
obj = node.NodeContext()
+ self.addCleanup(obj._delete_context)
obj.nodes = [{
'name': node_name_args,
'user': 'ubuntu',
@@ -328,26 +279,38 @@ class NodeContextTestCase(unittest.TestCase):
self.assertTrue(wait_mock.called)
def test_get_server(self):
- self.test_context.name = 'vnf1'
- self.test_context.nodes = [{'name': 'my', 'value': 100}]
+ self.test_context.init(self.attrs)
+ self.test_context._name = 'foo'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
+ self.assertEqual('foo-12345678', self.test_context.name)
+ self.assertIsNotNone(self.test_context._task_id)
- with self.assertRaises(ValueError):
- self.test_context.get_server('my.vnf2')
+ result = self.test_context.get_server('node1.foo-12345678')
- expected = {'name': 'my.vnf1', 'value': 100, 'interfaces': {}}
- result = self.test_context.get_server('my.vnf1')
- self.assertDictEqual(result, expected)
+ self.assertEqual(result['ip'], '10.229.47.137')
+ self.assertEqual(result['name'], 'node1.foo-12345678')
+ self.assertEqual(result['user'], 'root')
+ self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+
+ def test_get_server_server_not_in_context(self):
+ self.test_context.init(self.attrs)
+
+ with self.assertRaises(ValueError):
+ self.test_context.get_server('my2.foo-12345678')
def test_get_context_from_server(self):
- self.test_context.name = 'vnf1'
+ self.test_context._name = 'vnf1'
+ self.test_context._task_id = '1234567890'
+ self.test_context._name_task_id = '{}-{}'.format(
+ self.test_context._name, self.test_context._task_id[:8])
self.test_context.nodes = [{'name': 'my', 'value': 100}]
self.test_context.attrs = {'attr1': 200}
- with self.assertRaises(ValueError):
- self.test_context.get_context_from_server('my.vnf2')
-
- result = self.test_context.get_context_from_server('my.vnf1')
- self.assertIs(result, self.test_context)
+ self.assertIs(
+ self.test_context.get_context_from_server('my.vnf1-12345678'),
+ self.test_context)
# TODO: Split this into more granular tests
def test__get_network(self):
@@ -393,11 +356,3 @@ class NodeContextTestCase(unittest.TestCase):
expected = network1
result = self.test_context._get_network(attr_name)
self.assertDictEqual(result, expected)
-
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
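
The rewritten node-context lookups resolve qualified names of the form '<server>.<context name>-<task id prefix>', returning None on a mismatch and raising ValueError on duplicates. A rough sketch of that convention, consistent with the assertions above; the function and variable names are assumptions, not the real NodeContext._get_server body:

def lookup_server(nodes, context_name, attr_name):
    # split "<server>.<context>" on the first dot
    try:
        node_name, ctx_name = attr_name.split('.', 1)
    except ValueError:
        return None
    if ctx_name != context_name:
        return None
    matches = [n for n in nodes if n['name'] == node_name]
    if len(matches) > 1:
        raise ValueError('duplicate nodes named %s' % node_name)
    if not matches:
        return None
    server = dict(matches[0])
    server['name'] = attr_name
    return server


nodes = [{'name': 'node1', 'ip': '10.229.47.137', 'user': 'root'}]
assert lookup_server(nodes, 'foo-12345678', 'node1.foo-12345678')['ip'] == '10.229.47.137'
assert lookup_server(nodes, 'foo-12345678', 'bar.foo-12345678') is None
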
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index ee00d8826..b3a51209a 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -7,13 +7,22 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import copy
+import io
import os
+import sys
import mock
+import six
import unittest
+import uuid
+from yardstick.benchmark.contexts import dummy
from yardstick.benchmark.core import task
from yardstick.common import constants as consts
+from yardstick.common import exceptions
+from yardstick.common import task_template
+from yardstick.common import utils
class TaskTestCase(unittest.TestCase):
@@ -174,7 +183,6 @@ class TaskTestCase(unittest.TestCase):
'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml'))
self.assertEqual(task_files[1], self.change_to_abspath(
'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml'))
-
self.assertIsNone(task_args[0])
self.assertIsNone(task_args[1])
self.assertIsNone(task_args_fnames[0])
@@ -256,30 +264,37 @@ class TaskTestCase(unittest.TestCase):
actual_result = t._parse_options(options)
self.assertEqual(expected_result, actual_result)
+ def test_parse_options_no_teardown(self):
+ options = {
+ 'openstack': {
+ 'EXTERNAL_NETWORK': '$network'
+ },
+ 'nodes': ['node1', '$node'],
+ 'host': '$host',
+            'contexts': {'name': 'my-context',
+                         'no_teardown': True}
+ }
- def test_change_server_name_host_str(self):
- scenario = {'host': 'demo'}
- suffix = '-8'
- task.change_server_name(scenario, suffix)
- self.assertEqual('demo-8', scenario['host'])
-
- def test_change_server_name_host_dict(self):
- scenario = {'host': {'name': 'demo'}}
- suffix = '-8'
- task.change_server_name(scenario, suffix)
- self.assertEqual('demo-8', scenario['host']['name'])
+ t = task.Task()
+ t.outputs = {
+ 'network': 'ext-net',
+ 'node': 'node2',
+ 'host': 'server.yardstick'
+ }
- def test_change_server_name_target_str(self):
- scenario = {'target': 'demo'}
- suffix = '-8'
- task.change_server_name(scenario, suffix)
- self.assertEqual('demo-8', scenario['target'])
+ expected_result = {
+ 'openstack': {
+ 'EXTERNAL_NETWORK': 'ext-net'
+ },
+ 'nodes': ['node1', 'node2'],
+ 'host': 'server.yardstick',
+ 'contexts': {'name': 'my-context',
+ 'no_teardown': True,
+ }
+ }
- def test_change_server_name_target_dict(self):
- scenario = {'target': {'name': 'demo'}}
- suffix = '-8'
- task.change_server_name(scenario, suffix)
- self.assertEqual('demo-8', scenario['target']['name'])
+ actual_result = t._parse_options(options)
+ self.assertEqual(expected_result, actual_result)
@mock.patch('six.moves.builtins.open', side_effect=mock.mock_open())
@mock.patch.object(task, 'utils')
@@ -299,9 +314,219 @@ class TaskTestCase(unittest.TestCase):
return os.path.join(consts.YARDSTICK_ROOT_PATH, filepath)
-def main():
- unittest.main()
+class TaskParserTestCase(unittest.TestCase):
+
+ TASK = """
+{% set value1 = value1 or 'var1' %}
+{% set value2 = value2 or 'var2' %}
+key1: {{ value1 }}
+key2:
+ - {{ value2 }}"""
+ TASK_RENDERED_1 = u"""
-if __name__ == '__main__':
- main()
+
+key1: var1
+key2:
+ - var2"""
+
+ TASK_RENDERED_2 = u"""
+
+
+key1: var3
+key2:
+ - var4"""
+
+ def setUp(self):
+ self.parser = task.TaskParser('fake/path')
+ self.scenario = {
+ 'host': 'athena.demo',
+ 'target': 'kratos.demo',
+ 'targets': [
+ 'ares.demo', 'mars.demo'
+ ],
+ 'options': {
+ 'server_name': {
+ 'host': 'jupiter.demo',
+ 'target': 'saturn.demo',
+ },
+ },
+ 'nodes': {
+ 'tg__0': 'tg_0.demo',
+ 'vnf__0': 'vnf_0.demo',
+ }
+ }
+
+ def test__change_node_names(self):
+
+ ctx_attrs = {
+ 'name': 'demo',
+ 'task_id': '1234567890',
+ 'servers': [
+ 'athena', 'kratos',
+ 'ares', 'mars',
+ 'jupiter', 'saturn',
+ 'tg_0', 'vnf_0'
+ ]
+ }
+
+ my_context = dummy.DummyContext()
+ my_context.init(ctx_attrs)
+
+ expected_scenario = {
+ 'host': 'athena.demo-12345678',
+ 'target': 'kratos.demo-12345678',
+ 'targets': [
+ 'ares.demo-12345678', 'mars.demo-12345678'
+ ],
+ 'options': {
+ 'server_name': {
+ 'host': 'jupiter.demo-12345678',
+ 'target': 'saturn.demo-12345678',
+ },
+ },
+ 'nodes': {
+ 'tg__0': 'tg_0.demo-12345678',
+ 'vnf__0': 'vnf_0.demo-12345678',
+ }
+ }
+
+ scenario = copy.deepcopy(self.scenario)
+
+ self.parser._change_node_names(scenario, [my_context])
+ self.assertEqual(scenario, expected_scenario)
+
+ def test__change_node_names_context_not_found(self):
+ scenario = copy.deepcopy(self.scenario)
+ self.assertRaises(exceptions.ScenarioConfigContextNameNotFound,
+ self.parser._change_node_names,
+ scenario, [])
+
+ def test__change_node_names_context_name_unchanged(self):
+ ctx_attrs = {
+ 'name': 'demo',
+ 'task_id': '1234567890',
+ 'flags': {
+ 'no_setup': True,
+ 'no_teardown': True
+ }
+ }
+
+ my_context = dummy.DummyContext()
+ my_context.init(ctx_attrs)
+
+ scenario = copy.deepcopy(self.scenario)
+ expected_scenario = copy.deepcopy(self.scenario)
+
+ self.parser._change_node_names(scenario, [my_context])
+ self.assertEqual(scenario, expected_scenario)
+
+ def test__parse_tasks(self):
+ task_obj = task.Task()
+ _uuid = uuid.uuid4()
+ task_obj.task_id = _uuid
+ task_files = ['/directory/task_file_name.yml']
+ mock_parser = mock.Mock()
+ mock_parser.parse_task.return_value = {'rendered': 'File content'}
+ mock_args = mock.Mock()
+ mock_args.render_only = False
+
+ tasks = task_obj._parse_tasks(mock_parser, task_files, mock_args,
+ ['arg1'], ['file_arg1'])
+ self.assertEqual(
+ [{'rendered': 'File content', 'case_name': 'task_file_name'}],
+ tasks)
+ mock_parser.parse_task.assert_called_once_with(
+ _uuid, 'arg1', 'file_arg1')
+
+ @mock.patch.object(sys, 'exit')
+ @mock.patch.object(utils, 'write_file')
+ @mock.patch.object(utils, 'makedirs')
+ def test__parse_tasks_render_only(self, mock_makedirs, mock_write_file,
+ mock_exit):
+ task_obj = task.Task()
+ _uuid = uuid.uuid4()
+ task_obj.task_id = _uuid
+ task_files = ['/directory/task_file_name.yml']
+ mock_parser = mock.Mock()
+ mock_parser.parse_task.return_value = {'rendered': 'File content'}
+ mock_args = mock.Mock()
+ mock_args.render_only = '/output_directory'
+
+ task_obj._parse_tasks(mock_parser, task_files, mock_args,
+ ['arg1'], ['file_arg1'])
+ mock_makedirs.assert_called_once_with('/output_directory')
+ mock_write_file.assert_called_once_with(
+ '/output_directory/000-task_file_name.yml', 'File content')
+ mock_exit.assert_called_once_with(0)
+
+ def test__render_task_no_args(self):
+ task_parser = task.TaskParser('task_file')
+ task_str = io.StringIO(six.text_type(self.TASK))
+ with mock.patch.object(six.moves.builtins, 'open',
+ return_value=task_str) as mock_open:
+ parsed, rendered = task_parser._render_task(None, None)
+
+ self.assertEqual(self.TASK_RENDERED_1, rendered)
+ self.assertEqual({'key1': 'var1', 'key2': ['var2']}, parsed)
+ mock_open.assert_called_once_with('task_file')
+
+ def test__render_task_arguments(self):
+ task_parser = task.TaskParser('task_file')
+ task_str = io.StringIO(six.text_type(self.TASK))
+ with mock.patch.object(six.moves.builtins, 'open',
+ return_value=task_str) as mock_open:
+ parsed, rendered = task_parser._render_task('value1: "var1"', None)
+
+ self.assertEqual(self.TASK_RENDERED_1, rendered)
+ self.assertEqual({'key1': 'var1', 'key2': ['var2']}, parsed)
+ mock_open.assert_called_once_with('task_file')
+
+ def test__render_task_file_arguments(self):
+ task_parser = task.TaskParser('task_file')
+ with mock.patch.object(six.moves.builtins, 'open') as mock_open:
+ mock_open.side_effect = (
+ io.StringIO(six.text_type('value2: var4')),
+ io.StringIO(six.text_type(self.TASK))
+ )
+ parsed, rendered = task_parser._render_task('value1: "var3"',
+ 'args_file')
+
+ self.assertEqual(self.TASK_RENDERED_2, rendered)
+ self.assertEqual({'key1': 'var3', 'key2': ['var4']}, parsed)
+ mock_open.assert_has_calls([mock.call('args_file'),
+ mock.call('task_file')])
+
+ def test__render_task_error_arguments(self):
+ with self.assertRaises(exceptions.TaskRenderArgumentError):
+ task.TaskParser('task_file')._render_task('value1="var3"', None)
+
+ def test__render_task_error_task_file(self):
+ task_parser = task.TaskParser('task_file')
+ with mock.patch.object(six.moves.builtins, 'open') as mock_open:
+ mock_open.side_effect = (
+ io.StringIO(six.text_type('value2: var4')),
+ IOError()
+ )
+ with self.assertRaises(exceptions.TaskReadError):
+ task_parser._render_task('value1: "var3"', 'args_file')
+
+ mock_open.assert_has_calls([mock.call('args_file'),
+ mock.call('task_file')])
+
+ def test__render_task_render_error(self):
+ task_parser = task.TaskParser('task_file')
+ with mock.patch.object(six.moves.builtins, 'open') as mock_open, \
+ mock.patch.object(task_template.TaskTemplate, 'render',
+ side_effect=TypeError) as mock_render:
+ mock_open.side_effect = (
+ io.StringIO(six.text_type('value2: var4')),
+ io.StringIO(six.text_type(self.TASK))
+ )
+ with self.assertRaises(exceptions.TaskRenderError):
+ task_parser._render_task('value1: "var3"', 'args_file')
+
+ mock_open.assert_has_calls([mock.call('args_file'),
+ mock.call('task_file')])
+ mock_render.assert_has_calls(
+ [mock.call(self.TASK, value1='var3', value2='var4')])
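
test__parse_tasks and test__parse_tasks_render_only pin down two small conventions: the case name is the task file's base name without its extension, and in render-only mode the rendered file is written with a zero-padded index prefix. A minimal sketch of both, assuming only these observable names matter (case_name and render_only_name are illustrative helpers, not the Task API):

import os


def case_name(task_file):
    # '/directory/task_file_name.yml' -> 'task_file_name'
    return os.path.splitext(os.path.basename(task_file))[0]


def render_only_name(index, task_file):
    # index 0 -> '000-task_file_name.yml'
    return '{:03d}-{}.yml'.format(index, case_name(task_file))


assert case_name('/directory/task_file_name.yml') == 'task_file_name'
assert render_only_name(0, '/directory/task_file_name.yml') == '000-task_file_name.yml'
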
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 83db6ae01..c7a29f27e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -13,12 +13,10 @@
# limitations under the License.
from copy import deepcopy
-import errno
import os
import sys
import mock
-import six
import unittest
from yardstick import tests
@@ -26,8 +24,9 @@ from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.traffic_profile import base
from yardstick.network_services.vnf_generic import vnfdgen
-from yardstick.network_services.vnf_generic.vnf.base import \
- GenericTrafficGen, GenericVNF
+from yardstick.error import IncorrectConfig
+from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
+from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
stl_patch = mock.patch.dict(sys.modules, tests.STL_MOCKS)
@@ -355,16 +354,6 @@ class TestNetworkServiceTestCase(unittest.TestCase):
file_path = os.path.join(curr_path, filename)
return file_path
- def test_ssh_manager(self):
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
- ssh.from_node.return_value = ssh_mock
- for node_dict in self.context_cfg["nodes"].values():
- with vnf_generic.SshManager(node_dict) as conn:
- self.assertIsNotNone(conn)
-
def test___init__(self):
assert self.topology
@@ -462,12 +451,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.load_vnf_models(self.scenario_cfg, self.context_cfg))
def test_map_topology_to_infrastructure(self):
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
- ssh.from_node.return_value = ssh_mock
- self.s.map_topology_to_infrastructure()
+ self.s.map_topology_to_infrastructure()
nodes = self.context_cfg["nodes"]
self.assertEqual('../../vnf_descriptors/tg_rfc2544_tpl.yaml',
@@ -476,26 +460,29 @@ class TestNetworkServiceTestCase(unittest.TestCase):
nodes['vnf__1']['VNF model'])
def test_map_topology_to_infrastructure_insufficient_nodes(self):
- del self.context_cfg['nodes']['vnf__1']
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(1, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
- ssh.from_node.return_value = ssh_mock
+ cfg = deepcopy(self.context_cfg)
+ del cfg['nodes']['vnf__1']
- with self.assertRaises(vnf_generic.IncorrectConfig):
+ cfg_patch = mock.patch.object(self.s, 'context_cfg', cfg)
+ with cfg_patch:
+ with self.assertRaises(IncorrectConfig):
self.s.map_topology_to_infrastructure()
def test_map_topology_to_infrastructure_config_invalid(self):
- cfg = dict(self.context_cfg)
+ ssh_mock = mock.Mock()
+ ssh_mock.execute.return_value = 0, SYS_CLASS_NET + IP_ADDR_SHOW, ""
+
+ cfg = deepcopy(self.s.context_cfg)
+
+        # delete every local_mac; we don't know which interface is validated first
del cfg['nodes']['vnf__1']['interfaces']['xe0']['local_mac']
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
- ssh.from_node.return_value = ssh_mock
+ del cfg['nodes']['vnf__1']['interfaces']['xe1']['local_mac']
+ del cfg['nodes']['tg__1']['interfaces']['xe0']['local_mac']
+ del cfg['nodes']['tg__1']['interfaces']['xe1']['local_mac']
- with self.assertRaises(vnf_generic.IncorrectConfig):
+ config_patch = mock.patch.object(self.s, 'context_cfg', cfg)
+ with config_patch:
+ with self.assertRaises(IncorrectConfig):
self.s.map_topology_to_infrastructure()
def test__resolve_topology_invalid_config(self):
@@ -691,137 +678,3 @@ class TestNetworkServiceTestCase(unittest.TestCase):
mock.Mock(return_value=True)
with self.assertRaises(RuntimeError):
self.s.teardown()
-
- SAMPLE_NETDEVS = {
- 'enp11s0': {
- 'address': '0a:de:ad:be:ef:f5',
- 'device': '0x1533',
- 'driver': 'igb',
- 'ifindex': '2',
- 'interface_name': 'enp11s0',
- 'operstate': 'down',
- 'pci_bus_id': '0000:0b:00.0',
- 'subsystem_device': '0x1533',
- 'subsystem_vendor': '0x15d9',
- 'vendor': '0x8086'
- },
- 'lan': {
- 'address': '0a:de:ad:be:ef:f4',
- 'device': '0x153a',
- 'driver': 'e1000e',
- 'ifindex': '3',
- 'interface_name': 'lan',
- 'operstate': 'up',
- 'pci_bus_id': '0000:00:19.0',
- 'subsystem_device': '0x153a',
- 'subsystem_vendor': '0x15d9',
- 'vendor': '0x8086'
- }
- }
-
- SAMPLE_VM_NETDEVS = {
- 'eth1': {
- 'address': 'fa:de:ad:be:ef:5b',
- 'device': '0x0001',
- 'driver': 'virtio_net',
- 'ifindex': '3',
- 'interface_name': 'eth1',
- 'operstate': 'down',
- 'pci_bus_id': '0000:00:04.0',
- 'vendor': '0x1af4'
- }
- }
-
- def test_parse_netdev_info(self):
- output = """\
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/ifindex:2
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/address:0a:de:ad:be:ef:f5
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/operstate:down
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/vendor:0x8086
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/device:0x1533
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_vendor:0x15d9
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_device:0x1533
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/driver:igb
-/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/pci_bus_id:0000:0b:00.0
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/ifindex:3
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/address:0a:de:ad:be:ef:f4
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/operstate:up
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/vendor:0x8086
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/device:0x153a
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_vendor:0x15d9
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_device:0x153a
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/driver:e1000e
-/sys/devices/pci0000:00/0000:00:19.0/net/lan/pci_bus_id:0000:00:19.0
-"""
- res = vnf_generic.NetworkServiceTestCase.parse_netdev_info(output)
- assert res == self.SAMPLE_NETDEVS
-
- def test_parse_netdev_info_virtio(self):
- output = """\
-/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/ifindex:3
-/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/address:fa:de:ad:be:ef:5b
-/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/operstate:down
-/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/vendor:0x1af4
-/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/device:0x0001
-/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/driver:virtio_net
-"""
- res = vnf_generic.NetworkServiceTestCase.parse_netdev_info(output)
- assert res == self.SAMPLE_VM_NETDEVS
-
- def test_probe_missing_values(self):
- netdevs = self.SAMPLE_NETDEVS.copy()
- network = {'local_mac': '0a:de:ad:be:ef:f5'}
- vnf_generic.NetworkServiceTestCase._probe_missing_values(netdevs,
- network)
- assert network['vpci'] == '0000:0b:00.0'
-
- network = {'local_mac': '0a:de:ad:be:ef:f4'}
- vnf_generic.NetworkServiceTestCase._probe_missing_values(netdevs,
- network)
- assert network['vpci'] == '0000:00:19.0'
-
- @mock.patch.object(six.moves.builtins, 'open')
- def test_open_relative_path(self, mock_open):
- # NOTE(ralonsoh): the mocked function is not properly used and tested.
- mock_open_result = mock_open()
- mock_open_call_count = 1 # initial call to get result
- self.assertEqual(utils.open_relative_file('foo', 'bar'),
- mock_open_result)
-
- mock_open_call_count += 1 # one more call expected
- self.assertEqual(mock_open.call_count, mock_open_call_count)
- self.assertIn('foo', mock_open.call_args_list[-1][0][0])
- self.assertNotIn('bar', mock_open.call_args_list[-1][0][0])
-
- def open_effect(*args, **kwargs):
- if kwargs.get('name', args[0]) == os.path.join('bar', 'foo'):
- return mock_open_result
- raise IOError(errno.ENOENT, 'not found')
-
- mock_open.side_effect = open_effect
- self.assertEqual(utils.open_relative_file('foo', 'bar'),
- mock_open_result)
-
- mock_open_call_count += 2 # two more calls expected
- self.assertEqual(mock_open.call_count, mock_open_call_count)
- self.assertIn('foo', mock_open.call_args_list[-1][0][0])
- self.assertIn('bar', mock_open.call_args_list[-1][0][0])
-
- # test an IOError of type ENOENT
- mock_open.side_effect = IOError(errno.ENOENT, 'not found')
- with self.assertRaises(IOError):
- # the second call still raises
- utils.open_relative_file('foo', 'bar')
-
- mock_open_call_count += 2 # two more calls expected
- self.assertEqual(mock_open.call_count, mock_open_call_count)
- self.assertIn('foo', mock_open.call_args_list[-1][0][0])
- self.assertIn('bar', mock_open.call_args_list[-1][0][0])
-
- # test an IOError other than ENOENT
- mock_open.side_effect = IOError(errno.EBUSY, 'busy')
- with self.assertRaises(IOError):
- utils.open_relative_file('foo', 'bar')
-
- mock_open_call_count += 1 # one more call expected
- self.assertEqual(mock_open.call_count, mock_open_call_count)
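
The reworked map_topology_to_infrastructure tests deep-copy the shared context_cfg before mutating it and patch the attribute only for the duration of the assertion, so the fixture stays intact for the other tests. A generic sketch of that isolation pattern (Holder and the config layout are illustrative):

import copy

import mock


class Holder(object):
    def __init__(self, cfg):
        self.context_cfg = cfg


shared_cfg = {'nodes': {'vnf__1': {'interfaces': {}}}}
obj = Holder(shared_cfg)

# deepcopy so deleting keys does not leak into the shared fixture
cfg = copy.deepcopy(obj.context_cfg)
del cfg['nodes']['vnf__1']

with mock.patch.object(obj, 'context_cfg', cfg):
    assert 'vnf__1' not in obj.context_cfg['nodes']

# outside the patch the original fixture is untouched
assert 'vnf__1' in obj.context_cfg['nodes']
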
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index b4907addc..e71d0ff0f 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -20,6 +20,7 @@ import unittest
import yardstick
from yardstick import ssh
+import yardstick.error
from yardstick.common import utils
from yardstick.common import constants
@@ -126,6 +127,63 @@ class CommonUtilTestCase(unittest.TestCase):
("=".join(item) for item in sorted(flattened_data.items())))
self.assertEqual(result, line)
+ def test_get_key_with_default_negative(self):
+ with self.assertRaises(KeyError):
+ utils.get_key_with_default({}, 'key1')
+
+ @mock.patch('yardstick.common.utils.open', create=True)
+    def test_find_relative_file_io_error(self, mock_open):
+ mock_open.side_effect = IOError
+
+ with self.assertRaises(IOError):
+ utils.find_relative_file('my/path', 'task/path')
+
+ self.assertEqual(mock_open.call_count, 2)
+
+ @mock.patch('yardstick.common.utils.open', create=True)
+ def test_open_relative_path(self, mock_open):
+ mock_open_result = mock_open()
+ mock_open_call_count = 1 # initial call to get result
+
+ self.assertEqual(utils.open_relative_file('foo', 'bar'), mock_open_result)
+
+ mock_open_call_count += 1 # one more call expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertNotIn('bar', mock_open.call_args_list[-1][0][0])
+
+ def open_effect(*args, **kwargs):
+ if kwargs.get('name', args[0]) == os.path.join('bar', 'foo'):
+ return mock_open_result
+ raise IOError(errno.ENOENT, 'not found')
+
+ mock_open.side_effect = open_effect
+ self.assertEqual(utils.open_relative_file('foo', 'bar'), mock_open_result)
+
+ mock_open_call_count += 2 # two more calls expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertIn('bar', mock_open.call_args_list[-1][0][0])
+
+ # test an IOError of type ENOENT
+ mock_open.side_effect = IOError(errno.ENOENT, 'not found')
+ with self.assertRaises(IOError):
+ # the second call still raises
+ utils.open_relative_file('foo', 'bar')
+
+ mock_open_call_count += 2 # two more calls expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertIn('bar', mock_open.call_args_list[-1][0][0])
+
+ # test an IOError other than ENOENT
+ mock_open.side_effect = IOError(errno.EBUSY, 'busy')
+ with self.assertRaises(IOError):
+ utils.open_relative_file('foo', 'bar')
+
+ mock_open_call_count += 1 # one more call expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+
class TestMacAddressToHex(unittest.TestCase):
@@ -931,9 +989,9 @@ class TestUtils(unittest.TestCase):
def test_error_class(self):
with self.assertRaises(RuntimeError):
- utils.ErrorClass()
+ yardstick.error.ErrorClass()
- error_instance = utils.ErrorClass(test='')
+ error_instance = yardstick.error.ErrorClass(test='')
with self.assertRaises(AttributeError):
error_instance.get_name()
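
test_open_relative_path exercises a two-step open: the bare path is tried first, and only a "not found" error triggers a retry relative to the task path, while any other IOError propagates immediately. A hedged sketch of that fallback, not the exact yardstick.common.utils.open_relative_file body:

import errno
import os


def open_relative_file(path, task_path):
    try:
        # try the path exactly as given
        return open(path)
    except IOError as err:
        # only fall back for "not found"; re-raise anything else (e.g. EBUSY)
        if err.errno != errno.ENOENT:
            raise
        return open(os.path.join(task_path, path))
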
diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py
index e0a353812..9ab8740e3 100644
--- a/yardstick/tests/unit/orchestrator/test_heat.py
+++ b/yardstick/tests/unit/orchestrator/test_heat.py
@@ -9,6 +9,7 @@
import tempfile
+import munch
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
@@ -40,12 +41,16 @@ class HeatStackTestCase(unittest.TestCase):
self._mock_stack_delete = mock.patch.object(self.heatstack._cloud,
'delete_stack')
self.mock_stack_delete = self._mock_stack_delete.start()
+ self._mock_stack_get = mock.patch.object(self.heatstack._cloud,
+ 'get_stack')
+ self.mock_stack_get = self._mock_stack_get.start()
self.addCleanup(self._cleanup)
def _cleanup(self):
self._mock_stack_create.stop()
self._mock_stack_delete.stop()
+ self._mock_stack_get.stop()
heat._DEPLOYED_STACKS = {}
def test_create(self):
@@ -101,6 +106,88 @@ class HeatStackTestCase(unittest.TestCase):
self.assertFalse(heat._DEPLOYED_STACKS)
self.mock_stack_delete.assert_called_once_with(id, wait=True)
+ def test_get(self):
+ # make sure shade/get_stack is called with the appropriate vars
+ self.mock_stack_get.return_value = munch.Munch(
+ id="my-existing-stack-id",
+ outputs=[
+ {
+ u'output_value': u'b734d06a-dec7-...',
+ u'output_key': u'ares.demo-test-port-network_id',
+ u'description': u''
+ },
+ {u'output_value': u'b08da78c-2218-...',
+ u'output_key': u'ares.demo-test-port-subnet_id',
+ u'description': u''
+ },
+ {u'output_value': u'10.0.1.0/24',
+ u'output_key': u'demo-test-subnet-cidr',
+ u'description': u''
+ },
+ {u'output_value': u'b08da78c-2218-...',
+ u'output_key': u'demo-test-subnet',
+ u'description': u''
+ },
+ {u'output_value': u'b1a03624-aefc-...',
+ u'output_key': u'ares.demo',
+ u'description': u''
+ },
+ {u'output_value': u'266a8088-c630-...',
+ u'output_key': u'demo-secgroup',
+ u'description': u''
+ },
+ {u'output_value': u'10.0.1.5',
+ u'output_key': u'ares.demo-test-port',
+ u'description': u''
+ },
+ {u'output_value': u'10.0.1.1',
+ u'output_key': u'demo-test-subnet-gateway_ip',
+ u'description': u''
+ },
+ {u'output_value': u'',
+ u'output_key': u'ares.demo-test-port-device_id',
+ u'description': u''
+ },
+ {u'output_value': u'172.24.4.7',
+ u'output_key': u'ares.demo-fip',
+ u'description': u''
+ },
+ {u'output_value': u'fa:16:3e:6c:c3:0f',
+ u'output_key': u'ares.demo-test-port-mac_address',
+ u'description': u''}
+ ]
+ )
+ expected_outputs = {
+ 'ares.demo-test-port-network_id': 'b734d06a-dec7-...',
+ 'ares.demo-test-port-subnet_id': 'b08da78c-2218-...',
+ 'demo-test-subnet-cidr': '10.0.1.0/24',
+ 'demo-test-subnet': 'b08da78c-2218-...',
+ 'ares.demo': 'b1a03624-aefc-...',
+ 'demo-secgroup': '266a8088-c630-...',
+ 'ares.demo-test-port': '10.0.1.5',
+ 'demo-test-subnet-gateway_ip': '10.0.1.1',
+ 'ares.demo-test-port-device_id': '',
+ 'ares.demo-fip': '172.24.4.7',
+ 'ares.demo-test-port-mac_address': 'fa:16:3e:6c:c3:0f',
+ }
+
+ stack_id = "my-existing-stack-id"
+ self.heatstack.name = "my-existing-stack"
+ self.heatstack.get()
+
+ self.mock_stack_get.assert_called_once_with(self.heatstack.name)
+ self.assertEqual(expected_outputs, self.heatstack.outputs)
+ self.assertEqual(1, len(heat._DEPLOYED_STACKS))
+ self.assertEqual(self.heatstack._stack,
+ heat._DEPLOYED_STACKS[stack_id])
+
+ def test_get_invalid_name(self):
+        # No stack matching this name exists
+ self.mock_stack_get.return_value = []
+ self.heatstack.name = 'not-a-stack'
+ self.heatstack.get()
+ self.assertEqual(0, len(heat._DEPLOYED_STACKS))
+
class HeatTemplateTestCase(unittest.TestCase):
@@ -160,7 +247,8 @@ class HeatTemplateTestCase(unittest.TestCase):
def test__add_resources_to_template_raw(self):
test_context = node.NodeContext()
- test_context.name = 'foo'
+ self.addCleanup(test_context._delete_context)
+ test_context._name = 'foo'
test_context.template_file = '/tmp/some-heat-file'
test_context.heat_parameters = {'image': 'cirros'}
test_context.key_filename = "/tmp/1234"
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index dbaae8c37..615783f3e 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -21,12 +21,13 @@ import os
import socket
import unittest
from io import StringIO
+from itertools import count
import mock
from oslo_utils import encodeutils
from yardstick import ssh
-from yardstick.ssh import SSHError
+from yardstick.ssh import SSHError, SSHTimeout
from yardstick.ssh import SSH
from yardstick.ssh import AutoConnectSSH
@@ -508,13 +509,45 @@ class SSHRunTestCase(unittest.TestCase):
class TestAutoConnectSSH(unittest.TestCase):
- def test__connect_with_wait(self):
- auto_connect_ssh = AutoConnectSSH('user1', 'host1', wait=True)
- auto_connect_ssh._get_client = mock.Mock()
- auto_connect_ssh.wait = mock_wait = mock.Mock()
+ def test__connect_loop(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1', wait=0)
+ auto_connect_ssh._get_client = mock__get_client = mock.Mock()
auto_connect_ssh._connect()
- self.assertEqual(mock_wait.call_count, 1)
+ self.assertEqual(mock__get_client.call_count, 1)
+
+ def test___init___negative(self):
+ with self.assertRaises(TypeError):
+ AutoConnectSSH('user1', 'host1', wait=['wait'])
+
+ with self.assertRaises(ValueError):
+ AutoConnectSSH('user1', 'host1', wait='wait')
+
+ @mock.patch('yardstick.ssh.time')
+ def test__connect_loop_ssh_error(self, mock_time):
+ mock_time.time.side_effect = count()
+
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1', wait=10)
+ auto_connect_ssh._get_client = mock__get_client = mock.Mock()
+ mock__get_client.side_effect = SSHError
+
+ with self.assertRaises(SSHTimeout):
+ auto_connect_ssh._connect()
+
+ self.assertEqual(mock_time.time.call_count, 12)
+
+ def test_get_file_obj(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1', wait=10)
+ auto_connect_ssh._get_client = mock__get_client = mock.Mock()
+ mock_client = mock__get_client()
+ mock_open_sftp = mock_client.open_sftp()
+ mock_sftp = mock.Mock()
+ mock_open_sftp.__enter__ = mock.Mock(return_value=mock_sftp)
+ mock_open_sftp.__exit__ = mock.Mock()
+
+ auto_connect_ssh.get_file_obj('remote/path', mock.Mock())
+
+ self.assertEqual(mock_sftp.getfo.call_count, 1)
def test__make_dict(self):
auto_connect_ssh = AutoConnectSSH('user1', 'host1')
@@ -527,7 +560,7 @@ class TestAutoConnectSSH(unittest.TestCase):
'key_filename': None,
'password': None,
'name': None,
- 'wait': True,
+ 'wait': AutoConnectSSH.DEFAULT_WAIT_TIMEOUT,
}
result = auto_connect_ssh._make_dict()
self.assertDictEqual(result, expected)
@@ -537,6 +570,13 @@ class TestAutoConnectSSH(unittest.TestCase):
self.assertEqual(auto_connect_ssh.get_class(), AutoConnectSSH)
+ def test_drop_connection(self):
+ auto_connect_ssh = AutoConnectSSH('user1', 'host1')
+ self.assertFalse(auto_connect_ssh._client)
+ auto_connect_ssh._client = True
+ auto_connect_ssh.drop_connection()
+ self.assertFalse(auto_connect_ssh._client)
+
@mock.patch('yardstick.ssh.SCPClient')
def test_put(self, mock_scp_client_type):
auto_connect_ssh = AutoConnectSSH('user1', 'host1')
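
test__connect_loop_ssh_error drives time.time() with itertools.count() so every retry appears to cost one second, and expects SSHTimeout once the wait budget is spent. A rough sketch of such a retry loop; the structure and names are assumptions, not the actual AutoConnectSSH._connect implementation:

import time


class SSHError(Exception):
    pass


class SSHTimeout(SSHError):
    pass


def connect_with_retry(get_client, wait):
    # keep trying get_client() until it succeeds or `wait` seconds elapse
    end_time = time.time() + wait
    while True:
        try:
            return get_client()
        except SSHError:
            if time.time() > end_time:
                raise SSHTimeout('timed out waiting for SSH')
            time.sleep(1)
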