From 6ea6c76805a35d5ff8f31d9336d20925dd86e26f Mon Sep 17 00:00:00 2001
From: zhongjun
Date: Thu, 19 Oct 2017 15:57:22 +0800
Subject: Add the unit test file test_tempest.py

1. Add the unit test file test_tempest.py and the related test data file
   daisyrc_admin and test stub file daisyclient_stub.py for tempest.py.
2. Modify tempest.py to adapt it for unit testing.

Change-Id: Ice92ed5a01a049a1a828f7f9e13979bda4b026b7
Signed-off-by: zhongjun
---
 deploy/tempest.py                   |  50 +++---
 tests/data/daisy_conf/daisyrc_admin |   1 +
 tests/unit/daisyclient_stub.py      | 164 ++++++++++++++++++
 tests/unit/test_tempest.py          | 324 ++++++++++++++++++++++++++++++++++++
 4 files changed, 513 insertions(+), 26 deletions(-)
 create mode 100644 tests/data/daisy_conf/daisyrc_admin
 create mode 100644 tests/unit/daisyclient_stub.py
 create mode 100644 tests/unit/test_tempest.py

diff --git a/deploy/tempest.py b/deploy/tempest.py
index 89411f3f..dc0847fa 100644
--- a/deploy/tempest.py
+++ b/deploy/tempest.py
@@ -72,10 +72,6 @@ def get_endpoint(file_path):
     return daisy_endpoint
 
 
-daisy_endpoint = get_endpoint(daisyrc_path)
-client = daisy_client.Client(version=daisy_version, endpoint=daisy_endpoint)
-
-
 def prepare_install():
     global deployment_interface
     try:
@@ -95,7 +91,7 @@ def prepare_install():
             update_network(cluster_id, network_map)
             print("build pxe server to install os...")
             deployment_interface = get_configure_from_daisyconf("PXE", "eth_name")
-            build_pxe_for_discover(cluster_id)
+            build_pxe_for_discover(cluster_id, client, deployment_interface)
         elif conf['host'] and conf['host'] == 'yes':
             isbare = False if 'isbare' in conf and conf['isbare'] == 0 else True
             print("discover host...")
@@ -103,10 +99,10 @@ def prepare_install():
             time.sleep(10)
             print("update hosts interface...")
             hosts_info = get_hosts()
-            cluster_info = get_cluster()
+            cluster_info = get_cluster(client)
             cluster_id = cluster_info.id
             add_hosts_interface(cluster_id, hosts_info, mac_address_map,
-                                host_interface_map, vip, isbare)
+                                host_interface_map, vip, isbare, client)
             if len(hosts_name) == 1:
                 protocol_type = 'LVM'
                 service_name = 'cinder'
@@ -117,24 +113,24 @@ def prepare_install():
                 print('hosts_num is %s' % len(hosts_name))
                 protocol_type = None
             enable_cinder_backend(cluster_id, service_name,
-                                  ceph_disk_name, protocol_type)
+                                  ceph_disk_name, protocol_type, client)
 
             if 'scenario' in conf:
                 if 'odl_l3' in conf['scenario'] or \
                         'odl' in conf['scenario']:
-                    enable_opendaylight(cluster_id, 'odl_l3')
+                    enable_opendaylight(cluster_id, 'odl_l3', client)
                 elif 'odl_l2' in conf['scenario']:
-                    enable_opendaylight(cluster_id, 'odl_l2')
+                    enable_opendaylight(cluster_id, 'odl_l2', client)
 
             if not isbare:
-                install_os_for_vm_step1(cluster_id)
+                install_os_for_vm_step1(cluster_id, client)
             else:
                 print("daisy baremetal deploy start")
-                install_os_for_bm_oneshot(cluster_id)
+                install_os_for_bm_oneshot(cluster_id, client)
         elif conf['install'] and conf['install'] == 'yes':
-            cluster_info = get_cluster()
+            cluster_info = get_cluster(client)
             cluster_id = cluster_info.id
-            install_os_for_vm_step2(cluster_id)
+            install_os_for_vm_step2(cluster_id, client)
     except Exception:
         print("Deploy failed!!!.%s."
               % traceback.format_exc())
@@ -143,24 +139,24 @@ def prepare_install():
     print_bar("Everything is done!")
 
 
-def build_pxe_for_discover(cluster_id):
+def build_pxe_for_discover(cluster_id, client, deployment_interface):
     cluster_meta = {'cluster_id': cluster_id,
                     'deployment_interface': deployment_interface}
     client.install.install(**cluster_meta)
 
 
-def install_os_for_vm_step1(cluster_id):
+def install_os_for_vm_step1(cluster_id, client):
     cluster_meta = {'cluster_id': cluster_id,
                     'pxe_only': "true"}
     client.install.install(**cluster_meta)
 
 
-def install_os_for_bm_oneshot(cluster_id):
+def install_os_for_bm_oneshot(cluster_id, client):
     cluster_meta = {'cluster_id': cluster_id}
     client.install.install(**cluster_meta)
 
 
-def install_os_for_vm_step2(cluster_id):
+def install_os_for_vm_step2(cluster_id, client):
     cluster_meta = {'cluster_id': cluster_id,
                     'skip_pxe_ipmi': "true"}
     client.install.install(**cluster_meta)
@@ -176,7 +172,7 @@ def discover_host(hosts_name):
         time.sleep(10)
 
 
-def update_network(cluster_id, network_map):
+def update_network(cluster_id, network_map, client):
     network_meta = {'filters': {'cluster_id': cluster_id}}
     network_info_gernerator = client.networks.list(**network_meta)
     for net in network_info_gernerator:
@@ -187,7 +183,7 @@
         client.networks.update(network_id, **network_meta)
 
 
-def get_hosts():
+def get_hosts(client):
     hosts_list_generator = client.hosts.list()
     hosts_info = []
     for host in hosts_list_generator:
@@ -196,7 +192,7 @@ def get_hosts():
     return hosts_info
 
 
-def get_cluster():
+def get_cluster(client):
     cluster_list_generator = client.clusters.list()
     for cluster in cluster_list_generator:
         cluster_info = client.clusters.get(cluster.id)
@@ -205,7 +201,7 @@
 
 def add_hosts_interface(cluster_id, hosts_info, mac_address_map,
                         host_interface_map,
-                        vip, isbare):
+                        vip, isbare, client):
     for host in hosts_info:
         dha_host_name = None
         host = host.to_dict()
@@ -234,10 +230,10 @@
             print("do not have os iso file in /var/lib/daisy/kolla/.")
         client.hosts.update(host['id'], **host)
         print("update role...")
-        add_host_role(cluster_id, host['id'], dha_host_name, vip)
+        add_host_role(cluster_id, host['id'], dha_host_name, vip, client)
 
 
-def add_host_role(cluster_id, host_id, dha_host_name, vip):
+def add_host_role(cluster_id, host_id, dha_host_name, vip, client):
     role_meta = {'filters': {'cluster_id': cluster_id}}
     role_list_generator = client.roles.list(**role_meta)
     role_list = [role for role in role_list_generator]
@@ -262,7 +258,7 @@
         client.roles.update(computer_role_id, **role_computer_update_meta)
 
 
-def enable_cinder_backend(cluster_id, service_name, disk_name, protocol_type):
+def enable_cinder_backend(cluster_id, service_name, disk_name, protocol_type, client):
     role_meta = {'filters': {'cluster_id': cluster_id}}
     role_list_generator = client.roles.list(**role_meta)
     lb_role_id = [role.id for role in role_list_generator if
@@ -278,7 +274,7 @@
         print e
 
 
-def enable_opendaylight(cluster_id, layer):
+def enable_opendaylight(cluster_id, layer, client):
     role_meta = {'filters': {'cluster_id': cluster_id}}
     role_list_generator = client.roles.list(**role_meta)
     lb_role_id = [role.id for role in role_list_generator if
@@ -303,4 +299,6 @@
 
 
 if __name__ == "__main__":
+    daisy_endpoint = get_endpoint(daisyrc_path)
+    client = daisy_client.Client(version=daisy_version, endpoint=daisy_endpoint)
     prepare_install()
diff --git a/tests/data/daisy_conf/daisyrc_admin b/tests/data/daisy_conf/daisyrc_admin
new file mode 100644
index 00000000..7909a6a4
--- /dev/null
+++ b/tests/data/daisy_conf/daisyrc_admin
@@ -0,0 +1 @@
+export OS_ENDPOINT=http://10.20.11.2:19292
\ No newline at end of file
diff --git a/tests/unit/daisyclient_stub.py b/tests/unit/daisyclient_stub.py
new file mode 100644
index 00000000..59540daf
--- /dev/null
+++ b/tests/unit/daisyclient_stub.py
@@ -0,0 +1,164 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class StubTestInstall():
+    def __init__(self):
+        pass
+
+    def install(self, **cluster_meta):
+        self.cluster_meta = cluster_meta
+
+
+class StubTestHost():
+    def __init__(self, id, name, cluster_id, interfaces):
+        self.id = id
+        self.name = name
+        self.cluster_id = cluster_id
+        self.interfaces = interfaces
+        self.metadata = None
+
+    def to_dict(self):
+        return {'id': self.id, 'name': self.name, 'cluster_id': self.cluster_id,
+                'interfaces': self.interfaces}
+
+
+class StubTestHosts():
+    def __init__(self):
+        self.hosts = []
+
+    def add(self, host):
+        self.hosts.append(host)
+
+    def get(self, id):
+        for host in self.hosts:
+            if host.id == id:
+                return host
+        return None
+
+    def list(self):
+        return self.hosts
+
+    def update(self, host_id, **metadata):
+        for host in self.hosts:
+            if host.id == host_id:
+                host.metadata = metadata
+
+
+class StubTestCluster():
+    def __init__(self, id, name):
+        self.id = id
+        self.name = name
+
+
+class StubTestClusters():
+    def __init__(self):
+        self.clusters = []
+
+    def add(self, cluster):
+        self.clusters.append(cluster)
+
+    def get(self, id):
+        for cluster in self.clusters:
+            if cluster.id == id:
+                return cluster.name
+        return None
+
+    def list(self):
+        return self.clusters
+
+
+class StubTestNet():
+    def __init__(self, id, name, cluster_id, **metadata):
+        self.id = id
+        self.name = name
+        self.cluster_id = cluster_id
+        self.metadata = metadata
+
+
+class StubTestNetworks():
+    def __init__(self):
+        self.networks = []
+
+    def add(self, net):
+        self.networks.append(net)
+
+    def list(self, **filter):
+        networks = []
+        if filter:
+            filter_item = filter.get('filters')
+            for net in self.networks:
+                cluster_id_is_match = False
+                if filter_item.get('cluster_id'):
+                    if filter_item.get('cluster_id') == net.cluster_id:
+                        cluster_id_is_match = True
+                else:
+                    cluster_id_is_match = True
+                if cluster_id_is_match is True:
+                    networks.append(net)
+        return networks
+
+    def update(self, network_id, **network_meta):
+        for net in self.networks:
+            if net.id == network_id:
+                net.metadata = network_meta
+
+
+class StubTestRole():
+    def __init__(self, id, name, cluster_id):
+        self.id = id
+        self.name = name
+        self.cluster_id = cluster_id
+        self.metadata = None
+
+
+class StubTestRoles():
+    def __init__(self):
+        self.roles = []
+
+    def add(self, role):
+        self.roles.append(role)
+
+    def list(self, **filter):
+        roles = []
+        if filter:
+            filter_item = filter.get('filters')
+            for role in self.roles:
+                cluster_id_is_match = False
+                if filter_item.get('cluster_id'):
+                    if filter_item.get('cluster_id') == role.cluster_id:
+                        cluster_id_is_match = True
+                else:
+                    cluster_id_is_match = True
+                if cluster_id_is_match is True:
+                    roles.append(role)
+        return roles
+
+    def update(self, role_id, **meta):
+        for role in self.roles:
+            if role.id == role_id:
+                role.metadata = meta
+
+
+class StubTestDisks():
+    def __init__(self):
+        self.disks = []
+
+    def service_disk_add(self, **metadata):
+        self.disks.append(metadata)
+
+
+class StubTestClient():
+    def __init__(self):
+        self.install = StubTestInstall()
+        self.hosts = StubTestHosts()
+        self.networks = StubTestNetworks()
+        self.clusters = StubTestClusters()
+        self.roles = StubTestRoles()
+        self.disk_array = StubTestDisks()
diff --git a/tests/unit/test_tempest.py b/tests/unit/test_tempest.py
new file mode 100644
index 00000000..34ab4073
--- /dev/null
+++ b/tests/unit/test_tempest.py
@@ -0,0 +1,324 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import sys
+import pytest
+from oslo_config import cfg
+from tests.unit.daisyclient_stub import (
+    StubTestHost,
+    StubTestCluster,
+    StubTestNet,
+    StubTestRole,
+    StubTestClient
+)
+
+import mock
+sys.modules['daisyclient'] = mock.Mock()
+sys.modules['daisyclient.v1'] = mock.Mock()
+import deploy.tempest  # noqa: ignore=E402
+from deploy.tempest import (
+    parse,
+    get_configure_from_daisyconf,
+    get_endpoint,
+    build_pxe_for_discover,
+    install_os_for_vm_step1,
+    install_os_for_bm_oneshot,
+    install_os_for_vm_step2,
+    discover_host,
+    update_network,
+    get_hosts,
+    get_cluster,
+    add_hosts_interface,
+    add_host_role,
+    enable_cinder_backend,
+    enable_opendaylight
+)  # noqa: ignore=E402
+
+
+def get_val_index_in_list(key, list):
+    return list.index(key) + 1
+
+
+@pytest.mark.parametrize('argv', [
+    (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+      '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--cluster', 'yes', '--host',
+      'yes', '--install', 'yes', '--isbare', '1', '--scenario', 'os-nosdn-nofeature-noha']),
+    (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+      '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--cluster', 'yes']),
+    (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+      '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--install', 'yes']),
+    (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+      '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--cluster', 'yes', '--host',
+      'yes', '--install', 'yes', '--isbare', '1', '--scenario', 'os-nosdn-nofeature-noha'])])
+def test_parser(argv):
+    options_keys = ['dha', 'network', 'cluster', 'host', 'install', 'isbare', 'scenario']
+
+    conf = cfg.ConfigOpts()
+    parse(conf, argv)
+    for option in options_keys:
+        if conf[option]:
+            if option == 'isbare':
+                argv[argv.index('--' + option) + 1] = int(argv[argv.index('--' + option) + 1])
+            assert conf[option] == argv[argv.index('--' + option) + 1]
+
+
+@pytest.fixture(scope="module")
+def conf_file_dir(data_root):
+    return os.path.join(data_root, 'daisy_conf')
+
+
+@pytest.mark.parametrize('section, key, exp', [
+    ("PXE", "eth_name", 'ens3'),
+    ("PXE", "build_pxe", 'no')])
+def test_get_configure_from_daisyconf(section, key, exp, conf_file_dir):
+    res_old_val = deploy.tempest.daisy_conf_path
+    deploy.tempest.daisy_conf_path = os.path.join(conf_file_dir, 'daisy.conf')
+    ret = get_configure_from_daisyconf(section, key)
+    deploy.tempest.daisy_conf_path = res_old_val
+    assert ret == exp
+
+
+def test_get_endpoint(conf_file_dir):
+    daisyrc_file_path = os.path.join(conf_file_dir, 'daisyrc_admin')
+    exp = 'http://10.20.11.2:19292'
+    ret = get_endpoint(daisyrc_file_path)
+    assert ret == exp
+
+
+def test_build_pxe_for_discover():
+    client = StubTestClient()
+    cluster_id = 0x123456
+    deployment_interface = 'eth3'
+    build_pxe_for_discover(cluster_id, client, deployment_interface)
+
+
+def test_install_os_for_vm_step1():
+    client = StubTestClient()
+    cluster_id = 0x123456
+    install_os_for_vm_step1(cluster_id, client)
+
+
+def test_install_os_for_bm_oneshot():
+    client = StubTestClient()
+    cluster_id = 0x123456
+    install_os_for_bm_oneshot(cluster_id, client)
+
+
+def test_install_os_for_vm_step2():
+    client = StubTestClient()
+    cluster_id = 0x123456
+    install_os_for_vm_step2(cluster_id, client)
+
+
+@mock.patch('time.sleep')
+@mock.patch('deploy.tempest.get_hosts')
+def test_discover_host(mock_get_hosts, mock_sleep):
+    hosts_name = ['computer01', 'computer02', 'controller01', 'controller02', 'controller03']
+    mock_get_hosts.return_value = hosts_name
+    discover_host(hosts_name)
+    mock_sleep.assert_not_called()
+
+
+def test_update_network():
+    client = StubTestClient()
+    cluster_id = 1
+    network_map = {'MANAGEMENT': {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+                                  'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]},
+                   'STORAGE': {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+                               'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]}}
+    metadata_net1 = {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+                     'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]}
+    net1 = StubTestNet(0x1234, 'MANAGEMENT', 1, **metadata_net1)
+    client.networks.add(net1)
+    metadata_net2 = {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+                     'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]}
+    net2 = StubTestNet(0x2345, 'STORAGE', 1, **metadata_net2)
+    client.networks.add(net2)
+    exp_nets_data = [metadata_net1, metadata_net2]
+    update_network(cluster_id, network_map, client)
+    for i in range(len(exp_nets_data)):
+        assert client.networks.networks[i].metadata == exp_nets_data[i]
+
+
+def test_get_hosts():
+    client = StubTestClient()
+    host1 = StubTestHost(0x1234, 'test_host_1', 1,
+                         [{'name': 'ens8', 'mac': '11:11:11:11:11:11'}])
+    client.hosts.add(host1)
+    host2 = StubTestHost(0x2345, 'test_host_2', 1,
+                         [{'name': 'ens3', 'mac': '22:22:22:22:22:22'}])
+    client.hosts.add(host2)
+    exp = [host1, host2]
+    ret = get_hosts(client)
+    assert ret == exp
+
+
+def test_get_cluster():
+    client = StubTestClient()
+    cluster1 = StubTestCluster(1, 'test_cluster_1')
+    client.clusters.add(cluster1)
+    cluster2 = StubTestCluster(2, 'test_cluster_2')
+    client.clusters.add(cluster2)
+    exp = 'test_cluster_2'
+    ret = get_cluster(client)
+    assert ret == exp
+
+
+@pytest.mark.parametrize('isbare', [
+    (False), (True)])
+def test_add_hosts_interface(isbare, tmpdir):
+    res_old_val = deploy.tempest.iso_path
+    deploy.tempest.iso_path = os.path.join(tmpdir.dirname, tmpdir.basename) + '/'
+    iso_file_path = os.path.join(deploy.tempest.iso_path, 'test_os.iso')
+    with open(iso_file_path, 'a') as f:
+        f.write('test_data')
+    client = StubTestClient()
+    cluster_id = 1
+    host_id1 = 0x1234
+    host_id2 = 0x2345
+    host_id3 = 0x3456
+    host1 = StubTestHost(host_id1, 'controller01', cluster_id, [{'name': 'ens8', 'mac': '11:11:11:11:11:11'}])
+    client.hosts.add(host1)
+    host2 = StubTestHost(host_id2, 'controller02', cluster_id, [{'name': 'ens3', 'mac': '22:22:22:22:22:22'}])
+    client.hosts.add(host2)
+    host3 = StubTestHost(host_id3, 'computer01', cluster_id, [{'name': 'ens9', 'mac': '33:33:33:33:33:33'}])
+    client.hosts.add(host3)
+    hosts_info = [host1, host2, host3]
+    role1 = StubTestRole(0xaaaa, 'CONTROLLER_LB', cluster_id)
+    client.roles.add(role1)
+    role2 = StubTestRole(0xbbbb, 'COMPUTER', cluster_id)
+    client.roles.add(role2)
+    mac_address_map = {
+        'controller01': ['11:11:11:11:11:11'], 'controller02': ['22:22:22:22:22:22'], 'controller03': [],
+        'computer01': ['33:33:33:33:33:33'], 'computer02': []}
+    host_interface_map = {
+        'ens8': [{'ip': '', 'name': 'EXTERNAL'}],
+        'ens3': [{'ip': '', 'name': 'MANAGEMENT'},
+                 {'ip': '', 'name': 'PUBLICAPI'},
+                 {'ip': '', 'name': 'STORAGE'},
+                 {'ip': '', 'name': 'physnet1'}],
+        'ens9': [{'ip': '', 'name': 'HEARTBEAT'}]}
+    vip = '10.20.11.11'
+    add_hosts_interface(1, hosts_info, mac_address_map,
+                        host_interface_map,
+                        vip, isbare, client)
+    deploy.tempest.iso_path = res_old_val
+    if isbare:
+        assert client.hosts.get(host_id1).metadata == {
+            'id': host_id1, 'name': 'controller01', 'cluster_id': cluster_id,
+            'cluster': cluster_id, 'os_version': iso_file_path,
+            'ipmi_user': 'zteroot', 'ipmi_passwd': 'superuser',
+            'interfaces': [{'name': 'ens8', 'mac': '11:11:11:11:11:11',
+                            'assigned_networks': [{'ip': '', 'name': 'EXTERNAL'}]}],
+        }
+        assert client.hosts.get(host_id2).metadata == {
+            'id': host_id2, 'name': 'controller02', 'cluster_id': cluster_id,
+            'cluster': cluster_id, 'os_version': iso_file_path,
+            'ipmi_user': 'zteroot', 'ipmi_passwd': 'superuser',
+            'interfaces': [{'name': 'ens3', 'mac': '22:22:22:22:22:22',
+                            'assigned_networks': [
+                                {'ip': '', 'name': 'MANAGEMENT'},
+                                {'ip': '', 'name': 'PUBLICAPI'},
+                                {'ip': '', 'name': 'STORAGE'},
+                                {'ip': '', 'name': 'physnet1'}]}],
+        }
+        assert client.hosts.get(host_id3).metadata == {
+            'id': host_id3, 'name': 'computer01', 'cluster_id': cluster_id,
+            'cluster': cluster_id, 'os_version': iso_file_path,
+            'ipmi_user': 'zteroot', 'ipmi_passwd': 'superuser',
+            'interfaces': [{'name': 'ens9', 'mac': '33:33:33:33:33:33',
+                            'assigned_networks': [{'ip': '', 'name': 'HEARTBEAT'}]}],
+        }
+    else:
+        assert client.hosts.get(host_id1).metadata == {
+            'id': host_id1, 'name': 'controller01', 'cluster_id': cluster_id,
+            'cluster': cluster_id, 'os_version': iso_file_path,
+            'interfaces': [{'name': 'ens8', 'mac': '11:11:11:11:11:11',
+                            'assigned_networks': [{'ip': '', 'name': 'EXTERNAL'}]}],
+        }
+        assert client.hosts.get(host_id2).metadata == {
+            'id': host_id2, 'name': 'controller02', 'cluster_id': cluster_id,
+            'cluster': cluster_id, 'os_version': iso_file_path,
+            'interfaces': [{'name': 'ens3', 'mac': '22:22:22:22:22:22',
+                            'assigned_networks': [
+                                {'ip': '', 'name': 'MANAGEMENT'},
+                                {'ip': '', 'name': 'PUBLICAPI'},
+                                {'ip': '', 'name': 'STORAGE'},
+                                {'ip': '', 'name': 'physnet1'}]}],
+        }
+        assert client.hosts.get(host_id3).metadata == {
+            'id': host_id3, 'name': 'computer01', 'cluster_id': cluster_id,
+            'cluster': cluster_id, 'os_version': iso_file_path,
+            'interfaces': [{'name': 'ens9', 'mac': '33:33:33:33:33:33',
+                            'assigned_networks': [{'ip': '', 'name': 'HEARTBEAT'}]}],
+        }
+    tmpdir.remove()
+
+
+@pytest.mark.parametrize('dha_host_name, cluster_id, host_id, vip, exp', [
+    ('controller01', 1, 0x1234, '10.20.11.11', {'nodes': [0x1234], 'cluster_id': 1, 'vip': '10.20.11.11'}),
+    ('computer01', 1, 0x2345, '10.20.11.11', {'nodes': [0x2345], 'cluster_id': 1}),
+    ('all_in_one', 1, 0x1234, '10.20.11.11',
+     [{'nodes': [0x1234], 'cluster_id': 1, 'vip': '10.20.11.11'},
+      {'nodes': [0x1234], 'cluster_id': 1}])])
+def test_add_host_role(dha_host_name, cluster_id, host_id, vip, exp):
+    client = StubTestClient()
+    role1 = StubTestRole(0x1234, 'CONTROLLER_LB', 1)
+    client.roles.add(role1)
+    role2 = StubTestRole(0x2345, 'COMPUTER', 1)
+    client.roles.add(role2)
+    add_host_role(cluster_id, host_id, dha_host_name, vip, client)
+    if dha_host_name == 'controller01':
+        assert client.roles.roles[0].metadata == exp
+    if dha_host_name == 'computer01':
+        assert client.roles.roles[1].metadata == exp
+    if dha_host_name == 'all_in_one':
+        assert client.roles.roles[0].metadata == exp[0]
+        assert client.roles.roles[1].metadata == exp[1]
+
+
+def test_enable_cinder_backend():
+    client = StubTestClient()
+    role1 = StubTestRole(0x1234, 'CONTROLLER_LB', 1)
+    client.roles.add(role1)
+    service_name = 'ceph'
+    disk_name = '/dev/sdb'
+    protocol_type = 'RAW'
+    exp_disk_meta = {'service': service_name,
+                     'disk_location': 'local',
+                     'partition': disk_name,
+                     'protocol_type': protocol_type,
+                     'role_id': 0x1234}
+    enable_cinder_backend(1, service_name, disk_name, protocol_type, client)
+    assert client.disk_array.disks[0] == exp_disk_meta
+
+
+@pytest.mark.parametrize('layer, exp', [
+    ('odl_l3', {
+        'neutron_backends_array': [{'zenic_ip': '',
+                                    'sdn_controller_type': 'opendaylight',
+                                    'zenic_port': '',
+                                    'zenic_user_password': '',
+                                    'neutron_agent_type': '',
+                                    'zenic_user_name': '',
+                                    'enable_l2_or_l3': 'l3'}]}),
+    ('odl_l2', {
+        'neutron_backends_array': [{'zenic_ip': '',
+                                    'sdn_controller_type': 'opendaylight',
+                                    'zenic_port': '',
+                                    'zenic_user_password': '',
+                                    'neutron_agent_type': '',
+                                    'zenic_user_name': '',
+                                    'enable_l2_or_l3': 'l2'}]})])
+def test_enable_opendaylight(layer, exp):
+    client = StubTestClient()
+    role1 = StubTestRole(0x1234, 'CONTROLLER_LB', 1)
+    client.roles.add(role1)
+    enable_opendaylight(1, layer, client)
+    assert client.roles.roles[0].metadata == exp
--
cgit 1.2.3-korg
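For reference, the refactoring in deploy/tempest.py boils down to dependency injection: each helper now receives the daisy client as an argument instead of reading a module-level global, which is what lets the stub client from tests/unit/daisyclient_stub.py stand in for the real daisyclient. Below is a minimal sketch (not part of the patch) that mirrors test_get_cluster() above; it assumes it is run from the repository root with the same mock/pytest tooling the tests use.

# Sketch only: exercise a refactored helper against the stub client,
# mirroring what tests/unit/test_tempest.py does.
import sys
import mock

# daisyclient is not required for unit testing, so stub the module out
# before deploy.tempest is imported (same trick as in test_tempest.py).
sys.modules['daisyclient'] = mock.Mock()
sys.modules['daisyclient.v1'] = mock.Mock()

from deploy.tempest import get_cluster  # noqa: E402
from tests.unit.daisyclient_stub import StubTestClient, StubTestCluster  # noqa: E402

client = StubTestClient()
client.clusters.add(StubTestCluster(1, 'test_cluster_1'))

# get_cluster() now takes the client explicitly; with the stub it returns
# the name stored by StubTestClusters.get().
print(get_cluster(client))  # -> 'test_cluster_1'

The new test module itself can be collected with pytest, for example: pytest tests/unit/test_tempest.py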