-rwxr-xr-x  code/install_interface_patch.sh        |   2
-rwxr-xr-x  code/makefile_patch.sh                 |   2
-rw-r--r--  deploy/daisy_server.py                 |  14
-rw-r--r--  deploy/deploy.py                       |  23
-rw-r--r--  deploy/environment.py                  |  49
-rw-r--r--  deploy/tempest.py                      |  50
-rw-r--r--  docs/release/installation/upgrade.rst  |   2
-rw-r--r--  tests/data/daisy_conf/daisyrc_admin    |   1
-rw-r--r--  tests/unit/daisyclient_stub.py         | 164
-rw-r--r--  tests/unit/test_daisy_server.py        |  24
-rw-r--r--  tests/unit/test_deploy.py              |  24
-rw-r--r--  tests/unit/test_environment.py         |  16
-rw-r--r--  tests/unit/test_tempest.py             | 324
13 files changed, 610 insertions(+), 85 deletions(-)
diff --git a/code/install_interface_patch.sh b/code/install_interface_patch.sh
index 6e239490..5177425b 100755
--- a/code/install_interface_patch.sh
+++ b/code/install_interface_patch.sh
@@ -10,7 +10,7 @@
##############################################################################
imagebranch="ocata"
imageversion="170811155446"
-imageserver="http://120.24.17.215"
+imageserver="http://artifacts.opnfv.org/daisy/upstream"
imagedir="/var/lib/daisy/versionfile/kolla"
imagename="kolla-image-$imagebranch-$imageversion.tgz"
mkdir -p $imagedir
diff --git a/code/makefile_patch.sh b/code/makefile_patch.sh
index 1893d946..5e734910 100755
--- a/code/makefile_patch.sh
+++ b/code/makefile_patch.sh
@@ -14,7 +14,7 @@ CACHE_PATH=/home/cache
imagebranch="ocata"
imageversion="170811155446"
-imageserver="http://120.24.17.215"
+imageserver="http://artifacts.opnfv.org/daisy/upstream"
imagename="kolla-image-$imagebranch-$imageversion.tgz"
isoname="CentOS-7-x86_64-Minimal-1611.iso"
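Both patch scripts make the same change: the hard-coded mirror http://120.24.17.215 is replaced by the public OPNFV artifact server. The image URL is assembled from the branch and version stamps; a Python sketch of the resulting URL (the scripts themselves are shell, and the actual fetch command lies outside the hunks shown, so the join is an assumption):

    # Hypothetical reconstruction of the download URL; the fetch command itself
    # is not part of the hunks above.
    imagebranch = "ocata"
    imageversion = "170811155446"
    imageserver = "http://artifacts.opnfv.org/daisy/upstream"
    imagename = "kolla-image-%s-%s.tgz" % (imagebranch, imageversion)
    print("%s/%s" % (imageserver, imagename))
    # -> http://artifacts.opnfv.org/daisy/upstream/kolla-image-ocata-170811155446.tgz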
diff --git a/deploy/daisy_server.py b/deploy/daisy_server.py
index a899ea2b..fccfbe8c 100644
--- a/deploy/daisy_server.py
+++ b/deploy/daisy_server.py
@@ -207,7 +207,11 @@ class DaisyServer(object):
status = self.ssh_run('%s install' % path_join(self.remote_dir, 'opnfv.bin'))
log_bar('Daisy installation completed ! status = %s' % status)
- def prepare_configurations(self):
+ def prepare_configurations(self, deploy_file, net_file):
+ LI('Copy cluster configuration files to Daisy Server')
+ self.scp_put(deploy_file, path_join(self.remote_dir, self.deploy_file_name))
+ self.scp_put(net_file, path_join(self.remote_dir, self.net_file_name))
+
if self.adapter != 'libvirt':
return
LI('Prepare some configuration files')
@@ -218,13 +222,7 @@ class DaisyServer(object):
is_bare=1 if self.adapter == 'ipmi' else 0)
self.ssh_run(cmd)
- def prepare_cluster(self, deploy_file, net_file):
- LI('Copy cluster configuration files to Daisy Server')
- self.scp_put(deploy_file, path_join(self.remote_dir, self.deploy_file_name))
- self.scp_put(net_file, path_join(self.remote_dir, self.net_file_name))
-
- self.prepare_configurations()
-
+ def prepare_cluster(self):
LI('Prepare cluster and PXE')
cmd = "python {script} --dha {deploy_file} --network {net_file} --cluster \'yes\'".format(
script=path_join(self.remote_dir, 'deploy/tempest.py'),
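The daisy_server.py change splits responsibilities: prepare_configurations() now also uploads the deploy and network files, while prepare_cluster() only triggers cluster/PXE preparation. This lets a caller that reuses a running Daisy server skip the upload step. A minimal sketch of the new call order, assuming a connected DaisyServer instance named server:

    # Sketch of the new call sequence; `server` is a connected DaisyServer.
    server.prepare_configurations(deploy_file, net_file)  # scp both files, then libvirt-only prep
    server.prepare_cluster()  # runs deploy/tempest.py --cluster yes on the server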
diff --git a/deploy/deploy.py b/deploy/deploy.py
index 245776fb..192b4ee4 100644
--- a/deploy/deploy.py
+++ b/deploy/deploy.py
@@ -181,16 +181,24 @@ class DaisyDeployment(object):
return final_deploy_file, final_deploy_file_name
def run(self):
- self.daisy_env.delete_old_environment()
+ self.daisy_env.delete_old_environment(skip_daisy=self.skip_daisy)
if self.cleanup_only:
return
- self.daisy_env.create_daisy_server()
+
+ if self.skip_daisy:
+ self.daisy_env.connect_daisy_server(self.remote_dir, self.bin_file,
+ self.deploy_file_name, self.net_file_name)
+ else:
+ self.daisy_env.create_daisy_server()
+ self.daisy_env.connect_daisy_server(self.remote_dir, self.bin_file,
+ self.deploy_file_name, self.net_file_name)
+ self.daisy_env.install_daisy()
+
if self.daisy_only:
log_bar('Create Daisy Server successfully !')
return
- self.daisy_env.install_daisy(self.remote_dir, self.bin_file,
- self.deploy_file_name, self.net_file_name)
- self.daisy_env.deploy(self.deploy_file, self.net_file)
+
+ self.daisy_env.deploy(self.deploy_file, self.net_file, skip_preparation=self.skip_daisy)
log_bar('Daisy deploy successfully !')
@@ -214,6 +222,10 @@ def config_arg_parser():
default=path_join(WORKSPACE, 'opnfv.bin'),
help='OPNFV Daisy BIN File')
+ parser.add_argument('-S', dest='skip_daisy', action='store_true',
+ default=False,
+ help='DO NOT install Daisy Server again')
+
parser.add_argument('-do', dest='daisy_only', action='store_true',
default=False,
help='Install Daisy Server only')
@@ -273,6 +285,7 @@ def parse_arguments():
'src_deploy_file': deploy_file,
'net_file': net_file,
'bin_file': args.bin_file,
+ 'skip_daisy': args.skip_daisy,
'daisy_only': args.daisy_only,
'cleanup_only': args.cleanup_only,
'remote_dir': args.remote_dir,
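The new -S flag makes the Daisy server reusable between runs. With the names from the diff, run() now reduces to this control flow:

    # Condensed sketch of DaisyDeployment.run() after this change.
    env.delete_old_environment(skip_daisy=skip_daisy)
    if not cleanup_only:
        if skip_daisy:
            env.connect_daisy_server(remote_dir, bin_file,
                                     deploy_file_name, net_file_name)
        else:
            env.create_daisy_server()
            env.connect_daisy_server(remote_dir, bin_file,
                                     deploy_file_name, net_file_name)
            env.install_daisy()
        if not daisy_only:
            env.deploy(deploy_file, net_file, skip_preparation=skip_daisy)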
diff --git a/deploy/environment.py b/deploy/environment.py
index 24c1b4f7..2dd61d11 100644
--- a/deploy/environment.py
+++ b/deploy/environment.py
@@ -42,9 +42,11 @@ from utils import (
CREATE_QCOW2_PATH = path_join(WORKSPACE, 'tools')
-VMDEPLOY_DAISY_SERVER_NET = path_join(WORKSPACE, 'templates/virtual_environment/networks/daisy.xml')
-VMDEPLOY_TARGET_NODE_NET = path_join(WORKSPACE, 'templates/virtual_environment/networks/external.xml')
-VMDEPLOY_TARGET_KEEPALIVED_NET = path_join(WORKSPACE, 'templates/virtual_environment/networks/keepalived.xml')
+VIRT_NET_TEMPLATE_PATH = path_join(WORKSPACE, 'templates/virtual_environment/networks')
+VMDEPLOY_DAISY_SERVER_NET = path_join(VIRT_NET_TEMPLATE_PATH, 'daisy.xml')
+VMDEPLOY_TARGET_NODE_NET = path_join(VIRT_NET_TEMPLATE_PATH, 'external.xml')
+VMDEPLOY_TARGET_KEEPALIVED_NET = path_join(VIRT_NET_TEMPLATE_PATH, 'keepalived.xml')
+
VMDEPLOY_DAISY_SERVER_VM = path_join(WORKSPACE, 'templates/virtual_environment/vms/daisy.xml')
BMDEPLOY_DAISY_SERVER_VM = path_join(WORKSPACE, 'templates/physical_environment/vms/daisy.xml')
@@ -52,7 +54,6 @@ BMDEPLOY_DAISY_SERVER_VM = path_join(WORKSPACE, 'templates/physical_environment/
ALL_IN_ONE_TEMPLATE = path_join(WORKSPACE, 'templates/virtual_environment/vms/all_in_one.xml')
CONTROLLER_TEMPLATE = path_join(WORKSPACE, 'templates/virtual_environment/vms/controller.xml')
COMPUTE_TEMPLATE = path_join(WORKSPACE, 'templates/virtual_environment/vms/computer.xml')
-VIRT_NET_TEMPLATE_PATH = path_join(WORKSPACE, 'templates/virtual_environment/networks')
class DaisyEnvironment(object):
@@ -109,7 +110,7 @@ class DaisyEnvironmentBase(object):
shutil.move(image, self.daisy_server_info['image'])
LI('Daisy Server image is created %s' % self.daisy_server_info['image'])
- def install_daisy(self, remote_dir, bin_file, deploy_file_name, net_file_name):
+ def connect_daisy_server(self, remote_dir, bin_file, deploy_file_name, net_file_name):
self.server = DaisyServer(self.daisy_server_info['name'],
self.daisy_server_info['address'],
self.daisy_server_info['password'],
@@ -120,14 +121,19 @@ class DaisyEnvironmentBase(object):
deploy_file_name,
net_file_name)
self.server.connect()
+
+ def install_daisy(self):
self.server.install_daisy()
class BareMetalEnvironment(DaisyEnvironmentBase):
- def delete_old_environment(self):
- LW('Begin to delete old environment !')
- self.delete_daisy_server()
- LW('Old environment cleanup finished !')
+ def delete_old_environment(self, skip_daisy=False):
+ if skip_daisy:
+ LI('Skip deletion of old daisy server VM')
+ else:
+ LW('Begin to delete old environment !')
+ self.delete_daisy_server()
+ LW('Old environment cleanup finished !')
def create_daisy_server(self):
self.create_daisy_server_image()
@@ -157,8 +163,10 @@ class BareMetalEnvironment(DaisyEnvironmentBase):
node['ipmi_pass'],
boot_source=boot_dev)
- def deploy(self, deploy_file, net_file):
- self.server.prepare_cluster(deploy_file, net_file)
+ def deploy(self, deploy_file, net_file, skip_preparation=False):
+ if not skip_preparation:
+ self.server.prepare_configurations(deploy_file, net_file)
+ self.server.prepare_cluster()
self.reboot_nodes(boot_dev='pxe')
self.server.prepare_host_and_pxe()
@@ -274,7 +282,7 @@ class VirtualEnvironment(DaisyEnvironmentBase):
for host in self.deploy_struct['hosts']:
delete_vm_and_disk(host['name'])
- def delete_networks(self):
+ def delete_networks(self, skip_daisy=False):
if 'virtNetTemplatePath' in self.deploy_struct:
path = self.deploy_struct['virtNetTemplatePath']
else:
@@ -284,19 +292,26 @@ class VirtualEnvironment(DaisyEnvironmentBase):
LW('Cannot find the virtual network template path %s' % path)
return
for f in os.listdir(path):
+ if not (skip_daisy and f == 'daisy.xml'):
f = path_join(path, f)
if os.path.isfile(f):
delete_virtual_network(f)
- def delete_old_environment(self):
+ def delete_old_environment(self, skip_daisy=False):
LW('Begin to delete old environment !')
self.delete_nodes()
- self.delete_daisy_server()
- self.delete_networks()
+
+ if skip_daisy:
+ LI('Skip deletion of old daisy server VM and network')
+ else:
+ self.delete_daisy_server()
+ self.delete_networks(skip_daisy=skip_daisy)
LW('Old environment cleanup finished !')
- def deploy(self, deploy_file, net_file):
- self.server.prepare_cluster(deploy_file, net_file)
+ def deploy(self, deploy_file, net_file, skip_preparation=False):
+ if not skip_preparation:
+ self.server.prepare_configurations(deploy_file, net_file)
+ self.server.prepare_cluster()
self.create_nodes()
self.server.copy_new_deploy_config(self.deploy_struct)
self.server.prepare_host_and_pxe()
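With skip_daisy set, cleanup spares both the Daisy server VM and its management network; in delete_networks() the daisy.xml definition is the one skipped. Since the raw diff above loses leading whitespace, here is an equivalent, fully indented reading of the patched loop:

    # Equivalent formulation of the patched delete_networks() loop.
    for f in os.listdir(path):
        if skip_daisy and f == 'daisy.xml':
            continue  # keep the network the reused Daisy server is attached to
        f = path_join(path, f)
        if os.path.isfile(f):
            delete_virtual_network(f)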
diff --git a/deploy/tempest.py b/deploy/tempest.py
index 89411f3f..dc0847fa 100644
--- a/deploy/tempest.py
+++ b/deploy/tempest.py
@@ -72,10 +72,6 @@ def get_endpoint(file_path):
return daisy_endpoint
-daisy_endpoint = get_endpoint(daisyrc_path)
-client = daisy_client.Client(version=daisy_version, endpoint=daisy_endpoint)
-
-
def prepare_install():
global deployment_interface
try:
@@ -95,7 +91,7 @@ def prepare_install():
update_network(cluster_id, network_map)
print("build pxe server to install os...")
deployment_interface = get_configure_from_daisyconf("PXE", "eth_name")
- build_pxe_for_discover(cluster_id)
+ build_pxe_for_discover(cluster_id, client, deployment_interface)
elif conf['host'] and conf['host'] == 'yes':
isbare = False if 'isbare' in conf and conf['isbare'] == 0 else True
print("discover host...")
@@ -103,10 +99,10 @@ def prepare_install():
time.sleep(10)
print("update hosts interface...")
hosts_info = get_hosts()
- cluster_info = get_cluster()
+ cluster_info = get_cluster(client)
cluster_id = cluster_info.id
add_hosts_interface(cluster_id, hosts_info, mac_address_map,
- host_interface_map, vip, isbare)
+ host_interface_map, vip, isbare, client)
if len(hosts_name) == 1:
protocol_type = 'LVM'
service_name = 'cinder'
@@ -117,24 +113,24 @@ def prepare_install():
print('hosts_num is %s' % len(hosts_name))
protocol_type = None
enable_cinder_backend(cluster_id, service_name,
- ceph_disk_name, protocol_type)
+ ceph_disk_name, protocol_type, client)
if 'scenario' in conf:
if 'odl_l3' in conf['scenario'] or \
'odl' in conf['scenario']:
- enable_opendaylight(cluster_id, 'odl_l3')
+ enable_opendaylight(cluster_id, 'odl_l3', client)
elif 'odl_l2' in conf['scenario']:
- enable_opendaylight(cluster_id, 'odl_l2')
+ enable_opendaylight(cluster_id, 'odl_l2', client)
if not isbare:
- install_os_for_vm_step1(cluster_id)
+ install_os_for_vm_step1(cluster_id, client)
else:
print("daisy baremetal deploy start")
- install_os_for_bm_oneshot(cluster_id)
+ install_os_for_bm_oneshot(cluster_id, client)
elif conf['install'] and conf['install'] == 'yes':
- cluster_info = get_cluster()
+ cluster_info = get_cluster(client)
cluster_id = cluster_info.id
- install_os_for_vm_step2(cluster_id)
+ install_os_for_vm_step2(cluster_id, client)
except Exception:
print("Deploy failed!!!.%s." % traceback.format_exc())
@@ -143,24 +139,24 @@ def prepare_install():
print_bar("Everything is done!")
-def build_pxe_for_discover(cluster_id):
+def build_pxe_for_discover(cluster_id, client, deployment_interface):
cluster_meta = {'cluster_id': cluster_id,
'deployment_interface': deployment_interface}
client.install.install(**cluster_meta)
-def install_os_for_vm_step1(cluster_id):
+def install_os_for_vm_step1(cluster_id, client):
cluster_meta = {'cluster_id': cluster_id,
'pxe_only': "true"}
client.install.install(**cluster_meta)
-def install_os_for_bm_oneshot(cluster_id):
+def install_os_for_bm_oneshot(cluster_id, client):
cluster_meta = {'cluster_id': cluster_id}
client.install.install(**cluster_meta)
-def install_os_for_vm_step2(cluster_id):
+def install_os_for_vm_step2(cluster_id, client):
cluster_meta = {'cluster_id': cluster_id,
'skip_pxe_ipmi': "true"}
client.install.install(**cluster_meta)
@@ -176,7 +172,7 @@ def discover_host(hosts_name):
time.sleep(10)
-def update_network(cluster_id, network_map):
+def update_network(cluster_id, network_map, client):
network_meta = {'filters': {'cluster_id': cluster_id}}
network_info_gernerator = client.networks.list(**network_meta)
for net in network_info_gernerator:
@@ -187,7 +183,7 @@ def update_network(cluster_id, network_map):
client.networks.update(network_id, **network_meta)
-def get_hosts():
+def get_hosts(client):
hosts_list_generator = client.hosts.list()
hosts_info = []
for host in hosts_list_generator:
@@ -196,7 +192,7 @@ def get_hosts():
return hosts_info
-def get_cluster():
+def get_cluster(client):
cluster_list_generator = client.clusters.list()
for cluster in cluster_list_generator:
cluster_info = client.clusters.get(cluster.id)
@@ -205,7 +201,7 @@ def get_cluster():
def add_hosts_interface(cluster_id, hosts_info, mac_address_map,
host_interface_map,
- vip, isbare):
+ vip, isbare, client):
for host in hosts_info:
dha_host_name = None
host = host.to_dict()
@@ -234,10 +230,10 @@ def add_hosts_interface(cluster_id, hosts_info, mac_address_map,
print("do not have os iso file in /var/lib/daisy/kolla/.")
client.hosts.update(host['id'], **host)
print("update role...")
- add_host_role(cluster_id, host['id'], dha_host_name, vip)
+ add_host_role(cluster_id, host['id'], dha_host_name, vip, client)
-def add_host_role(cluster_id, host_id, dha_host_name, vip):
+def add_host_role(cluster_id, host_id, dha_host_name, vip, client):
role_meta = {'filters': {'cluster_id': cluster_id}}
role_list_generator = client.roles.list(**role_meta)
role_list = [role for role in role_list_generator]
@@ -262,7 +258,7 @@ def add_host_role(cluster_id, host_id, dha_host_name, vip):
client.roles.update(computer_role_id, **role_computer_update_meta)
-def enable_cinder_backend(cluster_id, service_name, disk_name, protocol_type):
+def enable_cinder_backend(cluster_id, service_name, disk_name, protocol_type, client):
role_meta = {'filters': {'cluster_id': cluster_id}}
role_list_generator = client.roles.list(**role_meta)
lb_role_id = [role.id for role in role_list_generator if
@@ -278,7 +274,7 @@ def enable_cinder_backend(cluster_id, service_name, disk_name, protocol_type):
print e
-def enable_opendaylight(cluster_id, layer):
+def enable_opendaylight(cluster_id, layer, client):
role_meta = {'filters': {'cluster_id': cluster_id}}
role_list_generator = client.roles.list(**role_meta)
lb_role_id = [role.id for role in role_list_generator if
@@ -303,4 +299,6 @@ def enable_opendaylight(cluster_id, layer):
if __name__ == "__main__":
+ daisy_endpoint = get_endpoint(daisyrc_path)
+ client = daisy_client.Client(version=daisy_version, endpoint=daisy_endpoint)
prepare_install()
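The tempest.py change is dependency injection: the module-level daisy_client.Client construction moves under the __main__ guard, so importing deploy.tempest no longer needs a reachable Daisy endpoint, and every helper takes client as an explicit parameter. That is what enables the new unit tests, which pass in a stub, e.g.:

    # With the client injected, a stub replaces the real daisyclient
    # (test_tempest.py also stubs sys.modules['daisyclient'] before importing).
    from tests.unit.daisyclient_stub import StubTestClient, StubTestCluster
    from deploy.tempest import get_cluster

    client = StubTestClient()
    client.clusters.add(StubTestCluster(1, 'test_cluster'))
    assert get_cluster(client) == 'test_cluster'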
diff --git a/docs/release/installation/upgrade.rst b/docs/release/installation/upgrade.rst
index ce95d50b..164c72c9 100644
--- a/docs/release/installation/upgrade.rst
+++ b/docs/release/installation/upgrade.rst
@@ -11,7 +11,7 @@ update OpenStack minor version as the follows:
Since Daisy's Kolla images are built to meet the OPNFV requirements
and have their own file packaging layout, Daisy requires users to
always use the Kolla image file built by the Daisy team. Currently, it can be
-got from http://120.24.17.215/, or please
+got from http://artifacts.opnfv.org/daisy/upstream, or please
see :ref:`this chapter <daisy-build-kolla-image>`
for how to build your own image.
diff --git a/tests/data/daisy_conf/daisyrc_admin b/tests/data/daisy_conf/daisyrc_admin
new file mode 100644
index 00000000..7909a6a4
--- /dev/null
+++ b/tests/data/daisy_conf/daisyrc_admin
@@ -0,0 +1 @@
+export OS_ENDPOINT=http://10.20.11.2:19292
\ No newline at end of file
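This one-line fixture feeds test_get_endpoint below, which expects get_endpoint() to extract http://10.20.11.2:19292 from the export line. The parser itself is not part of this change; a plausible minimal shape of it would be (an assumption, not the actual implementation):

    # Assumed shape of deploy.tempest.get_endpoint(); shown for orientation only.
    def get_endpoint(path):
        with open(path) as f:
            for line in f:
                if 'OS_ENDPOINT' in line:
                    return line.split('=', 1)[1].strip()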
diff --git a/tests/unit/daisyclient_stub.py b/tests/unit/daisyclient_stub.py
new file mode 100644
index 00000000..59540daf
--- /dev/null
+++ b/tests/unit/daisyclient_stub.py
@@ -0,0 +1,164 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class StubTestInstall():
+ def __init__(self):
+ pass
+
+ def install(self, **cluster_meta):
+ self.cluster_meta = cluster_meta
+
+
+class StubTestHost():
+ def __init__(self, id, name, cluster_id, interfaces):
+ self.id = id
+ self.name = name
+ self.cluster_id = cluster_id
+ self.interfaces = interfaces
+ self.metadata = None
+
+ def to_dict(self):
+ return {'id': self.id, 'name': self.name, 'cluster_id': self.cluster_id,
+ 'interfaces': self.interfaces}
+
+
+class StubTestHosts():
+ def __init__(self):
+ self.hosts = []
+
+ def add(self, host):
+ self.hosts.append(host)
+
+ def get(self, id):
+ for host in self.hosts:
+ if host.id == id:
+ return host
+ return None
+
+ def list(self):
+ return self.hosts
+
+ def update(self, host_id, **metadata):
+ for host in self.hosts:
+ if host.id == host_id:
+ host.metadata = metadata
+
+
+class StubTestCluster():
+ def __init__(self, id, name):
+ self.id = id
+ self.name = name
+
+
+class StubTestClusters():
+ def __init__(self):
+ self.clusters = []
+
+ def add(self, cluster):
+ self.clusters.append(cluster)
+
+ def get(self, id):
+ for cluster in self.clusters:
+ if cluster.id == id:
+ return cluster.name
+ return None
+
+ def list(self):
+ return self.clusters
+
+
+class StubTestNet():
+ def __init__(self, id, name, cluster_id, **metadata):
+ self.id = id
+ self.name = name
+ self.cluster_id = cluster_id
+ self.metadata = metadata
+
+
+class StubTestNetworks():
+ def __init__(self):
+ self.networks = []
+
+ def add(self, net):
+ self.networks.append(net)
+
+ def list(self, **filter):
+ networks = []
+ if filter:
+ filter_item = filter.get('filters')
+ for net in self.networks:
+ cluster_id_is_match = False
+ if filter_item.get('cluster_id'):
+ if filter_item.get('cluster_id') == net.cluster_id:
+ cluster_id_is_match = True
+ else:
+ cluster_id_is_match = True
+ if cluster_id_is_match is True:
+ networks.append(net)
+ return networks
+
+ def update(self, network_id, **network_meta):
+ for net in self.networks:
+ if net.id == network_id:
+ net.metadata = network_meta
+
+
+class StubTestRole():
+ def __init__(self, id, name, cluster_id):
+ self.id = id
+ self.name = name
+ self.cluster_id = cluster_id
+ self.metadata = None
+
+
+class StubTestRoles():
+ def __init__(self):
+ self.roles = []
+
+ def add(self, role):
+ self.roles.append(role)
+
+ def list(self, **filter):
+ roles = []
+ if filter:
+ filter_item = filter.get('filters')
+ for role in self.roles:
+ cluster_id_is_match = False
+ if filter_item.get('cluster_id'):
+ if filter_item.get('cluster_id') == role.cluster_id:
+ cluster_id_is_match = True
+ else:
+ cluster_id_is_match = True
+ if cluster_id_is_match is True:
+ roles.append(role)
+ return roles
+
+ def update(self, role_id, **meta):
+ for role in self.roles:
+ if role.id == role_id:
+ role.metadata = meta
+
+
+class StubTestDisks():
+ def __init__(self):
+ self.disks = []
+
+ def service_disk_add(self, **metadata):
+ self.disks.append(metadata)
+
+
+class StubTestClient():
+ def __init__(self):
+ self.install = StubTestInstall()
+ self.hosts = StubTestHosts()
+ self.networks = StubTestNetworks()
+ self.clusters = StubTestClusters()
+ self.roles = StubTestRoles()
+ self.disk_array = StubTestDisks()
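The stub mirrors just enough of the daisyclient v1 surface (install, hosts, networks, clusters, roles, disk_array) for the functions under test; each update() records its keyword arguments in a metadata attribute that the tests assert on. For example:

    from tests.unit.daisyclient_stub import StubTestClient, StubTestHost

    client = StubTestClient()
    client.hosts.add(StubTestHost(1, 'controller01', 1, []))
    client.hosts.update(1, os_version='/var/lib/daisy/kolla/test.iso')
    assert client.hosts.get(1).metadata == {'os_version': '/var/lib/daisy/kolla/test.iso'}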
diff --git a/tests/unit/test_daisy_server.py b/tests/unit/test_daisy_server.py
index 65282e82..ea9c495c 100644
--- a/tests/unit/test_daisy_server.py
+++ b/tests/unit/test_daisy_server.py
@@ -49,8 +49,8 @@ data3 = get_ssh_test_command_from_file(ssh_test_file_dir(), 'ssh_stream_data3.tx
(data3, res1, expected_ret1),
(data3, res2, expected_ret1)])
def test_log_from_stream(data, res, expected):
- def log_func(str):
- print str
+ def log_func(msg):
+ print(msg)
pre_val = daisy_server.BLOCK_SIZE
daisy_server.BLOCK_SIZE = 16
ret = log_from_stream(res, data, log_func)
@@ -448,8 +448,9 @@ def test_install_daisy_DaisyServer(mock_prepare_files, mock_ssh_run, tmpdir):
@pytest.mark.parametrize('adapter', [
('libvirt'), ('ipmi')])
+@mock.patch.object(daisy_server.DaisyServer, 'scp_put')
@mock.patch.object(daisy_server.DaisyServer, 'ssh_run')
-def test_prepare_configurations_DaisyServer(mock_ssh_run, adapter, tmpdir):
+def test_prepare_configurations_DaisyServer(mock_ssh_run, mock_scp_put, adapter, tmpdir):
bin_file = os.path.join(tmpdir.dirname, tmpdir.basename, bin_file_name)
DaisyServerInst = DaisyServer(daisy_server_info['name'],
daisy_server_info['address'],
@@ -461,25 +462,24 @@ def test_prepare_configurations_DaisyServer(mock_ssh_run, adapter, tmpdir):
deploy_file_name,
net_file_name)
mock_ssh_run.return_value = 0
+ deploy_file = os.path.join(tmpdir.dirname, tmpdir.basename, deploy_file_name)
+ net_file = os.path.join(tmpdir.dirname, tmpdir.basename, net_file_name)
cmd = 'export PYTHONPATH={python_path}; python {script} -nw {net_file} -b {is_bare}'.format(
python_path=remote_dir,
script=os.path.join(remote_dir, 'deploy/prepare/execute.py'),
net_file=os.path.join(remote_dir, net_file_name),
is_bare=1 if adapter == 'ipmi' else 0)
- DaisyServerInst.prepare_configurations()
+ DaisyServerInst.prepare_configurations(deploy_file, net_file)
if adapter == 'libvirt':
DaisyServerInst.ssh_run.assert_called_once_with(cmd)
else:
DaisyServerInst.ssh_run.assert_not_called()
+ assert DaisyServerInst.scp_put.call_count == 2
tmpdir.remove()
-@mock.patch.object(daisy_server.DaisyServer, 'scp_put')
-@mock.patch.object(daisy_server.DaisyServer, 'prepare_configurations')
@mock.patch.object(daisy_server.DaisyServer, 'ssh_run')
-def test_prepare_cluster_DaisyServer(mock_scp_put,
- mock_prepare_configurations,
- mock_ssh_run,
+def test_prepare_cluster_DaisyServer(mock_ssh_run,
tmpdir):
bin_file = os.path.join(tmpdir.dirname, tmpdir.basename, bin_file_name)
DaisyServerInst = DaisyServer(daisy_server_info['name'],
@@ -496,12 +496,8 @@ def test_prepare_cluster_DaisyServer(mock_scp_put,
script=os.path.join(remote_dir, 'deploy/tempest.py'),
deploy_file=os.path.join(remote_dir, deploy_file_name),
net_file=os.path.join(remote_dir, net_file_name))
- deploy_file = os.path.join(tmpdir.dirname, tmpdir.basename, deploy_file_name)
- net_file = os.path.join(tmpdir.dirname, tmpdir.basename, net_file_name)
- DaisyServerInst.prepare_cluster(deploy_file, net_file)
+ DaisyServerInst.prepare_cluster()
DaisyServerInst.ssh_run.assert_called_once_with(cmd, check=True)
- DaisyServerInst.prepare_configurations.assert_called_once_with()
- assert DaisyServerInst.scp_put.call_count == 2
tmpdir.remove()
diff --git a/tests/unit/test_deploy.py b/tests/unit/test_deploy.py
index db887a01..4b68316a 100644
--- a/tests/unit/test_deploy.py
+++ b/tests/unit/test_deploy.py
@@ -195,6 +195,7 @@ def test__construct_final_deploy_conf_in_DaisyDeployment(mock__use_pod_descripto
'src_deploy_file': 'deploy_baremetal.yml',
'net_file': 'network_baremetal.yml',
'bin_file': 'opnfv.bin',
+ 'skip_daisy': False,
'daisy_only': False,
'cleanup_only': False,
'remote_dir': '/home/daisy',
@@ -212,6 +213,7 @@ def test__construct_final_deploy_conf_in_DaisyDeployment(mock__use_pod_descripto
'src_deploy_file': 'deploy_baremetal.yml',
'net_file': 'network_baremetal.yml',
'bin_file': 'opnfv.bin',
+ 'skip_daisy': False,
'daisy_only': False,
'cleanup_only': True,
'remote_dir': '/home/daisy',
@@ -229,6 +231,7 @@ def test__construct_final_deploy_conf_in_DaisyDeployment(mock__use_pod_descripto
'src_deploy_file': 'deploy_baremetal.yml',
'net_file': 'network_baremetal.yml',
'bin_file': 'opnfv.bin',
+ 'skip_daisy': False,
'daisy_only': True,
'cleanup_only': False,
'remote_dir': '/home/daisy',
@@ -242,8 +245,9 @@ def test__construct_final_deploy_conf_in_DaisyDeployment(mock__use_pod_descripto
@mock.patch.object(environment.BareMetalEnvironment, 'delete_old_environment')
@mock.patch.object(environment.BareMetalEnvironment, 'create_daisy_server')
@mock.patch.object(environment.BareMetalEnvironment, 'install_daisy')
+@mock.patch.object(environment.BareMetalEnvironment, 'connect_daisy_server')
@mock.patch.object(environment.BareMetalEnvironment, 'deploy')
-def test_run_in_DaisyDeployment(mock_deploy, mock_install_daisy,
+def test_run_in_DaisyDeployment(mock_deploy, mock_connect_daisy_server, mock_install_daisy,
mock_create_daisy_server, mock_delete_old_environment,
conf_file_dir, tmpdir, kwargs):
kwargs['src_deploy_file'] = os.path.join(conf_file_dir, kwargs['src_deploy_file'])
@@ -261,12 +265,16 @@ def test_run_in_DaisyDeployment(mock_deploy, mock_install_daisy,
if daisy_deploy.cleanup_only is False:
mock_create_daisy_server.assert_called_once_with()
if daisy_deploy.daisy_only is False:
- mock_deploy.assert_called_once_with(daisy_deploy.deploy_file, daisy_deploy.net_file)
- mock_install_daisy.assert_called_once_with(daisy_deploy.remote_dir, daisy_deploy.bin_file,
- daisy_deploy.deploy_file_name, daisy_deploy.net_file_name)
+ mock_deploy.assert_called_once_with(daisy_deploy.deploy_file,
+ daisy_deploy.net_file,
+ skip_preparation=False)
+ mock_connect_daisy_server.assert_called_once_with(daisy_deploy.remote_dir,
+ daisy_deploy.bin_file,
+ daisy_deploy.deploy_file_name,
+ daisy_deploy.net_file_name)
+ mock_install_daisy.assert_called_once_with()
else:
mock_deploy.assert_not_called()
- mock_install_daisy.assert_not_called()
else:
mock_create_daisy_server.assert_not_called()
tmpdir.remove()
@@ -286,13 +294,14 @@ def test_parse_arguments(mock_confirm_dir_exists, mock_make_file_executable,
mock_save_log_to_file, mock_check_sudo_privilege,
mock_parse_args, cleanup_only, tmpdir):
class MockArg():
- def __init__(self, labs_dir, lab_name, pod_name, bin_file, daisy_only,
+ def __init__(self, labs_dir, lab_name, pod_name, bin_file, skip_daisy, daisy_only,
cleanup_only, remote_dir, work_dir, storage_dir, pxe_bridge,
deploy_log, scenario):
self.labs_dir = labs_dir
self.lab_name = lab_name
self.pod_name = pod_name
self.bin_file = bin_file
+ self.skip_daisy = skip_daisy
self.daisy_only = daisy_only
self.cleanup_only = cleanup_only
self.remote_dir = remote_dir
@@ -315,6 +324,7 @@ def test_parse_arguments(mock_confirm_dir_exists, mock_make_file_executable,
'src_deploy_file': deploy_file,
'net_file': net_file,
'bin_file': bin_file_path,
+ 'skip_daisy': False,
'daisy_only': False,
'cleanup_only': cleanup_only,
'remote_dir': '/home/daisy',
@@ -324,7 +334,7 @@ def test_parse_arguments(mock_confirm_dir_exists, mock_make_file_executable,
'deploy_log': deploy_log_path,
'scenario': 'os-nosdn-nofeature-noha'
}
- mockarg = MockArg('/var/tmp/securedlab', 'zte', 'pod2', bin_file_path, False, cleanup_only, '/home/daisy', '/tmp/workdir',
+ mockarg = MockArg('/var/tmp/securedlab', 'zte', 'pod2', bin_file_path, False, False, cleanup_only, '/home/daisy', '/tmp/workdir',
'/home/qemu/vms', 'pxebr', deploy_log_path, 'os-nosdn-nofeature-noha')
mock_parse_args.return_value = mockarg
ret = parse_arguments()
diff --git a/tests/unit/test_environment.py b/tests/unit/test_environment.py
index aed2c73c..f7cf5985 100644
--- a/tests/unit/test_environment.py
+++ b/tests/unit/test_environment.py
@@ -136,7 +136,8 @@ def test_install_daisy_DaisyEnvironmentBase(mock_install_daisy, mock_connect, tm
DaisyEnvBaseInst = DaisyEnvironmentBase(
deploy_struct, net_struct, adapter, pxe_bridge,
daisy_server, work_dir, storage_dir, scenario)
- DaisyEnvBaseInst.install_daisy(remote_dir, bin_file, deploy_file_name, net_file_name)
+ DaisyEnvBaseInst.connect_daisy_server(remote_dir, bin_file, deploy_file_name, net_file_name)
+ DaisyEnvBaseInst.install_daisy()
mock_install_daisy.assert_called_once_with()
mock_connect.assert_called_once_with()
tmpdir.remove()
@@ -246,6 +247,7 @@ def test_create_daisy_server_BareMetalEnvironment(mock_create_daisy_server_vm, m
@mock.patch('deploy.environment.time.sleep')
@mock.patch.object(daisy_server.DaisyServer, 'prepare_cluster')
+@mock.patch.object(daisy_server.DaisyServer, 'prepare_configurations')
@mock.patch.object(environment.BareMetalEnvironment, 'reboot_nodes')
@mock.patch.object(daisy_server.DaisyServer, 'prepare_host_and_pxe')
@mock.patch.object(daisy_server.DaisyServer, 'check_os_installation')
@@ -253,7 +255,8 @@ def test_create_daisy_server_BareMetalEnvironment(mock_create_daisy_server_vm, m
@mock.patch.object(daisy_server.DaisyServer, 'post_deploy')
def test_deploy_BareMetalEnvironment(mock_post_deploy, mock_check_openstack_installation,
mock_check_os_installation, mock_prepare_host_and_pxe,
- mock_reboot_nodes, mock_prepare_cluster,
+ mock_reboot_nodes, mock_prepare_configurations,
+ mock_prepare_cluster,
mock_sleep,
tmpdir):
work_dir = os.path.join(tmpdir.dirname, tmpdir.basename, work_dir_name)
@@ -280,7 +283,8 @@ def test_deploy_BareMetalEnvironment(mock_post_deploy, mock_check_openstack_inst
deploy_file_name,
net_file_name)
BareMetalEnvironmentInst.deploy(deploy_file, net_file)
- mock_prepare_cluster.assert_called_once_with(deploy_file, net_file)
+ mock_prepare_configurations.assert_called_once_with(deploy_file, net_file)
+ mock_prepare_cluster.assert_called_once_with()
mock_reboot_nodes.assert_called_once_with(boot_dev='pxe')
mock_prepare_host_and_pxe.assert_called_once_with()
mock_check_os_installation.assert_called_once_with(len(BareMetalEnvironmentInst.deploy_struct['hosts']))
@@ -537,7 +541,7 @@ def test_delete_old_environment_VirtualEnvironment(mock_delete_daisy_server,
daisy_server, work_dir, storage_dir, scenario)
VirtualEnvironmentInst.delete_old_environment()
VirtualEnvironmentInst.delete_daisy_server.assert_called_once_with()
- VirtualEnvironmentInst.delete_networks.assert_called_once_with()
+ VirtualEnvironmentInst.delete_networks.assert_called_once_with(skip_daisy=False)
VirtualEnvironmentInst.delete_nodes.assert_called_once_with()
tmpdir.remove()
@@ -550,11 +554,12 @@ def test_delete_old_environment_VirtualEnvironment(mock_delete_daisy_server,
@mock.patch.object(environment.DaisyServer, 'prepare_host_and_pxe')
@mock.patch.object(environment.DaisyServer, 'copy_new_deploy_config')
@mock.patch.object(environment.DaisyServer, 'prepare_cluster')
+@mock.patch.object(environment.DaisyServer, 'prepare_configurations')
@mock.patch.object(environment.VirtualEnvironment, '_post_deploy')
@mock.patch.object(environment.VirtualEnvironment, 'reboot_nodes')
@mock.patch.object(environment.VirtualEnvironment, 'create_nodes')
def test_deploy_VirtualEnvironment(mock_create_nodes, mock_reboot_nodes,
- mock__post_deploy, mock_prepare_cluster,
+ mock__post_deploy, mock_prepare_configurations, mock_prepare_cluster,
mock_copy_new_deploy_config, mock_prepare_host_and_pxe,
mock_install_virtual_nodes, mock_check_os_installation,
mock_check_openstack_installation, mock_post_deploy,
@@ -587,6 +592,7 @@ def test_deploy_VirtualEnvironment(mock_create_nodes, mock_reboot_nodes,
mock_create_nodes.assert_called_once()
assert mock_reboot_nodes.call_count == 2
mock__post_deploy.assert_called_once()
+ mock_prepare_configurations.assert_called_once()
mock_prepare_cluster.assert_called_once()
mock_copy_new_deploy_config.assert_called_once()
mock_prepare_host_and_pxe.assert_called_once()
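A reminder on the decorator bookkeeping in these tests: stacked mock.patch decorators apply bottom-up, so the mock arguments must appear in reverse decorator order. That is why mock_prepare_configurations precedes mock_prepare_cluster in the parameter lists above:

    # mock.patch decorators apply bottom-up: the decorator nearest the function
    # supplies the first mock argument.
    @mock.patch.object(DaisyServer, 'prepare_cluster')         # -> second argument
    @mock.patch.object(DaisyServer, 'prepare_configurations')  # -> first argument
    def test_deploy(mock_prepare_configurations, mock_prepare_cluster):
        ...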
diff --git a/tests/unit/test_tempest.py b/tests/unit/test_tempest.py
new file mode 100644
index 00000000..34ab4073
--- /dev/null
+++ b/tests/unit/test_tempest.py
@@ -0,0 +1,324 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import sys
+import pytest
+from oslo_config import cfg
+from tests.unit.daisyclient_stub import (
+ StubTestHost,
+ StubTestCluster,
+ StubTestNet,
+ StubTestRole,
+ StubTestClient
+)
+
+import mock
+sys.modules['daisyclient'] = mock.Mock()
+sys.modules['daisyclient.v1'] = mock.Mock()
+import deploy.tempest # noqa: ignore=E402
+from deploy.tempest import (
+ parse,
+ get_configure_from_daisyconf,
+ get_endpoint,
+ build_pxe_for_discover,
+ install_os_for_vm_step1,
+ install_os_for_bm_oneshot,
+ install_os_for_vm_step2,
+ discover_host,
+ update_network,
+ get_hosts,
+ get_cluster,
+ add_hosts_interface,
+ add_host_role,
+ enable_cinder_backend,
+ enable_opendaylight
+) # noqa: ignore=E402
+
+
+def get_val_index_in_list(key, list):
+ return list.index(key) + 1
+
+
+@pytest.mark.parametrize('argv', [
+ (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+ '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--cluster', 'yes', '--host',
+ 'yes', '--install', 'yes', '--isbare', '1', '--scenario', 'os-nosdn-nofeature-noha']),
+ (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+ '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--cluster', 'yes']),
+ (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+ '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--install', 'yes']),
+ (['--dha', '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--network',
+ '/home/zte/dairy/tests/data/deploy_virtual1.ymal', '--cluster', 'yes', '--host',
+ 'yes', '--install', 'yes', '--isbare', '1', '--scenario', 'os-nosdn-nofeature-noha'])])
+def test_parser(argv):
+ options_keys = ['dha', 'network', 'cluster', 'host', 'install', 'isbare', 'scenario']
+
+ conf = cfg.ConfigOpts()
+ parse(conf, argv)
+ for option in options_keys:
+ if conf[option]:
+ if option == 'isbare':
+ argv[argv.index('--' + option) + 1] = int(argv[argv.index('--' + option) + 1])
+ assert conf[option] == argv[argv.index('--' + option) + 1]
+
+
+@pytest.fixture(scope="module")
+def conf_file_dir(data_root):
+ return os.path.join(data_root, 'daisy_conf')
+
+
+@pytest.mark.parametrize('section, key, exp', [
+ ("PXE", "eth_name", 'ens3'),
+ ("PXE", "build_pxe", 'no')])
+def test_get_configure_from_daisyconf(section, key, exp, conf_file_dir):
+ res_old_val = deploy.tempest.daisy_conf_path
+ deploy.tempest.daisy_conf_path = os.path.join(conf_file_dir, 'daisy.conf')
+ ret = get_configure_from_daisyconf(section, key)
+ deploy.tempest.daisy_conf_path = res_old_val
+ assert ret == exp
+
+
+def test_get_endpoint(conf_file_dir):
+ daisyrc_file_path = os.path.join(conf_file_dir, 'daisyrc_admin')
+ exp = 'http://10.20.11.2:19292'
+ ret = get_endpoint(daisyrc_file_path)
+ assert ret == exp
+
+
+def test_build_pxe_for_discover():
+ client = StubTestClient()
+ cluster_id = 0x123456
+ deployment_interface = 'eth3'
+ build_pxe_for_discover(cluster_id, client, deployment_interface)
+
+
+def test_install_os_for_vm_step1():
+ client = StubTestClient()
+ cluster_id = 0x123456
+ install_os_for_vm_step1(cluster_id, client)
+
+
+def test_install_os_for_bm_oneshot():
+ client = StubTestClient()
+ cluster_id = 0x123456
+ install_os_for_bm_oneshot(cluster_id, client)
+
+
+def test_install_os_for_vm_step2():
+ client = StubTestClient()
+ cluster_id = 0x123456
+ install_os_for_vm_step2(cluster_id, client)
+
+
+@mock.patch('time.sleep')
+@mock.patch('deploy.tempest.get_hosts')
+def test_discover_host(mock_get_hosts, mock_sleep):
+ hosts_name = ['computer01', 'computer02', 'controller01', 'controller02', 'controller03']
+ mock_get_hosts.return_value = hosts_name
+ discover_host(hosts_name)
+ mock_sleep.assert_not_called()
+
+
+def test_update_network():
+ client = StubTestClient()
+ cluster_id = 1
+ network_map = {'MANAGEMENT': {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+ 'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]},
+ 'STORAGE': {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+ 'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]}}
+ metadata_net1 = {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+ 'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]}
+ net1 = StubTestNet(0x1234, 'MANAGEMENT', 1, **metadata_net1)
+ client.networks.add(net1)
+ metadata_net2 = {'cidr': '10.20.11.0/24', 'gateway': '10.20.11.1',
+ 'ip_ranges': [{'start': '10.20.11.3', 'end': '10.20.11.10'}]}
+ net2 = StubTestNet(0x2345, 'STORAGE', 1, **metadata_net2)
+ client.networks.add(net2)
+ exp_nets_data = [metadata_net1, metadata_net2]
+ update_network(cluster_id, network_map, client)
+ for i in range(len(exp_nets_data)):
+ assert client.networks.networks[i].metadata == exp_nets_data[i]
+
+
+def test_get_hosts():
+ client = StubTestClient()
+ host1 = StubTestHost(0x1234, 'test_host_1', 1,
+ [{'name': 'ens8', 'mac': '11:11:11:11:11:11'}])
+ client.hosts.add(host1)
+ host2 = StubTestHost(0x2345, 'test_host_2', 1,
+ [{'name': 'ens3', 'mac': '22:22:22:22:22:22'}])
+ client.hosts.add(host2)
+ exp = [host1, host2]
+ ret = get_hosts(client)
+ assert ret == exp
+
+
+def test_get_cluster():
+ client = StubTestClient()
+ cluster1 = StubTestCluster(1, 'test_cluster_1')
+ client.clusters.add(cluster1)
+ cluster2 = StubTestCluster(2, 'test_cluster_2')
+ client.clusters.add(cluster2)
+ exp = 'test_cluster_2'
+ ret = get_cluster(client)
+ assert ret == exp
+
+
+@pytest.mark.parametrize('isbare', [
+ (False), (True)])
+def test_add_hosts_interface(isbare, tmpdir):
+ res_old_val = deploy.tempest.iso_path
+ deploy.tempest.iso_path = os.path.join(tmpdir.dirname, tmpdir.basename) + '/'
+ iso_file_path = os.path.join(deploy.tempest.iso_path, 'test_os.iso')
+ with open(iso_file_path, 'a') as f:
+ f.write('test_data')
+ client = StubTestClient()
+ cluster_id = 1
+ host_id1 = 0x1234
+ host_id2 = 0x2345
+ host_id3 = 0x3456
+ host1 = StubTestHost(host_id1, 'controller01', cluster_id, [{'name': 'ens8', 'mac': '11:11:11:11:11:11'}])
+ client.hosts.add(host1)
+ host2 = StubTestHost(host_id2, 'controller02', cluster_id, [{'name': 'ens3', 'mac': '22:22:22:22:22:22'}])
+ client.hosts.add(host2)
+ host3 = StubTestHost(host_id3, 'computer01', cluster_id, [{'name': 'ens9', 'mac': '33:33:33:33:33:33'}])
+ client.hosts.add(host3)
+ hosts_info = [host1, host2, host3]
+ role1 = StubTestRole(0xaaaa, 'CONTROLLER_LB', cluster_id)
+ client.roles.add(role1)
+ role2 = StubTestRole(0xbbbb, 'COMPUTER', cluster_id)
+ client.roles.add(role2)
+ mac_address_map = {
+ 'controller01': ['11:11:11:11:11:11'], 'controller02': ['22:22:22:22:22:22'], 'controller03': [],
+ 'computer01': ['33:33:33:33:33:33'], 'computer02': []}
+ host_interface_map = {
+ 'ens8': [{'ip': '', 'name': 'EXTERNAL'}],
+ 'ens3': [{'ip': '', 'name': 'MANAGEMENT'},
+ {'ip': '', 'name': 'PUBLICAPI'},
+ {'ip': '', 'name': 'STORAGE'},
+ {'ip': '', 'name': 'physnet1'}],
+ 'ens9': [{'ip': '', 'name': 'HEARTBEAT'}]}
+ vip = '10.20.11.11'
+ add_hosts_interface(1, hosts_info, mac_address_map,
+ host_interface_map,
+ vip, isbare, client)
+ deploy.tempest.iso_path = res_old_val
+ if isbare:
+ assert client.hosts.get(host_id1).metadata == {
+ 'id': host_id1, 'name': 'controller01', 'cluster_id': cluster_id,
+ 'cluster': cluster_id, 'os_version': iso_file_path,
+ 'ipmi_user': 'zteroot', 'ipmi_passwd': 'superuser',
+ 'interfaces': [{'name': 'ens8', 'mac': '11:11:11:11:11:11',
+ 'assigned_networks': [{'ip': '', 'name': 'EXTERNAL'}]}],
+ }
+ assert client.hosts.get(host_id2).metadata == {
+ 'id': host_id2, 'name': 'controller02', 'cluster_id': cluster_id,
+ 'cluster': cluster_id, 'os_version': iso_file_path,
+ 'ipmi_user': 'zteroot', 'ipmi_passwd': 'superuser',
+ 'interfaces': [{'name': 'ens3', 'mac': '22:22:22:22:22:22',
+ 'assigned_networks': [
+ {'ip': '', 'name': 'MANAGEMENT'},
+ {'ip': '', 'name': 'PUBLICAPI'},
+ {'ip': '', 'name': 'STORAGE'},
+ {'ip': '', 'name': 'physnet1'}]}],
+ }
+ assert client.hosts.get(host_id3).metadata == {
+ 'id': host_id3, 'name': 'computer01', 'cluster_id': cluster_id,
+ 'cluster': cluster_id, 'os_version': iso_file_path,
+ 'ipmi_user': 'zteroot', 'ipmi_passwd': 'superuser',
+ 'interfaces': [{'name': 'ens9', 'mac': '33:33:33:33:33:33',
+ 'assigned_networks': [{'ip': '', 'name': 'HEARTBEAT'}]}],
+ }
+ else:
+ assert client.hosts.get(host_id1).metadata == {
+ 'id': host_id1, 'name': 'controller01', 'cluster_id': cluster_id,
+ 'cluster': cluster_id, 'os_version': iso_file_path,
+ 'interfaces': [{'name': 'ens8', 'mac': '11:11:11:11:11:11',
+ 'assigned_networks': [{'ip': '', 'name': 'EXTERNAL'}]}],
+ }
+ assert client.hosts.get(host_id2).metadata == {
+ 'id': host_id2, 'name': 'controller02', 'cluster_id': cluster_id,
+ 'cluster': cluster_id, 'os_version': iso_file_path,
+ 'interfaces': [{'name': 'ens3', 'mac': '22:22:22:22:22:22',
+ 'assigned_networks': [
+ {'ip': '', 'name': 'MANAGEMENT'},
+ {'ip': '', 'name': 'PUBLICAPI'},
+ {'ip': '', 'name': 'STORAGE'},
+ {'ip': '', 'name': 'physnet1'}]}],
+ }
+ assert client.hosts.get(host_id3).metadata == {
+ 'id': host_id3, 'name': 'computer01', 'cluster_id': cluster_id,
+ 'cluster': cluster_id, 'os_version': iso_file_path,
+ 'interfaces': [{'name': 'ens9', 'mac': '33:33:33:33:33:33',
+ 'assigned_networks': [{'ip': '', 'name': 'HEARTBEAT'}]}],
+ }
+ tmpdir.remove()
+
+
+@pytest.mark.parametrize('dha_host_name, cluster_id, host_id, vip, exp', [
+ ('controller01', 1, 0x1234, '10.20.11.11', {'nodes': [0x1234], 'cluster_id': 1, 'vip': '10.20.11.11'}),
+ ('computer01', 1, 0x2345, '10.20.11.11', {'nodes': [0x2345], 'cluster_id': 1}),
+ ('all_in_one', 1, 0x1234, '10.20.11.11',
+ [{'nodes': [0x1234], 'cluster_id': 1, 'vip': '10.20.11.11'},
+ {'nodes': [0x1234], 'cluster_id': 1}])])
+def test_add_host_role(dha_host_name, cluster_id, host_id, vip, exp):
+ client = StubTestClient()
+ role1 = StubTestRole(0x1234, 'CONTROLLER_LB', 1)
+ client.roles.add(role1)
+ role2 = StubTestRole(0x2345, 'COMPUTER', 1)
+ client.roles.add(role2)
+ add_host_role(cluster_id, host_id, dha_host_name, vip, client)
+ if dha_host_name == 'controller01':
+ assert client.roles.roles[0].metadata == exp
+ if dha_host_name == 'computer01':
+ assert client.roles.roles[1].metadata == exp
+ if dha_host_name == 'all_in_one':
+ assert client.roles.roles[0].metadata == exp[0]
+ assert client.roles.roles[1].metadata == exp[1]
+
+
+def test_enable_cinder_backend():
+ client = StubTestClient()
+ role1 = StubTestRole(0x1234, 'CONTROLLER_LB', 1)
+ client.roles.add(role1)
+ service_name = 'ceph'
+ disk_name = '/dev/sdb'
+ protocol_type = 'RAW'
+ exp_disk_meta = {'service': service_name,
+ 'disk_location': 'local',
+ 'partition': disk_name,
+ 'protocol_type': protocol_type,
+ 'role_id': 0x1234}
+ enable_cinder_backend(1, service_name, disk_name, protocol_type, client)
+ assert client.disk_array.disks[0] == exp_disk_meta
+
+
+@pytest.mark.parametrize('layer, exp', [
+ ('odl_l3', {
+ 'neutron_backends_array': [{'zenic_ip': '',
+ 'sdn_controller_type': 'opendaylight',
+ 'zenic_port': '',
+ 'zenic_user_password': '',
+ 'neutron_agent_type': '',
+ 'zenic_user_name': '',
+ 'enable_l2_or_l3': 'l3'}]}),
+ ('odl_l2', {
+ 'neutron_backends_array': [{'zenic_ip': '',
+ 'sdn_controller_type': 'opendaylight',
+ 'zenic_port': '',
+ 'zenic_user_password': '',
+ 'neutron_agent_type': '',
+ 'zenic_user_name': '',
+ 'enable_l2_or_l3': 'l2'}]})])
+def test_enable_opendaylight(layer, exp):
+ client = StubTestClient()
+ role1 = StubTestRole(0x1234, 'CONTROLLER_LB', 1)
+ client.roles.add(role1)
+ enable_opendaylight(1, layer, client)
+ assert client.roles.roles[0].metadata == exp