author     Alex Yang <yangyang1@zte.com.cn>   2017-04-26 14:14:43 +0800
committer  Alex Yang <yangyang1@zte.com.cn>   2017-04-28 19:19:27 +0800
commit     0d09e2b7301defd1f408d17373cbf991a922c693 (patch)
tree       35db51d8f9b9d8ae2c8d27dfd159ea0953d6f92c /deploy
parent     12eaf7b46bec6d25caf0d2d58c7a1eb69f33a3d8 (diff)
Implement the deployment script with python
1. deploy.py: control the work flow of the deployment
2. daisy_server.py: maintain the ssh connection with the daisy server and execute ssh commands
3. environment.py: create/find/delete the nodes/vms, install the operating system and openstack on the nodes
4. libvirt_utils.py: deal with the vm templates and call virsh commands
5. utils.py: common functions such as file/directory/bash operations

Change-Id: I1caa4b0b3118665e15410e8f02bcb6473e5a530b
Signed-off-by: Alex Yang <yangyang1@zte.com.cn>
Diffstat (limited to 'deploy')
-rw-r--r--  deploy/daisy_server.py   264
-rw-r--r--  deploy/deploy.py         212
-rw-r--r--  deploy/environment.py    262
-rw-r--r--  deploy/libvirt_utils.py  211
-rw-r--r--  deploy/utils.py          138
5 files changed, 1087 insertions, 0 deletions
diff --git a/deploy/daisy_server.py b/deploy/daisy_server.py
new file mode 100644
index 00000000..ec30ec95
--- /dev/null
+++ b/deploy/daisy_server.py
@@ -0,0 +1,264 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import paramiko
+import scp
+import time
+
+from utils import (
+ WORKSPACE,
+ LD,
+ LI,
+ LW,
+ err_exit,
+ log_bar,
+ path_join,
+ update_config
+)
+
+TIMEOUT = 300
+BLOCK_SIZE = 1024
+
+
+def log_from_stream(res, data, log_func):
+ lines = data.splitlines()
+ res_data = res
+ if res_data:
+ lines[0] = res_data + lines[0]
+ res_data = None
+
+ if not data.endswith("\n"):
+ res_data = lines[-1]
+ del (lines[-1])
+ for string in lines:
+ log_func(string)
+
+ if res_data and len(res_data) >= BLOCK_SIZE:
+ log_func(res_data)
+ res_data = None
+
+ return res_data
+
+
+LEN_OF_NAME_PART = 50
+LEN_OF_SIZE_PART = 15
+
+
+def log_scp(filename, size, send):
+ if size != send:
+ return
+ unit = " B"
+ if size > 1024:
+ size /= 1024
+ unit = " KB"
+ if size > 1024:
+ size /= 1024
+ unit = " MB"
+
+ name_part = 'SCP: ' + filename + ' '
+ size_part = ' ' + str(size) + unit + ' 100%'
+ if len(name_part) <= LEN_OF_NAME_PART:
+ LD(name_part.ljust(LEN_OF_NAME_PART, '.') + size_part.rjust(LEN_OF_SIZE_PART, '.'))
+ else:
+ LD(name_part)
+ LD(" ".ljust(LEN_OF_NAME_PART, '.') + size_part.rjust(LEN_OF_SIZE_PART, '.'))
+
+
+class DaisyServer(object):
+ def __init__(self, name, address, password, remote_dir, bin_file, adapter):
+ self.name = name
+ self.address = address
+ self.password = password
+ self.remote_dir = remote_dir
+ self.bin_file = bin_file
+ self.adapter = adapter
+ self.ssh_client = None
+
+ def connect(self):
+ LI('Try to connect to Daisy Server ...')
+ self.ssh_client = paramiko.SSHClient()
+ self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ count = 0
+ MAX_COUNT = 120
+ while count < MAX_COUNT:
+ try:
+ self.ssh_client.connect(hostname=self.address,
+ username='root',
+ password=self.password,
+ timeout=TIMEOUT)
+ except (paramiko.ssh_exception.SSHException,
+ paramiko.ssh_exception.NoValidConnectionsError):
+ count += 1
+ LD('Attempted SSH connection %d time(s)' % count)
+ time.sleep(2)
+ else:
+ break
+ if count >= MAX_COUNT:
+ err_exit('SSH connect to Daisy Server failed')
+
+ LI('SSH connection established')
+ LI('Try ssh_run: ls -al')
+ self.ssh_run('ls -al', check=True)
+
+ def close(self):
+ self.ssh_client.close()
+
+ def ssh_exec_cmd(self, cmd):
+ stdin, stdout, stderr = self.ssh_client.exec_command(cmd, timeout=TIMEOUT)
+ response = stdout.read().strip()
+ error = stderr.read().strip()
+
+ if error:
+ self.close()
+            err_exit('SSH client error occurred: %s' % error)
+ else:
+ return response
+
+ def ssh_run(self, cmd, check=False, exit_msg='Ssh_run failed'):
+ transport = self.ssh_client.get_transport()
+ transport.set_keepalive(1)
+ session = transport.open_session()
+ res_data = None
+ session.exec_command(cmd)
+ while True:
+ if session.recv_ready():
+ data = session.recv(BLOCK_SIZE)
+ while data:
+ res_data = log_from_stream(res_data, data, LI)
+ data = session.recv(BLOCK_SIZE)
+ if res_data:
+ LI(res_data)
+ res_data = None
+
+ if session.recv_stderr_ready():
+ data = session.recv_stderr(BLOCK_SIZE)
+ while data:
+ res_data = log_from_stream(res_data, data, LW)
+ data = session.recv_stderr(BLOCK_SIZE)
+ if res_data:
+ LW(res_data)
+ res_data = None
+ if session.exit_status_ready():
+ break
+
+ status = session.recv_exit_status()
+ if check and status:
+ err_exit(exit_msg)
+
+ return status
+
+ def scp_get(self, remote, local='.'):
+ scp_client = scp.SCPClient(self.ssh_client.get_transport(),
+ progress=log_scp,
+ socket_timeout=TIMEOUT)
+ scp_client.get(remote, local_path=local, recursive=True)
+
+ def scp_put(self, local, remote='.'):
+ scp_client = scp.SCPClient(self.ssh_client.get_transport(),
+ progress=log_scp,
+ socket_timeout=TIMEOUT)
+ scp_client.put(local, remote_path=remote, recursive=True)
+
+ def create_dir(self, remote_dir):
+ cmd = 'mkdir -p %s' % remote_dir
+ self.ssh_exec_cmd(cmd)
+
+ def delete_dir(self, remote_dir):
+ cmd = 'if [[ -f {DIR} || -d {DIR} ]]; then rm -fr {DIR}; fi'.format(DIR=remote_dir)
+ self.ssh_exec_cmd(cmd)
+
+ def prepare_files(self):
+ self.delete_dir(self.remote_dir)
+ LI('Copy WORKSPACE directory to Daisy Server')
+ self.scp_put(WORKSPACE, self.remote_dir)
+ time.sleep(2)
+ LI('Copy finished')
+
+ self.create_dir('/home/daisy_install')
+ LI('Write Daisy Server address into daisy.conf')
+ update_config(path_join(WORKSPACE, 'deploy/daisy.conf'),
+ 'daisy_management_ip',
+ self.address,
+ section='DEFAULT')
+ LI('Copy daisy.conf to Daisy Server')
+ self.scp_put(path_join(WORKSPACE, 'deploy/daisy.conf'), '/home/daisy_install/')
+
+ if os.path.dirname(os.path.abspath(self.bin_file)) != WORKSPACE:
+ LI('Copy opnfv.bin to Daisy Server')
+ self.scp_put(self.bin_file, path_join(self.remote_dir, 'opnfv.bin'))
+
+ def install_daisy(self):
+ self.prepare_files()
+ LI('Begin to install Daisy')
+ status = self.ssh_run('%s install' % path_join(self.remote_dir, 'opnfv.bin'))
+ log_bar('Daisy installation completed ! status = %s' % status)
+
+ def prepare_configurations(self):
+ if self.adapter != 'libvirt':
+ return
+ LI('Prepare some configuration files')
+ cmd = 'bash {script} -n {net_file}'.format(
+ script=path_join(self.remote_dir, 'deploy/prepare.sh'),
+ net_file=path_join(self.remote_dir, 'network.yml'))
+ self.ssh_run(cmd)
+
+ def prepare_cluster(self, deploy_file, net_file):
+ LI('Copy cluster configuration files to Daisy Server')
+ self.scp_put(deploy_file, path_join(self.remote_dir, 'deploy.yml'))
+ self.scp_put(net_file, path_join(self.remote_dir, 'network.yml'))
+
+ self.prepare_configurations()
+
+ LI('Prepare cluster and PXE')
+ cmd = "python {script} --dha {deploy_file} --network {net_file} --cluster \'yes\'".format(
+ script=path_join(self.remote_dir, 'deploy/tempest.py'),
+ deploy_file=path_join(self.remote_dir, 'deploy.yml'),
+ net_file=path_join(self.remote_dir, 'network.yml'))
+ self.ssh_run(cmd, check=True)
+
+ def prepare_host_and_pxe(self):
+ LI('Prepare host and PXE')
+ cmd = "python {script} --dha {deploy_file} --network {net_file} --host \'yes\' --isbare {is_bare}".format(
+ script=path_join(self.remote_dir, 'deploy/tempest.py'),
+ deploy_file=path_join(self.remote_dir, 'deploy.yml'),
+ net_file=path_join(self.remote_dir, 'network.yml'),
+ is_bare=1 if self.adapter == 'ipmi' else 0)
+ self.ssh_run(cmd, check=True)
+
+ def install_virtual_nodes(self):
+ LI('Daisy install virtual nodes')
+ cmd = "python {script} --dha {deploy_file} --network {net_file} --install \'yes\'".format(
+ script=path_join(self.remote_dir, 'deploy/tempest.py'),
+ deploy_file=path_join(self.remote_dir, 'deploy.yml'),
+ net_file=path_join(self.remote_dir, 'network.yml'))
+ self.ssh_run(cmd, check=True)
+
+ def check_os_installation(self, nodes_num):
+ LI('Check Operating System installation progress')
+ cmd = '{script} -d {is_bare} -n {nodes_num}'.format(
+ script=path_join(self.remote_dir, 'deploy/check_os_progress.sh'),
+ is_bare=1 if self.adapter == 'ipmi' else 0,
+ nodes_num=nodes_num)
+ self.ssh_run(cmd, check=True)
+
+ def check_openstack_installation(self, nodes_num):
+ LI('Check OpenStack installation progress')
+ cmd = '{script} -n {nodes_num}'.format(
+ script=path_join(self.remote_dir, 'deploy/check_openstack_progress.sh'),
+ nodes_num=nodes_num)
+ self.ssh_run(cmd, check=True)
+
+ def post_deploy(self):
+ LI('Post deploy ...')
+ cmd = 'bash {script} -n {net_file}'.format(
+ script=path_join(self.remote_dir, 'deploy/post.sh'),
+ net_file=path_join(self.remote_dir, 'network.yml'))
+ self.ssh_run(cmd, check=False)
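
Illustrative usage (a sketch, not part of the committed files): DaisyServer is normally driven by environment.py further down in this change; its lifecycle, with placeholder address/password/paths taken from the defaults in deploy.py, looks roughly like this:

    # Minimal DaisyServer sketch (placeholder values; real ones come from deploy.yml).
    from daisy_server import DaisyServer

    server = DaisyServer(name='daisy',
                         address='10.20.11.2',       # deploy.py default daisy_ip
                         password='r00tme',          # deploy.py default daisy_passwd
                         remote_dir='/home/daisy',   # deploy.py -rdir default
                         bin_file='/path/to/opnfv.bin',
                         adapter='libvirt')
    server.connect()                     # retries SSH until the server VM answers
    server.install_daisy()               # scp the workspace and run 'opnfv.bin install'
    server.prepare_cluster('deploy.yml', 'network.yml')
    server.close()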
diff --git a/deploy/deploy.py b/deploy/deploy.py
new file mode 100644
index 00000000..23464b5a
--- /dev/null
+++ b/deploy/deploy.py
@@ -0,0 +1,212 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##############################################################################
+# TODO:
+# [ ] 1. specify VM templates (Server, Controller & Compute) in deploy.yml
+# [ ] 2. specify network templates in deploy.yml
+# [ ] 3. specify adapter(ipmi, libvirt) in deploy.yml
+# [ ] 4. get ipmi user/password from PDF (Pod Descriptor File)
+# [ ] 5. get pxe bridge from jjb
+# [ ] 6. enlarge the vm size of Controller & Compute in deploy.yml
+##############################################################################
+
+import argparse
+import yaml
+
+from utils import (
+ WORKSPACE,
+ save_log_to_file,
+ LI,
+ log_bar,
+ path_join,
+ check_sudo_privilege,
+ check_file_exists,
+ make_file_executable,
+ confirm_dir_exists
+)
+
+from environment import (
+ DaisyEnvironment,
+)
+
+
+class DaisyDeployment(object):
+ def __init__(self, lab_name, pod_name, deploy_file, net_file, bin_file,
+ daisy_only, cleanup_only, remote_dir, work_dir, storage_dir,
+ pxe_bridge, deploy_log):
+ self.lab_name = lab_name
+ self.pod_name = pod_name
+
+ self.deploy_file = deploy_file
+ with open(deploy_file) as yaml_file:
+ self.deploy_struct = yaml.safe_load(yaml_file)
+
+ if not cleanup_only:
+ self.net_file = net_file
+ with open(net_file) as yaml_file:
+ self.net_struct = yaml.safe_load(yaml_file)
+ else:
+ self.net_struct = None
+
+ self.bin_file = bin_file
+ self.daisy_only = daisy_only
+ self.cleanup_only = cleanup_only
+ self.remote_dir = remote_dir
+ self.work_dir = work_dir
+ self.storage_dir = storage_dir
+ self.pxe_bridge = pxe_bridge
+ self.deploy_log = deploy_log
+
+ self.adapter = self._get_adapter_info()
+ LI('The adapter is %s' % self.adapter)
+
+ # TODO: modify the jjb code to provide bridge name
+ if self.adapter == 'libvirt':
+ self.pxe_bridge = 'daisy1'
+ else:
+ self.pxe_bridge = 'br7'
+
+ self.daisy_server_info = self._get_daisy_server_info()
+
+ self.daisy_env = DaisyEnvironment(self.deploy_struct,
+ self.net_struct,
+ self.adapter,
+ self.pxe_bridge,
+ self.daisy_server_info,
+ self.work_dir,
+ self.storage_dir)
+
+ def _get_adapter_info(self):
+ # TODO: specify the adapter info in deploy.yml
+ if 'adapter' in self.deploy_struct:
+ return self.deploy_struct['adapter']
+ elif self.pod_name and 'virtual' in self.pod_name:
+ return 'libvirt'
+ else:
+ return 'ipmi'
+
+ def _get_daisy_server_info(self):
+ address = self.deploy_struct.get('daisy_ip', '10.20.11.2')
+ gateway = self.deploy_struct.get('daisy_gateway', '10.20.11.1')
+ password = self.deploy_struct.get('daisy_passwd', 'r00tme')
+        disk_size = self.deploy_struct.get('disks', {}).get('daisy', 50)
+ # TODO: get VM name of daisy server from deploy.yml or vm template
+ name = 'daisy'
+ image = path_join(self.storage_dir, name + '.qcow2')
+
+ return {'name': name,
+ 'image': image,
+ 'address': address,
+ 'gateway': gateway,
+ 'password': password,
+ 'disk_size': disk_size}
+
+ def run(self):
+ self.daisy_env.delete_old_environment()
+ if self.cleanup_only:
+ return
+ self.daisy_env.create_daisy_server()
+ if self.daisy_only:
+            log_bar('Daisy Server created successfully!')
+ return
+ self.daisy_env.install_daisy(self.remote_dir, self.bin_file)
+ self.daisy_env.deploy(self.deploy_file, self.net_file)
+        log_bar('Daisy deployed successfully!')
+
+
+def config_arg_parser():
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('-lab', dest='lab_name', action='store', nargs='?',
+ default=None,
+ help='Lab Name')
+ parser.add_argument('-pod', dest='pod_name', action='store', nargs='?',
+ default=None,
+ help='Pod Name')
+
+ parser.add_argument('-bin', dest='bin_file', action='store', nargs='?',
+ default=path_join(WORKSPACE, 'opnfv.bin'),
+ help='OPNFV Daisy BIN File')
+
+ parser.add_argument('-do', dest='daisy_only', action='store_true',
+ default=False,
+ help='Install Daisy Server only')
+ parser.add_argument('-co', dest='cleanup_only', action='store_true',
+ default=False,
+ help='Cleanup VMs and Virtual Networks')
+ # parser.add_argument('-nd', dest='no_daisy', action='store_true',
+ # default=False,
+ # help='Do not install Daisy Server when it exists')
+
+ parser.add_argument('-rdir', dest='remote_dir', action='store', nargs='?',
+ default='/home/daisy',
+ help='Code directory on Daisy Server')
+
+ parser.add_argument('-wdir', dest='work_dir', action='store', nargs='?',
+ default='/tmp/workdir',
+ help='Temporary working directory')
+ parser.add_argument('-sdir', dest='storage_dir', action='store', nargs='?',
+ default='/home/qemu/vms',
+ help='Storage directory for VM images')
+ parser.add_argument('-B', dest='pxe_bridge', action='store', nargs='?',
+ default='pxebr',
+ help='Linux Bridge for booting up the Daisy Server VM '
+ '[default: pxebr]')
+ parser.add_argument('-log', dest='deploy_log', action='store', nargs='?',
+ default=path_join(WORKSPACE, 'deploy.log'),
+ help='Path and name of the deployment log file')
+ return parser
+
+
+def parse_arguments():
+ parser = config_arg_parser()
+ args = parser.parse_args()
+
+ save_log_to_file(args.deploy_log)
+ LI(args)
+
+ conf_base_dir = path_join(WORKSPACE, 'labs', args.lab_name, args.pod_name)
+ deploy_file = path_join(conf_base_dir, 'daisy/config/deploy.yml')
+ net_file = path_join(conf_base_dir, 'daisy/config/network.yml')
+
+ check_file_exists(deploy_file)
+ if not args.cleanup_only:
+ check_file_exists(net_file)
+ make_file_executable(args.bin_file)
+
+ confirm_dir_exists(args.work_dir)
+ confirm_dir_exists(args.storage_dir)
+
+ kwargs = {
+ 'lab_name': args.lab_name,
+ 'pod_name': args.pod_name,
+ 'deploy_file': deploy_file,
+ 'net_file': net_file,
+ 'bin_file': args.bin_file,
+ 'daisy_only': args.daisy_only,
+ 'cleanup_only': args.cleanup_only,
+ 'remote_dir': args.remote_dir,
+ 'work_dir': args.work_dir,
+ 'storage_dir': args.storage_dir,
+ 'pxe_bridge': args.pxe_bridge,
+ 'deploy_log': args.deploy_log
+ }
+ return kwargs
+
+
+def main():
+ check_sudo_privilege()
+ kwargs = parse_arguments()
+ deploy = DaisyDeployment(**kwargs)
+ deploy.run()
+
+
+if __name__ == '__main__':
+ main()
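
Illustrative usage (a sketch, not part of the committed files): deploy.py is meant to be run as root from the repository workspace, e.g. something like 'sudo python deploy/deploy.py -lab <lab> -pod virtual1 -bin opnfv.bin' where the lab/pod names are placeholders that must exist under labs/<lab>/<pod>/daisy/config/. The equivalent programmatic call, with the same placeholder values, is:

    # Programmatic equivalent of a command-line run (placeholder values).
    from deploy import DaisyDeployment

    deployment = DaisyDeployment(
        lab_name='some-lab', pod_name='virtual1',
        deploy_file='labs/some-lab/virtual1/daisy/config/deploy.yml',
        net_file='labs/some-lab/virtual1/daisy/config/network.yml',
        bin_file='opnfv.bin',
        daisy_only=False, cleanup_only=False,
        remote_dir='/home/daisy',
        work_dir='/tmp/workdir', storage_dir='/home/qemu/vms',
        pxe_bridge='pxebr',                 # overridden to 'daisy1' for libvirt pods
        deploy_log='deploy.log')
    deployment.run()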
diff --git a/deploy/environment.py b/deploy/environment.py
new file mode 100644
index 00000000..088e5008
--- /dev/null
+++ b/deploy/environment.py
@@ -0,0 +1,262 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import shutil
+import time
+
+from daisy_server import (
+ DaisyServer
+)
+from libvirt_utils import (
+ create_virtual_disk,
+ create_vm,
+ reboot_vm,
+ delete_vm_and_disk,
+ create_virtual_network,
+ delete_virtual_network
+)
+from utils import (
+ WORKSPACE,
+ LI,
+ LW,
+ err_exit,
+ run_shell,
+ path_join,
+ ipmi_reboot_node,
+)
+
+CREATE_QCOW2_PATH = path_join(WORKSPACE, 'tools')
+
+VMDEPLOY_DAISY_SERVER_NET = path_join(WORKSPACE, 'templates/virtual_environment/networks/daisy.xml')
+VMDEPLOY_TARGET_NODE_NET = path_join(WORKSPACE, 'templates/virtual_environment/networks/os-all_in_one.xml')
+VMDEPLOY_DAISY_SERVER_VM = path_join(WORKSPACE, 'templates/virtual_environment/vms/daisy.xml')
+
+BMDEPLOY_DAISY_SERVER_VM = path_join(WORKSPACE, 'templates/physical_environment/vms/daisy.xml')
+
+ALL_IN_ONE_TEMPLATE = path_join(WORKSPACE, 'templates/virtual_environment/vms/all_in_one.xml')
+CONTROLLER_TEMPLATE = path_join(WORKSPACE, 'templates/virtual_environment/vms/controller01.xml')
+COMPUTE_TEMPLATE = path_join(WORKSPACE, 'templates/virtual_environment/vms/computer01.xml')
+VIRT_NET_TEMPLATE_PATH = path_join(WORKSPACE, 'templates/virtual_environment/networks')
+
+
+class DaisyEnvironment(object):
+ def __new__(cls, deploy_struct, net_struct, adapter, pxe_bridge,
+ daisy_server_info, work_dir, storage_dir):
+ if adapter == 'libvirt':
+ return VirtualEnvironment(deploy_struct, net_struct,
+ adapter, pxe_bridge,
+ daisy_server_info, work_dir, storage_dir)
+ else:
+ return BareMetalEnvironment(deploy_struct, net_struct,
+ adapter, pxe_bridge,
+ daisy_server_info, work_dir, storage_dir)
+
+
+class DaisyEnvironmentBase(object):
+ def __init__(self, deploy_struct, net_struct, adapter, pxe_bridge,
+ daisy_server_info, work_dir, storage_dir):
+ self.deploy_struct = deploy_struct
+ self.net_struct = net_struct
+ self.adapter = adapter
+ self.pxe_bridge = pxe_bridge
+ self.work_dir = work_dir
+ self.storage_dir = storage_dir
+ self.daisy_server_info = daisy_server_info
+ self.server = None
+ LI('Daisy Environment Initialized')
+
+ def delete_daisy_server(self):
+ delete_vm_and_disk(self.daisy_server_info['name'])
+
+ def create_daisy_server_image(self):
+ LI('Begin to create Daisy Server image')
+ script = path_join(CREATE_QCOW2_PATH, 'daisy-img-modify.sh')
+ sub_script = path_join(CREATE_QCOW2_PATH, 'centos-img-modify.sh')
+ cmd = '{script} -c {sub_script} -a {address} -g {gateway} -s {disk_size}'.format(
+ script=script,
+ sub_script=sub_script,
+ address=self.daisy_server_info['address'],
+ gateway=self.daisy_server_info['gateway'],
+ disk_size=self.daisy_server_info['disk_size'])
+ LI('Command is: ')
+ LI(' %s' % cmd)
+ # status, output = commands.getstatusoutput(cmd)
+ status = run_shell(cmd)
+ if status:
+ err_exit('Failed to create Daisy Server image')
+ if os.access(self.daisy_server_info['image'], os.R_OK):
+ os.remove(self.daisy_server_info['image'])
+ image = path_join(self.work_dir, 'daisy/centos7.qcow2')
+ shutil.move(image, self.daisy_server_info['image'])
+ LI('Daisy Server image is created %s' % self.daisy_server_info['image'])
+
+ def install_daisy(self, remote_dir, bin_file):
+ self.server = DaisyServer(self.daisy_server_info['name'],
+ self.daisy_server_info['address'],
+ self.daisy_server_info['password'],
+ remote_dir,
+ bin_file,
+ self.adapter)
+ self.server.connect()
+ self.server.install_daisy()
+
+
+class BareMetalEnvironment(DaisyEnvironmentBase):
+ def delete_old_environment(self):
+ LW('Begin to delete old environment !')
+ self.delete_daisy_server()
+ LW('Old environment cleanup finished !')
+
+ def create_daisy_server(self):
+ self.create_daisy_server_image()
+ self.create_daisy_server_vm()
+
+ def create_daisy_server_vm(self):
+ # TODO: refactor the structure of deploy.yml, add VM template param of Daisy Server
+ # add self.pxe_bridge into the vm template
+ if 'template' in self.deploy_struct:
+ # get VM name of Daisy Server from the template
+ template = self.deploy_struct['template']
+ else:
+ template = BMDEPLOY_DAISY_SERVER_VM
+
+ create_vm(template,
+ name=self.daisy_server_info['name'],
+ disk_file=self.daisy_server_info['image'])
+
+ def reboot_nodes(self, boot_dev=None):
+ # TODO: add ipmi info into deploy.yml, or read from PDF
+ address = 106
+        for node in self.deploy_struct['hosts']:
+            if address > 110:
+                err_exit('the ipmi address exceeds the range 106~110')
+            node['ipmiIp'] = '192.168.1.' + str(address)
+            address += 1
+ node['ipmiUser'] = 'zteroot'
+ node['ipmiPass'] = 'superuser'
+ ipmi_reboot_node(node['ipmiIp'], node['ipmiUser'],
+ node['ipmiPass'], boot_source=boot_dev)
+
+ def deploy(self, deploy_file, net_file):
+ self.server.prepare_cluster(deploy_file, net_file)
+ self.reboot_nodes(boot_dev='pxe')
+ self.server.prepare_host_and_pxe()
+
+ LI('The hosts number is %d' % len(self.deploy_struct['hosts']))
+ self.server.check_os_installation(len(self.deploy_struct['hosts']))
+ time.sleep(10)
+ self.server.check_openstack_installation(len(self.deploy_struct['hosts']))
+
+
+class VirtualEnvironment(DaisyEnvironmentBase):
+ def create_daisy_server_network(self):
+ net_name = create_virtual_network(VMDEPLOY_DAISY_SERVER_NET)
+ if net_name != self.pxe_bridge:
+ self.delete_virtual_network(VMDEPLOY_DAISY_SERVER_NET)
+ err_exit('Network name %s is wrong, pxe bridge is %s' % (net_name, self.pxe_bridge))
+
+ def create_daisy_server_vm(self):
+ # TODO: refactor the structure of deploy.yml, add VM template param of Daisy Server
+ # add self.pxe_bridge into the vm template
+ if 'template' in self.deploy_struct:
+ # get VM name of Daisy Server from the template
+ template = self.deploy_struct['template']
+ else:
+ template = VMDEPLOY_DAISY_SERVER_VM
+
+ create_vm(template,
+ name=self.daisy_server_info['name'],
+ disk_file=self.daisy_server_info['image'])
+
+ def create_daisy_server(self):
+ self.create_daisy_server_image()
+ self.create_daisy_server_network()
+ self.create_daisy_server_vm()
+
+ def create_virtual_node(self, node):
+ name = node['name']
+ roles = node['roles']
+        controller_size = self.deploy_struct.get('disks', {}).get('controller', 200)
+        compute_size = self.deploy_struct.get('disks', {}).get('compute', 200)
+ LI('Begin to create virtual node %s, roles %s' % (name, roles))
+
+ if 'CONTROLLER_LB' in roles:
+ size = controller_size
+ if 'COMPUTER' in roles:
+ size = compute_size if compute_size > controller_size else controller_size
+ template = ALL_IN_ONE_TEMPLATE
+ else:
+ template = CONTROLLER_TEMPLATE
+ else:
+ size = compute_size
+ template = COMPUTE_TEMPLATE
+
+ if 'template' in node:
+ template = node['template']
+ disk_file = path_join(self.storage_dir, name + '.qcow2')
+ # TODO: modify the sizes in deploy.yml to more than 100G
+ if size < 200:
+ size = 200
+ create_virtual_disk(disk_file, size)
+ create_vm(template, name, disk_file)
+
+ def create_nodes(self):
+ # TODO: support virtNetTemplatePath in deploy.yml
+ # and multi interfaces, not only all-in-one
+ create_virtual_network(VMDEPLOY_TARGET_NODE_NET)
+ for node in self.deploy_struct['hosts']:
+ self.create_virtual_node(node)
+ time.sleep(20)
+
+ def reboot_nodes(self, boot_devs=None):
+ for node in self.deploy_struct['hosts']:
+ reboot_vm(node['name'], boot_devs=boot_devs)
+
+ def delete_nodes(self):
+ for host in self.deploy_struct['hosts']:
+ delete_vm_and_disk(host['name'])
+
+ def delete_networks(self):
+ if 'virtNetTemplatePath' in self.deploy_struct:
+ path = self.deploy_struct['virtNetTemplatePath']
+ else:
+ path = VIRT_NET_TEMPLATE_PATH
+
+ if not os.path.isdir(path):
+ LW('Cannot find the virtual network template path %s' % path)
+ return
+ for f in os.listdir(path):
+ f = path_join(path, f)
+ if os.path.isfile(f):
+ delete_virtual_network(f)
+
+ def delete_old_environment(self):
+ LW('Begin to delete old environment !')
+ self.delete_nodes()
+ self.delete_daisy_server()
+ self.delete_networks()
+ LW('Old environment cleanup finished !')
+
+ def deploy(self, deploy_file, net_file):
+ self.server.prepare_cluster(deploy_file, net_file)
+ self.create_nodes()
+ self.server.prepare_host_and_pxe()
+        LI('Begin Daisy virtual deployment of OS and OpenStack')
+        self.reboot_nodes()
+        LI('Sleep 20s to wait for the VM(s) to start up')
+ time.sleep(20)
+ self.server.install_virtual_nodes()
+
+ LI('The hosts number is %d' % len(self.deploy_struct['hosts']))
+ self.server.check_os_installation(len(self.deploy_struct['hosts']))
+ time.sleep(10)
+ self.reboot_nodes(boot_devs=['hd'])
+ self.server.check_openstack_installation(len(self.deploy_struct['hosts']))
+ self.server.post_deploy()
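
Illustrative usage (a sketch, not part of the committed files): DaisyEnvironment is a small factory; __new__ returns a VirtualEnvironment for the libvirt adapter and a BareMetalEnvironment otherwise, so deploy.py only deals with the common interface. With abbreviated placeholder structures:

    # Factory usage sketch (abbreviated placeholder structures).
    from environment import DaisyEnvironment

    server_info = {'name': 'daisy', 'image': '/home/qemu/vms/daisy.qcow2',
                   'address': '10.20.11.2', 'gateway': '10.20.11.1',
                   'password': 'r00tme', 'disk_size': 50}
    hosts = [{'name': 'all_in_one', 'roles': ['CONTROLLER_LB', 'COMPUTER']}]

    env = DaisyEnvironment({'hosts': hosts}, {}, 'libvirt', 'daisy1',
                           server_info, '/tmp/workdir', '/home/qemu/vms')
    env.delete_old_environment()                   # VMs, disks and virtual networks
    env.create_daisy_server()                      # qcow2 image + pxe network + daisy VM
    env.install_daisy('/home/daisy', 'opnfv.bin')  # wraps DaisyServer
    env.deploy('deploy.yml', 'network.yml')        # nodes, PXE, OS and OpenStack checks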
diff --git a/deploy/libvirt_utils.py b/deploy/libvirt_utils.py
new file mode 100644
index 00000000..cd203784
--- /dev/null
+++ b/deploy/libvirt_utils.py
@@ -0,0 +1,211 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import commands
+import libvirt
+import os
+import xml.etree.ElementTree as ET
+
+from utils import (
+ LI,
+ LE,
+ LW,
+ WORKSPACE,
+ err_exit,
+ path_join
+)
+
+
+def get_nets_name(root):
+ nets = []
+ for interface in root.findall('./devices/interface'):
+ if 'type' in interface.attrib and interface.attrib['type'] == 'network':
+ for source in interface.iterfind('source'):
+ if 'network' in source.attrib:
+ nets.append(source.attrib['network'])
+ return nets
+
+
+def modify_vm_boot_order(root, boot_devs):
+ os_elem = root.find('os')
+ for boot_elem in os_elem.findall('boot'):
+ os_elem.remove(boot_elem)
+ for boot_dev in boot_devs:
+ boot_elem = ET.Element('boot', attrib={'dev': boot_dev})
+ os_elem.append(boot_elem)
+ return root
+
+
+def modify_vm_name(root, vm_name):
+ name_elem = root.find('./name')
+ name_elem.text = vm_name
+
+
+def modify_vm_disk_file(root, disk_file):
+ for disk in root.findall('./devices/disk'):
+ if 'device' in disk.attrib and disk.attrib['device'] == 'disk':
+ for source in disk.iterfind('source'):
+ if 'file' in source.attrib:
+ source.attrib['file'] = disk_file
+ break
+
+
+def create_virtual_disk(disk_file, size):
+ LI('Create virtual disk file %s size %d GB' % (disk_file, size))
+ cmd = 'qemu-img create -f qcow2 {disk_file} {size}G'.format(
+ disk_file=disk_file, size=size)
+ status, output = commands.getstatusoutput(cmd)
+ if status:
+ LE(output)
+            err_exit('Failed to create qemu image!')
+
+
+def create_vm(template, name=None, disk_file=None):
+ LI('Begin to create VM %s' % template)
+
+ if name or disk_file:
+ tree = ET.ElementTree(file=template)
+ root = tree.getroot()
+ if name:
+ modify_vm_name(root, name)
+ if disk_file:
+ modify_vm_disk_file(root, disk_file)
+
+ temp_file = path_join(WORKSPACE, 'tmp.xml')
+ tree.write(temp_file)
+ output = commands.getoutput('cat %s' % temp_file)
+ os.remove(temp_file)
+ else:
+ output = commands.getoutput('cat %s' % template)
+
+ conn = libvirt.open('qemu:///system')
+ domain = conn.defineXML(output)
+ if domain is None:
+ err_exit('Failed to define VM %s' % template)
+ if domain.create() < 0:
+ err_exit('Failed to start VM %s' % template)
+ domain.setAutostart(1)
+
+ LI('VM %s is started' % domain.name())
+ return
+
+
+def reboot_vm(vm_name, boot_devs=None):
+ LI('Begin to reboot VM %s', vm_name)
+ conn = libvirt.open('qemu:///system')
+ try:
+ vm = conn.lookupByName(vm_name)
+ except libvirt.libvirtError as e:
+ LE(e)
+ err_exit('VM %s is not found: ' % vm_name)
+
+ if boot_devs:
+ if vm.isActive():
+ vm.destroy()
+ LI('Destroy VM %s' % vm_name)
+
+ # root = ET.fromstring(vm.XMLDesc())
+ temp_file = path_join(WORKSPACE, 'tmp.xml')
+ commands.getoutput('virsh dumpxml %s > %s' % (vm_name, temp_file))
+ tree = ET.parse(temp_file)
+ root = tree.getroot()
+ LI('Modify the boot order %s' % boot_devs)
+ modify_vm_boot_order(root, boot_devs)
+ tree.write(temp_file)
+
+ LI('Re-define and start the VM %s' % vm_name)
+ vm.undefine()
+ vm = conn.defineXML(commands.getoutput('cat %s' % temp_file))
+ vm.create()
+ vm.setAutostart(1)
+ else:
+ vm.reset()
+
+ conn.close()
+
+
+def get_disk_file(root):
+ disks = []
+ for disk in root.findall('./devices/disk'):
+ if 'device' in disk.attrib and disk.attrib['device'] == 'disk':
+ for source in disk.iterfind('source'):
+ if 'file' in source.attrib:
+ disks.append(source.attrib['file'])
+ return disks
+
+
+def delete_vm_and_disk(vm_name):
+ LI('Begin to delete VM %s', vm_name)
+ conn = libvirt.open('qemu:///system')
+ vm = None
+ for item in conn.listAllDomains():
+ if vm_name == item.name():
+ vm = item
+ break
+ if vm is None:
+ conn.close()
+ LI('VM %s is not found' % vm_name)
+ return
+
+ output = vm.XMLDesc()
+ root = ET.fromstring(output)
+
+ if vm.isActive():
+ vm.destroy()
+ LI('Destroy VM %s' % vm.name())
+ vm.undefine()
+
+ for disk_file in get_disk_file(root):
+ if os.path.isfile(disk_file):
+ status, output = commands.getstatusoutput('rm -f %s' % disk_file)
+ if status:
+ LW('Failed to delete the VM disk file %s' % disk_file)
+
+ conn.close()
+ LI('VM %s is removed' % vm_name)
+
+
+def create_virtual_network(template):
+ LI('Begin to create virtual network %s' % template)
+ output = commands.getoutput('cat %s' % template)
+ conn = libvirt.open('qemu:///system')
+ network = conn.networkDefineXML(output)
+ if network is None:
+ err_exit('Failed to define a virtual network %s' % template)
+
+ network.create() # set the network active
+ network.setAutostart(1)
+ conn.close()
+ LI('Virtual network %s is created' % network.name())
+ return network.name()
+
+
+def delete_virtual_network(network_xml):
+ LI('Begin to find and delete network %s' % network_xml)
+ tree = ET.ElementTree(file=network_xml)
+ root = tree.getroot()
+ names = root.findall('./name')
+ assert len(names) == 1
+ name = names[0].text
+
+ result = 0
+ conn = libvirt.open('qemu:///system')
+
+ for net in conn.listAllNetworks():
+ if name == net.name():
+ if net.isActive():
+ net.destroy()
+ LI('Network %s is destroyed' % name)
+ net.undefine()
+ LI('Network %s is deleted' % name)
+ result = 1
+ break
+ conn.close()
+ if not result:
+ LI('Network %s is not found' % name)
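
Illustrative usage (a sketch, not part of the committed files): these helpers wrap libvirt's define/create/undefine calls around the XML templates shipped under templates/virtual_environment. A minimal create-and-teardown sequence, with placeholder template and disk paths:

    # libvirt helper sketch (template/disk paths are placeholders).
    from libvirt_utils import (create_virtual_network, create_virtual_disk,
                               create_vm, reboot_vm, delete_vm_and_disk,
                               delete_virtual_network)

    create_virtual_network('templates/virtual_environment/networks/daisy.xml')
    create_virtual_disk('/home/qemu/vms/all_in_one.qcow2', 200)        # size in GB
    create_vm('templates/virtual_environment/vms/all_in_one.xml',
              name='all_in_one',
              disk_file='/home/qemu/vms/all_in_one.qcow2')
    reboot_vm('all_in_one', boot_devs=['hd'])      # rewrites <boot dev=.../> then restarts
    delete_vm_and_disk('all_in_one')
    delete_virtual_network('templates/virtual_environment/networks/daisy.xml')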
diff --git a/deploy/utils.py b/deploy/utils.py
new file mode 100644
index 00000000..0c5b1370
--- /dev/null
+++ b/deploy/utils.py
@@ -0,0 +1,138 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import commands
+from configobj import ConfigObj
+import os
+import logging
+import subprocess
+import sys
+
+
+path_join = os.path.join
+CWD = os.getcwd()
+WORKSPACE = os.path.normpath(path_join(os.path.dirname(__file__), '..'))
+BASE = CWD
+
+
+def get_logger():
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+ return logger
+
+
+LOG = get_logger()
+LD = LOG.debug
+LI = LOG.info
+LW = LOG.warn
+LE = LOG.error
+
+
+def save_log_to_file(log_file):
+ with open(log_file, 'w+'):
+ pass
+
+ formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+ handler = logging.FileHandler(log_file, mode='w')
+ handler.setFormatter(formatter)
+ LOG.addHandler(handler)
+
+
+def err_exit(message):
+ LE('%s\n' % message)
+ sys.exit(1)
+
+
+def log_bar(message, log_func=LI):
+ bar = '=' * len(message)
+ log_func(bar)
+ log_func(message)
+ log_func(bar)
+
+
+def check_sudo_privilege():
+ uid = os.getuid()
+ if uid != 0:
+        err_exit('You need to run this script with sudo privileges')
+
+
+def check_file_exists(file_path):
+ if not os.path.dirname(file_path):
+ file_path = os.path.normpath(path_join(BASE, file_path))
+ if not os.access(file_path, os.R_OK):
+ err_exit('File %s not found\n' % file_path)
+
+
+def make_file_executable(file_path):
+ if not os.path.isdir(file_path):
+ file_path = os.path.normpath(path_join(BASE, file_path))
+ if not os.access(file_path, os.R_OK):
+ err_exit('File %s not found\n' % file_path)
+ if not os.access(file_path, os.X_OK):
+ LW('File %s is not executable, chmod it and continue' % file_path)
+ status, output = commands.getstatusoutput('chmod +x %s' % file_path)
+ if status:
+ err_exit('Cannot change the file mode of %s' % file_path)
+
+
+def confirm_dir_exists(dir_path):
+ if not os.path.isdir(dir_path):
+ LI('Creating directory %s' % dir_path)
+ os.makedirs(dir_path)
+
+
+def update_config(conf_file, key, value, section='DEFAULT'):
+ LI('Update_config [ %s : %s ] to file: %s' % (key, value, conf_file))
+ config = ConfigObj(conf_file)
+ config[section][key] = value
+ config.write()
+
+
+def ipmi_reboot_node(host, user, passwd, boot_source=None):
+ prefix = 'ipmitool -I lanplus -H {host} -U {user} -P {passwd} -R 1 '.format(
+ host=host, user=user, passwd=passwd)
+ if boot_source:
+ cmd = prefix + 'chassis bootdev {boot_source}'.format(boot_source=boot_source)
+        LI('IPMI set node %s to boot from %s' % (host, boot_source))
+ status, output = commands.getstatusoutput(cmd)
+ if status:
+ err_exit('IPMI command failed: %s' % output)
+
+ cmd = prefix + 'chassis power reset'
+ LI('IPMI reset node %s' % host)
+ status, output = commands.getstatusoutput(cmd)
+ if status:
+ err_exit('IPMI command failed: %s' % output)
+
+
+def run_shell(cmd, check=False):
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True)
+ while process.poll() is None:
+ LD(process.stdout.readline().strip())
+
+ response, stderr = process.communicate()
+ return_code = process.returncode
+
+ if check:
+ if return_code > 0:
+ stderr = stderr.strip()
+ LE('Failed command: ' + str(cmd))
+ LE('Command returned error: ' + str(stderr))
+ err_exit('Command return code: ' + str(return_code))
+ else:
+ LI('Successful command: ' + str(cmd))
+
+ return return_code
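
Illustrative usage (a sketch, not part of the committed files): utils.py carries the logging and shell helpers shared by the other four modules, for example:

    # Shared helper sketch (paths are placeholders).
    from utils import (WORKSPACE, LI, save_log_to_file, log_bar, run_shell,
                       update_config, confirm_dir_exists, path_join)

    save_log_to_file(path_join(WORKSPACE, 'deploy.log'))   # log to file and stdout
    confirm_dir_exists('/tmp/workdir')
    update_config(path_join(WORKSPACE, 'deploy/daisy.conf'),
                  'daisy_management_ip', '10.20.11.2', section='DEFAULT')
    status = run_shell('ls -al %s' % WORKSPACE)
    log_bar('run_shell returned %s' % status)
    LI('done')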