author     Szilard Cserey <szilard.cserey@ericsson.com>   2015-04-14 15:47:45 +0200
committer  Szilard Cserey <szilard.cserey@ericsson.com>   2015-04-28 10:01:04 +0200
commit     9c2b6848566a0b80bb44f27cca155a240d69f061 (patch)
tree       5081686f09c3c4662f6b807a196a28cb4acab67a /fuel/deploy/cloud_deploy/hardware_adapters
parent     e2b2d46756213fde3bca42a49b04e6a1e8792498 (diff)
Automatic Deployment
- node discovery
- refactoring to support multiple shelves
- configure nodes and interfaces
- provisioning
- deployment
- extending with autodeployment scripts from libvirt prototype
JIRA: [BGS-2] Create Fuel deployment script
Signed-off-by: Szilard Cserey <szilard.cserey@ericsson.com>
Change-Id: Ic48f93594914d5bef6c9de34d87434c7cd567198
Diffstat (limited to 'fuel/deploy/cloud_deploy/hardware_adapters')
7 files changed, 760 insertions, 0 deletions
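
The new dha.py introduced below exposes DeploymentHardwareAdapter as a small factory: given the server type from the deployment configuration it hands back an HP, libvirt, ESXi or Dell adapter that shares a common blade-control interface. The sketch below (not part of the commit) shows roughly how a deployment script is expected to drive it; the management address, credentials and shelf/blade numbers are placeholder values, and the import assumes the hardware_adapters directory is on the Python path.

```python
# Illustrative sketch only; host, credentials and shelf/blade IDs are made up.
from dha import DeploymentHardwareAdapter

# 'libvirt' selects LibvirtAdapter(mgmt_ip, username, password);
# 'hp' would select HpAdapter with the same constructor arguments.
dha = DeploymentHardwareAdapter('libvirt', '10.20.0.1', 'root', 'r00tme')

shelf, blades = 1, [1, 2, 3]
dha.power_off_blades(shelf, blades)       # make sure the nodes are down
dha.set_boot_order_blades(shelf, blades)  # network (PXE) first, then disk
macs = dha.get_blades_mac_addresses(shelf, blades)  # feed node discovery
dha.power_on_blades(shelf, blades)        # boot the nodes for provisioning
```
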
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/dha.py b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
new file mode 100644
index 0000000..2764aeb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
@@ -0,0 +1,61 @@
+from hp.hp_adapter import HpAdapter
+from libvirt.libvirt_adapter import LibvirtAdapter
+
+class DeploymentHardwareAdapter(object):
+    def __new__(cls, server_type, *args):
+        if cls is DeploymentHardwareAdapter:
+            if server_type == 'esxi': return EsxiAdapter(*args)
+            if server_type == 'hp': return HpAdapter(*args)
+            if server_type == 'dell': return DellAdapter(*args)
+            if server_type == 'libvirt': return LibvirtAdapter(*args)
+        return super(DeploymentHardwareAdapter, cls).__new__(cls)
+
+
+class HardwareAdapter(object):
+
+    def power_off_blades(self, shelf, blade_list):
+        raise NotImplementedError
+
+    def power_off_blade(self, shelf, blade):
+        raise NotImplementedError
+
+    def power_on_blades(self, shelf, blade_list):
+        raise NotImplementedError
+
+    def power_on_blade(self, shelf, blade):
+        raise NotImplementedError
+
+    def power_cycle_blade(self):
+        raise NotImplementedError
+
+    def set_boot_order_blades(self, shelf, blade_list):
+        raise NotImplementedError
+
+    def set_boot_order_blade(self, shelf, blade):
+        raise NotImplementedError
+
+    def reset_to_factory_defaults(self):
+        raise NotImplementedError
+
+    def configure_networking(self):
+        raise NotImplementedError
+
+    def get_blade_mac_addresses(self, shelf, blade):
+        raise NotImplementedError
+
+    def get_hardware_info(self, shelf, blade):
+        raise NotImplementedError
+
+
+class EsxiAdapter(HardwareAdapter):
+
+    def __init__(self):
+        self.environment = {1: {1: {'mac': ['00:50:56:8c:05:85']},
+                                2: {'mac': ['00:50:56:8c:21:92']}}}
+
+    def get_blade_mac_addresses(self, shelf, blade):
+        return self.environment[shelf][blade]['mac']
+
+
+class DellAdapter(HardwareAdapter):
+    pass
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
new file mode 100644
index 0000000..916d4dc
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
@@ -0,0 +1,433 @@
+import re
+import time
+from netaddr import EUI, mac_unix
+from cloud import common
+
+from run_oa_command import RunOACommand
+
+LOG = common.LOG
+
+class HpAdapter(object):
+
+    # Exception thrown at any kind of failure to get the requested
+    # information.
+    class NoInfoFoundError(Exception):
+        pass
+
+    # Totally failed to connect so a re-try with other HW should
+    # be done. This exception should never escape this class.
+    class InternalConnectError(Exception):
+        pass
+
+    # Format MAC so leading zeroes are displayed
+    class mac_dhcp(mac_unix):
+        word_fmt = "%.2x"
+
+    def __init__(self, mgmt_ip, username, password):
+        self.mgmt_ip = mgmt_ip
+        self.username = username
+        self.password = password
+        self.oa_error_message = ''
+
+    def get_blade_mac_addresses(self, shelf, blade):
+
+        LOG.debug("Entering: get_mac_addr_hp(%d,%d)" % (shelf, blade))
+        self.oa_error_message = ''
+        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+        LOG.debug("Connect to active OA for shelf %d" % shelf)
+        try:
+            res = oa.connect_to_active()
+        except:
+            raise self.InternalConnectError(oa.error_message)
+        if res is None:
+            raise self.InternalConnectError(oa.error_message)
+        if not oa.connected():
+            raise self.NoInfoFoundError(oa.error_message)
+
+        cmd = ("show server info " + str(blade))
+
+        LOG.debug("Send command to OA: %s" % cmd)
+        try:
+            serverinfo = oa.send_command(cmd)
+        except:
+            raise self.NoInfoFoundError(oa.error_message)
+        finally:
+            oa.close()
+
+        (left, right) = self.find_mac(serverinfo, shelf, blade)
+
+        left = EUI(left, dialect=self.mac_dhcp)
+        right = EUI(right, dialect=self.mac_dhcp)
+        return [str(left), str(right)]
+
+    def get_blades_mac_addresses(self, shelf, blade_list):
+        macs_per_blade_dict = {}
+        LOG.debug("Getting MAC addresses for shelf %s, blades %s"
+                  % (shelf, blade_list))
+        self.oa_error_message = ''
+        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+        LOG.debug("Connect to active OA for shelf %d" % shelf)
+        try:
+            res = oa.connect_to_active()
+        except:
+            raise self.InternalConnectError(oa.error_message)
+        if res is None:
+            raise self.InternalConnectError(oa.error_message)
+        if not oa.connected():
+            raise self.NoInfoFoundError(oa.error_message)
+        try:
+            for blade in blade_list:
+                cmd = ("show server info %s" % blade)
+                LOG.debug("Send command to OA: %s" % cmd)
+                printout = oa.send_command(cmd)
+                left, right = self.find_mac(printout, shelf, blade)
+                left = EUI(left, dialect=self.mac_dhcp)
+                right = EUI(right, dialect=self.mac_dhcp)
+                macs_per_blade_dict[blade] = [str(left), str(right)]
+        except:
+            raise self.NoInfoFoundError(oa.error_message)
+        finally:
+            oa.close()
+        return macs_per_blade_dict
+
+    def get_blade_hardware_info(self, shelf, blade=None):
+        if blade:
+            LOG.debug("Entering: get_hp_info(%d,%d)" % (shelf, blade))
+        else:
+            LOG.debug("Entering: get_hp_info(%d)" % shelf)
+
+        self.oa_error_message = ''
+        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+        LOG.debug("Connect to active OA for shelf %d" % shelf)
+
+        try:
+            res = oa.connect_to_active()
+        except:
+            self.oa_error_message = oa.error_message
+            return None
+        if res is None:
+            self.oa_error_message = oa.error_message
+            return None
+        if not oa.connected():
+            self.oa_error_message = oa.error_message
+            return None
+
+        # If no blade is specified we're done; we know this is an HP at this point
+        if not blade:
+            oa.close()
+            return "HP"
+
+        check = "show server info %d" % blade
+        LOG.debug("Send command to OA: %s" % check)
+        output = oa.send_command("%s" % check)
+        oa.close()
+
+        match = r"Product Name:\s+(.+)\Z"
+        if re.search(match, str(output[:])) is None:
+            self.oa_error_message = ("Blade %d in shelf %d does not exist\n"
+                                     % (blade, shelf))
+            return None
+
+        for line in output:
+            seobj = re.search(match, line)
+            if seobj:
+                return "HP %s" % seobj.group(1)
+        return False
+
+    def power_off_blades(self, shelf, blade_list):
+        return self.set_state(shelf, 'locked', blade_list=blade_list)
+
+    def power_on_blades(self, shelf, blade_list):
+        return self.set_state(shelf, 'unlocked', blade_list=blade_list)
+
+    def set_boot_order_blades(self, shelf, blade_list):
+        return self.set_boot_order(shelf, blade_list=blade_list)
+
+    def power_off_blade(self, shelf, blade):
+        return self.set_state(shelf, 'locked', one_blade=blade)
+
+    def power_on_blade(self, shelf, blade):
+        return self.set_state(shelf, 'unlocked', one_blade=blade)
+
+    def set_boot_order_blade(self, shelf, blade):
+        return self.set_boot_order(shelf, one_blade=blade)
+
+    # Search HP's OA server info for MAC for left and right control
+    def find_mac(self, printout, shelf, blade):
+        left = False
+        right = False
+        for line in printout:
+            if ("No Server Blade Installed" in line or
+                    "Invalid Arguments" in line):
+                raise self.NoInfoFoundError("Blade %d in shelf %d "
+                                            "does not exist." % (blade, shelf))
+            seobj = re.search(r"LOM1:1-a\s+([0-9A-F:]+)", line, re.I)
+            if seobj:
+                left = seobj.group(1)
+            else:
+                seobj = re.search(r"LOM1:2-a\s+([0-9A-F:]+)", line, re.I)
+                if seobj:
+                    right = seobj.group(1)
+            if left and right:
+                return left, right
+        raise self.NoInfoFoundError("Could not find MAC for blade %d "
+                                    "in shelf %d." % (blade, shelf))
+
+    # Do power on or off on all configured blades in shelf
+    # Return None to indicate that no connection to OA succeeded,
+    # Return False to indicate some connection to OA succeeded,
+    # or config error
+    # Return True to indicate that power state successfully updated
+    # state: locked, unlocked
+    def set_state(self, shelf, state, one_blade=None, blade_list=None):
+        if state not in ['locked', 'unlocked']:
+            return None
+
+        if one_blade:
+            LOG.debug("Entering: set_state_hp(%d,%s,%d)" %
+                      (shelf, state, one_blade))
+        else:
+            LOG.debug("Entering: set_state_hp(%d,%s)" % (shelf, state))
+
+        self.oa_error_message = ''
+
+        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+        LOG.debug("Connect to active OA for shelf %d" % shelf)
+
+        try:
+            res = oa.connect_to_active()
+        except:
+            self.oa_error_message = oa.error_message
+            return None
+        if res is None:
+            self.oa_error_message = oa.error_message
+            return None
+        if not oa.connected():
+            self.oa_error_message = oa.error_message
+            return False
+
+        if one_blade:
+            blades = [one_blade]
+        else:
+            blades = sorted(blade_list)
+
+        LOG.debug("Check if blades are present")
+
+        check = "show server list"
+
+        LOG.debug("Send command to OA: %s" % check)
+        output = oa.send_command(check)
+        first = True
+        bladelist = ''
+        for blade in blades:
+            prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
+                              re.MULTILINE)
+            if prog.search(str(output[:])) is not None:
+                oa.close()
+                self.oa_error_message = ("Blade %d in shelf %d "
+                                         % (blade, shelf))
+                if one_blade:
+                    self.oa_error_message += ("does not exist.\n"
+                                              "Set state %s not performed.\n"
+                                              % state)
+                else:
+                    self.oa_error_message += (
+                        "specified but does not exist.\nSet "
+                        "state %s not performed on shelf %d\n"
+                        % (state, shelf))
+                return False
+            if not first:
+                bladelist += ","
+            else:
+                first = False
+            bladelist += str(blade)
+
+        if blade_list:
+            LOG.debug("All blades present")
+
+        # Use leading upper case on On/Off so it can be reused in match
+        extra = ""
+        if state == "locked":
+            powerstate = "Off"
+            extra = "force"
+        else:
+            powerstate = "On"
+
+        cmd = "power%s server %s" % (powerstate, bladelist)
+
+        if extra != "":
+            cmd += " %s" % extra
+
+        LOG.debug("Send command to OA: %s" % cmd)
+
+        try:
+            oa.send_command(cmd)
+        except:
+            self.oa_error_message = oa.error_message
+            oa.close()
+            return False
+
+        # Check that all blades reach the state which can take some time,
+        # so re-try a couple of times
+        LOG.debug("Check if state %s successfully set" % state)
+        recheck = 2
+        while True:
+            LOG.debug("Send command to OA: %s" % check)
+            try:
+                output = oa.send_command(check)
+            except:
+                self.oa_error_message = oa.error_message
+                oa.close()
+                return False
+            for blade in blades:
+                match = (r"\s+" + str(blade) +
+                         r"\s+\w+\s+\w+.\w+.\w+.\w+\s+\w+\s+%s" %
+                         powerstate)
+                prog = re.compile(match, re.MULTILINE)
+                if prog.search(str(output[:])) is None:
+                    recheck -= 1
+                    if recheck >= 0:
+                        # Re-try
+                        time.sleep(3)
+                        break
+                    oa.close()
+                    self.oa_error_message = (
+                        "Could not set state %s on blade %d in shelf %d\n"
+                        % (state, one_blade, shelf))
+                    for line in output:
+                        self.oa_error_message += line
+                    return False
+            else:
+                # state reached for all blades, exit the infinite loop
+                break
+
+        if one_blade:
+            LOG.debug("State %s successfully set on blade %d in shelf %d"
+                      % (state, one_blade, shelf))
+        else:
+            LOG.debug("State %s successfully set on blades %s in shelf %d"
+                      % (state, blade_list, shelf))
+        oa.close()
+        return True
+
+    # Change boot order on all blades in shelf
+    # Return None to indicate that no connection to OA succeeded,
+    # Return False to indicate some connection to OA succeeded,
+    # or config error,
+    # Return True to indicate that boot order successfully changed
+    def set_boot_order(self, shelf, one_blade=None, blade_list=None):
+
+        if one_blade:
+            LOG.debug("Entering: set_bootorder_hp(%d,%d)" % (shelf, one_blade))
+        else:
+            LOG.debug("Entering: set_bootorder_hp(%d)" % shelf)
+
+        self.oa_error_message = ''
+
+        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
+
+        LOG.debug("Connect to active OA for shelf %d" % shelf)
+
+        try:
+            res = oa.connect_to_active()
+        except:
+            self.oa_error_message = oa.error_message
+            return None
+        if res is None:
+            self.oa_error_message = oa.error_message
+            return None
+        if not oa.connected():
+            self.oa_error_message = oa.error_message
+            return False
+
+        if one_blade:
+            blades = [one_blade]
+        else:
+            blades = sorted(blade_list)
+
+        LOG.debug("Check if blades are present")
+
+        check = "show server list"
+
+        LOG.debug("Send command to OA: %s" % check)
+
+        output = oa.send_command(check)
+        first = True
+        bladelist = ''
+        for blade in blades:
+            prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
+                              re.MULTILINE)
+            if prog.search(str(output[:])) is not None:
+                oa.close()
+                self.oa_error_message = ("Blade %d in shelf %d "
+                                         % (blade, shelf))
+                if one_blade:
+                    self.oa_error_message += (
+                        "does not exist.\nChange boot order not performed.\n")
+                else:
+                    self.oa_error_message += (
+                        "specified but does not exist.\n"
+                        "Change boot order not performed on shelf %d\n"
+                        % shelf)
+                return False
+            if not first:
+                bladelist += ','
+            else:
+                first = False
+            bladelist += str(blade)
+
+        if blade_list:
+            LOG.debug("All blades present")
+
+        # Boot origins are pushed so first set boot from hard disk, then PXE
+        # NB! If we want to support boot from SD we must add USB to the "stack"
+        cmd1 = "set server boot first hdd %s" % bladelist
+        cmd2 = "set server boot first pxe %s" % bladelist
+        for cmd in [cmd1, cmd2]:
+
+            LOG.debug("Send command to OA: %s" % cmd)
+            try:
+                output = oa.send_command(cmd)
+            except:
+                self.oa_error_message = oa.error_message
+                for line in output:
+                    self.oa_error_message += line
+                oa.close()
+                return False
+
+        # Check that all blades got the correct boot order
+        # Needs updating if USB is added
+        LOG.debug("Check if boot order successfully set")
+        match = (r"^.*Boot Order\):\',\s*\'(\\t)+PXE NIC 1\',\s*\'(\\t)"
+                 r"+Hard Drive")
+        prog = re.compile(match)
+        for blade in blades:
+
+            check = "show server boot %d" % blade
+
+            LOG.debug("Send command to OA: %s" % check)
+            try:
+                output = oa.send_command(check)
+            except:
+                self.oa_error_message = oa.error_message
+                oa.close()
+                return False
+            if prog.search(str(output[:])) is None:
+                oa.close()
+                self.oa_error_message = ("Failed to set boot order on blade "
+                                         "%d in shelf %d\n" % (blade, shelf))
+                for line in output:
+                    self.oa_error_message += line
+                return False
+            LOG.debug("Boot order successfully set on blade %d in shelf %d"
+                      % (blade, shelf))
+
+        if blade_list:
+            LOG.debug("Boot order successfully set on all configured blades "
+                      "in shelf %d" % (shelf))
+        oa.close()
+        return True
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py
new file mode 100644
index 0000000..36fac77
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py
@@ -0,0 +1,110 @@
+import socket
+import paramiko
+
+from cloud import common
+
+LOG = common.LOG
+
+class RunOACommand:
+
+    def __init__(self, mgmt_ip, username, password):
+        self.ssh = None
+        self.mgmt_ip = mgmt_ip
+        self.username = username
+        self.password = password
+        self.error_message = ""
+
+    def connected(self):
+        return self.ssh is not None
+
+    def close(self):
+        if self.connected():
+            self.ssh.close()
+            self.ssh = None
+            self.error_message = ""
+
+    def connect(self):
+        LOG.info("Trying to connect to OA at %s" % self.mgmt_ip)
+        try:
+            self.ssh.connect(self.mgmt_ip,
+                             username=self.username,
+                             password=self.password,
+                             look_for_keys=False,
+                             allow_agent=False)
+            return True
+        except socket.error, (err, message):
+            self.error_message += ("Can not talk to OA %s: %s\n" %
+                                   (self.mgmt_ip, message))
+        except Exception as e:
+            self.error_message += ("Can not talk to OA %s: %s\n" %
+                                   (self.mgmt_ip, e.args))
+        LOG.error("Failed to connect to OA at %s" % self.mgmt_ip)
+        return False
+
+    # Return None if this most likely is not an OA
+    #        False if we failed to connect to an active OA
+    #        True if connected
+    def connect_to_active(self):
+        self.error_message = "OA connect failed with these errors:\n"
+
+        self.ssh = paramiko.SSHClient()
+        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+        initial_mgmt_ip = self.mgmt_ip
+        if not self.connect():
+            octets = self.mgmt_ip.split(".")
+            self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
+                                            octets[1],
+                                            octets[2],
+                                            str(int(octets[3]) + 1))
+            if not self.connect():
+                self.ssh = None
+                LOG.error("Failed to connect to OA at %s (and %s)" %
+                          (initial_mgmt_ip, self.mgmt_ip))
+                return None
+
+        output = self.send_command("show oa status")
+        for line in output:
+            if "Standby" in line:
+                self.ssh.close()
+                self.error_message += (
+                    "%s is the standby OA, trying next OA\n" % self.mgmt_ip)
+                LOG.info("%s is the standby OA" % self.mgmt_ip)
+                if self.mgmt_ip != initial_mgmt_ip:
+                    self.error_message += (
+                        "Can only talk to OA %s which is the standby OA\n" %
+                        self.mgmt_ip)
+                    self.ssh = None
+                    return False
+                else:
+                    octets = self.mgmt_ip.split(".")
+                    self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
+                                                    octets[1],
+                                                    octets[2],
+                                                    str(int(octets[3]) + 1))
+                    if not self.connect():
+                        self.ssh = None
+                        return False
+        LOG.info("Connected to active OA at %s" % self.mgmt_ip)
+        self.error_message = ""
+        return True
+
+    def send_command(self, cmd):
+        if not self.connected():
+            self.error_message = (
+                "Not connected, cannot send command %s\n" % (cmd))
+            raise
+
+        LOG.info('Sending "%s" to %s' % (cmd, self.mgmt_ip))
+        stdin, stdout, stderr = self.ssh.exec_command(cmd)
+        output = []
+        for line in stdout.read().splitlines():
+            if line != '':
+                output.append(line)
+        return output
+
+    def __exit__(self, type, value, traceback):
+        if self.connected():
+            self.close()
+            self.ssh = None
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
new file mode 100644
index 0000000..d332e59
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
@@ -0,0 +1,153 @@
+from lxml import etree
+from cloud import common
+from ssh_client import SSHClient
+
+exec_cmd = common.exec_cmd
+err = common.err
+LOG = common.LOG
+
+
+class LibvirtAdapter(object):
+
+    def __init__(self, mgmt_ip, username, password):
+        self.mgmt_ip = mgmt_ip
+        self.username = username
+        self.password = password
+        self.parser = etree.XMLParser(remove_blank_text=True)
+
+    def power_off_blades(self, shelf, blade_list):
+        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+        ssh.open()
+        for blade in blade_list:
+            LOG.debug('Power off blade %s in shelf %s' % (blade, shelf))
+            vm_name = 's%s_b%s' % (shelf, blade)
+            resp = ssh.execute('virsh destroy %s' % vm_name)
+            LOG.debug('response: %s' % resp)
+        ssh.close()
+
+    def power_on_blades(self, shelf, blade_list):
+        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+        ssh.open()
+        for blade in blade_list:
+            LOG.debug('Power on blade %s in shelf %s' % (blade, shelf))
+            vm_name = 's%s_b%s' % (shelf, blade)
+            resp = ssh.execute('virsh start %s' % vm_name)
+            LOG.debug('response: %s' % resp)
+        ssh.close()
+
+    def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
+        if not boot_dev_list:
+            boot_dev_list = ['network', 'hd']
+        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+        ssh.open()
+        temp_dir = ssh.execute('mktemp -d').strip()
+        for blade in blade_list:
+            LOG.debug('Set boot order %s on blade %s in shelf %s'
+                      % (boot_dev_list, blade, shelf))
+            vm_name = 's%s_b%s' % (shelf, blade)
+            resp = ssh.execute('virsh dumpxml %s' % vm_name)
+            xml_dump = etree.fromstring(resp, self.parser)
+            os = xml_dump.xpath('/domain/os')
+            for o in os:
+                for bootelem in ['boot', 'bootmenu']:
+                    boot = o.xpath(bootelem)
+                    for b in boot:
+                        b.getparent().remove(b)
+                for dev in boot_dev_list:
+                    b = etree.Element('boot')
+                    b.set('dev', dev)
+                    o.append(b)
+                bmenu = etree.Element('bootmenu')
+                bmenu.set('enable', 'no')
+                o.append(bmenu)
+            tree = etree.ElementTree(xml_dump)
+            xml_file = temp_dir + '/%s.xml' % vm_name
+            with open(xml_file, 'w') as f:
+                tree.write(f, pretty_print=True, xml_declaration=True)
+            ssh.execute('virsh define %s' % xml_file)
+        ssh.execute('rm -fr %s' % temp_dir)
+        ssh.close()
+
+    def get_blades_mac_addresses(self, shelf, blade_list):
+        LOG.debug('Get the MAC addresses of blades %s in shelf %s'
+                  % (blade_list, shelf))
+        macs_per_blade_dict = {}
+        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+        ssh.open()
+        for blade in blade_list:
+            vm_name = 's%s_b%s' % (shelf, blade)
+            mac_list = macs_per_blade_dict[blade] = []
+            resp = ssh.execute('virsh dumpxml %s' % vm_name)
+            xml_dump = etree.fromstring(resp)
+            interfaces = xml_dump.xpath('/domain/devices/interface')
+            for interface in interfaces:
+                macs = interface.xpath('mac')
+                for mac in macs:
+                    mac_list.append(mac.get('address'))
+        ssh.close()
+        return macs_per_blade_dict
+
+    def load_image_file(self, shelf=None, blade=None, vm=None,
+                        image_path=None):
+        if shelf and blade:
+            vm_name = 's%s_b%s' % (shelf, blade)
+        else:
+            vm_name = vm
+
+        LOG.debug('Load media file %s into %s '
+                  % (image_path, 'vm %s' % vm if vm else 'blade %s in shelf %s'
+                     % (shelf, blade)))
+
+        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+        ssh.open()
+        temp_dir = ssh.execute('mktemp -d').strip()
+        resp = ssh.execute('virsh dumpxml %s' % vm_name)
+        xml_dump = etree.fromstring(resp)
+
+        disks = xml_dump.xpath('/domain/devices/disk')
+        for disk in disks:
+            if disk.get('device') == 'cdrom':
+                disk.set('type', 'file')
+                sources = disk.xpath('source')
+                for source in sources:
+                    disk.remove(source)
+                source = etree.SubElement(disk, 'source')
+                source.set('file', image_path)
+        tree = etree.ElementTree(xml_dump)
+        xml_file = temp_dir + '/%s.xml' % vm_name
+        with open(xml_file, 'w') as f:
+            tree.write(f, pretty_print=True, xml_declaration=True)
+        ssh.execute('virsh define %s' % xml_file)
+        ssh.execute('rm -fr %s' % temp_dir)
+        ssh.close()
+
+    def eject_image_file(self, shelf=None, blade=None, vm=None):
+        if shelf and blade:
+            vm_name = 's%s_b%s' % (shelf, blade)
+        else:
+            vm_name = vm
+
+        LOG.debug('Eject media file from %s '
+                  % ('vm %s' % vm if vm else 'blade %s in shelf %s'
+                     % (shelf, blade)))
+
+        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+        ssh.open()
+        temp_dir = ssh.execute('mktemp -d').strip()
+        resp = ssh.execute('virsh dumpxml %s' % vm_name)
+        xml_dump = etree.fromstring(resp)
+
+        disks = xml_dump.xpath('/domain/devices/disk')
+        for disk in disks:
+            if disk.get('device') == 'cdrom':
+                disk.set('type', 'block')
+                sources = disk.xpath('source')
+                for source in sources:
+                    disk.remove(source)
+        tree = etree.ElementTree(xml_dump)
+        xml_file = temp_dir + '/%s.xml' % vm_name
+        with open(xml_file, 'w') as f:
+            tree.write(f, pretty_print=True, xml_declaration=True)
+        ssh.execute('virsh define %s' % xml_file)
+        ssh.execute('rm -fr %s' % temp_dir)
+        ssh.close()
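
The libvirt adapter above edits the domain XML over SSH (virsh dumpxml, modify, virsh define) instead of talking to an HP Onboard Administrator, which is what lets the same deployment flow run against the libvirt prototype lab mentioned in the commit message. A rough usage sketch (not part of the patch) is below; the host, credentials and ISO path are assumed placeholder values.

```python
# Illustrative sketch only; host, credentials and image path are made up.
from libvirt.libvirt_adapter import LibvirtAdapter

adapter = LibvirtAdapter('192.168.122.1', 'root', 'secret')

shelf, blades = 1, [1, 2]
adapter.power_off_blades(shelf, blades)        # virsh destroy s1_b1, s1_b2
adapter.set_boot_order_blades(shelf, blades,
                              boot_dev_list=['network', 'hd'])
# Attach an installation image to blade 1's emulated cdrom, then boot.
adapter.load_image_file(shelf=shelf, blade=1,
                        image_path='/var/lib/images/bootstrap.iso')
adapter.power_on_blades(shelf, blades)         # virsh start s1_b1, s1_b2
print(adapter.get_blades_mac_addresses(shelf, blades))
```
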