From 4301e4cb3bd6f62caec575d30e8588b72ac626c7 Mon Sep 17 00:00:00 2001
From: Tim Rozet <trozet@redhat.com>
Date: Thu, 2 Aug 2018 23:49:00 -0400
Subject: Adds deployment via snapshot

New arguments are added to allow deployment via snapshot:
--snapshot, --snap-cache. The previous tripleo-quickstart code has been
removed and replaced with the snapshot option.

Snapshot deployments are supported on CentOS and Fedora, and snapshot
artifacts use a caching system similar to the one used by standard
deployments. Snapshots are produced daily by Apex and include the latest
as well as n-1 OpenStack versions. The os-odl-nofeature scenario is used
for the snapshots, and multiple topology versions are available. The
snapshot pulled at deploy time depends on the deploy settings and the
number of virtual computes requested.

Since snapshot deployments use only one network (admin), there is no
reason to pass network settings, so that argument is now optional.
Previously, network settings were required even for standard virtual
deployments; that was also unnecessary, as we can default to the virtual
network settings.

Also includes a minor fix to tox.ini to allow specifying which test
cases to run (useful for developers writing tests). The default behavior
of tox is unchanged.
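
For example, a snapshot deployment with a custom cache location could be
invoked as follows (the deploy settings path shown is illustrative):

    opnfv-deploy -d ../config/deploy/os-odl-nofeature-noha.yaml \
        --snapshot --snap-cache /opt/snap_cache

When --snap-cache is omitted it defaults to $HOME/snap_cache, and
-n/--network-settings may be left out entirely for snapshot deployments.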

JIRA: APEX-548

Change-Id: I1e08c4e54eac5aae99921f61ab7f69693ed12b47
Signed-off-by: Tim Rozet <trozet@redhat.com>
---
 .gitignore                                    |   1 +
 apex/common/constants.py                      |   3 +-
 apex/common/exceptions.py                     |  12 +
 apex/common/utils.py                          |  35 ++-
 apex/deploy.py                                | 159 +++++------
 apex/deployment/snapshot.py                   | 241 +++++++++++++++++
 apex/overcloud/node.py                        | 147 ++++++++++
 apex/tests/config/admin.xml                   |   7 +
 apex/tests/config/baremetal0.xml              |  73 +++++
 apex/tests/config/node.yaml                   |  12 +
 apex/tests/config/snapshot.properties         |   2 +
 apex/tests/test_apex_common_utils.py          |   2 +-
 apex/tests/test_apex_deploy.py                |  33 ++-
 apex/tests/test_apex_deployment_snapshot.py   | 374 ++++++++++++++++++++++++++
 apex/tests/test_apex_overcloud_node.py        | 191 +++++++++++++
 docs/release/installation/virtual.rst         |  79 +++++-
 lib/ansible/playbooks/deploy_dependencies.yml |  27 ++
 tox.ini                                       |   2 +-
 18 files changed, 1304 insertions(+), 96 deletions(-)
 create mode 100644 apex/deployment/snapshot.py
 create mode 100644 apex/overcloud/node.py
 create mode 100644 apex/tests/config/admin.xml
 create mode 100644 apex/tests/config/baremetal0.xml
 create mode 100644 apex/tests/config/node.yaml
 create mode 100644 apex/tests/config/snapshot.properties
 create mode 100644 apex/tests/test_apex_deployment_snapshot.py
 create mode 100644 apex/tests/test_apex_overcloud_node.py

diff --git a/.gitignore b/.gitignore
index 2789a249..7bb5fbcb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 /docs_build/
 /docs_output/
 /releng/
+venv/
 apex.egg-info/
 coverage.xml
 nosetests.xml

diff --git a/apex/common/constants.py b/apex/common/constants.py
index 0d71e6e3..8c65e68b 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -74,6 +74,5 @@
 OPNFV_ARTIFACTS = 'http://artifacts.opnfv.org'
 CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \
              'rpm'.format(OPNFV_ARTIFACTS)
-QUAGGA_URL = "http://artifacts.opnfv.org/sdnvpn/quagga/quagga-4.tar.gz"
-
 OVS_URL = "http://openvswitch.org/releases/openvswitch-2.9.2.tar.gz"
+QUAGGA_URL = "{}/sdnvpn/quagga/quagga-4.tar.gz".format(OPNFV_ARTIFACTS)

diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
index a4d390a4..6d8383b8 100644
--- a/apex/common/exceptions.py
+++ b/apex/common/exceptions.py
@@ -22,3 +22,15 @@ class ApexCleanException(Exception):
 
 class ApexBuildException(Exception):
     pass
+
+
+class SnapshotDeployException(Exception):
+    pass
+
+
+class OvercloudNodeException(Exception):
+    pass
+
+
+class FetchException(Exception):
+    pass

diff --git a/apex/common/utils.py b/apex/common/utils.py
index 464aaf28..aae821ef 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -218,7 +218,7 @@ def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
         if download_target:
             urllib.request.urlretrieve(target_url, filename=target_dest)
             logging.info("Target downloaded: {}".format(target))
-        if target.endswith('.tar'):
+        if target.endswith(('.tar', 'tar.gz', 'tgz')):
             logging.info('Unpacking tar file')
             tar = tarfile.open(target_dest)
             tar.extractall(path=dest)
@@ -255,9 +255,9 @@ def open_webpage(url, timeout=5):
     try:
         response = urllib.request.urlopen(url, timeout=timeout)
         return response.read()
-    except (urllib.request.URLError, socket.timeout):
+    except (urllib.request.URLError, socket.timeout) as e:
         logging.error("Unable to open URL: {}".format(url))
-        raise
+        raise exc.FetchException('Unable to open URL') from e
 
 
 def edit_tht_env(env_file, section, settings):
@@ -281,3 +281,32 @@ def unique(tmp_list):
         if x not in uniq_list:
             uniq_list.append(x)
     return uniq_list
+
+
+def bash_settings_to_dict(data):
+    """
+    Parses bash settings (x=y) and returns a dict of keys and values
+    :param data: bash settings data in x=y format
+    :return: dict of keys and values
+    """
+    return dict(item.split('=') for item in data.splitlines())
+
+
+def fetch_properties(url):
+    """
+    Downloads OPNFV properties and returns a dictionary of the key, values
+    :param url: URL of properties file
+    :return: dict of k,v for each property
+    """
+    if bool(urllib.parse.urlparse(url).scheme):
+        logging.debug('Fetching properties from internet: {}'.format(url))
+        return bash_settings_to_dict(open_webpage(url).decode('utf-8'))
+    elif os.path.isfile(url):
+        logging.debug('Fetching properties from file: {}'.format(url))
+        with open(url, 'r') as fh:
+            data = fh.read()
+        return bash_settings_to_dict(data)
+    else:
+        logging.warning('Unable to fetch properties for: {}'.format(url))
+        raise exc.FetchException('Unable to determine properties location: '
+                                 '{}'.format(url))
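
As a quick illustration of the two helpers added above: fetch_properties()
accepts either a URL or a local file path and hands the raw text to
bash_settings_to_dict(). A standalone sketch of the parsing (the key names
match those read by the snapshot code below; the values are invented):

    # Python sketch; note that item.split('=') assumes no '=' inside values
    data = ("OPNFV_SNAP_URL=artifacts.opnfv.org/apex/master/noha/snap.tar.gz\n"
            "OPNFV_SNAP_SHA512SUM=123abc456def")
    settings = dict(item.split('=') for item in data.splitlines())
    assert settings['OPNFV_SNAP_SHA512SUM'] == '123abc456def'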

diff --git a/apex/deploy.py b/apex/deploy.py
index 9510de9d..531c9bfa 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -30,6 +30,7 @@ from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
+from apex.deployment.snapshot import SnapshotDeployment
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
@@ -45,11 +46,6 @@ APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
 SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
 
 
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                      inventory_file=None):
-    pass
-
-
 def validate_cross_settings(deploy_settings, net_settings, inventory):
     """
     Used to validate compatibility across settings file.
@@ -115,7 +111,7 @@ def create_deploy_parser():
                                help='File which contains Apex deploy settings')
     deploy_parser.add_argument('-n', '--network-settings',
                                dest='network_settings_file',
-                               required=True,
+                               required=False,
                                help='File which contains Apex network '
                                     'settings')
     deploy_parser.add_argument('-i', '--inventory-file',
@@ -176,9 +172,14 @@ def create_deploy_parser():
                                default='/usr/share/opnfv-apex',
                                help='Directory path for apex ansible '
                                     'and third party libs')
-    deploy_parser.add_argument('--quickstart', action='store_true',
+    deploy_parser.add_argument('-s', '--snapshot', action='store_true',
                                default=False,
-                               help='Use tripleo-quickstart to deploy')
+                               help='Use snapshots for deployment')
+    deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+                               default="{}/snap_cache".format(
+                                   os.path.expanduser('~')),
+                               help='Local directory to cache snapshot '
+                                    'artifacts. Defaults to $HOME/snap_cache')
     deploy_parser.add_argument('--upstream', action='store_true',
                                default=True,
                                help='Force deployment to use upstream '
@@ -205,20 +206,25 @@ def validate_deploy_args(args):
     """
 
     logging.debug('Validating arguments for deployment')
-    if args.virtual and args.inventory_file is not None:
+    if args.snapshot:
+        logging.debug('Skipping inventory validation as it is not '
+                      'applicable to snapshot deployments')
+    elif args.virtual and args.inventory_file is not None:
         logging.error("Virtual enabled but inventory file also given")
         raise ApexDeployException('You should not specify an inventory file '
                                   'with virtual deployments')
     elif args.virtual:
         args.inventory_file = os.path.join(APEX_TEMP_DIR,
                                            'inventory-virt.yaml')
-    elif os.path.isfile(args.inventory_file) is False:
+    elif not os.path.isfile(args.inventory_file):
         logging.error("Specified inventory file does not exist: {}".format(
             args.inventory_file))
         raise ApexDeployException('Specified inventory file does not exist')
 
     for settings_file in (args.deploy_settings_file,
                           args.network_settings_file):
+        if settings_file == args.network_settings_file and args.snapshot:
+            continue
         if os.path.isfile(settings_file) is False:
             logging.error("Specified settings file does not "
                           "exist: {}".format(settings_file))
@@ -253,77 +259,80 @@ def main():
     deploy_settings = DeploySettings(args.deploy_settings_file)
     logging.info("Deploy settings are:\n {}".format(pprint.pformat(
                  deploy_settings)))
-    net_settings = NetworkSettings(args.network_settings_file)
-    logging.info("Network settings are:\n {}".format(pprint.pformat(
-                 net_settings)))
-    os_version = deploy_settings['deploy_options']['os_version']
-    net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
-    net_env = NetworkEnvironment(net_settings, net_env_file,
-                                 os_version=os_version)
-    net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
-    utils.dump_yaml(dict(net_env), net_env_target)
-
-    # get global deploy params
-    ha_enabled = deploy_settings['global_params']['ha_enabled']
-    introspect = deploy_settings['global_params'].get('introspect', True)
-
-    if args.virtual:
-        if args.virt_compute_ram is None:
-            compute_ram = args.virt_default_ram
-        else:
-            compute_ram = args.virt_compute_ram
-        if deploy_settings['deploy_options']['sdn_controller'] == \
-                'opendaylight' and args.virt_default_ram < 12:
-            control_ram = 12
-            logging.warning('RAM per controller is too low. OpenDaylight '
-                            'requires at least 12GB per controller.')
-            logging.info('Increasing RAM per controller to 12GB')
-        elif args.virt_default_ram < 10:
-            control_ram = 10
-            logging.warning('RAM per controller is too low. nosdn '
-                            'requires at least 10GB per controller.')
-            logging.info('Increasing RAM per controller to 10GB')
-        else:
-            control_ram = args.virt_default_ram
-        if ha_enabled and args.virt_compute_nodes < 2:
-            logging.debug('HA enabled, bumping number of compute nodes to 2')
-            args.virt_compute_nodes = 2
-        virt_utils.generate_inventory(args.inventory_file, ha_enabled,
-                                      num_computes=args.virt_compute_nodes,
-                                      controller_ram=control_ram * 1024,
-                                      compute_ram=compute_ram * 1024,
-                                      vcpus=args.virt_cpus
-                                      )
-    inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
-    logging.info("Inventory is:\n {}".format(pprint.pformat(
-                 inventory)))
-
-    validate_cross_settings(deploy_settings, net_settings, inventory)
+
+    if not args.snapshot:
+        net_settings = NetworkSettings(args.network_settings_file)
+        logging.info("Network settings are:\n {}".format(pprint.pformat(
+                     net_settings)))
+        os_version = deploy_settings['deploy_options']['os_version']
+        net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+        net_env = NetworkEnvironment(net_settings, net_env_file,
+                                     os_version=os_version)
+        net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+        utils.dump_yaml(dict(net_env), net_env_target)
+
+        # get global deploy params
+        ha_enabled = deploy_settings['global_params']['ha_enabled']
+        introspect = deploy_settings['global_params'].get('introspect', True)
+        net_list = net_settings.enabled_network_list
+        if args.virtual:
+            if args.virt_compute_ram is None:
+                compute_ram = args.virt_default_ram
+            else:
+                compute_ram = args.virt_compute_ram
+            if (deploy_settings['deploy_options']['sdn_controller'] ==
+                    'opendaylight' and args.virt_default_ram < 12):
+                control_ram = 12
+                logging.warning('RAM per controller is too low. OpenDaylight '
+                                'requires at least 12GB per controller.')
+                logging.info('Increasing RAM per controller to 12GB')
+            elif args.virt_default_ram < 10:
+                control_ram = 10
+                logging.warning('RAM per controller is too low. nosdn '
+                                'requires at least 10GB per controller.')
+                logging.info('Increasing RAM per controller to 10GB')
+            else:
+                control_ram = args.virt_default_ram
+            if ha_enabled and args.virt_compute_nodes < 2:
+                logging.debug(
+                    'HA enabled, bumping number of compute nodes to 2')
+                args.virt_compute_nodes = 2
+            virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+                                          num_computes=args.virt_compute_nodes,
+                                          controller_ram=control_ram * 1024,
+                                          compute_ram=compute_ram * 1024,
+                                          vcpus=args.virt_cpus
+                                          )
+        inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+        logging.info("Inventory is:\n {}".format(pprint.pformat(
+                     inventory)))
+
+        validate_cross_settings(deploy_settings, net_settings, inventory)
+    else:
+        # only one network with snapshots
+        net_list = [constants.ADMIN_NETWORK]
+
     ds_opts = deploy_settings['deploy_options']
-    if args.quickstart:
-        deploy_settings_file = os.path.join(APEX_TEMP_DIR,
-                                            'apex_deploy_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
-                        deploy_settings_file)
-        logging.info("File created: {}".format(deploy_settings_file))
-        network_settings_file = os.path.join(APEX_TEMP_DIR,
-                                             'apex_network_settings.yaml')
-        utils.dump_yaml(utils.dict_objects_to_str(net_settings),
-                        network_settings_file)
-        logging.info("File created: {}".format(network_settings_file))
-        deploy_quickstart(args, deploy_settings_file, network_settings_file,
-                          args.inventory_file)
+    ansible_args = {
+        'virsh_enabled_networks': net_list,
+        'snapshot': args.snapshot
+    }
+    utils.run_ansible(ansible_args,
+                      os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+                                   'deploy_dependencies.yml'))
+    if args.snapshot:
+        # Start snapshot Deployment
+        logging.info('Executing Snapshot Deployment...')
+        SnapshotDeployment(deploy_settings=deploy_settings,
+                           snap_cache_dir=args.snap_cache,
+                           fetch=not args.no_fetch,
+                           all_in_one=not bool(args.virt_compute_nodes))
     else:
+        # Start Standard TripleO Deployment
         deployment = ApexDeployment(deploy_settings, args.patches_file,
                                     args.deploy_settings_file)
         # TODO (trozet): add logic back from:
         # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
-        ansible_args = {
-            'virsh_enabled_networks': net_settings.enabled_network_list
-        }
-        utils.run_ansible(ansible_args,
-                          os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
-                                       'deploy_dependencies.yml'))
         uc_external = False
         if 'external' in net_settings.enabled_network_list:
             uc_external = True
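
One note on the wiring above: deploy_dependencies.yml now also receives a
'snapshot' flag, and SnapshotDeployment (added below) derives its working
directory from --snap-cache. A sketch of that layout with example values
('master' stands in for deploy_options['os_version']):

    import os

    snap_cache = "{}/snap_cache".format(os.path.expanduser('~'))  # default
    os_version = 'master'   # from deploy_settings['deploy_options']
    ha_ext = 'noha'         # 'ha', 'noha', or 'noha-allinone'
    print(os.path.join(snap_cache, "{}/{}".format(os_version, ha_ext)))
    # e.g. /root/snap_cache/master/noha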

diff --git a/apex/deployment/snapshot.py b/apex/deployment/snapshot.py
new file mode 100644
index 00000000..b33907fb
--- /dev/null
+++ b/apex/deployment/snapshot.py
@@ -0,0 +1,241 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import fnmatch
+import logging
+import os
+import pprint
+import socket
+import time
+
+import libvirt
+
+import apex.common.constants as con
+from apex.common import exceptions as exc
+from apex.common import utils
+from apex.overcloud.node import OvercloudNode
+import apex.settings.deploy_settings as ds
+
+
+SNAP_FILE = 'snapshot.properties'
+CHECKSUM = 'OPNFV_SNAP_SHA512SUM'
+OVERCLOUD_RC = 'overcloudrc'
+SSH_KEY = 'id_rsa'
+OPENSTACK = 'openstack'
+OPENDAYLIGHT = 'opendaylight'
+SERVICES = (OPENSTACK, OPENDAYLIGHT)
+
+
+class SnapshotDeployment:
+    def __init__(self, deploy_settings, snap_cache_dir, fetch=True,
+                 all_in_one=False):
+        self.id_rsa = None
+        self.fetch = fetch
+        ds_opts = deploy_settings['deploy_options']
+        self.os_version = ds_opts['os_version']
+        self.ha_enabled = deploy_settings['global_params']['ha_enabled']
+        if self.ha_enabled:
+            self.ha_ext = 'ha'
+        elif all_in_one:
+            self.ha_ext = 'noha-allinone'
+        else:
+            self.ha_ext = 'noha'
+        self.snap_cache_dir = os.path.join(snap_cache_dir,
+                                           "{}/{}".format(self.os_version,
+                                                          self.ha_ext))
+        self.networks = []
+        self.oc_nodes = []
+        self.properties_url = "{}/apex/{}/{}".format(con.OPNFV_ARTIFACTS,
+                                                     self.os_version,
+                                                     self.ha_ext)
+        self.conn = libvirt.open('qemu:///system')
+        if not self.conn:
+            raise exc.SnapshotDeployException(
+                'Unable to open libvirt connection')
+        if self.fetch:
+            self.pull_snapshot(self.properties_url, self.snap_cache_dir)
+        else:
+            logging.info('No fetch enabled. Will not attempt to pull latest '
+                         'snapshot')
+        self.deploy_snapshot()
+
+    @staticmethod
+    def pull_snapshot(url_path, snap_cache_dir):
+        """
+        Compare opnfv properties file and download and unpack snapshot if
+        necessary
+        :param url_path: path of latest snap info
+        :param snap_cache_dir: local directory for snap cache
+        :return: None
+        """
+        full_url = os.path.join(url_path, SNAP_FILE)
+        upstream_props = utils.fetch_properties(full_url)
+        logging.debug("Upstream properties are: {}".format(upstream_props))
+        try:
+            upstream_sha = upstream_props[CHECKSUM]
+        except KeyError:
+            logging.error('Unable to find {} for upstream properties: '
+                          '{}'.format(CHECKSUM, upstream_props))
+            raise exc.SnapshotDeployException('Unable to find upstream '
+                                              'properties checksum value')
+        local_prop_file = os.path.join(snap_cache_dir, SNAP_FILE)
+        try:
+            local_props = utils.fetch_properties(local_prop_file)
+            local_sha = local_props[CHECKSUM]
+            pull_snap = local_sha != upstream_sha
+        except (exc.FetchException, KeyError):
+            logging.info("No locally cached properties found, will pull "
+                         "latest")
+            local_sha = None
+            pull_snap = True
+        logging.debug('Local sha: {}, Upstream sha: {}'.format(local_sha,
+                                                               upstream_sha))
+        if pull_snap:
+            logging.info('SHA mismatch, will download latest snapshot')
+            full_snap_url = upstream_props['OPNFV_SNAP_URL']
+            snap_file = os.path.basename(full_snap_url)
+            snap_url = full_snap_url.replace(snap_file, '')
+            if not snap_url.startswith('http://'):
+                snap_url = 'http://' + snap_url
+            utils.fetch_upstream_and_unpack(dest=snap_cache_dir,
+                                            url=snap_url,
+                                            targets=[SNAP_FILE, snap_file]
+                                            )
+        else:
+            logging.info('SHA match, artifacts in cache are already latest. '
+                         'Will not download.')
+
+    def create_networks(self):
+        logging.info("Detecting snapshot networks")
+        try:
+            xmls = fnmatch.filter(os.listdir(self.snap_cache_dir), '*.xml')
+        except FileNotFoundError:
+            raise exc.SnapshotDeployException(
+                'No XML files found in snap cache directory: {}'.format(
+                    self.snap_cache_dir))
+        net_xmls = list()
+        for xml in xmls:
+            if xml.startswith('baremetal'):
+                continue
+            net_xmls.append(os.path.join(self.snap_cache_dir, xml))
+        if not net_xmls:
+            raise exc.SnapshotDeployException(
+                'No network XML files detected in snap cache, '
+                'please check local snap cache contents')
+        logging.info('Snapshot networks found: {}'.format(net_xmls))
+        for xml in net_xmls:
+            logging.debug('Creating network from {}'.format(xml))
+            with open(xml, 'r') as fh:
+                net_xml = fh.read()
+            net = self.conn.networkCreateXML(net_xml)
+            self.networks.append(net)
+            logging.info('Network started: {}'.format(net.name()))
+
+    def parse_and_create_nodes(self):
+        """
+        Parse snapshot node.yaml config file and create overcloud nodes
+        :return: None
+        """
+        node_file = os.path.join(self.snap_cache_dir, 'node.yaml')
+        if not os.path.isfile(node_file):
+            raise exc.SnapshotDeployException('Missing node definitions '
+                                              'from {}'.format(node_file))
+        node_data = utils.parse_yaml(node_file)
+        if 'servers' not in node_data:
+            raise exc.SnapshotDeployException('Invalid node.yaml format')
+        for node, data in node_data['servers'].items():
+            logging.info('Creating node: {}'.format(node))
+            logging.debug('Node data is:\n{}'.format(pprint.pformat(data)))
+            node_xml = os.path.join(self.snap_cache_dir,
+                                    '{}.xml'.format(data['vNode-name']))
+            node_qcow = os.path.join(self.snap_cache_dir,
+                                     '{}.qcow2'.format(data['vNode-name']))
+            self.oc_nodes.append(
+                OvercloudNode(ip=data['address'],
+                              ovs_ctrlrs=data['ovs-controller'],
+                              ovs_mgrs=data['ovs-managers'],
+                              role=data['type'],
+                              name=node,
+                              node_xml=node_xml,
+                              disk_img=node_qcow)
+            )
+            logging.info('Node Created')
+        logging.info('Starting nodes')
+        for node in self.oc_nodes:
+            node.start()
+
+    def get_controllers(self):
+        controllers = []
+        for node in self.oc_nodes:
+            if node.role == 'controller':
+                controllers.append(node)
+        return controllers
+
+    def is_service_up(self, service):
+        assert service in SERVICES
+        controllers = self.get_controllers()
+        if not controllers:
+            raise exc.SnapshotDeployException('No OpenStack controllers '
+                                              'found')
+
+        for node in controllers:
+            logging.info('Waiting until {} is up on controller: '
+                         '{}'.format(service, node.name))
+            for x in range(10):
+                logging.debug('Checking {} is up attempt {}'.format(
+                    service, str(x + 1)))
+                if service == OPENSTACK:
+                    # Check if Neutron is up. A fresh socket is used per
+                    # attempt, as a TCP socket cannot be reused after a
+                    # previous connect.
+                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                    sock.settimeout(5)
+                    neutron_up = sock.connect_ex((node.ip, 9696)) == 0
+                    sock.close()
+                    if neutron_up:
+                        logging.info('{} is up on controller {}'.format(
+                            service, node.name))
+                        break
+                elif service == OPENDAYLIGHT:
+                    url = 'http://{}:8081/diagstatus'.format(node.ip)
+                    try:
+                        utils.open_webpage(url)
+                        logging.info('{} is up on controller {}'.format(
+                            service, node.name))
+                        break
+                    except Exception as e:
+                        logging.debug('Cannot contact ODL. Reason: '
+                                      '{}'.format(e))
+                time.sleep(60)
+            else:
+                logging.error('{} is not running after 10 attempts'.format(
+                    service))
+                return False
+        return True
+
+    def deploy_snapshot(self):
+        # bring up networks
+        self.create_networks()
+        # check overcloudrc exists, id_rsa
+        for snap_file in (OVERCLOUD_RC, SSH_KEY):
+            if not os.path.isfile(os.path.join(self.snap_cache_dir,
+                                               snap_file)):
+                logging.warning('File is missing from snap cache: '
+                                '{}'.format(snap_file))
+        # create nodes
+        self.parse_and_create_nodes()
+        # validate deployment
+        if self.is_service_up(OPENSTACK):
+            logging.info('OpenStack is up')
+        else:
+            raise exc.SnapshotDeployException('OpenStack is not alive')
+        if self.is_service_up(OPENDAYLIGHT):
+            logging.info('OpenDaylight is up')
+        else:
+            raise exc.SnapshotDeployException(
+                'OpenDaylight is not reporting diag status')
+        # TODO(trozet): recreate external network/subnet if missing
+        logging.info('Snapshot deployment complete. Please use the {} file '
+                     'in {} to interact with '
+                     'OpenStack'.format(OVERCLOUD_RC, self.snap_cache_dir))
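
The pull logic above hinges on the snapshot.properties file, which is
expected to provide at least OPNFV_SNAP_URL and OPNFV_SNAP_SHA512SUM (the
only keys read here). A sketch of the cache-refresh decision, with invented
values and paths:

    from apex.common import utils

    upstream = utils.fetch_properties(
        'http://artifacts.opnfv.org/apex/master/noha/snapshot.properties')
    local = utils.fetch_properties(
        '/root/snap_cache/master/noha/snapshot.properties')
    # a new snapshot tarball is downloaded only when the SHA512 sums differ
    pull_snap = (local['OPNFV_SNAP_SHA512SUM'] !=
                 upstream['OPNFV_SNAP_SHA512SUM'])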

diff --git a/apex/overcloud/node.py b/apex/overcloud/node.py
new file mode 100644
index 00000000..622d1fd1
--- /dev/null
+++ b/apex/overcloud/node.py
@@ -0,0 +1,147 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+import shutil
+import xml.etree.ElementTree as ET
+
+import distro
+import libvirt
+
+from apex.common.exceptions import OvercloudNodeException
+
+
+class OvercloudNode:
+    """
+    Overcloud server
+    """
+    def __init__(self, role, ip, ovs_ctrlrs, ovs_mgrs, name, node_xml,
+                 disk_img):
+        self.role = role
+        self.ip = ip
+        self.ovs_ctrlrs = ovs_ctrlrs
+        self.ovs_mgrs = ovs_mgrs
+        self.name = name
+        self.node_xml_file = node_xml
+        self.node_xml = None
+        self.vm = None
+        self.disk_img = None
+        if not os.path.isfile(self.node_xml_file):
+            raise OvercloudNodeException('XML definition file not found: '
+                                         '{}'.format(self.node_xml_file))
+        if not os.path.isfile(disk_img):
+            raise OvercloudNodeException('Disk image file not found: '
+                                         '{}'.format(disk_img))
+        self.conn = libvirt.open('qemu:///system')
+        if not self.conn:
+            raise OvercloudNodeException('Unable to open libvirt connection')
+
+        self.create(src_disk=disk_img)
+
+    def _configure_disk(self, disk):
+        # find default storage pool path
+        pool = self.conn.storagePoolLookupByName('default')
+        if pool is None:
+            raise OvercloudNodeException('Cannot find default storage pool')
+        pool_xml = pool.XMLDesc()
+        logging.debug('Default storage pool xml: {}'.format(pool_xml))
+        etree = ET.fromstring(pool_xml)
+        try:
+            path = etree.find('target').find('path').text
+            logging.info('System libvirt default pool path: {}'.format(path))
+        except AttributeError as e:
+            logging.error('Failure to find libvirt storage path: {}'.format(
+                e))
+            raise OvercloudNodeException('Cannot find default storage path')
+        # copy disk to system path
+        self.disk_img = os.path.join(path, os.path.basename(disk))
+        logging.info('Copying disk image to: {}. This may take some '
+                     'time...'.format(self.disk_img))
+        shutil.copyfile(disk, self.disk_img)
+
+    @staticmethod
+    def _update_xml(xml, disk_path=None):
+        """
+        Updates a libvirt XML file for the current architecture and OS of
+        this machine
+        :param xml: XML string of Libvirt domain definition
+        :param disk_path: Optional file path to update for the backing disk
+        image
+        :return: Updated XML
+        """
+        logging.debug('Parsing xml')
+        try:
+            etree = ET.fromstring(xml)
+        except ET.ParseError:
+            logging.error('Unable to parse node XML: {}'.format(xml))
+            raise OvercloudNodeException('Unable to parse node XML')
+
+        try:
+            type_element = etree.find('os').find('type')
+            if 'machine' in type_element.keys():
+                type_element.set('machine', 'pc')
+                logging.debug('XML updated with machine "pc"')
+        except AttributeError:
+            logging.warning('Failure to set XML machine type')
+
+        # qemu-kvm path may differ per system, need to detect it and update
+        # the xml
+        linux_ver = distro.linux_distribution()[0]
+        if linux_ver == 'Fedora':
+            qemu_path = '/usr/bin/qemu-kvm'
+        else:
+            qemu_path = '/usr/libexec/qemu-kvm'
+
+        try:
+            etree.find('devices').find('emulator').text = qemu_path
+            logging.debug('XML updated with emulator location: '
+                          '{}'.format(qemu_path))
+            xml = ET.tostring(etree).decode('utf-8')
+        except AttributeError:
+            logging.warning('Failure to update XML qemu path')
+
+        if disk_path:
+            try:
+                disk_element = etree.find('devices').find('disk').find(
+                    'source')
+                disk_element.set('file', disk_path)
+                logging.debug('XML updated with file path: {}'.format(
+                    disk_path))
+            except AttributeError:
+                logging.error('Failure to parse XML and set disk type')
+                raise OvercloudNodeException(
+                    'Unable to set new disk path in xml {}'.format(xml))
+
+        return ET.tostring(etree).decode('utf-8')
+
+    def create(self, src_disk):
+        # copy disk to pool and get new disk location
+        logging.debug('Preparing disk image')
+        self._configure_disk(src_disk)
+        logging.debug('Parsing node XML from {}'.format(self.node_xml_file))
+        with open(self.node_xml_file, 'r') as fh:
+            self.node_xml = fh.read()
+        # if machine is not pc we need to set it, and also need to update
+        # qemu-kvm and storage location
+        self.node_xml = self._update_xml(self.node_xml, self.disk_img)
+        logging.info('Creating node {} in libvirt'.format(self.name))
+        self.vm = self.conn.defineXML(self.node_xml)
+
+    def start(self):
+        """
+        Boot node in libvirt
+        :return:
+        """
+        try:
+            self.vm.create()
+            logging.info('Node {} started'.format(self.name))
+        except libvirt.libvirtError as e:
+            logging.error('Failed to start domain: {}'.format(self.name))
+            raise OvercloudNodeException('Failed to start VM. Reason: '
+                                         '{}'.format(e))
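
Since _update_xml() above is the least obvious part of this class, here is a
toy round-trip showing the three rewrites it performs (domain XML trimmed to
just the elements it touches; paths and values are invented):

    import xml.etree.ElementTree as ET

    xml = ("<domain type='kvm'>"
           "<os><type arch='x86_64' machine='q35'>hvm</type></os>"
           "<devices><emulator>/usr/bin/qemu-kvm</emulator>"
           "<disk type='file'><source file='/old/disk.qcow2'/></disk>"
           "</devices></domain>")
    etree = ET.fromstring(xml)
    etree.find('os').find('type').set('machine', 'pc')
    etree.find('devices').find('emulator').text = '/usr/libexec/qemu-kvm'
    etree.find('devices').find('disk').find('source').set(
        'file', '/var/lib/libvirt/images/disk.qcow2')
    print(ET.tostring(etree).decode('utf-8'))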

diff --git a/apex/tests/config/admin.xml b/apex/tests/config/admin.xml
new file mode 100644
index 00000000..69b15b1f
--- /dev/null
+++ b/apex/tests/config/admin.xml
@@ -0,0 +1,7 @@
+<network>
+  <name>admin</name>
+  <uuid>761c34f8-2a72-4205-8e69-5ed6626c6efa</uuid>
+  <forward mode='bridge'/>
+  <bridge name='br-admin'/>
+  <virtualport type='openvswitch'/>
+</network>

diff --git a/apex/tests/config/baremetal0.xml b/apex/tests/config/baremetal0.xml
new file mode 100644
index 00000000..4ff8f65a
--- /dev/null
+++ b/apex/tests/config/baremetal0.xml
@@ -0,0 +1,73 @@
+<domain type='kvm'>
+  <name>baremetal0</name>
+  <uuid>25bf15b6-130c-4bca-87af-e5cbc14bb454</uuid>
+  <memory unit='KiB'>12582912</memory>
+  <currentMemory unit='KiB'>12582912</currentMemory>
+  <vcpu>4</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
+  <os>
+    <type>hvm</type>
+  </os>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/libexec/qemu-kvm</emulator>