author     Tim Rozet <trozet@redhat.com>    2018-08-02 23:49:00 -0400
committer  Tim Rozet <trozet@redhat.com>    2018-08-23 18:01:16 -0400
commit     4301e4cb3bd6f62caec575d30e8588b72ac626c7
tree       31f6ca88598c12d45f578a6a25b5c3b86c7d5dad
parent     dc83fb1667a1a65ad333a3aab1c2843601180b23
Adds deployment via snapshot
New arguments are added to allow snapshot deployment: --snapshot and
--snap-cache. The previous tripleo-quickstart code has been removed and
replaced with the snapshot option. Snapshot deployments are supported on
CentOS and Fedora, and snapshot artifacts use a caching system similar to
the one used for standard deployments.

Snapshots are produced daily by Apex and include the latest as well as n-1
OpenStack versions. The os-odl-nofeature scenario is used for the
snapshots, and multiple topology versions of the snapshots are available.
The snapshot pulled at deploy time depends on the deploy settings and on
the number of virtual computes requested.

Since snapshot deployments use only one network (admin), there is no reason
to pass in network settings for them, so that argument is now optional.
Previously we also required network settings for standard virtual
deployments; that is unnecessary as well, since we can default to the
virtual network settings.

Also includes a minor fix to tox.ini to allow specifying which test cases
to run (useful for developers writing tests). The default behavior of tox
is unchanged.

JIRA: APEX-548

Change-Id: I1e08c4e54eac5aae99921f61ab7f69693ed12b47
Signed-off-by: Tim Rozet <trozet@redhat.com>
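For orientation, a minimal sketch of how the new options flow into the snapshot
path of apex.deploy.main(), based on the parser and main() changes below. The
-d/--deploy-settings spelling, the settings file path, the snap cache location,
and the presence of parser defaults for no_fetch and virt_compute_nodes are
assumptions, not part of this change.

    from apex import DeploySettings
    from apex.deploy import create_deploy_parser
    from apex.deployment.snapshot import SnapshotDeployment

    parser = create_deploy_parser()
    # -d flag spelling and file path are assumptions for illustration only
    args = parser.parse_args(['-d', '/etc/opnfv-apex/os-odl-nofeature-noha.yaml',
                              '--snapshot',
                              '--snap-cache', '/home/user/snap_cache'])

    # main() runs the deploy_dependencies.yml playbook for the admin network
    # only, then hands off to SnapshotDeployment, which pulls or reuses the
    # cached artifacts and boots the snapshot domains:
    deployment = SnapshotDeployment(
        deploy_settings=DeploySettings(args.deploy_settings_file),
        snap_cache_dir=args.snap_cache,
        fetch=not args.no_fetch,
        all_in_one=not bool(args.virt_compute_nodes))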
-rw-r--r--   .gitignore                                        1
-rw-r--r--   apex/common/constants.py                          3
-rw-r--r--   apex/common/exceptions.py                        12
-rw-r--r--   apex/common/utils.py                             35
-rw-r--r--   apex/deploy.py                                  159
-rw-r--r--   apex/deployment/snapshot.py                     241
-rw-r--r--   apex/overcloud/node.py                          147
-rw-r--r--   apex/tests/config/admin.xml                       7
-rw-r--r--   apex/tests/config/baremetal0.xml                 73
-rw-r--r--   apex/tests/config/node.yaml                      12
-rw-r--r--   apex/tests/config/snapshot.properties             2
-rw-r--r--   apex/tests/test_apex_common_utils.py              2
-rw-r--r--   apex/tests/test_apex_deploy.py                   33
-rw-r--r--   apex/tests/test_apex_deployment_snapshot.py     374
-rw-r--r--   apex/tests/test_apex_overcloud_node.py          191
-rw-r--r--   docs/release/installation/virtual.rst            79
-rw-r--r--   lib/ansible/playbooks/deploy_dependencies.yml    27
-rw-r--r--   tox.ini                                           2
18 files changed, 1304 insertions, 96 deletions
diff --git a/.gitignore b/.gitignore
index 2789a249..7bb5fbcb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
/docs_build/
/docs_output/
/releng/
+venv/
apex.egg-info/
coverage.xml
nosetests.xml
diff --git a/apex/common/constants.py b/apex/common/constants.py
index 0d71e6e3..8c65e68b 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -74,6 +74,5 @@ OPNFV_ARTIFACTS = 'http://artifacts.opnfv.org'
CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \
'rpm'.format(OPNFV_ARTIFACTS)
-QUAGGA_URL = "http://artifacts.opnfv.org/sdnvpn/quagga/quagga-4.tar.gz"
-
OVS_URL = "http://openvswitch.org/releases/openvswitch-2.9.2.tar.gz"
+QUAGGA_URL = "{}/sdnvpn/quagga/quagga-4.tar.gz".format(OPNFV_ARTIFACTS)
diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
index a4d390a4..6d8383b8 100644
--- a/apex/common/exceptions.py
+++ b/apex/common/exceptions.py
@@ -22,3 +22,15 @@ class ApexCleanException(Exception):
class ApexBuildException(Exception):
pass
+
+
+class SnapshotDeployException(Exception):
+ pass
+
+
+class OvercloudNodeException(Exception):
+ pass
+
+
+class FetchException(Exception):
+ pass
diff --git a/apex/common/utils.py b/apex/common/utils.py
index 464aaf28..aae821ef 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -218,7 +218,7 @@ def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
if download_target:
urllib.request.urlretrieve(target_url, filename=target_dest)
logging.info("Target downloaded: {}".format(target))
- if target.endswith('.tar'):
+ if target.endswith(('.tar', 'tar.gz', 'tgz')):
logging.info('Unpacking tar file')
tar = tarfile.open(target_dest)
tar.extractall(path=dest)
@@ -255,9 +255,9 @@ def open_webpage(url, timeout=5):
try:
response = urllib.request.urlopen(url, timeout=timeout)
return response.read()
- except (urllib.request.URLError, socket.timeout):
+ except (urllib.request.URLError, socket.timeout) as e:
logging.error("Unable to open URL: {}".format(url))
- raise
+ raise exc.FetchException('Unable to open URL') from e
def edit_tht_env(env_file, section, settings):
@@ -281,3 +281,32 @@ def unique(tmp_list):
if x not in uniq_list:
uniq_list.append(x)
return uniq_list
+
+
+def bash_settings_to_dict(data):
+ """
+ Parses bash settings x=y and returns dict of key, values
+ :param data: bash settings data in x=y format
+ :return: dict of keys and values
+ """
+ return dict(item.split('=') for item in data.splitlines())
+
+
+def fetch_properties(url):
+ """
+ Downloads OPNFV properties and returns a dictionary of the key, values
+ :param url: URL of properties file
+ :return: dict of k,v for each property
+ """
+ if bool(urllib.parse.urlparse(url).scheme):
+ logging.debug('Fetching properties from internet: {}'.format(url))
+ return bash_settings_to_dict(open_webpage(url).decode('utf-8'))
+ elif os.path.isfile(url):
+ logging.debug('Fetching properties from file: {}'.format(url))
+ with open(url, 'r') as fh:
+ data = fh.read()
+ return bash_settings_to_dict(data)
+ else:
+ logging.warning('Unable to fetch properties for: {}'.format(url))
+ raise exc.FetchException('Unable to determine properties location: '
+ '{}'.format(url))
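
A hedged illustration of the two helpers added above: bash_settings_to_dict()
splits the key=value lines of an OPNFV properties file into a dict, and
fetch_properties() accepts either a URL or a local path and returns the same
structure. The sample data mirrors the apex/tests/config/snapshot.properties
fixture added later in this change; the paths in the comments are examples.

    from apex.common import utils

    data = ("OPNFV_SNAP_URL=artifacts.opnfv.org/apex/master/noha/"
            "apex-csit-snap-2018-08-05.tar.gz\n"
            "OPNFV_SNAP_SHA512SUM=bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc236e36a74d"
            "05ee813584f3e5bb92aa23dec775846317b75d574f8c86186c666f78a299c24fb68849"
            "897bdd4bc\n")
    props = utils.bash_settings_to_dict(data)
    assert props['OPNFV_SNAP_URL'].endswith('.tar.gz')

    # fetch_properties() dispatches on whether its argument parses as a URL:
    # utils.fetch_properties('http://artifacts.opnfv.org/apex/queens/noha/snapshot.properties')
    # utils.fetch_properties('/home/user/snap_cache/queens/noha/snapshot.properties')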
diff --git a/apex/deploy.py b/apex/deploy.py
index 9510de9d..531c9bfa 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -30,6 +30,7 @@ from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
from apex import NetworkSettings
+from apex.deployment.snapshot import SnapshotDeployment
from apex.common import utils
from apex.common import constants
from apex.common import parsers
@@ -45,11 +46,6 @@ APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
- inventory_file=None):
- pass
-
-
def validate_cross_settings(deploy_settings, net_settings, inventory):
"""
Used to validate compatibility across settings file.
@@ -115,7 +111,7 @@ def create_deploy_parser():
help='File which contains Apex deploy settings')
deploy_parser.add_argument('-n', '--network-settings',
dest='network_settings_file',
- required=True,
+ required=False,
help='File which contains Apex network '
'settings')
deploy_parser.add_argument('-i', '--inventory-file',
@@ -176,9 +172,14 @@ def create_deploy_parser():
default='/usr/share/opnfv-apex',
help='Directory path for apex ansible '
'and third party libs')
- deploy_parser.add_argument('--quickstart', action='store_true',
+ deploy_parser.add_argument('-s', '--snapshot', action='store_true',
default=False,
- help='Use tripleo-quickstart to deploy')
+ help='Use snapshots for deployment')
+ deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+ default="{}/snap_cache".format(
+ os.path.expanduser('~')),
+ help='Local directory to cache snapshot '
+ 'artifacts. Defaults to $HOME/snap_cache')
deploy_parser.add_argument('--upstream', action='store_true',
default=True,
help='Force deployment to use upstream '
@@ -205,20 +206,25 @@ def validate_deploy_args(args):
"""
logging.debug('Validating arguments for deployment')
- if args.virtual and args.inventory_file is not None:
+ if args.snapshot:
+ logging.debug('Skipping inventory validation as it is not applicable '
+ 'to snapshot deployments')
+ elif args.virtual and args.inventory_file is not None:
logging.error("Virtual enabled but inventory file also given")
raise ApexDeployException('You should not specify an inventory file '
'with virtual deployments')
elif args.virtual:
args.inventory_file = os.path.join(APEX_TEMP_DIR,
'inventory-virt.yaml')
- elif os.path.isfile(args.inventory_file) is False:
+ elif not os.path.isfile(args.inventory_file):
logging.error("Specified inventory file does not exist: {}".format(
args.inventory_file))
raise ApexDeployException('Specified inventory file does not exist')
for settings_file in (args.deploy_settings_file,
args.network_settings_file):
+ if settings_file == args.network_settings_file and args.snapshot:
+ continue
if os.path.isfile(settings_file) is False:
logging.error("Specified settings file does not "
"exist: {}".format(settings_file))
@@ -253,77 +259,80 @@ def main():
deploy_settings = DeploySettings(args.deploy_settings_file)
logging.info("Deploy settings are:\n {}".format(pprint.pformat(
deploy_settings)))
- net_settings = NetworkSettings(args.network_settings_file)
- logging.info("Network settings are:\n {}".format(pprint.pformat(
- net_settings)))
- os_version = deploy_settings['deploy_options']['os_version']
- net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
- net_env = NetworkEnvironment(net_settings, net_env_file,
- os_version=os_version)
- net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
- utils.dump_yaml(dict(net_env), net_env_target)
-
- # get global deploy params
- ha_enabled = deploy_settings['global_params']['ha_enabled']
- introspect = deploy_settings['global_params'].get('introspect', True)
-
- if args.virtual:
- if args.virt_compute_ram is None:
- compute_ram = args.virt_default_ram
- else:
- compute_ram = args.virt_compute_ram
- if deploy_settings['deploy_options']['sdn_controller'] == \
- 'opendaylight' and args.virt_default_ram < 12:
- control_ram = 12
- logging.warning('RAM per controller is too low. OpenDaylight '
- 'requires at least 12GB per controller.')
- logging.info('Increasing RAM per controller to 12GB')
- elif args.virt_default_ram < 10:
- control_ram = 10
- logging.warning('RAM per controller is too low. nosdn '
- 'requires at least 10GB per controller.')
- logging.info('Increasing RAM per controller to 10GB')
- else:
- control_ram = args.virt_default_ram
- if ha_enabled and args.virt_compute_nodes < 2:
- logging.debug('HA enabled, bumping number of compute nodes to 2')
- args.virt_compute_nodes = 2
- virt_utils.generate_inventory(args.inventory_file, ha_enabled,
- num_computes=args.virt_compute_nodes,
- controller_ram=control_ram * 1024,
- compute_ram=compute_ram * 1024,
- vcpus=args.virt_cpus
- )
- inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
- logging.info("Inventory is:\n {}".format(pprint.pformat(
- inventory)))
-
- validate_cross_settings(deploy_settings, net_settings, inventory)
+
+ if not args.snapshot:
+ net_settings = NetworkSettings(args.network_settings_file)
+ logging.info("Network settings are:\n {}".format(pprint.pformat(
+ net_settings)))
+ os_version = deploy_settings['deploy_options']['os_version']
+ net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+ net_env = NetworkEnvironment(net_settings, net_env_file,
+ os_version=os_version)
+ net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+ utils.dump_yaml(dict(net_env), net_env_target)
+
+ # get global deploy params
+ ha_enabled = deploy_settings['global_params']['ha_enabled']
+ introspect = deploy_settings['global_params'].get('introspect', True)
+ net_list = net_settings.enabled_network_list
+ if args.virtual:
+ if args.virt_compute_ram is None:
+ compute_ram = args.virt_default_ram
+ else:
+ compute_ram = args.virt_compute_ram
+ if (deploy_settings['deploy_options']['sdn_controller'] ==
+ 'opendaylight' and args.virt_default_ram < 12):
+ control_ram = 12
+ logging.warning('RAM per controller is too low. OpenDaylight '
+ 'requires at least 12GB per controller.')
+ logging.info('Increasing RAM per controller to 12GB')
+ elif args.virt_default_ram < 10:
+ control_ram = 10
+ logging.warning('RAM per controller is too low. nosdn '
+ 'requires at least 10GB per controller.')
+ logging.info('Increasing RAM per controller to 10GB')
+ else:
+ control_ram = args.virt_default_ram
+ if ha_enabled and args.virt_compute_nodes < 2:
+ logging.debug(
+ 'HA enabled, bumping number of compute nodes to 2')
+ args.virt_compute_nodes = 2
+ virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+ num_computes=args.virt_compute_nodes,
+ controller_ram=control_ram * 1024,
+ compute_ram=compute_ram * 1024,
+ vcpus=args.virt_cpus
+ )
+ inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+ logging.info("Inventory is:\n {}".format(pprint.pformat(
+ inventory)))
+
+ validate_cross_settings(deploy_settings, net_settings, inventory)
+ else:
+ # only one network with snapshots
+ net_list = [constants.ADMIN_NETWORK]
+
ds_opts = deploy_settings['deploy_options']
- if args.quickstart:
- deploy_settings_file = os.path.join(APEX_TEMP_DIR,
- 'apex_deploy_settings.yaml')
- utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
- deploy_settings_file)
- logging.info("File created: {}".format(deploy_settings_file))
- network_settings_file = os.path.join(APEX_TEMP_DIR,
- 'apex_network_settings.yaml')
- utils.dump_yaml(utils.dict_objects_to_str(net_settings),
- network_settings_file)
- logging.info("File created: {}".format(network_settings_file))
- deploy_quickstart(args, deploy_settings_file, network_settings_file,
- args.inventory_file)
+ ansible_args = {
+ 'virsh_enabled_networks': net_list,
+ 'snapshot': args.snapshot
+ }
+ utils.run_ansible(ansible_args,
+ os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'deploy_dependencies.yml'))
+ if args.snapshot:
+ # Start snapshot Deployment
+ logging.info('Executing Snapshot Deployment...')
+ SnapshotDeployment(deploy_settings=deploy_settings,
+ snap_cache_dir=args.snap_cache,
+ fetch=not args.no_fetch,
+ all_in_one=not bool(args.virt_compute_nodes))
else:
+ # Start Standard TripleO Deployment
deployment = ApexDeployment(deploy_settings, args.patches_file,
args.deploy_settings_file)
# TODO (trozet): add logic back from:
# Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
- ansible_args = {
- 'virsh_enabled_networks': net_settings.enabled_network_list
- }
- utils.run_ansible(ansible_args,
- os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
- 'deploy_dependencies.yml'))
uc_external = False
if 'external' in net_settings.enabled_network_list:
uc_external = True
diff --git a/apex/deployment/snapshot.py b/apex/deployment/snapshot.py
new file mode 100644
index 00000000..b33907fb
--- /dev/null
+++ b/apex/deployment/snapshot.py
@@ -0,0 +1,241 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import fnmatch
+import logging
+import os
+import pprint
+import socket
+import time
+
+import libvirt
+
+import apex.common.constants as con
+from apex.common import exceptions as exc
+from apex.common import utils
+from apex.overcloud.node import OvercloudNode
+import apex.settings.deploy_settings as ds
+
+
+SNAP_FILE = 'snapshot.properties'
+CHECKSUM = 'OPNFV_SNAP_SHA512SUM'
+OVERCLOUD_RC = 'overcloudrc'
+SSH_KEY = 'id_rsa'
+OPENSTACK = 'openstack'
+OPENDAYLIGHT = 'opendaylight'
+SERVICES = (OPENSTACK, OPENDAYLIGHT)
+
+
+class SnapshotDeployment:
+ def __init__(self, deploy_settings, snap_cache_dir, fetch=True,
+ all_in_one=False):
+ self.id_rsa = None
+ self.fetch = fetch
+ ds_opts = deploy_settings['deploy_options']
+ self.os_version = ds_opts['os_version']
+ self.ha_enabled = deploy_settings['global_params']['ha_enabled']
+ if self.ha_enabled:
+ self.ha_ext = 'ha'
+ elif all_in_one:
+ self.ha_ext = 'noha-allinone'
+ else:
+ self.ha_ext = 'noha'
+ self.snap_cache_dir = os.path.join(snap_cache_dir,
+ "{}/{}".format(self.os_version,
+ self.ha_ext))
+ self.networks = []
+ self.oc_nodes = []
+ self.properties_url = "{}/apex/{}/{}".format(con.OPNFV_ARTIFACTS,
+ self.os_version,
+ self.ha_ext)
+ self.conn = libvirt.open('qemu:///system')
+ if not self.conn:
+ raise exc.SnapshotDeployException(
+ 'Unable to open libvirt connection')
+ if self.fetch:
+ self.pull_snapshot(self.properties_url, self.snap_cache_dir)
+ else:
+ logging.info('No fetch enabled. Will not attempt to pull latest '
+ 'snapshot')
+ self.deploy_snapshot()
+
+ @staticmethod
+ def pull_snapshot(url_path, snap_cache_dir):
+ """
+ Compare opnfv properties file and download and unpack snapshot if
+ necessary
+ :param url_path: path of latest snap info
+ :param snap_cache_dir: local directory for snap cache
+ :return: None
+ """
+ full_url = os.path.join(url_path, SNAP_FILE)
+ upstream_props = utils.fetch_properties(full_url)
+ logging.debug("Upstream properties are: {}".format(upstream_props))
+ try:
+ upstream_sha = upstream_props[CHECKSUM]
+ except KeyError:
+ logging.error('Unable to find {} for upstream properties: '
+ '{}'.format(CHECKSUM, upstream_props))
+ raise exc.SnapshotDeployException('Unable to find upstream '
+ 'properties checksum value')
+ local_prop_file = os.path.join(snap_cache_dir, SNAP_FILE)
+ try:
+ local_props = utils.fetch_properties(local_prop_file)
+ local_sha = local_props[CHECKSUM]
+ pull_snap = local_sha != upstream_sha
+ except (exc.FetchException, KeyError):
+ logging.info("No locally cached properties found, will pull "
+ "latest")
+ local_sha = None
+ pull_snap = True
+ logging.debug('Local sha: {}, Upstream sha: {}'.format(local_sha,
+ upstream_sha))
+ if pull_snap:
+ logging.info('SHA mismatch, will download latest snapshot')
+ full_snap_url = upstream_props['OPNFV_SNAP_URL']
+ snap_file = os.path.basename(full_snap_url)
+ snap_url = full_snap_url.replace(snap_file, '')
+ if not snap_url.startswith('http://'):
+ snap_url = 'http://' + snap_url
+ utils.fetch_upstream_and_unpack(dest=snap_cache_dir,
+ url=snap_url,
+ targets=[SNAP_FILE, snap_file]
+ )
+ else:
+ logging.info('SHA match, artifacts in cache are already latest. '
+ 'Will not download.')
+
+ def create_networks(self):
+ logging.info("Detecting snapshot networks")
+ try:
+ xmls = fnmatch.filter(os.listdir(self.snap_cache_dir), '*.xml')
+ except FileNotFoundError:
+ raise exc.SnapshotDeployException(
+ 'No XML files found in snap cache directory: {}'.format(
+ self.snap_cache_dir))
+ net_xmls = list()
+ for xml in xmls:
+ if xml.startswith('baremetal'):
+ continue
+ net_xmls.append(os.path.join(self.snap_cache_dir, xml))
+ if not net_xmls:
+ raise exc.SnapshotDeployException(
+ 'No network XML files detected in snap cache, '
+ 'please check local snap cache contents')
+ logging.info('Snapshot networks found: {}'.format(net_xmls))
+ for xml in net_xmls:
+ logging.debug('Creating network from {}'.format(xml))
+ with open(xml, 'r') as fh:
+ net_xml = fh.read()
+ net = self.conn.networkCreateXML(net_xml)
+ self.networks.append(net)
+ logging.info('Network started: {}'.format(net.name()))
+
+ def parse_and_create_nodes(self):
+ """
+ Parse snapshot node.yaml config file and create overcloud nodes
+ :return: None
+ """
+ node_file = os.path.join(self.snap_cache_dir, 'node.yaml')
+ if not os.path.isfile(node_file):
+ raise exc.SnapshotDeployException('Missing node definitions from '
+ '{}'.format(node_file))
+ node_data = utils.parse_yaml(node_file)
+ if 'servers' not in node_data:
+ raise exc.SnapshotDeployException('Invalid node.yaml format')
+ for node, data in node_data['servers'].items():
+ logging.info('Creating node: {}'.format(node))
+ logging.debug('Node data is:\n{}'.format(pprint.pformat(data)))
+ node_xml = os.path.join(self.snap_cache_dir,
+ '{}.xml'.format(data['vNode-name']))
+ node_qcow = os.path.join(self.snap_cache_dir,
+ '{}.qcow2'.format(data['vNode-name']))
+ self.oc_nodes.append(
+ OvercloudNode(ip=data['address'],
+ ovs_ctrlrs=data['ovs-controller'],
+ ovs_mgrs=data['ovs-managers'],
+ role=data['type'],
+ name=node,
+ node_xml=node_xml,
+ disk_img=node_qcow)
+ )
+ logging.info('Node Created')
+ logging.info('Starting nodes')
+ for node in self.oc_nodes:
+ node.start()
+
+ def get_controllers(self):
+ controllers = []
+ for node in self.oc_nodes:
+ if node.role == 'controller':
+ controllers.append(node)
+ return controllers
+
+ def is_service_up(self, service):
+ assert service in SERVICES
+ if service == OPENSTACK:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(5)
+ controllers = self.get_controllers()
+ if not controllers:
+ raise exc.SnapshotDeployException('No OpenStack controllers found')
+
+ for node in controllers:
+ logging.info('Waiting until {} is up on controller: '
+ '{}'.format(service, node.name))
+ for x in range(10):
+ logging.debug('Checking {} is up attempt {}'.format(service,
+ str(x + 1)))
+ if service == OPENSTACK:
+ # Check if Neutron is up
+ if sock.connect_ex((node.ip, 9696)) == 0:
+ logging.info('{} is up on controller {}'.format(
+ service, node.name))
+ break
+ elif service == OPENDAYLIGHT:
+ url = 'http://{}:8081/diagstatus'.format(node.ip)
+ try:
+ utils.open_webpage(url)
+ logging.info('{} is up on controller {}'.format(
+ service, node.name))
+ break
+ except Exception as e:
+ logging.debug('Cannot contact ODL. Reason: '
+ '{}'.format(e))
+ time.sleep(60)
+ else:
+ logging.error('{} is not running after 10 attempts'.format(
+ service))
+ return False
+ return True
+
+ def deploy_snapshot(self):
+ # bring up networks
+ self.create_networks()
+ # check overcloudrc exists, id_rsa
+ for snap_file in (OVERCLOUD_RC, SSH_KEY):
+ if not os.path.isfile(os.path.join(self.snap_cache_dir,
+ snap_file)):
+ logging.warning('File is missing from snap cache: '
+ '{}'.format(snap_file))
+ # create nodes
+ self.parse_and_create_nodes()
+ # validate deployment
+ if self.is_service_up(OPENSTACK):
+ logging.info('OpenStack is up')
+ else:
+ raise exc.SnapshotDeployException('OpenStack is not alive')
+ if self.is_service_up(OPENDAYLIGHT):
+ logging.info('OpenDaylight is up')
+ else:
+ raise exc.SnapshotDeployException(
+ 'OpenDaylight is not reporting diag status')
+ # TODO(trozet): recreate external network/subnet if missing
+ logging.info('Snapshot deployment complete. Please use the {} file '
+ 'in {} to interact with '
+ 'OpenStack'.format(OVERCLOUD_RC, self.snap_cache_dir))
diff --git a/apex/overcloud/node.py b/apex/overcloud/node.py
new file mode 100644
index 00000000..622d1fd1
--- /dev/null
+++ b/apex/overcloud/node.py
@@ -0,0 +1,147 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+import shutil
+import xml.etree.ElementTree as ET
+
+import distro
+import libvirt
+
+from apex.common.exceptions import OvercloudNodeException
+
+
+class OvercloudNode:
+ """
+ Overcloud server
+ """
+ def __init__(self, role, ip, ovs_ctrlrs, ovs_mgrs, name, node_xml,
+ disk_img):
+ self.role = role
+ self.ip = ip
+ self.ovs_ctrlrs = ovs_ctrlrs
+ self.ovs_mgrs = ovs_mgrs
+ self.name = name
+ self.node_xml_file = node_xml
+ self.node_xml = None
+ self.vm = None
+ self.disk_img = None
+ if not os.path.isfile(self.node_xml_file):
+ raise OvercloudNodeException('XML definition file not found: '
+ '{}'.format(self.node_xml_file))
+ if not os.path.isfile(disk_img):
+ raise OvercloudNodeException('Disk image file not found: '
+ '{}'.format(disk_img))
+ self.conn = libvirt.open('qemu:///system')
+ if not self.conn:
+ raise OvercloudNodeException('Unable to open libvirt connection')
+
+ self.create(src_disk=disk_img)
+
+ def _configure_disk(self, disk):
+ # find default storage pool path
+ pool = self.conn.storagePoolLookupByName('default')
+ if pool is None:
+ raise OvercloudNodeException('Cannot find default storage pool')
+ pool_xml = pool.XMLDesc()
+ logging.debug('Default storage pool xml: {}'.format(pool_xml))
+ etree = ET.fromstring(pool_xml)
+ try:
+ path = etree.find('target').find('path').text
+ logging.info('System libvirt default pool path: {}'.format(path))
+ except AttributeError as e:
+ logging.error('Failure to find libvirt storage path: {}'.format(
+ e))
+ raise OvercloudNodeException('Cannot find default storage path')
+ # copy disk to system path
+ self.disk_img = os.path.join(path, os.path.basename(disk))
+ logging.info('Copying disk image to: {}. This may take some '
+ 'time...'.format(self.disk_img))
+ shutil.copyfile(disk, self.disk_img)
+
+ @staticmethod
+ def _update_xml(xml, disk_path=None):
+ """
+ Updates a libvirt XML file for the current architecture and OS of this
+ machine
+ :param xml: XML string of Libvirt domain definition
+ :param disk_path: Optional file path to update for the backing disk
+ image
+ :return: Updated XML
+ """
+ logging.debug('Parsing xml')
+ try:
+ etree = ET.fromstring(xml)
+ except ET.ParseError:
+ logging.error('Unable to parse node XML: {}'.format(xml))
+ raise OvercloudNodeException('Unable to parse node XML')
+
+ try:
+ type_element = etree.find('os').find('type')
+ if 'machine' in type_element.keys():
+ type_element.set('machine', 'pc')
+ logging.debug('XML updated with machine "pc"')
+ except AttributeError:
+ logging.warning('Failure to set XML machine type')
+
+ # qemu-kvm path may differ per system, need to detect it and update xml
+ linux_ver = distro.linux_distribution()[0]
+ if linux_ver == 'Fedora':
+ qemu_path = '/usr/bin/qemu-kvm'
+ else:
+ qemu_path = '/usr/libexec/qemu-kvm'
+
+ try:
+ etree.find('devices').find('emulator').text = qemu_path
+ logging.debug('XML updated with emulator location: '
+ '{}'.format(qemu_path))
+ xml = ET.tostring(etree).decode('utf-8')
+ except AttributeError:
+ logging.warning('Failure to update XML qemu path')
+
+ if disk_path:
+ try:
+ disk_element = etree.find('devices').find('disk').find(
+ 'source')
+ disk_element.set('file', disk_path)
+ logging.debug('XML updated with file path: {}'.format(
+ disk_path))
+ except AttributeError:
+ logging.error('Failure to parse XML and set disk type')
+ raise OvercloudNodeException(
+ 'Unable to set new disk path in xml {}'.format(xml))
+
+ return ET.tostring(etree).decode('utf-8')
+
+ def create(self, src_disk):
+ # copy disk to pool and get new disk location
+ logging.debug('Preparing disk image')
+ self._configure_disk(src_disk)
+ logging.debug('Parsing node XML from {}'.format(self.node_xml_file))
+ with open(self.node_xml_file, 'r') as fh:
+ self.node_xml = fh.read()
+ # if machine type is not 'pc' we need to set it; also need to update the
+ # qemu-kvm emulator path and the disk storage location
+ self.node_xml = self._update_xml(self.node_xml, self.disk_img)
+ logging.info('Creating node {} in libvirt'.format(self.name))
+ self.vm = self.conn.defineXML(self.node_xml)
+
+ def start(self):
+ """
+ Boot node in libvirt
+ :return:
+ """
+ try:
+ self.vm.create()
+ logging.info('Node {} started'.format(self.name))
+ except libvirt.libvirtError as e:
+ logging.error('Failed to start domain: {}'.format(self.name))
+ raise OvercloudNodeException('Failed to start VM. Reason: '
+ '{}'.format(e))
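
A hedged illustration of what OvercloudNode._update_xml() produces: the domain
XML below is a trimmed-down copy of the apex/tests/config/baremetal0.xml
fixture that follows, and the disk path is an example. On Fedora the emulator
path becomes /usr/bin/qemu-kvm (otherwise /usr/libexec/qemu-kvm), the machine
type is forced to 'pc', and the disk source is rewritten to the copied image.

    from apex.overcloud.node import OvercloudNode

    xml = """<domain type='kvm'>
      <os><type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type></os>
      <devices>
        <emulator>/usr/libexec/qemu-kvm</emulator>
        <disk type='file' device='disk'>
          <source file='/home/images/baremetal0.qcow2'/>
        </disk>
      </devices>
    </domain>"""

    # _update_xml is a staticmethod, so no libvirt connection is needed here
    new_xml = OvercloudNode._update_xml(
        xml, disk_path='/var/lib/libvirt/images/baremetal0.qcow2')
    assert '/var/lib/libvirt/images/baremetal0.qcow2' in new_xml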
diff --git a/apex/tests/config/admin.xml b/apex/tests/config/admin.xml
new file mode 100644
index 00000000..69b15b1f
--- /dev/null
+++ b/apex/tests/config/admin.xml
@@ -0,0 +1,7 @@
+<network connections='1' ipv6='yes'>
+ <name>admin</name>
+ <uuid>761c34f8-2a72-4205-8e69-5ed6626c6efa</uuid>
+ <forward mode='bridge'/>
+ <bridge name='br-admin'/>
+ <virtualport type='openvswitch'/>
+</network>
diff --git a/apex/tests/config/baremetal0.xml b/apex/tests/config/baremetal0.xml
new file mode 100644
index 00000000..4ff8f65a
--- /dev/null
+++ b/apex/tests/config/baremetal0.xml
@@ -0,0 +1,73 @@
+<domain type='kvm'>
+ <name>baremetal0</name>
+ <uuid>25bf15b6-130c-4bca-87af-e5cbc14bb454</uuid>
+ <memory unit='KiB'>12582912</memory>
+ <currentMemory unit='KiB'>12582912</currentMemory>
+ <vcpu placement='static'>4</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
+ <boot dev='hd'/>
+ <bootmenu enable='no'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='host-passthrough'/>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/libexec/qemu-kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='unsafe'/>
+ <source file='/home/images/baremetal0.qcow2'/>
+ <target dev='sda' bus='sata'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+ </disk>
+ <controller type='scsi' index='0' model='virtio-scsi'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </controller>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <controller type='sata' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </controller>
+ <interface type='bridge'>
+ <mac address='00:5b:06:25:0c:dc'/>
+ <source bridge='br-admin'/>
+ <virtualport type='openvswitch'>
+ <parameters interfaceid='04b63cb9-21a9-4385-bbd6-df677a5eeecf'/>
+ </virtualport>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'>
+ <listen type='address' address='127.0.0.1'/>
+ </graphics>
+ <video>
+ <model type='cirrus' vram='16384' heads='1' primary='yes'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </memballoon>
+ </devices>
+ <seclabel type='dynamic' model='selinux' relabel='yes'/>
+ <seclabel type='dynamic' model='dac' relabel='yes'/>
+</domain>
diff --git a/apex/tests/config/node.yaml b/apex/tests/config/node.yaml
new file mode 100644
index 00000000..e05644c9
--- /dev/null
+++ b/apex/tests/config/node.yaml
@@ -0,0 +1,12 @@
+---
+servers:
+ overcloud-controller-0.opnfvlf.org:
+ address: 192.0.2.28
+ orig-ctl-mac: 00:5b:06:25:0c:dc
+ ovs-controller: tcp:192.0.2.28:6653
+ ovs-managers:
+ - ptcp:6639:127.0.0.1
+ - tcp:192.0.2.28:6640
+ type: controller
+ user: heat-admin
+ vNode-name: baremetal0
diff --git a/apex/tests/config/snapshot.properties b/apex/tests/config/snapshot.properties
new file mode 100644
index 00000000..64c149e2
--- /dev/null
+++ b/apex/tests/config/snapshot.properties
@@ -0,0 +1,2 @@
+OPNFV_SNAP_URL=artifacts.opnfv.org/apex/master/noha/apex-csit-snap-2018-08-05.tar.gz
+OPNFV_SNAP_SHA512SUM=bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc236e36a74d05ee813584f3e5bb92aa23dec775846317b75d574f8c86186c666f78a299c24fb68849897bdd4bc
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index 412d6f49..4c250117 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -135,7 +135,7 @@ class TestCommonUtils:
assert output is not None
def test_open_invalid_webpage(self):
- assert_raises(urllib.request.URLError, utils.open_webpage,
+ assert_raises(exceptions.FetchException, utils.open_webpage,
'http://inv4lIdweb-page.com')
@patch('builtins.open', a_mock_open)
diff --git a/apex/tests/test_apex_deploy.py b/apex/tests/test_apex_deploy.py
index 5741818a..be52c276 100644
--- a/apex/tests/test_apex_deploy.py
+++ b/apex/tests/test_apex_deploy.py
@@ -8,6 +8,7 @@
##############################################################################
import argparse
+import os
import unittest
from mock import patch
@@ -17,12 +18,12 @@ from mock import mock_open
from apex.common.exceptions import ApexDeployException
from apex.common.constants import DEFAULT_OS_VERSION
-from apex.deploy import deploy_quickstart
from apex.deploy import validate_cross_settings
from apex.deploy import build_vms
from apex.deploy import create_deploy_parser
from apex.deploy import validate_deploy_args
from apex.deploy import main
+from apex.tests.constants import TEST_DUMMY_CONFIG
from nose.tools import (
assert_is_instance,
@@ -48,9 +49,6 @@ class TestDeploy(unittest.TestCase):
def teardown(self):
"""This method is run once after _each_ test method is executed"""
- def test_deloy_quickstart(self):
- deploy_quickstart(None, None, None)
-
def test_validate_cross_settings(self):
deploy_settings = {'deploy_options': {'dataplane': 'ovs'}}
net_settings = Mock()
@@ -85,12 +83,23 @@ class TestDeploy(unittest.TestCase):
args = Mock()
args.inventory_file = None
args.virtual = True
+ args.snapshot = False
+ validate_deploy_args(args)
+
+ def test_validate_snapshot_deploy_args(self):
+ args = Mock()
+ args.deploy_settings_file = os.path.join(TEST_DUMMY_CONFIG,
+ 'dummy-deploy-settings.yaml')
+ args.inventory_file = None
+ args.virtual = True
+ args.snapshot = True
validate_deploy_args(args)
def test_validate_deploy_args_no_virt_no_inv(self):
args = Mock()
args.inventory_file = 'file_name'
args.virtual = False
+ args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
@patch('apex.deploy.os.path')
@@ -99,12 +108,14 @@ class TestDeploy(unittest.TestCase):
args = Mock()
args.inventory_file = None
args.virtual = True
+ args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
def test_validate_deploy_args_virt_and_inv_file(self):
args = Mock()
args.inventory_file = 'file_name'
args.virtual = True
+ args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
@patch('apex.deploy.ApexDeployment')
@@ -153,6 +164,7 @@ class TestDeploy(unittest.TestCase):
args.virtual = False
args.quickstart = False
args.debug = False
+ args.snapshot = False
args.upstream = True
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['external']
@@ -164,6 +176,7 @@ class TestDeploy(unittest.TestCase):
mock_parsers.parse_nova_output.return_value = {'testnode1': 'test'}
main()
+ @patch('apex.deploy.SnapshotDeployment')
@patch('apex.deploy.validate_cross_settings')
@patch('apex.deploy.virt_utils')
@patch('apex.deploy.utils')
@@ -174,14 +187,15 @@ class TestDeploy(unittest.TestCase):
@patch('apex.deploy.os')
@patch('apex.deploy.create_deploy_parser')
@patch('builtins.open', a_mock_open, create=True)
- def test_main_qs(self, mock_parser, mock_os, mock_deploy,
- mock_net_sets, mock_net_env, mock_inv, mock_utils,
- mock_virt_utils, mock_cross):
+ def test_main_snapshot(self, mock_parser, mock_os, mock_deploy,
+ mock_net_sets, mock_net_env, mock_inv, mock_utils,
+ mock_virt_utils, mock_cross, mock_snap_deployment):
args = mock_parser.return_value.parse_args.return_value
args.virtual = False
- args.quickstart = True
+ args.snapshot = True
args.debug = True
main()
+ mock_snap_deployment.assert_called()
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@@ -237,6 +251,7 @@ class TestDeploy(unittest.TestCase):
args.virt_compute_ram = None
args.virt_default_ram = 12
args.upstream = True
+ args.snapshot = False
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['admin']
deploy_sets = mock_deploy_sets.return_value
@@ -300,6 +315,7 @@ class TestDeploy(unittest.TestCase):
args.virt_compute_ram = None
args.virt_default_ram = 12
args.upstream = True
+ args.snapshot = False
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['admin']
deploy_sets = mock_deploy_sets.return_value
@@ -361,6 +377,7 @@ class TestDeploy(unittest.TestCase):
args.quickstart = False
args.debug = False
args.upstream = False
+ args.snapshot = False
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['external']
net_sets.__getitem__.side_effect = net_sets_dict.__getitem__
diff --git a/apex/tests/test_apex_deployment_snapshot.py b/apex/tests/test_apex_deployment_snapshot.py
new file mode 100644
index 00000000..d7542585
--- /dev/null
+++ b/apex/tests/test_apex_deployment_snapshot.py
@@ -0,0 +1,374 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from mock import patch
+import os
+import unittest
+import urllib.request
+
+from apex.common import exceptions as exc
+from apex.deployment.snapshot import SnapshotDeployment
+from apex.settings.deploy_settings import DeploySettings
+from apex.tests.constants import TEST_DUMMY_CONFIG
+
+DUMMY_SNAP_DIR = '/tmp/dummy_cache'
+
+
+class TestSnapshotDeployment(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_init(self, mock_deploy_snap, mock_libvirt_open, mock_pull_snap):
+
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=True, all_in_one=False)
+ snap_dir = os.path.join(DUMMY_SNAP_DIR, 'queens', 'noha')
+ self.assertEqual(d.snap_cache_dir, snap_dir)
+ mock_pull_snap.assert_called()
+ mock_deploy_snap.assert_called()
+ self.assertEqual(d.ha_ext, 'noha')
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_init_allinone_no_fetch(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap):
+
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=True)
+ snap_dir = os.path.join(DUMMY_SNAP_DIR, 'queens', 'noha-allinone')
+ self.assertEqual(d.snap_cache_dir, snap_dir)
+ mock_pull_snap.assert_not_called()
+ mock_deploy_snap.assert_called()
+ self.assertEqual(d.ha_ext, 'noha-allinone')
+
+ @patch('apex.deployment.snapshot.utils.fetch_upstream_and_unpack')
+ @patch('apex.deployment.snapshot.utils.fetch_properties')
+ def test_pull_snapshot_is_latest(self, mock_fetch_props,
+ mock_fetch_artifact):
+ mock_fetch_props.return_value = {
+ 'OPNFV_SNAP_URL': 'artifacts.opnfv.org/apex/master/noha/'
+ 'apex-csit-snap-2018-08-05.tar.gz',
+ 'OPNFV_SNAP_SHA512SUM': 'bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
+ '6e36a74d05ee813584f3e5bb92aa23dec77584631'
+ '7b75d574f8c86186c666f78a299c24fb68849897b'
+ 'dd4bc'
+ }
+ SnapshotDeployment.pull_snapshot('http://dummy_url',
+ TEST_DUMMY_CONFIG)
+ mock_fetch_artifact.assert_not_called()
+
+ @patch('apex.deployment.snapshot.utils.fetch_upstream_and_unpack')
+ @patch('apex.deployment.snapshot.utils.fetch_properties')
+ def test_pull_snapshot_fetch_props_failure(self, mock_fetch_props,
+ mock_fetch_artifact):
+ mock_fetch_props.side_effect = exc.FetchException
+ self.assertRaises(exc.FetchException,
+ SnapshotDeployment.pull_snapshot,
+ 'http://dummy_url', TEST_DUMMY_CONFIG)
+
+ @patch('apex.deployment.snapshot.utils.fetch_upstream_and_unpack')
+ @patch('apex.deployment.snapshot.utils.fetch_properties')
+ def test_pull_snapshot_is_not_latest(self, mock_fetch_props,
+ mock_fetch_artifact):
+ mock_fetch_props.side_effect = [{
+ 'OPNFV_SNAP_URL': 'artifacts.opnfv.org/apex/master/noha/'
+ 'apex-csit-snap-2018-08-05.tar.gz',
+ 'OPNFV_SNAP_SHA512SUM': '123c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
+ '6e36a74d05ee813584f3e5bb92aa23dec77584631'
+ '7b75d574f8c86186c666f78a299c24fb68849897b'
+ 'dd4bc'},
+ {
+ 'OPNFV_SNAP_URL': 'artifacts.opnfv.org/apex/master/noha/'
+ 'apex-csit-snap-2018-08-05.tar.gz',
+ 'OPNFV_SNAP_SHA512SUM': 'bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
+ '6e36a74d05ee813584f3e5bb92aa23dec77584631'
+ '7b75d574f8c86186c666f78a299c24fb68849897b'
+ 'dd4bc'}]
+ SnapshotDeployment.pull_snapshot('http://dummy_url',
+ TEST_DUMMY_CONFIG)
+ mock_fetch_artifact.assert_called()
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_create_networks(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ conn = mock_libvirt_open('qemu:///system')
+ d.create_networks()
+ conn.networkCreateXML.assert_called()
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_create_networks_invalid_cache(self, mock_deploy_snap,
+ mock_libvirt_open, mock_pull_snap,
+ mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = '/doesnotexist/'
+ self.assertRaises(exc.SnapshotDeployException, d.create_networks)
+
+ @patch('apex.deployment.snapshot.fnmatch')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_create_networks_no_net_xmls(self, mock_deploy_snap,
+ mock_libvirt_open, mock_pull_snap,
+ mock_oc_node, mock_fnmatch):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = '/doesnotexist/'
+ mock_fnmatch.filter.return_value = []
+ self.assertRaises(exc.SnapshotDeployException, d.create_networks)
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_parse_and_create_nodes(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ d.parse_and_create_nodes()
+ node.start.assert_called()
+ self.assertListEqual([node], d.oc_nodes)
+
+ @patch('apex.deployment.snapshot.utils.parse_yaml')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_parse_and_create_nodes_invalid_node_yaml(
+ self, mock_deploy_snap, mock_libvirt_open, mock_pull_snap,
+ mock_oc_node, mock_parse_yaml):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ mock_parse_yaml.return_value = {'blah': 'dummy'}
+ self.assertRaises(exc.SnapshotDeployException,
+ d.parse_and_create_nodes)
+ node.start.assert_not_called()
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_get_controllers(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.role = 'controller'
+ d.oc_nodes = [node]
+ self.assertListEqual(d.get_controllers(), [node])
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_get_controllers_none(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.role = 'compute'
+ d.oc_nodes = [node]
+ self.assertListEqual(d.get_controllers(), [])
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.socket')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_openstack_up(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node, mock_socket,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ sock = mock_socket.socket(mock_socket.AF_INET, mock_socket.SOCK_STREAM)
+ sock.connect_ex.return_value = 0
+ self.assertTrue(d.is_service_up('openstack'))
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.socket')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_openstack_up_false(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node, mock_socket,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ sock = mock_socket.socket(mock_socket.AF_INET, mock_socket.SOCK_STREAM)
+ sock.connect_ex.return_value = 1
+ self.assertFalse(d.is_service_up('openstack'))
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.utils')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_opendaylight_up(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node, mock_utils,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ mock_utils.open_webpage.return_value = 0
+ self.assertTrue(d.is_service_up('opendaylight'))
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.utils')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_opendaylight_up_false(self, mock_deploy_snap,
+ mock_libvirt_open, mock_pull_snap,
+ mock_oc_node, mock_utils,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ mock_utils.open_webpage.side_effect = urllib.request.URLError(
+ reason='blah')
+ self.assertFalse(d.is_service_up('opendaylight'))
+
+ @patch('apex.deployment.snapshot.os.path.isfile')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.is_service_up')
+ @patch('apex.deployment.snapshot.SnapshotDeployment'
+ '.parse_and_create_nodes')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.create_networks')
+ def test_deploy_snapshot(self, mock_create_networks, mock_libvirt_open,
+ mock_pull_snap, mock_parse_create,
+ mock_service_up, mock_is_file):
+ mock_is_file.return_value = True
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ SnapshotDeployment(deploy_settings=ds, snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ mock_parse_create.assert_called()
+ mock_create_networks.assert_called()
+ mock_service_up.assert_called()
+
+ @patch('apex.deployment.snapshot.os.path.isfile')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.is_service_up')
+ @patch('apex.deployment.snapshot.SnapshotDeployment'
+ '.parse_and_create_nodes')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.create_networks')
+ def test_deploy_snapshot_services_down(self, mock_create_networks,
+ mock_libvirt_open,
+ mock_pull_snap, mock_parse_create,
+ mock_service_up, mock_is_file):
+ mock_is_file.return_value = True
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ mock_service_up.return_value = False
+ self.assertRaises(exc.SnapshotDeployException,
+ SnapshotDeployment,
+ ds, DUMMY_SNAP_DIR, False, False)
+
+ mock_service_up.side_effect = [True, False]
+ self.assertRaises(exc.SnapshotDeployException,
+ SnapshotDeployment,
+ ds, DUMMY_SNAP_DIR, False, False)
diff --git a/apex/tests/test_apex_overcloud_node.py b/apex/tests/test_apex_overcloud_node.py
new file mode 100644
index 00000000..4c67b1d8
--- /dev/null
+++ b/apex/tests/test_apex_overcloud_node.py
@@ -0,0 +1,191 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from libvirt import libvirtError
+from mock import patch
+from mock import MagicMock
+import os
+import unittest
+import urllib.request
+
+from apex.common import exceptions as exc
+from apex.overcloud.node import OvercloudNode
+from apex.settings.deploy_settings import DeploySettings
+from apex.tests.constants import TEST_DUMMY_CONFIG
+
+DUMMY_SNAP_DIR = '/tmp/dummy_cache'
+
+
+class TestOvercloudNode(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_init(self, mock_libvirt_open, mock_is_file, mock_node_create):
+ mock_is_file.return_value = True
+ OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0', node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ mock_node_create.assert_called()
+
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_init_invalid_files(self, mock_libvirt_open, mock_node_create):
+ self.assertRaises(exc.OvercloudNodeException,
+ OvercloudNode, 'controller', '123.123.123',
+ None, None, 'dummy-controller-0', 'dummynode.xml',
+ 'dummy.qcow2')
+
+ @patch('apex.overcloud.node.shutil.copyfile')
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_configure_disk(self, mock_libvirt_open, mock_is_file,
+ mock_node_create, mock_copy):
+ mock_is_file.return_value = True
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value.XMLDesc.return_value = """
+ <pool type='dir'>
+ <target>
+ <path>/var/lib/libvirt/images</path>
+ </target>
+ </pool>
+ """
+ node._configure_disk('dummy.qcow2')
+ mock_copy.assert_called()
+ self.assertEqual(node.disk_img, '/var/lib/libvirt/images/dummy.qcow2')
+
+ @patch('apex.overcloud.node.shutil.copyfile')
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_configure_disk_bad_path(self, mock_libvirt_open, mock_is_file,
+ mock_node_create, mock_copy):
+ mock_is_file.return_value = True
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value.XMLDesc.return_value = """
+ <pool type='dir'>
+ <target>
+ </target>
+ </pool>
+ """
+ self.assertRaises(exc.OvercloudNodeException,
+ node._configure_disk, 'dummy.qcow2')
+
+ @patch('apex.overcloud.node.shutil.copyfile')
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_configure_disk_no_pool(self, mock_libvirt_open, mock_is_file,
+ mock_node_create, mock_copy):
+ mock_is_file.return_value = True
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value = None
+ self.assertRaises(exc.OvercloudNodeException,
+ node._configure_disk, 'dummy.qcow2')
+
+ @patch('apex.overcloud.node.distro.linux_distribution')
+ def test_update_xml(self, mock_linux_distro):
+ mock_linux_distro.return_value = ['Fedora']
+ xml_file = os.path.join(TEST_DUMMY_CONFIG, 'baremetal0.xml')
+ with open(xml_file, 'r') as fh:
+ xml = fh.read()
+ new_xml = OvercloudNode._update_xml(
+ xml=xml, disk_path='/dummy/disk/path/blah.qcow2')
+ self.assertIn('/dummy/disk/path/blah.qcow2', new_xml)
+ self.assertIn('/usr/bin/qemu-kvm', new_xml)
+
+ @patch('apex.overcloud.node.distro.linux_distribution')
+ def test_update_xml_no_disk(self, mock_linux_distro):
+ mock_linux_distro.return_value = ['Fedora']
+ xml_file = os.path.join(TEST_DUMMY_CONFIG, 'baremetal0.xml')
+ with open(xml_file, 'r') as fh:
+ xml = fh.read()
+ new_xml = OvercloudNode._update_xml(xml=xml)
+ self.assertIn('/home/images/baremetal0.qcow2', new_xml)
+ self.assertIn('/usr/bin/qemu-kvm', new_xml)
+
+ @patch('apex.overcloud.node.OvercloudNode._update_xml')
+ @patch('apex.overcloud.node.OvercloudNode._configure_disk')
+ @patch('apex.overcloud.node.libvirt.open')
+ @patch('apex.overcloud.node.os.path.isfile')
+ def test_create(self, mock_isfile, mock_libvirt_conn, mock_configure_disk,
+ mock_update_xml):
+ mock_isfile.return_value = True
+ domain = mock_libvirt_conn.return_value.defineXML.return_value
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml=os.path.join(TEST_DUMMY_CONFIG,
+ 'baremetal0.xml'),
+ disk_img='dummy.qcow2')
+ self.assertIs(node.vm, domain)
+
+ @patch('apex.overcloud.node.OvercloudNode._update_xml')
+ @patch('apex.overcloud.node.OvercloudNode._configure_disk')
+ @patch('apex.overcloud.node.libvirt.open')
+ @patch('apex.overcloud.node.os.path.isfile')
+ def test_start(self, mock_isfile, mock_libvirt_conn, mock_configure_disk,
+ mock_update_xml):
+ mock_isfile.return_value = True
+ domain = mock_libvirt_conn.return_value.defineXML.return_value
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml=os.path.join(TEST_DUMMY_CONFIG,
+ 'baremetal0.xml'),
+ disk_img='dummy.qcow2')
+ node.start()
+ domain.create.assert_called()
+
+ @patch('apex.overcloud.node.OvercloudNode._update_xml')
+ @patch('apex.overcloud.node.OvercloudNode._configure_disk')
+ @patch('apex.overcloud.node.libvirt.open')
+ @patch('apex.overcloud.node.os.path.isfile')
+ def test_start_fail(self, mock_isfile, mock_libvirt_conn,
+ mock_configure_disk, mock_update_xml):
+ mock_isfile.return_value = True
+ domain = mock_libvirt_conn.return_value.defineXML.return_value
+ domain.create.side_effect = libvirtError('blah')
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml=os.path.join(TEST_DUMMY_CONFIG,
+ 'baremetal0.xml'),
+ disk_img='dummy.qcow2')
+ self.assertRaises(exc.OvercloudNodeException, node.start)
diff --git a/docs/release/installation/virtual.rst b/docs/release/installation/virtual.rst
index 5682f364..a844d43f 100644
--- a/docs/release/installation/virtual.rst
+++ b/docs/release/installation/virtual.rst
@@ -3,10 +3,23 @@ Installation High-Level Overview - Virtual Deployment
Deploying virtually is an alternative deployment method to bare metal, where
only a single bare metal Jump Host server is required to execute deployment.
-In this deployment type, the Jump Host server will host the undercloud VM along
-with any number of OPNFV overcloud control/compute nodes. This deployment type
-is useful when physical resources are constrained, or there is a desire to
-deploy a temporary sandbox environment.
+This deployment type is useful when physical resources are constrained, or
+there is a desire to deploy a temporary sandbox environment.
+
+With virtual deployments, two deployment options are offered. The first is a
+standard deployment, where the Jump Host server hosts the undercloud VM along
+with any number of OPNFV overcloud control/compute nodes. This follows the same
+deployment workflow as bare metal and can take between 1 and 2 hours to complete.
+
+The second option is to use snapshot deployments. Snapshots are saved disk images
+of an OPNFV environment previously deployed upstream. These snapshots are promoted
+daily and contain an already deployed OPNFV environment that has passed a series
+of tests. The advantage of a snapshot is that it deploys in less than 10 minutes.
+Another major advantage is that snapshots work on both CentOS and Fedora. Note
+that Fedora support is only tested via pip installation at this time, not via RPM.
+
+Standard Deployment Overview
+----------------------------
The virtual deployment operates almost the same way as the bare metal
deployment with a few differences mainly related to power management.
@@ -27,6 +40,23 @@ the power management. Finally, the default network settings file will deploy wi
modification. Customizations are welcome but not needed if a generic set of
network settings are acceptable.
+Snapshot Deployment Overview
+----------------------------
+
+Snapshot deployments use the same ``opnfv-deploy`` CLI as standard deployments.
+A snapshot deployment uses a cache to store the snapshot artifacts that are
+downloaded from the internet at deploy time. This caching avoids re-downloading
+the same artifact between deployments. The snapshot deployment recreates the same
+network and libvirt setup as would have been provisioned by a standard
+deployment, with the exception that there is no undercloud VM. At the end of the
+deployment, the location of the RC file to use for interacting with the
+overcloud directly from the Jump Host is displayed.
+
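+As a purely hypothetical example (the exact RC file path is printed at the end
+of the deployment and may differ), interacting with the overcloud from the Jump
+Host might look like:
+
+.. code-block:: bash
+
+    # RC file location is reported in the snapshot deployment output
+    source /root/overcloudrc
+    openstack server list
+    openstack network list
+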
+Snapshots come in different topology flavors. One is able to deploy either HA
+(3 Controllers, 2 Computes), no-HA (1 Controller, 2 Computes), or all-in-one
+(1 Controller/Compute). The snapshot deployment itself is always done with the
+os-odl-nofeature-* scenario.
+
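+Which topology is pulled at deploy time is derived from the deploy settings file
+passed to ``opnfv-deploy`` and the number of virtual computes requested (see the
+snapshot deployment section below). As an illustrative fragment (assuming the
+usual Apex deploy settings layout), the HA topology is selected when the deploy
+settings enable HA:
+
+.. code-block:: yaml
+
+    global_params:
+      ha_enabled: true
+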
Installation Guide - Virtual Deployment
=======================================
@@ -57,8 +87,8 @@ Install Jump Host
Follow the instructions in the `Install Bare Metal Jump Host`_ section.
-Running ``opnfv-deploy``
-------------------------
+Running ``opnfv-deploy`` for Standard Deployment
+------------------------------------------------
You are now ready to deploy OPNFV!
``opnfv-deploy`` has virtual deployment capability that includes all of
@@ -96,6 +126,43 @@ Follow the steps below to execute:
3. When the deployment is complete the IP for the undercloud and a url for the
OpenStack dashboard will be displayed
+Running ``opnfv-deploy`` for Snapshot Deployment
+------------------------------------------------
+
+Deploying snapshots requires enough disk space to cache the snapshot archives, as
+well as to store the VM disk images for each deployment. The snapshot cache
+directory can be configured at deploy time. Ensure the directory used resides on
+a partition with at least 20GB of free space. Additionally, Apex will attempt to
+detect the default libvirt storage pool on the Jump Host. This is typically
+``/var/lib/libvirt/images``. On default CentOS installations, this path resolves
+to the root (/) partition, which is only around 50GB. Therefore, ensure that the
+path for the default storage pool has enough space to hold the VM backing storage
+(approximately 4GB per VM). Note that each overcloud VM disk size is set to 40GB;
+however, libvirt grows these disks dynamically, so only about 4GB is consumed at
+initial deployment, though each disk may grow up to 40GB over time.
+
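+For example, the location and free space of the default storage pool can be
+checked ahead of time (assuming a ``default`` pool already exists on the Jump
+Host):
+
+.. code-block:: bash
+
+    # Show where the default libvirt storage pool keeps its volumes
+    virsh pool-dumpxml default | grep -A1 '<target>'
+    # Check the free space on that partition (adjust the path to the output above)
+    df -h /var/lib/libvirt/images
+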
+The new arguments to deploy snapshots include:
+
+ - `--snapshot`: Enables snapshot deployments
+ - `--snap-cache`: Indicates the directory to use for caching artifacts
+
+An example deployment command is:
+
+.. code-block:: bash
+
+    opnfv-deploy -d ../config/deploy/os-odl-queens-noha.yaml --snapshot \
+      --snap-cache /home/trozet/snap_cache --virtual-computes 0 --no-fetch
+
+In the above example, several of the Standard Deployment arguments are still
+used to deploy snapshots:
+
+ - `-d`: The deploy settings are used to determine the OpenStack version of the
+   snapshot to use, as well as the topology (an HA example is shown below)
+ - `--virtual-computes`: When set to 0, indicates to Apex that the all-in-one
+   snapshot should be used
+ - `--no-fetch`: Disables fetching the latest snapshot artifact from upstream,
+   using the latest artifact found in `--snap-cache` instead
+
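+A second, hypothetical example (the deploy settings file name may differ in your
+checkout) deploying the HA snapshot topology with two virtual computes, using
+only artifacts already present in the local cache:
+
+.. code-block:: bash
+
+    opnfv-deploy -d ../config/deploy/os-odl-nofeature-ha.yaml --snapshot \
+      --snap-cache /home/trozet/snap_cache --virtual-computes 2 --no-fetch
+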
Verifying the Setup - VMs
-------------------------
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
index 1cc304a5..ab09ff6e 100644
--- a/lib/ansible/playbooks/deploy_dependencies.yml
+++ b/lib/ansible/playbooks/deploy_dependencies.yml
@@ -10,10 +10,23 @@
- libguestfs-tools
- python-netaddr
- python2-pip
+ when: ansible_distribution == 'CentOS'
+ - dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - libselinux-python
+ - python-lxml
+ - libvirt-python
+ - libguestfs-tools
+ - python-netaddr
+ - python2-pip
+ when: ansible_distribution == 'Fedora'
- pip:
name: ansible-modules-hashivault,hvac,Jinja2
state: latest
executable: pip2
+ when: not snapshot
- sysctl:
name: net.ipv4.ip_forward
state: present
@@ -38,20 +51,31 @@
xml: '{{ lookup("template", "virsh_network_default.xml.j2") }}'
state: active
autostart: yes
+ when: not snapshot
- openvswitch_bridge:
bridge: 'br-{{ item }}'
state: present
with_items: '{{ virsh_enabled_networks }}'
+ - name: 'Configure IP on bridge'
+ shell: 'ip addr add 192.0.2.99/24 dev br-{{ item }}'
+ with_items: '{{ virsh_enabled_networks }}'
+ when: snapshot
+ - name: 'Bring up bridge'
+ shell: 'ip link set up br-{{ item }}'
+ with_items: '{{ virsh_enabled_networks }}'
+ when: snapshot
- virt_net:
state: present
name: '{{ item }}'
xml: '{{ lookup("template", "virsh_network_ovs.xml.j2") }}'
with_items: '{{ virsh_enabled_networks }}'
+ when: not snapshot
- virt_net:
state: active
name: '{{ item }}'
autostart: yes
with_items: '{{ virsh_enabled_networks }}'
+ when: not snapshot
- virt_pool:
name: default
autostart: yes
@@ -87,16 +111,19 @@
state: present
- name: Generate SSH key for root if missing
shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+ when: not snapshot
- name: Check that /u/l/python3.4/site-packages/virtualbmc/vbmc.py exists
stat:
path: /usr/lib/python3.4/site-packages/virtualbmc/vbmc.py
register: vbmc_py
+ when: not snapshot
- name: Manually patch vmbc to work with python3.x
lineinfile:
line: " conn.defineXML(ET.tostring(tree, encoding='unicode'))"
regexp: "tostring"
path: /usr/lib/python3.4/site-packages/virtualbmc/vbmc.py
-      when: vbmc_py.stat.exists == True
+      when: not snapshot and vbmc_py.stat.exists == True
- name: Add ssh retry to Ansible config
ini_file:
path: /etc/ansible/ansible.cfg
diff --git a/tox.ini b/tox.ini
index 6d53f30a..4d9ed626 100644
--- a/tox.ini
+++ b/tox.ini
@@ -14,7 +14,7 @@ commands =
--cover-package=apex \
--cover-xml \
--cover-min-percentage 95 \
- apex/tests
+ {posargs:apex/tests}
coverage report
[testenv:pep8]