Diffstat (limited to 'apex')
-rw-r--r--  apex/build.py | 18
-rw-r--r--  apex/build_utils.py | 50
-rw-r--r--  apex/builders/common_builder.py | 260
-rw-r--r--  apex/builders/exceptions.py | 12
-rw-r--r--  apex/builders/overcloud_builder.py | 141
-rw-r--r--  apex/builders/undercloud_builder.py | 71
-rw-r--r--  apex/clean.py | 8
-rw-r--r--  apex/common/constants.py | 43
-rw-r--r--  apex/common/exceptions.py | 16
-rw-r--r--  apex/common/utils.py | 180
-rw-r--r--  apex/deploy.py | 527
-rw-r--r--  apex/deployment/__init__.py | 0
-rw-r--r--  apex/deployment/snapshot.py | 241
-rw-r--r--  apex/deployment/tripleo.py | 60
-rw-r--r--  apex/inventory/inventory.py | 7
-rw-r--r--  apex/network/jumphost.py | 8
-rw-r--r--  apex/network/network_data.py | 2
-rw-r--r--  apex/network/network_environment.py | 11
-rw-r--r--  apex/overcloud/config.py | 6
-rw-r--r--  apex/overcloud/deploy.py | 493
-rw-r--r--  apex/overcloud/node.py | 147
-rw-r--r--  apex/settings/deploy_settings.py | 20
-rw-r--r--  apex/settings/network_settings.py | 7
-rw-r--r--  apex/tests/config/98faaca.diff | 331
-rw-r--r--  apex/tests/config/admin.xml | 7
-rw-r--r--  apex/tests/config/baremetal0.xml | 73
-rw-r--r--  apex/tests/config/common-patches.yaml | 6
-rw-r--r--  apex/tests/config/dummy-deploy-settings.yaml | 19
-rw-r--r--  apex/tests/config/inventory-virt-1-compute-node.yaml | 14
-rw-r--r--  apex/tests/config/node.yaml | 12
-rw-r--r--  apex/tests/config/snapshot.properties | 2
-rw-r--r--  apex/tests/test_apex_build_utils.py | 27
-rw-r--r--  apex/tests/test_apex_common_builder.py | 227
-rw-r--r--  apex/tests/test_apex_common_utils.py | 62
-rw-r--r--  apex/tests/test_apex_deploy.py | 183
-rw-r--r--  apex/tests/test_apex_deployment_snapshot.py | 374
-rw-r--r--  apex/tests/test_apex_deployment_tripleo.py | 49
-rw-r--r--  apex/tests/test_apex_inventory.py | 7
-rw-r--r--  apex/tests/test_apex_network_environment.py | 7
-rw-r--r--  apex/tests/test_apex_network_settings.py | 3
-rw-r--r--  apex/tests/test_apex_overcloud_builder.py | 67
-rw-r--r--  apex/tests/test_apex_overcloud_deploy.py | 672
-rw-r--r--  apex/tests/test_apex_overcloud_node.py | 191
-rw-r--r--  apex/tests/test_apex_undercloud.py | 178
-rw-r--r--  apex/tests/test_apex_virtual_utils.py | 20
-rw-r--r--  apex/undercloud/undercloud.py | 131
-rw-r--r--  apex/utils.py | 107
-rwxr-xr-x  apex/virtual/configure_vm.py | 9
-rw-r--r--  apex/virtual/exceptions.py | 12
-rw-r--r--  apex/virtual/utils.py | 36
50 files changed, 4672 insertions(+), 482 deletions(-)
diff --git a/apex/build.py b/apex/build.py
index 08f91abe..6e903814 100644
--- a/apex/build.py
+++ b/apex/build.py
@@ -109,11 +109,15 @@ def unpack_cache(cache_dest, cache_dir=None):
def build(build_root, version, iso=False, rpms=False):
if iso:
- make_targets = ['iso']
+ logging.warning("iso is deprecated. Will not build iso and build rpm "
+ "instead.")
+ make_targets = ['rpm']
elif rpms:
- make_targets = ['rpms']
+ make_targets = ['rpm']
else:
- make_targets = ['images', 'rpms-check']
+ logging.warning("Nothing specified to build, and images are no "
+ "longer supported in Apex. Will only run rpm check")
+ make_targets = ['rpm-check']
if version is not None:
make_args = ['RELEASE={}'.format(version)]
else:
@@ -225,6 +229,7 @@ def main():
console.setLevel(log_level)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console)
+ utils.install_ansible()
# Since we only support building inside of git repo this should be fine
try:
apex_root = subprocess.check_output(
@@ -233,9 +238,7 @@ def main():
logging.error("Must be in an Apex git repo to execute build")
raise
apex_build_root = os.path.join(apex_root, BUILD_ROOT)
- if os.path.isdir(apex_build_root):
- cache_tmp_dir = os.path.join(apex_root, TMP_CACHE)
- else:
+ if not os.path.isdir(apex_build_root):
logging.error("You must execute this script inside of the Apex "
"local code repository")
raise ApexBuildException("Invalid path for apex root: {}. Must be "
@@ -244,10 +247,7 @@ def main():
dep_playbook = os.path.join(apex_root,
'lib/ansible/playbooks/build_dependencies.yml')
utils.run_ansible(None, dep_playbook)
- unpack_cache(cache_tmp_dir, args.cache_dir)
build(apex_build_root, args.build_version, args.iso, args.rpms)
- build_cache(cache_tmp_dir, args.cache_dir)
- prune_cache(args.cache_dir)
if __name__ == '__main__':
diff --git a/apex/build_utils.py b/apex/build_utils.py
index c9d8472e..7457e561 100644
--- a/apex/build_utils.py
+++ b/apex/build_utils.py
@@ -27,7 +27,7 @@ def get_change(url, repo, branch, change_id):
:param repo: name of repo
:param branch: branch of repo
:param change_id: SHA change id
- :return: change if found and not abandoned, closed, or merged
+ :return: change if found and not abandoned or closed
"""
rest = GerritRestAPI(url=url)
change_path = "{}~{}~{}".format(quote_plus(repo), quote_plus(branch),
@@ -37,12 +37,8 @@ def get_change(url, repo, branch, change_id):
try:
assert change['status'] not in 'ABANDONED' 'CLOSED', \
'Change {} is in {} state'.format(change_id, change['status'])
- if change['status'] == 'MERGED':
- logging.info('Change {} is merged, ignoring...'
- .format(change_id))
- return None
- else:
- return change
+ logging.debug('Change found: {}'.format(change))
+ return change
except KeyError:
logging.error('Failed to get valid change data structure from url '
@@ -90,6 +86,44 @@ def clone_fork(args):
logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))
+def strip_patch_sections(patch, sections=['releasenotes', 'tests']):
+ """
+ Removes patch sections from a diff which contain a file path
+ :param patch: patch to strip
+ :param sections: list of keywords to use to strip out of the patch file
+ :return: stripped patch
+ """
+
+ append_line = True
+ tmp_patch = []
+ for line in patch.split("\n"):
+ if re.match('diff\s', line):
+ for section in sections:
+ if re.search(section, line):
+ logging.debug("Stripping {} from patch: {}".format(
+ section, line))
+ append_line = False
+ break
+ else:
+ append_line = True
+ if append_line:
+ tmp_patch.append(line)
+ return '\n'.join(tmp_patch)
+
+
+def is_path_in_patch(patch, path):
+ """
+ Checks if a particular path is modified in a patch diff
+ :param patch: patch diff
+ :param path: path to check for in diff
+ :return: Boolean
+ """
+ for line in patch.split("\n"):
+ if re.match('^diff.*{}'.format(path), line):
+ return True
+ return False
+
+
def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
logging.info("Fetching patch for change id {}".format(change_id))
change = get_change(url, repo, branch, change_id)
@@ -100,7 +134,7 @@ def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
change_id)
patch_url = "changes/{}/revisions/{}/patch".format(change_path,
current_revision)
- return rest.get(patch_url)
+ return strip_patch_sections(rest.get(patch_url))
def get_parser():
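The two helpers added above can be exercised in isolation; a minimal sketch, assuming the patched apex.build_utils module is importable (the patch content and paths are illustrative):

    from apex import build_utils

    patch = "\n".join([
        "diff --git a/tripleoclient/utils.py b/tripleoclient/utils.py",
        "+fixed = True",
        "diff --git a/releasenotes/notes/fix.yaml b/releasenotes/notes/fix.yaml",
        "+notes: ignored",
    ])
    # diff sections whose paths match 'releasenotes' or 'tests' are dropped
    stripped = build_utils.strip_patch_sections(patch)
    assert 'releasenotes' not in stripped
    # path lookups match against the 'diff ...' header lines
    assert build_utils.is_path_in_patch(patch, 'tripleoclient/')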
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index fd3bcc3d..59af94cd 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -9,19 +9,30 @@
# Common building utilities for undercloud and overcloud
+import datetime
import git
+import json
import logging
import os
+import platform
+import pprint
+import re
+import urllib.parse
+import yaml
+import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
+from apex.builders import exceptions as exc
from apex.common import constants as con
+from apex.common import utils
from apex.virtual import utils as virt_utils
-def project_to_path(project):
+def project_to_path(project, patch=None):
"""
- Translates project to absolute file path
+ Translates project to absolute file path to use in patching
:param project: name of project
+ :param patch: the patch to be applied to the project
:return: File path
"""
if project.startswith('openstack/'):
@@ -30,14 +41,116 @@ def project_to_path(project):
return "/etc/puppet/modules/{}".format(project.replace('puppet-', ''))
elif 'tripleo-heat-templates' in project:
return "/usr/share/openstack-tripleo-heat-templates"
+ elif ('tripleo-common' in project and
+ build_utils.is_path_in_patch(patch, 'container-images/')):
+ # tripleo-common has python and another component to it
+ # here we detect if there is a change to the yaml component and if so
+ # treat it as though it is not python. The caveat is that a patch
+ # which modifies both python and yaml will not work
+ # FIXME(trozet): add ability to split tripleo-common patches that
+ # modify both python and yaml
+ return "/usr/share/openstack-tripleo-common-containers/"
else:
- # assume python
- return "/usr/lib/python2.7/site-packages/{}".format(project)
+ # assume python. python patches will apply to a project name subdir.
+ # For example, python-tripleoclient patch will apply to the
+ # tripleoclient directory, which is the directory extracted during
+ # python install into the PYTHONPATH. Therefore we need to just be
+ # in the PYTHONPATH directory to apply a patch
+ return "/usr/lib/python2.7/site-packages/"
+
+
+def project_to_docker_image(project, docker_url):
+ """
+ Translates OpenStack project to OOO services that are containerized
+ :param project: short name of OpenStack project
+ :param docker_url: URL of the docker registry to query
+ :return: List of OOO docker service names
+ """
+ # Fetch all docker containers in docker hub with tripleo and filter
+ # based on project
+ logging.info("Checking for docker images matching project: {}".format(
+ project))
+ hub_output = utils.open_webpage(
+ urllib.parse.urljoin(docker_url,
+ '?page_size=1024'), timeout=10)
+ try:
+ results = json.loads(hub_output.decode())['results']
+ except Exception as e:
+ logging.error("Unable to parse docker hub output for"
+ "tripleoupstream repository")
+ logging.debug("HTTP response from dockerhub:\n{}".format(hub_output))
+ raise exc.ApexCommonBuilderException(
+ "Failed to parse docker image info from Docker Hub: {}".format(e))
+ logging.debug("Docker Hub tripleoupstream entities found: {}".format(
+ results))
+ docker_images = list()
+ for result in results:
+ if result['name'].startswith("centos-binary-{}".format(project)):
+ # add as docker image shortname (just service name)
+ logging.debug("Adding docker image {} for project {} for "
+ "patching".format(result['name'], project))
+ docker_images.append(result['name'].replace('centos-binary-', ''))
+
+ return docker_images
+
+
+def is_patch_promoted(change, branch, docker_url, docker_image=None):
+ """
+ Checks to see if a merged patch already exists in either the docker
+ container or the promoted tripleo images
+ :param change: gerrit change json output
+ :param branch: branch to use when polling artifacts (does not include
+ stable prefix)
+ :param docker_url: URL of the docker registry to poll
+ :param docker_image: container this applies to (defaults to None)
+ :return: True if the patch exists in a promoted artifact upstream
+ """
+ assert isinstance(change, dict)
+ assert 'status' in change
+
+ # if not merged we already know this is not closed/abandoned, so we know
+ # this is not promoted
+ if change['status'] != 'MERGED':
+ return False
+ assert 'submitted' in change
+ # drop microseconds cause who cares
+ stime = re.sub('\..*$', '', change['submitted'])
+ submitted_date = datetime.datetime.strptime(stime, "%Y-%m-%d %H:%M:%S")
+ # Patch applies to overcloud/undercloud
+ if docker_image is None:
+ oc_url = urllib.parse.urljoin(
+ con.UPSTREAM_RDO.replace('master', branch), 'overcloud-full.tar')
+ oc_mtime = utils.get_url_modified_date(oc_url)
+ if oc_mtime > submitted_date:
+ logging.debug("oc image was last modified at {}, which is"
+ "newer than merge date: {}".format(oc_mtime,
+ submitted_date))
+ return True
+ else:
+ # must be a docker patch, check docker tag modified time
+ docker_url = docker_url.replace('tripleomaster',
+ "tripleo{}".format(branch))
+ url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
+ docker_url = urllib.parse.urljoin(docker_url, url_path)
+ logging.debug("docker url is: {}".format(docker_url))
+ docker_output = utils.open_webpage(docker_url, 10)
+ logging.debug('Docker web output: {}'.format(docker_output))
+ hub_mtime = json.loads(docker_output.decode())['last_updated']
+ hub_mtime = re.sub('\..*$', '', hub_mtime)
+ # docker modified time is in this format '2018-06-11T15:23:55.135744Z'
+ # and we drop microseconds
+ hub_dtime = datetime.datetime.strptime(hub_mtime, "%Y-%m-%dT%H:%M:%S")
+ if hub_dtime > submitted_date:
+ logging.debug("docker image: {} was last modified at {}, which is"
+ "newer than merge date: {}".format(docker_image,
+ hub_dtime,
+ submitted_date))
+ return True
+ return False
def add_upstream_patches(patches, image, tmp_dir,
default_branch=os.path.join('stable',
- con.DEFAULT_OS_VERSION)):
+ con.DEFAULT_OS_VERSION),
+ uc_ip=None, docker_tag=None):
"""
Adds patches from upstream OpenStack gerrit to Undercloud for deployment
:param patches: list of patches
@@ -45,10 +158,13 @@ def add_upstream_patches(patches, image, tmp_dir,
:param tmp_dir: to store temporary patch files
:param default_branch: default branch to fetch commit (if not specified
in patch)
- :return: None
+ :param uc_ip: undercloud IP (required only for docker patches)
+ :param docker_tag: Docker Tag (required only for docker patches)
+ :return: Set of docker services patched (if applicable)
"""
virt_ops = [{con.VIRT_INSTALL: 'patch'}]
logging.debug("Evaluating upstream patches:\n{}".format(patches))
+ docker_services = set()
for patch in patches:
assert isinstance(patch, dict)
assert all(i in patch.keys() for i in ['project', 'change-id'])
@@ -58,23 +174,93 @@ def add_upstream_patches(patches, image, tmp_dir,
branch = default_branch
patch_diff = build_utils.get_patch(patch['change-id'],
patch['project'], branch)
- if patch_diff:
+ project_path = project_to_path(patch['project'], patch_diff)
+ # If we have a docker tag and the project path is python, we know
+ # this patch belongs on a docker container for a docker service.
+ # Therefore we build the dockerfile and move the patch into the
+ # containers directory. We also assume this builder call is for the
+ # overcloud, because we do not support undercloud containers
+ if platform.machine() == 'aarch64':
+ docker_url = con.DOCKERHUB_AARCH64
+ else:
+ docker_url = con.DOCKERHUB_OOO
+ if docker_tag and 'python' in project_path:
+ # Projects map to multiple THT services, need to check which
+ # are supported
+ project_short_name = os.path.basename(patch['project'])
+ ooo_docker_services = project_to_docker_image(project_short_name,
+ docker_url)
+ if not ooo_docker_services:
+ logging.error("Did not find any matching docker containers "
+ "for project: {}".format(project_short_name))
+ raise exc.ApexCommonBuilderException(
+ 'Unable to find docker services for python project in '
+ 'patch')
+ # Just use the first image to see if patch was promoted into it
+ docker_img = ooo_docker_services[0]
+ else:
+ ooo_docker_services = []
+ docker_img = None
+ change = build_utils.get_change(con.OPENSTACK_GERRIT,
+ patch['project'], branch,
+ patch['change-id'])
+ patch_promoted = is_patch_promoted(change,
+ branch.replace('stable/', ''),
+ docker_url,
+ docker_img)
+
+ if patch_diff and not patch_promoted:
patch_file = "{}.patch".format(patch['change-id'])
- patch_file_path = os.path.join(tmp_dir, patch_file)
- with open(patch_file_path, 'w') as fh:
- fh.write(patch_diff)
- project_path = project_to_path(patch['project'])
- virt_ops.extend([
- {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
- project_path)},
- {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
- project_path, patch_file)}])
- logging.info("Adding patch {} to {}".format(patch_file,
- image))
+ patch_file_paths = []
+ # If we found services, then we treat the patch like it applies to
+ # docker only
+ if ooo_docker_services:
+ os_version = default_branch.replace('stable/', '')
+ for service in ooo_docker_services:
+ docker_services = docker_services.union({service})
+ # We need to become root to be able to install the patch and
+ # then switch back to the previous user. Some containers that
+ # have the same name as the project do not necessarily
+ # contain the project code. For example
+ # novajoin-notifier does not contain nova package code.
+ # Therefore we must try to patch and unfortunately
+ # ignore failures until we have a better way of checking
+ # this
+ docker_cmds = [
+ "WORKDIR {}".format(project_path),
+ "USER root",
+ "ARG REAL_USER",
+ "RUN yum -y install patch",
+ "ADD {} {}".format(patch_file, project_path),
+ "RUN patch -p1 < {} || echo "
+ "'Patching failed'".format(patch_file),
+ "USER $REAL_USER"
+ ]
+ src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
+ "{}".format(uc_ip, os_version, service,
+ docker_tag)
+ oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
+ src_img_uri)
+ patch_file_paths.append(os.path.join(
+ tmp_dir, "containers/{}".format(service), patch_file))
+ else:
+ patch_file_path = os.path.join(tmp_dir, patch_file)
+ virt_ops.extend([
+ {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+ project_path)},
+ {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+ project_path, patch_file)}])
+ logging.info("Adding patch {} to {}".format(patch_file,
+ image))
+ patch_file_paths.append(patch_file_path)
+ for patch_fp in patch_file_paths:
+ with open(patch_fp, 'w') as fh:
+ fh.write(patch_diff)
else:
logging.info("Ignoring patch:\n{}".format(patch))
if len(virt_ops) > 1:
virt_utils.virt_customize(virt_ops, image)
+ return docker_services
def add_repo(repo_url, repo_name, image, tmp_dir):
@@ -109,3 +295,41 @@ def create_git_archive(repo_url, repo_name, tmp_dir,
repo.archive(fh, prefix=prefix)
logging.debug("Wrote archive file: {}".format(archive_path))
return archive_path
+
+
+def get_neutron_driver(ds_opts):
+ sdn = ds_opts.get('sdn_controller', None)
+
+ if sdn == 'opendaylight':
+ return 'odl'
+ elif sdn == 'ovn':
+ return sdn
+ elif ds_opts.get('vpp', False):
+ return 'vpp'
+ else:
+ return None
+
+
+def prepare_container_images(prep_file, branch='master', neutron_driver=None):
+ if not os.path.isfile(prep_file):
+ raise exc.ApexCommonBuilderException("Prep file does not exist: "
+ "{}".format(prep_file))
+ with open(prep_file) as fh:
+ data = yaml.safe_load(fh)
+ try:
+ p_set = data['parameter_defaults']['ContainerImagePrepare'][0]['set']
+ if neutron_driver:
+ p_set['neutron_driver'] = neutron_driver
+ p_set['namespace'] = "docker.io/tripleo{}".format(branch)
+ if platform.machine() == 'aarch64':
+ p_set['namespace'] = "docker.io/armbandapex"
+ p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64'
+
+ except KeyError:
+ logging.error("Invalid prep file format: {}".format(prep_file))
+ raise exc.ApexCommonBuilderException("Invalid format for prep file")
+
+ logging.debug("Writing new container prep file:\n{}".format(
+ pprint.pformat(data)))
+ with open(prep_file, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
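A quick sanity sketch of the mapping logic introduced above, assuming the patched apex.builders.common_builder module is importable (the project names are illustrative):

    from apex.builders import common_builder as c_builder

    # python projects now resolve to the PYTHONPATH root rather than a
    # per-project subdirectory
    assert (c_builder.project_to_path('python-tripleoclient') ==
            '/usr/lib/python2.7/site-packages/')
    # deploy settings drive the neutron driver passed to image prepare
    assert (c_builder.get_neutron_driver({'sdn_controller': 'opendaylight'})
            == 'odl')
    assert c_builder.get_neutron_driver({'vpp': True}) == 'vpp'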
diff --git a/apex/builders/exceptions.py b/apex/builders/exceptions.py
new file mode 100644
index 00000000..b88f02bf
--- /dev/null
+++ b/apex/builders/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexCommonBuilderException(Exception):
+ pass
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
index e7b07963..eab8fb66 100644
--- a/apex/builders/overcloud_builder.py
+++ b/apex/builders/overcloud_builder.py
@@ -10,17 +10,27 @@
# Used to modify overcloud qcow2 image
import logging
+import os
+import tarfile
-from apex.builders import common_builder as c_builder
+import apex.builders.common_builder
from apex.common import constants as con
+from apex.common import utils as utils
+from apex.common.exceptions import ApexBuildException
from apex.virtual import utils as virt_utils
-def inject_opendaylight(odl_version, image, tmp_dir):
+def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
+ os_version, docker_tag=None):
assert odl_version in con.VALID_ODL_VERSIONS
# add repo
if odl_version == 'master':
+ # the last version in the constants is "master", so select the
+ # 2nd to last; there is no "master" odl package version
odl_pkg_version = con.VALID_ODL_VERSIONS[-2]
+ # branch is used to pull puppet-opendaylight. Since puppet-odl
+ # does not cut a branch until later, we need to use the master
+ # version of it when the master ODL version is specified
branch = odl_version
else:
odl_pkg_version = odl_version
@@ -28,18 +38,137 @@ def inject_opendaylight(odl_version, image, tmp_dir):
odl_url = "https://nexus.opendaylight.org/content/repositories" \
"/opendaylight-{}-epel-7-x86_64-devel/".format(odl_pkg_version)
repo_name = "opendaylight-{}".format(odl_pkg_version)
- c_builder.add_repo(odl_url, repo_name, image, tmp_dir)
+ apex.builders.common_builder.add_repo(odl_url, repo_name, image, tmp_dir)
# download puppet-opendaylight
- archive = c_builder.create_git_archive(
+ archive = apex.builders.common_builder.create_git_archive(
repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight',
tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/')
# install ODL, puppet-odl
virt_ops = [
- {con.VIRT_INSTALL: 'opendaylight'},
{con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
{con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
- "puppet-opendaylight.tar"}
+ "puppet-opendaylight.tar"},
+ {con.VIRT_INSTALL: "java-1.8.0-openjdk"}
]
+ if docker_tag:
+ docker_cmds = [
+ "RUN yum remove opendaylight -y",
+ "RUN echo $'[opendaylight]\\n\\",
+ "baseurl={}\\n\\".format(odl_url),
+ "gpgcheck=0\\n\\",
+ "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
+ "RUN yum -y install opendaylight"
+ ]
+ src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
+ "{}".format(uc_ip, os_version, 'opendaylight',
+ docker_tag)
+ build_dockerfile('opendaylight', tmp_dir, docker_cmds, src_img_uri)
+ else:
+ virt_ops.append({con.VIRT_INSTALL: 'opendaylight'})
virt_utils.virt_customize(virt_ops, image)
logging.info("OpenDaylight injected into {}".format(image))
+
+
+def inject_quagga(image, tmp_dir):
+ """
+ Downloads the quagga tarball from artifacts.opnfv.org
+ and installs it on the overcloud image on the fly.
+ :param image: image to modify
+ :param tmp_dir: temporary directory to download the tarball to
+ :return: None
+ """
+ utils.fetch_upstream_and_unpack(tmp_dir,
+ os.path.split(con.QUAGGA_URL)[0] + "/",
+ [os.path.basename(con.QUAGGA_URL)])
+
+ virt_ops = [
+ {con.VIRT_UPLOAD: "{}/quagga-4.tar.gz:/root/".format(tmp_dir)},
+ {con.VIRT_RUN_CMD: "cd /root/ && tar xzf quagga-4.tar.gz"},
+ {con.VIRT_RUN_CMD: "cd /root/quagga;packages=$(ls |grep -vE 'debug"
+ "info|devel|contrib');yum -y install $packages"}
+ ]
+ virt_utils.virt_customize(virt_ops, image)
+ logging.info("Quagga injected into {}".format(image))
+
+
+def inject_ovs_nsh(image, tmp_dir):
+ """
+ Downloads Open vSwitch, compiles it and installs it on the
+ overcloud image on the fly.
+ :param image: image to modify
+ :param tmp_dir: temporary directory to download and build OVS in
+ :return: None
+ """
+ ovs_filename = os.path.basename(con.OVS_URL)
+ ovs_folder = ovs_filename.replace(".tar.gz", "")
+ utils.fetch_upstream_and_unpack(tmp_dir,
+ os.path.split(con.OVS_URL)[0] + "/",
+ [ovs_filename])
+ (ovs_dist_name, ovs_version) = ovs_folder.split("-")
+
+ virt_ops = [
+ {con.VIRT_UPLOAD: "{}:/root/".format(tmp_dir + "/" + ovs_filename)},
+ {con.VIRT_INSTALL: "rpm-build,autoconf,automake,libtool,openssl,"
+ "openssl-devel,python,python-twisted-core,python-six,groff,graphviz,"
+ "python-zope-interface,desktop-file-utils,procps-ng,PyQt4,"
+ "libcap-ng,libcap-ng-devel,selinux-policy-devel,kernel-devel,"
+ "kernel-headers,kernel-tools,rpmdevtools,systemd-units,python-devel,"
+ "python-sphinx"},
+ {con.VIRT_RUN_CMD: "cd /root/ && tar xzf {}".format(ovs_filename)},
+ {con.VIRT_UPLOAD:
+ "{}/build_ovs_nsh.sh:/root/{}".format(tmp_dir, ovs_folder)},
+ {con.VIRT_RUN_CMD:
+ "cd /root/{0} && chmod -R 777 * && chown -R root:root * && "
+ "./build_ovs_nsh.sh && rpm -Uhv --force rpm/rpmbuild/RPMS/x86_64/{0}"
+ "-1.el7.x86_64.rpm && rpm -Uhv --force rpm/rpmbuild/RPMS/x86_64"
+ "/openvswitch-kmod-{1}-1.el7.x86_64.rpm".format(ovs_folder,
+ ovs_version)}
+ ]
+ virt_utils.virt_customize(virt_ops, image)
+ logging.info("OVS injected into {}".format(image))
+
+
+def build_dockerfile(service, tmp_dir, docker_cmds, src_image_uri):
+ """
+ Builds a Dockerfile per service and stores it in a
+ tmp_dir/containers/<service> directory. If the Dockerfile already exists,
+ the docker cmds are simply appended to it.
+ :param service: name of sub-directory to store Dockerfile in
+ :param tmp_dir: Temporary directory to store the container's dockerfile in
+ :param docker_cmds: List of commands to insert into the dockerfile
+ :param src_image_uri: Docker URI format for where the source image exists
+ :return: None
+ """
+ logging.debug("Building Dockerfile for {} with docker_cmds: {}".format(
+ service, docker_cmds))
+ c_dir = os.path.join(tmp_dir, 'containers')
+ service_dir = os.path.join(c_dir, service)
+ if not os.path.isdir(service_dir):
+ os.makedirs(service_dir, exist_ok=True)
+ from_cmd = "FROM {}\n".format(src_image_uri)
+ service_file = os.path.join(service_dir, 'Dockerfile')
+ assert isinstance(docker_cmds, list)
+ if os.path.isfile(service_file):
+ append_cmds = True
+ else:
+ append_cmds = False
+ with open(service_file, "a+") as fh:
+ if not append_cmds:
+ fh.write(from_cmd)
+ fh.write('\n'.join(docker_cmds))
+
+
+def archive_docker_patches(tmp_dir):
+ """
+ Archives Overcloud docker patches into a tar file for upload to Undercloud
+ :param tmp_dir: temporary directory where containers folder is stored
+ :return: None
+ """
+ container_path = os.path.join(tmp_dir, 'containers')
+ if not os.path.isdir(container_path):
+ raise ApexBuildException("Docker directory for patches not found: "
+ "{}".format(container_path))
+ archive_file = os.path.join(tmp_dir, 'docker_patches.tar.gz')
+ with tarfile.open(archive_file, "w:gz") as tar:
+ tar.add(container_path, arcname=os.path.basename(container_path))
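To see what build_dockerfile produces, a small sketch assuming the patched apex.builders.overcloud_builder module is importable; the registry URI is a placeholder:

    import tempfile
    from apex.builders import overcloud_builder as oc_builder

    tmp = tempfile.mkdtemp()
    oc_builder.build_dockerfile(
        'nova-api', tmp, ["RUN yum -y install patch"],
        "192.0.2.1:8787/tripleomaster/centos-binary-nova-api:current-tripleo")
    # the first call writes the FROM line, later calls only append commands
    with open(tmp + '/containers/nova-api/Dockerfile') as fh:
        print(fh.read())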
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index baba8a55..47d2568d 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -8,8 +8,13 @@
##############################################################################
# Used to modify undercloud qcow2 image
+import logging
+import json
+import os
+import subprocess
from apex.common import constants as con
+from apex.common import utils
from apex.virtual import utils as virt_utils
@@ -21,18 +26,82 @@ def add_upstream_packages(image):
"""
virt_ops = list()
pkgs = [
+ 'epel-release',
'openstack-utils',
- 'ceph-common',
'python2-networking-sfc',
'openstack-ironic-inspector',
'subunit-filters',
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
+ 'python-tripleoclient',
+ 'openstack-tripleo-heat-templates'
]
+ # Remove incompatible python-docker version
+ virt_ops.append({con.VIRT_RUN_CMD: "yum remove -y python-docker-py"})
for pkg in pkgs:
virt_ops.append({con.VIRT_INSTALL: pkg})
virt_utils.virt_customize(virt_ops, image)
+
+def inject_calipso_installer(tmp_dir, image):
+ """
+ Downloads the calipso installer script from artifacts.opnfv.org
+ and puts it under /root/ for the subsequent installation process.
+ :param tmp_dir: temporary directory to download the script to
+ :param image: image to modify
+ :return: None
+ """
+ calipso_file = os.path.basename(con.CALIPSO_INSTALLER_URL)
+ calipso_url = con.CALIPSO_INSTALLER_URL.replace(calipso_file, '')
+ utils.fetch_upstream_and_unpack(tmp_dir, calipso_url, [calipso_file])
+
+ virt_ops = [
+ {con.VIRT_UPLOAD: "{}/{}:/root/".format(tmp_dir, calipso_file)}]
+ virt_utils.virt_customize(virt_ops, image)
+ logging.info("Calipso injected into {}".format(image))
+
+# TODO(trozet): add unit testing for calipso injector
# TODO(trozet): add rest of build for undercloud here as well
+
+
+def update_repos(image, branch):
+ virt_ops = [
+ {con.VIRT_RUN_CMD: "rm -f /etc/yum.repos.d/delorean*"},
+ {con.VIRT_RUN_CMD: "yum-config-manager --add-repo "
+ "https://trunk.rdoproject.org/centos7/{}"
+ "/delorean.repo".format(con.RDO_TAG)},
+ {con.VIRT_RUN_CMD: "yum clean all"},
+ {con.VIRT_INSTALL: "python2-tripleo-repos"},
+ {con.VIRT_RUN_CMD: "tripleo-repos -b {} {} ceph".format(branch,
+ con.RDO_TAG)}
+ ]
+ virt_utils.virt_customize(virt_ops, image)
+
+
+def expand_disk(image, desired_size=50):
+ """
+ Expands a disk image to desired_size in gigabytes
+ :param image: image to resize
+ :param desired_size: desired size in GB
+ :return: None
+ """
+ # there is a lib called vminspect which has some dependencies and is
+ # not yet available in pip. Consider switching to this lib later.
+ try:
+ img_out = json.loads(subprocess.check_output(
+ ['qemu-img', 'info', '--output=json', image],
+ stderr=subprocess.STDOUT).decode())
+ disk_gb_size = int(img_out['virtual-size'] / 1000000000)
+ if disk_gb_size < desired_size:
+ logging.info("Expanding disk image: {}. Current size: {} is less"
+ "than require size: {}".format(image, disk_gb_size,
+ desired_size))
+ diff_size = desired_size - disk_gb_size
+ subprocess.check_call(['qemu-img', 'resize', image,
+ "+{}G".format(diff_size)],
+ stderr=subprocess.STDOUT)
+
+ except (subprocess.CalledProcessError, json.JSONDecodeError, KeyError) \
+ as e:
+ logging.warning("Unable to resize disk, disk may not be large "
+ "enough: {}".format(e))
diff --git a/apex/clean.py b/apex/clean.py
index f56287e1..3e33c8e4 100644
--- a/apex/clean.py
+++ b/apex/clean.py
@@ -114,7 +114,13 @@ def clean_networks():
logging.debug("Destroying virsh network: {}".format(network))
if virsh_net.isActive():
virsh_net.destroy()
- virsh_net.undefine()
+ try:
+ virsh_net.undefine()
+ except libvirt.libvirtError as e:
+ if 'Network not found' in e.get_error_message():
+ logging.debug('Network already undefined')
+ else:
+ raise
def main():
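The same tolerant-undefine pattern in isolation, as a sketch assuming the libvirt python bindings are installed:

    import libvirt

    def undefine_network(virsh_net):
        # tolerate a race where the network vanishes between destroy()
        # and undefine()
        try:
            virsh_net.undefine()
        except libvirt.libvirtError as e:
            if 'Network not found' not in e.get_error_message():
                raise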
diff --git a/apex/common/constants.py b/apex/common/constants.py
index a2b9a634..59988f74 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -16,7 +16,7 @@ STORAGE_NETWORK = 'storage'
API_NETWORK = 'api'
CONTROLLER = 'controller'
COMPUTE = 'compute'
-
+ANSIBLE_PATH = 'ansible/playbooks'
OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
STORAGE_NETWORK, API_NETWORK]
DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
@@ -39,14 +39,43 @@ VIRT_PW = '--root-password'
THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
+THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services')
-DEFAULT_OS_VERSION = 'pike'
-DEFAULT_ODL_VERSION = 'nitrogen'
-VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+DEFAULT_OS_VERSION = 'master'
+DEFAULT_ODL_VERSION = 'oxygen'
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'fluorine',
+ 'neon', 'master']
PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
'/puppet-opendaylight'
DEBUG_OVERCLOUD_PW = 'opnfvapex'
NET_ENV_FILE = 'network-environment.yaml'
-DEPLOY_TIMEOUT = 90
-UPSTREAM_RDO = 'https://images.rdoproject.org/pike/delorean/current-tripleo/'
-OPENSTACK_GERRIT = 'https://review.openstack.org'
+DEPLOY_TIMEOUT = 120
+RDO_TAG = 'current-tripleo'
+UPSTREAM_RDO = "https://images.rdoproject.org/master/rdo_trunk/{}/".format(
+ RDO_TAG)
+OPENSTACK_GERRIT = 'https://review.opendev.org'
+
+DOCKER_TAG = RDO_TAG
+# Maps regular service files to docker versions
+# None value means mapping is same as key
+VALID_DOCKER_SERVICES = {
+ 'neutron-opendaylight.yaml': None,
+ 'neutron-opendaylight-dpdk.yaml': None,
+ 'neutron-opendaylight-sriov.yaml': None,
+ 'neutron-bgpvpn-opendaylight.yaml': None,
+ 'neutron-sfc-opendaylight.yaml': None,
+ 'neutron-ml2-ovn.yaml': 'neutron-ovn-ha.yaml'
+}
+DOCKERHUB_OOO = 'https://registry.hub.docker.com/v2/repositories' \
+ '/tripleomaster/'
+DOCKERHUB_AARCH64 = 'https://registry.hub.docker.com/v2/repositories' \
+ '/armbandapex/'
+KUBESPRAY_URL = 'https://github.com/kubernetes-incubator/kubespray.git'
+OPNFV_ARTIFACTS = 'http://storage.googleapis.com/artifacts.opnfv.org'
+CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \
+ 'rpm'.format(OPNFV_ARTIFACTS)
+
+OVS_URL = "http://openvswitch.org/releases/openvswitch-2.9.2.tar.gz"
+QUAGGA_URL = "{}/sdnvpn/quagga/quagga-4.tar.gz".format(OPNFV_ARTIFACTS)
+CALIPSO_INSTALLER_URL = "https://raw.githubusercontent.com/opnfv/calipso" \
+ "/master/app/install/calipso-installer.py"
diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
index 54d99834..6d8383b8 100644
--- a/apex/common/exceptions.py
+++ b/apex/common/exceptions.py
@@ -18,3 +18,19 @@ class JumpHostNetworkException(Exception):
class ApexCleanException(Exception):
pass
+
+
+class ApexBuildException(Exception):
+ pass
+
+
+class SnapshotDeployException(Exception):
+ pass
+
+
+class OvercloudNodeException(Exception):
+ pass
+
+
+class FetchException(Exception):
+ pass
diff --git a/apex/common/utils.py b/apex/common/utils.py
index 13250a45..72a66d10 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -8,10 +8,12 @@
##############################################################################
import datetime
+import distro
import json
import logging
import os
import pprint
+import socket
import subprocess
import tarfile
import time
@@ -20,6 +22,8 @@ import urllib.request
import urllib.parse
import yaml
+from apex.common import exceptions as exc
+
def str2bool(var):
if isinstance(var, bool):
@@ -71,12 +75,17 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
Executes ansible playbook and checks for errors
:param ansible_vars: dictionary of variables to inject into ansible run
:param playbook: playbook to execute
+ :param host: inventory file or string of target hosts
+ :param user: remote user to run ansible tasks
:param tmp_dir: temp directory to store ansible command
:param dry_run: Do not actually apply changes
:return: None
"""
logging.info("Executing ansible playbook: {}".format(playbook))
- inv_host = "{},".format(host)
+ if not os.path.isfile(host):
+ inv_host = "{},".format(host)
+ else:
+ inv_host = host
if host == 'localhost':
conn_type = 'local'
else:
@@ -137,45 +146,66 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
raise Exception(e)
-def fetch_upstream_and_unpack(dest, url, targets):
+def get_url_modified_date(url):
+ """
+ Returns the last modified date for a TripleO image artifact
+ :param url: URL to examine
+ :return: datetime object of when artifact was last modified
+ """
+ try:
+ u = urllib.request.urlopen(url)
+ except urllib.error.URLError as e:
+ logging.error("Failed to fetch target url. Error: {}".format(
+ e.reason))
+ raise
+
+ metadata = u.info()
+ headers = metadata.items()
+ for header in headers:
+ if isinstance(header, tuple) and len(header) == 2:
+ if header[0] == 'Last-Modified':
+ return datetime.datetime.strptime(header[1],
+ "%a, %d %b %Y %X GMT")
+
+
+def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
"""
Fetches targets from a url destination and downloads them if they are
newer. Also unpacks tar files in dest dir.
:param dest: Directory to download and unpack files to
:param url: URL where target files are located
:param targets: List of target files to download
+ :param fetch: Whether or not to fetch latest from internet (boolean)
:return: None
"""
os.makedirs(dest, exist_ok=True)
assert isinstance(targets, list)
for target in targets:
- download_target = True
target_url = urllib.parse.urljoin(url, target)
target_dest = os.path.join(dest, target)
- logging.debug("Fetching and comparing upstream target: \n{}".format(
- target_url))
- try:
- u = urllib.request.urlopen(target_url)
- except urllib.error.URLError as e:
- logging.error("Failed to fetch target url. Error: {}".format(
- e.reason))
- raise
- if os.path.isfile(target_dest):
+ target_exists = os.path.isfile(target_dest)
+ if fetch:
+ download_target = True
+ elif not target_exists:
+ logging.warning("no-fetch requested but target: {} is not "
+ "cached, will download".format(target_dest))
+ download_target = True
+ else:
+ logging.info("no-fetch requested and previous cache exists for "
+ "target: {}. Will skip download".format(target_dest))
+ download_target = False
+
+ if download_target:
+ logging.debug("Fetching and comparing upstream"
+ " target: \n{}".format(target_url))
+ # If a previous file exists and fetch was requested, compare
+ # modification times to determine if a download is necessary
+ if target_exists and download_target:
logging.debug("Previous file found: {}".format(target_dest))
- metadata = u.info()
- headers = metadata.items()
- target_url_date = None
- for header in headers:
- if isinstance(header, tuple) and len(header) == 2:
- if header[0] == 'Last-Modified':
- target_url_date = header[1]
- break
+ target_url_date = get_url_modified_date(target_url)
if target_url_date is not None:
target_dest_mtime = os.path.getmtime(target_dest)
- target_url_mtime = time.mktime(
- datetime.datetime.strptime(target_url_date,
- "%a, %d %b %Y %X "
- "GMT").timetuple())
+ target_url_mtime = time.mktime(target_url_date.timetuple())
if target_url_mtime > target_dest_mtime:
logging.debug('URL target is newer than disk...will '
'download')
@@ -184,11 +214,111 @@ def fetch_upstream_and_unpack(dest, url, targets):
download_target = False
else:
logging.debug('Unable to find last modified url date')
+
if download_target:
urllib.request.urlretrieve(target_url, filename=target_dest)
logging.info("Target downloaded: {}".format(target))
- if target.endswith('.tar'):
+ if target.endswith(('.tar', 'tar.gz', 'tgz')):
logging.info('Unpacking tar file')
tar = tarfile.open(target_dest)
tar.extractall(path=dest)
tar.close()
+
+
+def install_ansible():
+ # we only install for CentOS/Fedora for now
+ dist = distro.id()
+ if 'centos' in dist:
+ pkg_mgr = 'yum'
+ elif 'fedora' in dist:
+ pkg_mgr = 'dnf'
+ else:
+ return
+
+ # yum python module only exists for 2.x, so use subprocess
+ try:
+ subprocess.check_call([pkg_mgr, '-y', 'install', 'ansible'])
+ except subprocess.CalledProcessError:
+ logging.warning('Unable to install Ansible')
+
+
+def internet_connectivity():
+ try:
+ urllib.request.urlopen('http://opnfv.org', timeout=3)
+ return True
+ except (urllib.request.URLError, socket.timeout):
+ logging.debug('No internet connectivity detected')
+ return False
+
+
+def open_webpage(url, timeout=5):
+ try:
+ response = urllib.request.urlopen(url, timeout=timeout)
+ return response.read()
+ except (urllib.request.URLError, socket.timeout) as e:
+ logging.error("Unable to open URL: {}".format(url))
+ raise exc.FetchException('Unable to open URL') from e
+
+
+def edit_tht_env(env_file, section, settings):
+ assert isinstance(settings, dict)
+ with open(env_file) as fh:
+ data = yaml.safe_load(fh)
+
+ if section not in data.keys():
+ data[section] = {}
+ for setting, value in settings.items():
+ data[section][setting] = value
+ with open(env_file, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
+ logging.debug("Data written to env file {}:\n{}".format(env_file, data))
+
+
+def unique(tmp_list):
+ assert isinstance(tmp_list, list)
+ uniq_list = []
+ for x in tmp_list:
+ if x not in uniq_list:
+ uniq_list.append(x)
+ return uniq_list
+
+
+def bash_settings_to_dict(data):
+ """
+ Parses bash settings x=y and returns dict of key, values
+ :param data: bash settings data in x=y format
+ :return: dict of keys and values
+ """
+ return dict(item.split('=') for item in data.splitlines())
+
+
+def fetch_properties(url):
+ """
+ Downloads OPNFV properties and returns a dictionary of the key, values
+ :param url: URL of properties file
+ :return: dict of k,v for each property
+ """
+ if bool(urllib.parse.urlparse(url).scheme):
+ logging.debug('Fetching properties from internet: {}'.format(url))
+ return bash_settings_to_dict(open_webpage(url).decode('utf-8'))
+ elif os.path.isfile(url):
+ logging.debug('Fetching properties from file: {}'.format(url))
+ with open(url, 'r') as fh:
+ data = fh.read()
+ return bash_settings_to_dict(data)
+ else:
+ logging.warning('Unable to fetch properties for: {}'.format(url))
+ raise exc.FetchException('Unable to determine properties location: '
+ '{}'.format(url))
+
+
+def find_container_client(os_version):
+ """
+ Determines whether to use docker or podman client
+ :param os_version: openstack version
+ :return: client name as string
+ """
+ if os_version == 'rocky' or os_version == 'queens':
+ return 'docker'
+ else:
+ return 'podman'
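A short sketch of the new properties parsing and container client selection, assuming the patched apex.common.utils module is importable (the property values are illustrative):

    from apex.common import utils

    props = utils.bash_settings_to_dict(
        "OPNFV_SNAP_URL=artifacts.opnfv.org/apex/master/noha\n"
        "OPNFV_SNAP_SHA512SUM=abc123")
    print(props['OPNFV_SNAP_URL'])  # artifacts.opnfv.org/apex/master/noha
    # rocky/queens still use docker; later releases switch to podman
    print(utils.find_container_client('queens'))  # docker
    print(utils.find_container_client('stein'))   # podman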
diff --git a/apex/deploy.py b/apex/deploy.py
index 5485d150..d0c2b208 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -10,6 +10,7 @@
##############################################################################
import argparse
+import git
import json
import logging
import os
@@ -18,20 +19,23 @@ import pprint
import shutil
import sys
import tempfile
+import yaml
import apex.virtual.configure_vm as vm_lib
import apex.virtual.utils as virt_utils
+import apex.builders.common_builder as c_builder
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.undercloud_builder as uc_builder
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
from apex import NetworkSettings
-from apex.builders import common_builder as c_builder
-from apex.builders import overcloud_builder as oc_builder
-from apex.builders import undercloud_builder as uc_builder
+from apex.deployment.snapshot import SnapshotDeployment
from apex.common import utils
from apex.common import constants
from apex.common import parsers
from apex.common.exceptions import ApexDeployException
+from apex.deployment.tripleo import ApexDeployment
from apex.network import jumphost
from apex.network import network_data
from apex.undercloud import undercloud as uc_lib
@@ -39,13 +43,13 @@ from apex.overcloud import config as oc_cfg
from apex.overcloud import deploy as oc_deploy
APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
-ANSIBLE_PATH = 'ansible/playbooks'
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
-
-
-def deploy_quickstart(args, deploy_settings_file, network_settings_file,
- inventory_file=None):
- pass
+UC_DISK_FILES = [
+ 'overcloud-full.vmlinuz',
+ 'overcloud-full.initrd',
+ 'ironic-python-agent.initramfs',
+ 'ironic-python-agent.kernel'
+]
def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -113,7 +117,7 @@ def create_deploy_parser():
help='File which contains Apex deploy settings')
deploy_parser.add_argument('-n', '--network-settings',
dest='network_settings_file',
- required=True,
+ required=False,
help='File which contains Apex network '
'settings')
deploy_parser.add_argument('-i', '--inventory-file',
@@ -174,13 +178,29 @@ def create_deploy_parser():
default='/usr/share/opnfv-apex',
help='Directory path for apex ansible '
'and third party libs')
- deploy_parser.add_argument('--quickstart', action='store_true',
+ deploy_parser.add_argument('-s', '--snapshot', action='store_true',
default=False,
- help='Use tripleo-quickstart to deploy')
+ help='Use snapshots for deployment')
+ deploy_parser.add_argument('--snap-cache', dest='snap_cache',
+ default="{}/snap_cache".format(
+ os.path.expanduser('~')),
+ help='Local directory to cache snapshot '
+ 'artifacts. Defaults to $HOME/snap_cache')
deploy_parser.add_argument('--upstream', action='store_true',
- default=False,
+ default=True,
help='Force deployment to use upstream '
- 'artifacts')
+ 'artifacts. This option is now '
+ 'deprecated and only upstream '
+ 'deployments are supported.')
+ deploy_parser.add_argument('--no-fetch', action='store_true',
+ default=False,
+ help='Ignore fetching latest upstream and '
+ 'use what is in cache')
+ deploy_parser.add_argument('-p', '--patches',
+ default='/etc/opnfv-apex/common-patches.yaml',
+ dest='patches_file',
+ help='File to include for common patches '
+ 'which apply to all deployment scenarios')
return deploy_parser
@@ -192,20 +212,25 @@ def validate_deploy_args(args):
"""
logging.debug('Validating arguments for deployment')
- if args.virtual and args.inventory_file is not None:
+ if args.snapshot:
+ logging.debug('Skipping inventory validation as it is not applicable '
+ 'to snapshot deployments')
+ elif args.virtual and args.inventory_file is not None:
logging.error("Virtual enabled but inventory file also given")
raise ApexDeployException('You should not specify an inventory file '
'with virtual deployments')
elif args.virtual:
args.inventory_file = os.path.join(APEX_TEMP_DIR,
'inventory-virt.yaml')
- elif os.path.isfile(args.inventory_file) is False:
+ elif not os.path.isfile(args.inventory_file):
logging.error("Specified inventory file does not exist: {}".format(
args.inventory_file))
raise ApexDeployException('Specified inventory file does not exist')
for settings_file in (args.deploy_settings_file,
args.network_settings_file):
+ if settings_file == args.network_settings_file and args.snapshot:
+ continue
if os.path.isfile(settings_file) is False:
logging.error("Specified settings file does not "
"exist: {}".format(settings_file))
@@ -234,74 +259,99 @@ def main():
console.setLevel(log_level)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console)
+ utils.install_ansible()
validate_deploy_args(args)
# Parse all settings
deploy_settings = DeploySettings(args.deploy_settings_file)
logging.info("Deploy settings are:\n {}".format(pprint.pformat(
- deploy_settings)))
- net_settings = NetworkSettings(args.network_settings_file)
- logging.info("Network settings are:\n {}".format(pprint.pformat(
- net_settings)))
- os_version = deploy_settings['deploy_options']['os_version']
- net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
- net_env = NetworkEnvironment(net_settings, net_env_file,
- os_version=os_version)
- net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
- utils.dump_yaml(dict(net_env), net_env_target)
- ha_enabled = deploy_settings['global_params']['ha_enabled']
- if args.virtual:
- if args.virt_compute_ram is None:
- compute_ram = args.virt_default_ram
- else:
- compute_ram = args.virt_compute_ram
- if deploy_settings['deploy_options']['sdn_controller'] == \
- 'opendaylight' and args.virt_default_ram < 12:
- control_ram = 12
- logging.warning('RAM per controller is too low. OpenDaylight '
- 'requires at least 12GB per controller.')
- logging.info('Increasing RAM per controller to 12GB')
- elif args.virt_default_ram < 10:
- control_ram = 10
- logging.warning('RAM per controller is too low. nosdn '
- 'requires at least 10GB per controller.')
- logging.info('Increasing RAM per controller to 10GB')
- else:
- control_ram = args.virt_default_ram
- if ha_enabled and args.virt_compute_nodes < 2:
- logging.debug('HA enabled, bumping number of compute nodes to 2')
- args.virt_compute_nodes = 2
- virt_utils.generate_inventory(args.inventory_file, ha_enabled,
- num_computes=args.virt_compute_nodes,
- controller_ram=control_ram * 1024,
- compute_ram=compute_ram * 1024,
- vcpus=args.virt_cpus
- )
- inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
-
- validate_cross_settings(deploy_settings, net_settings, inventory)
+ deploy_settings)))
+
+ if not args.snapshot:
+ net_settings = NetworkSettings(args.network_settings_file)
+ logging.info("Network settings are:\n {}".format(pprint.pformat(
+ net_settings)))
+ os_version = deploy_settings['deploy_options']['os_version']
+ net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+ net_env = NetworkEnvironment(net_settings, net_env_file,
+ os_version=os_version)
+ net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+ utils.dump_yaml(dict(net_env), net_env_target)
+
+ # get global deploy params
+ ha_enabled = deploy_settings['global_params']['ha_enabled']
+ introspect = deploy_settings['global_params'].get('introspect', True)
+ net_list = net_settings.enabled_network_list
+ if args.virtual:
+ if args.virt_compute_ram is None:
+ compute_ram = args.virt_default_ram
+ else:
+ compute_ram = args.virt_compute_ram
+ if (deploy_settings['deploy_options']['sdn_controller'] ==
+ 'opendaylight' and args.virt_default_ram < 12):
+ control_ram = 12
+ logging.warning('RAM per controller is too low. OpenDaylight '
+ 'requires at least 12GB per controller.')
+ logging.info('Increasing RAM per controller to 12GB')
+ elif args.virt_default_ram < 10:
+ if platform.machine() == 'aarch64':
+ control_ram = 16
+ logging.warning('RAM per controller is too low for '
+ 'aarch64 ')
+ logging.info('Increasing RAM per controller to 16GB')
+ else:
+ control_ram = 10
+ logging.warning('RAM per controller is too low. nosdn '
+ 'requires at least 10GB per controller.')
+ logging.info('Increasing RAM per controller to 10GB')
+ else:
+ control_ram = args.virt_default_ram
+ if platform.machine() == 'aarch64' and args.virt_cpus < 16:
+ vcpus = 16
+ logging.warning('aarch64 requires at least 16 vCPUS per '
+ 'target VM. Increasing to 16.')
+ else:
+ vcpus = args.virt_cpus
+ if ha_enabled and args.virt_compute_nodes < 2:
+ logging.debug(
+ 'HA enabled, bumping number of compute nodes to 2')
+ args.virt_compute_nodes = 2
+ virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+ num_computes=args.virt_compute_nodes,
+ controller_ram=control_ram * 1024,
+ compute_ram=compute_ram * 1024,
+ vcpus=vcpus
+ )
+ inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+ logging.info("Inventory is:\n {}".format(pprint.pformat(
+ inventory)))
+
+ validate_cross_settings(deploy_settings, net_settings, inventory)
+ else:
+ # only one network with snapshots
+ net_list = [constants.ADMIN_NETWORK]
+
ds_opts = deploy_settings['deploy_options']
- if args.quickstart:
- deploy_settings_file = os.path.join(APEX_TEMP_DIR,
- 'apex_deploy_settings.yaml')
- utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
- deploy_settings_file)
- logging.info("File created: {}".format(deploy_settings_file))
- network_settings_file = os.path.join(APEX_TEMP_DIR,
- 'apex_network_settings.yaml')
- utils.dump_yaml(utils.dict_objects_to_str(net_settings),
- network_settings_file)
- logging.info("File created: {}".format(network_settings_file))
- deploy_quickstart(args, deploy_settings_file, network_settings_file,
- args.inventory_file)
+ ansible_args = {
+ 'virsh_enabled_networks': net_list,
+ 'snapshot': args.snapshot
+ }
+ utils.run_ansible(ansible_args,
+ os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'deploy_dependencies.yml'))
+ all_in_one = not bool(args.virt_compute_nodes)
+ if args.snapshot:
+ # Start snapshot Deployment
+ logging.info('Executing Snapshot Deployment...')
+ SnapshotDeployment(deploy_settings=deploy_settings,
+ snap_cache_dir=args.snap_cache,
+ fetch=not args.no_fetch,
+ all_in_one=all_in_one)
else:
+ # Start Standard TripleO Deployment
+ deployment = ApexDeployment(deploy_settings, args.patches_file,
+ args.deploy_settings_file)
# TODO (trozet): add logic back from:
# Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
- ansible_args = {
- 'virsh_enabled_networks': net_settings.enabled_network_list
- }
- utils.run_ansible(ansible_args,
- os.path.join(args.lib_dir, ANSIBLE_PATH,
- 'deploy_dependencies.yml'))
uc_external = False
if 'external' in net_settings.enabled_network_list:
uc_external = True
@@ -333,62 +383,93 @@ def main():
else:
root_pw = None
- upstream = (os_version != constants.DEFAULT_OS_VERSION or
- args.upstream)
+ if not args.upstream:
+ logging.warning("Using upstream is now required for Apex. "
+ "Forcing upstream to true")
if os_version == 'master':
branch = 'master'
else:
branch = "stable/{}".format(os_version)
- if upstream:
- logging.info("Deploying with upstream artifacts for OpenStack "
- "{}".format(os_version))
- args.image_dir = os.path.join(args.image_dir, os_version)
- upstream_url = constants.UPSTREAM_RDO.replace(
- constants.DEFAULT_OS_VERSION, os_version)
- upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
- utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
- upstream_targets)
- sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
- if ds_opts['sdn_controller'] == 'opendaylight':
- logging.info("Preparing upstream image with OpenDaylight")
- oc_builder.inject_opendaylight(
- odl_version=ds_opts['odl_version'],
- image=sdn_image,
- tmp_dir=APEX_TEMP_DIR
- )
- # copy undercloud so we don't taint upstream fetch
- uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
- uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
- shutil.copyfile(uc_fetch_img, uc_image)
- # prep undercloud with required packages
- uc_builder.add_upstream_packages(uc_image)
- # add patches from upstream to undercloud and overcloud
- logging.info('Adding patches to undercloud')
- patches = deploy_settings['global_params']['patches']
- c_builder.add_upstream_patches(patches['undercloud'], uc_image,
- APEX_TEMP_DIR, branch)
- logging.info('Adding patches to overcloud')
- c_builder.add_upstream_patches(patches['overcloud'], sdn_image,
- APEX_TEMP_DIR, branch)
+
+ logging.info("Deploying with upstream artifacts for OpenStack "
+ "{}".format(os_version))
+ args.image_dir = os.path.join(args.image_dir, os_version)
+ upstream_url = constants.UPSTREAM_RDO.replace(
+ constants.DEFAULT_OS_VERSION, os_version)
+
+ upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
+ if platform.machine() == 'aarch64':
+ upstream_targets.append('undercloud.qcow2')
+ utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+ upstream_targets,
+ fetch=not args.no_fetch)
+ # Copy ironic files and overcloud ramdisk and kernel into temp dir
+ # to be copied by ansible into undercloud /home/stack
+ # Note the overcloud disk does not need to be copied here as it will
+ # be modified and copied later
+ for tmp_file in UC_DISK_FILES:
+ shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+ os.path.join(APEX_TEMP_DIR, tmp_file))
+ if platform.machine() == 'aarch64':
+ sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
else:
- sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
- uc_image = 'undercloud.qcow2'
+ sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ # copy undercloud so we don't taint upstream fetch
+ uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
+ uc_fetch_img = sdn_image
+ shutil.copyfile(uc_fetch_img, uc_image)
+ # prep undercloud with required packages
+ if platform.machine() != 'aarch64':
+ uc_builder.update_repos(image=uc_image,
+ branch=branch.replace('stable/', ''))
+ uc_builder.add_upstream_packages(uc_image)
+ uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
+ # add patches from upstream to undercloud and overcloud
+ logging.info('Adding patches to undercloud')
+ patches = deployment.determine_patches()
+ c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+ APEX_TEMP_DIR, branch)
+
+ # Create/Start Undercloud VM
undercloud = uc_lib.Undercloud(args.image_dir,
args.deploy_dir,
root_pw=root_pw,
external_network=uc_external,
- image_name=os.path.basename(uc_image))
+ image_name=os.path.basename(uc_image),
+ os_version=os_version)
undercloud.start()
+ undercloud_admin_ip = net_settings['networks'][
+ constants.ADMIN_NETWORK]['installer_vm']['ip']
+
+ if ds_opts['containers']:
+ tag = constants.DOCKER_TAG
+ else:
+ tag = None
# Generate nic templates
for role in 'compute', 'controller':
oc_cfg.create_nic_template(net_settings, deploy_settings, role,
args.deploy_dir, APEX_TEMP_DIR)
+ # Prepare/Upload docker images
+ docker_env = 'containers-prepare-parameter.yaml'
+ shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+ os.path.join(APEX_TEMP_DIR, docker_env))
+ # Upload extra ansible.cfg
+ if platform.machine() == 'aarch64':
+ ansible_env = 'ansible.cfg'
+ shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
+ os.path.join(APEX_TEMP_DIR, ansible_env))
+
+ c_builder.prepare_container_images(
+ os.path.join(APEX_TEMP_DIR, docker_env),
+ branch=branch.replace('stable/', ''),
+ neutron_driver=c_builder.get_neutron_driver(ds_opts)
+ )
# Install Undercloud
- undercloud.configure(net_settings,
- os.path.join(args.lib_dir, ANSIBLE_PATH,
+ undercloud.configure(net_settings, deploy_settings,
+ os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'configure_undercloud.yml'),
- APEX_TEMP_DIR)
+ APEX_TEMP_DIR, virtual_oc=args.virtual)
# Prepare overcloud-full.qcow2
logging.info("Preparing Overcloud for deployment...")
@@ -398,33 +479,75 @@ def main():
net_data_file)
else:
net_data = False
- if upstream and args.env_file == 'opnfv-environment.yaml':
+
+ shutil.copyfile(os.path.join(args.deploy_dir, 'build_ovs_nsh.sh'),
+ os.path.join(APEX_TEMP_DIR, 'build_ovs_nsh.sh'))
+
+ # TODO(trozet): Either fix opnfv env or default to use upstream env
+ if args.env_file == 'opnfv-environment.yaml':
# Override the env_file if it is defaulted to opnfv
# opnfv env file will not work with upstream
args.env_file = 'upstream-environment.yaml'
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
- if not upstream:
- oc_deploy.prep_env(deploy_settings, net_settings, inventory,
- opnfv_env, net_env_target, APEX_TEMP_DIR)
- oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
- root_pw=root_pw)
+ oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+ opnfv_env, net_env_target, APEX_TEMP_DIR)
+ if not args.virtual:
+ oc_deploy.LOOP_DEVICE_SIZE = "50G"
+ if platform.machine() == 'aarch64':
+ oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
else:
- shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
- 'overcloud-full.qcow2'))
- shutil.copyfile(
- opnfv_env,
- os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
- )
+ oc_image = sdn_image
+ patched_containers = oc_deploy.prep_image(
+ deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
+ root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
APEX_TEMP_DIR, args.virtual,
os.path.basename(opnfv_env),
net_data=net_data)
- deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
+ # Prepare undercloud with containers
+ docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'prepare_overcloud_containers.yml')
+ if ds_opts['containers']:
+ logging.info("Preparing Undercloud with Docker containers")
+ sdn_env = oc_deploy.get_docker_sdn_files(ds_opts)
+ sdn_env_files = str()
+ for sdn_file in sdn_env:
+ sdn_env_files += " -e {}".format(sdn_file)
+ if patched_containers:
+ oc_builder.archive_docker_patches(APEX_TEMP_DIR)
+ container_vars = dict()
+ container_vars['apex_temp_dir'] = APEX_TEMP_DIR
+ container_vars['patched_docker_services'] = list(
+ patched_containers)
+ container_vars['container_tag'] = constants.DOCKER_TAG
+ container_vars['stackrc'] = 'source /home/stack/stackrc'
+ container_vars['sdn'] = ds_opts['sdn_controller']
+ container_vars['undercloud_ip'] = undercloud_admin_ip
+ container_vars['os_version'] = os_version
+ container_vars['aarch64'] = platform.machine() == 'aarch64'
+ container_vars['sdn_env_file'] = sdn_env_files
+ container_vars['container_client'] = utils.find_container_client(
+ os_version)
+ try:
+ utils.run_ansible(container_vars, docker_playbook,
+ host=undercloud.ip, user='stack',
+ tmp_dir=APEX_TEMP_DIR)
+ logging.info("Container preparation complete")
+ except Exception:
+ logging.error("Unable to complete container prep on "
+ "Undercloud")
+ for tmp_file in UC_DISK_FILES:
+ os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
+ os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+ raise
+
+ deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'deploy_overcloud.yml')
virt_env = 'virtual-environment.yaml'
bm_env = 'baremetal-environment.yaml'
- for p_env in virt_env, bm_env:
+ k8s_env = 'kubernetes-environment.yaml'
+ for p_env in virt_env, bm_env, k8s_env:
shutil.copyfile(os.path.join(args.deploy_dir, p_env),
os.path.join(APEX_TEMP_DIR, p_env))
@@ -434,13 +557,22 @@ def main():
deploy_vars['virtual'] = args.virtual
deploy_vars['debug'] = args.debug
deploy_vars['aarch64'] = platform.machine() == 'aarch64'
+ deploy_vars['introspect'] = not (args.virtual or
+ deploy_vars['aarch64'] or
+ not introspect)
deploy_vars['dns_server_args'] = ''
deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
deploy_vars['stackrc'] = 'source /home/stack/stackrc'
deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
- deploy_vars['upstream'] = upstream
+ deploy_vars['undercloud_ip'] = undercloud_admin_ip
+ deploy_vars['ha_enabled'] = ha_enabled
deploy_vars['os_version'] = os_version
+ deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
+ deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
+ deploy_vars['vim'] = ds_opts['vim']
+ deploy_vars['container_client'] = utils.find_container_client(
+ os_version)
for dns_server in net_settings['dns_servers']:
deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
dns_server)
@@ -449,10 +581,15 @@ def main():
user='stack', tmp_dir=APEX_TEMP_DIR)
logging.info("Overcloud deployment complete")
except Exception:
- logging.error("Deployment Failed. Please check log")
+ logging.error("Deployment Failed. Please check deploy log as "
+ "well as mistral logs in "
+ "{}".format(os.path.join(APEX_TEMP_DIR,
+ 'mistral_logs.tar.gz')))
raise
finally:
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+ for tmp_file in UC_DISK_FILES:
+ os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
# Post install
logging.info("Executing post deploy configuration")
@@ -465,7 +602,7 @@ def main():
'UserKnownHostsFile=/dev/null -o ' \
'LogLevel=error'
deploy_vars['external_network_cmds'] = \
- oc_deploy.external_network_cmds(net_settings)
+ oc_deploy.external_network_cmds(net_settings, deploy_settings)
# TODO(trozet): just parse all ds_opts as deploy vars one time
deploy_vars['gluon'] = ds_opts['gluon']
deploy_vars['sdn'] = ds_opts['sdn_controller']
@@ -483,37 +620,129 @@ def main():
else:
deploy_vars['congress'] = False
deploy_vars['calipso'] = ds_opts.get('calipso', False)
- deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
- 'installer_vm']['ip']
- # TODO(trozet): this is probably redundant with getting external
- # network info from undercloud.py
- if 'external' in net_settings.enabled_network_list:
- ext_cidr = net_settings['networks']['external'][0]['cidr']
- else:
- ext_cidr = net_settings['networks']['admin']['cidr']
- deploy_vars['external_cidr'] = str(ext_cidr)
- if ext_cidr.version == 6:
- deploy_vars['external_network_ipv6'] = True
+ deploy_vars['calipso_ip'] = undercloud_admin_ip
+ # overcloudrc.v3 was removed in queens and later, where v3 is the default
+ if os_version == 'pike':
+ deploy_vars['overcloudrc_files'] = ['overcloudrc',
+ 'overcloudrc.v3']
else:
- deploy_vars['external_network_ipv6'] = False
- post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+ deploy_vars['overcloudrc_files'] = ['overcloudrc']
+
+ post_undercloud = os.path.join(args.lib_dir,
+ constants.ANSIBLE_PATH,
'post_deploy_undercloud.yml')
- logging.info("Executing post deploy configuration undercloud playbook")
+ logging.info("Executing post deploy configuration undercloud "
+ "playbook")
try:
- utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
- user='stack', tmp_dir=APEX_TEMP_DIR)
+ utils.run_ansible(deploy_vars, post_undercloud,
+ host=undercloud.ip, user='stack',
+ tmp_dir=APEX_TEMP_DIR)
logging.info("Post Deploy Undercloud Configuration Complete")
except Exception:
logging.error("Post Deploy Undercloud Configuration failed. "
"Please check log")
raise
+
+ # Deploy kubernetes if enabled
+ # TODO(zshi): move handling of kubernetes deployment
+ # to its own deployment class
+ if deploy_vars['vim'] == 'k8s':
+ # clone kubespray repo
+ git.Repo.clone_from(constants.KUBESPRAY_URL,
+ os.path.join(APEX_TEMP_DIR, 'kubespray'))
+ shutil.copytree(
+ os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+ 'sample'),
+ os.path.join(APEX_TEMP_DIR, 'kubespray', 'inventory',
+ 'apex'))
+ k8s_node_inventory = {
+ 'all':
+ {'hosts': {},
+ 'children': {
+ 'k8s-cluster': {
+ 'children': {
+ 'kube-master': {
+ 'hosts': {}
+ },
+ 'kube-node': {
+ 'hosts': {}
+ }
+ }
+ },
+ 'etcd': {
+ 'hosts': {}
+ }
+ }
+ }
+ }
+ for node, ip in deploy_vars['overcloud_nodes'].items():
+ k8s_node_inventory['all']['hosts'][node] = {
+ 'ansible_become': True,
+ 'ansible_ssh_host': ip,
+ 'ansible_become_user': 'root',
+ 'ip': ip
+ }
+ if 'controller' in node:
+ k8s_node_inventory['all']['children']['k8s-cluster'][
+ 'children']['kube-master']['hosts'][node] = None
+ k8s_node_inventory['all']['children']['etcd'][
+ 'hosts'][node] = None
+ elif 'compute' in node:
+ k8s_node_inventory['all']['children']['k8s-cluster'][
+ 'children']['kube-node']['hosts'][node] = None
+
+ kubespray_dir = os.path.join(APEX_TEMP_DIR, 'kubespray')
+ with open(os.path.join(kubespray_dir, 'inventory', 'apex',
+ 'apex.yaml'), 'w') as invfile:
+ yaml.dump(k8s_node_inventory, invfile,
+ default_flow_style=False)
+ k8s_deploy_vars = {}
+ # Add kubespray ansible control variables in k8s_deploy_vars,
+ # example: 'kube_network_plugin': 'flannel'
+ k8s_deploy = os.path.join(kubespray_dir, 'cluster.yml')
+ k8s_deploy_inv_file = os.path.join(kubespray_dir, 'inventory',
+ 'apex', 'apex.yaml')
+
+ k8s_remove_pkgs = os.path.join(args.lib_dir,
+ constants.ANSIBLE_PATH,
+ 'k8s_remove_pkgs.yml')
+ try:
+ logging.debug("Removing any existing overcloud docker "
+ "packages")
+ utils.run_ansible(k8s_deploy_vars, k8s_remove_pkgs,
+ host=k8s_deploy_inv_file,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("k8s Deploy Remove Existing Docker Related "
+ "Packages Complete")
+ except Exception:
+ logging.error("k8s Deploy Remove Existing Docker Related "
+ "Packages failed. Please check log")
+ raise
+
+ try:
+ utils.run_ansible(k8s_deploy_vars, k8s_deploy,
+ host=k8s_deploy_inv_file,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("k8s Deploy Overcloud Configuration Complete")
+ except Exception:
+ logging.error("k8s Deploy Overcloud Configuration failed."
+ "Please check log")
+ raise
+
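For reference, the inventory dict built above serializes to YAML roughly as follows for a single controller; the host name and IP are illustrative, not taken from this change:

    import yaml

    inventory = {
        'all': {
            'hosts': {
                'overcloud-controller-0': {'ansible_become': True,
                                           'ansible_ssh_host': '192.0.2.10',
                                           'ansible_become_user': 'root',
                                           'ip': '192.0.2.10'}},
            'children': {
                'k8s-cluster': {'children': {
                    'kube-master': {
                        'hosts': {'overcloud-controller-0': None}},
                    'kube-node': {'hosts': {}}}},
                'etcd': {'hosts': {'overcloud-controller-0': None}}}}}
    # prints the same structure kubespray consumes from apex.yaml
    print(yaml.dump(inventory, default_flow_style=False))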
# Post deploy overcloud node configuration
# TODO(trozet): just parse all ds_opts as deploy vars one time
deploy_vars['sfc'] = ds_opts['sfc']
deploy_vars['vpn'] = ds_opts['vpn']
+ deploy_vars['l2gw'] = ds_opts.get('l2gw')
+ deploy_vars['sriov'] = ds_opts.get('sriov')
+ deploy_vars['tacker'] = ds_opts.get('tacker')
+ deploy_vars['all_in_one'] = all_in_one
+ # TODO(trozet): need to set container client to docker until OOO
+ # migrates OC to podman. Remove this later.
+ deploy_vars['container_client'] = 'docker'
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
- post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+ post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'post_deploy_overcloud.yml')
# Run per overcloud node
for node, ip in deploy_vars['overcloud_nodes'].items():
diff --git a/apex/deployment/__init__.py b/apex/deployment/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/deployment/__init__.py
diff --git a/apex/deployment/snapshot.py b/apex/deployment/snapshot.py
new file mode 100644
index 00000000..b33907fb
--- /dev/null
+++ b/apex/deployment/snapshot.py
@@ -0,0 +1,241 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import fnmatch
+import logging
+import os
+import pprint
+import socket
+import time
+
+import libvirt
+
+import apex.common.constants as con
+from apex.common import exceptions as exc
+from apex.common import utils
+from apex.overcloud.node import OvercloudNode
+import apex.settings.deploy_settings as ds
+
+
+SNAP_FILE = 'snapshot.properties'
+CHECKSUM = 'OPNFV_SNAP_SHA512SUM'
+OVERCLOUD_RC = 'overcloudrc'
+SSH_KEY = 'id_rsa'
+OPENSTACK = 'openstack'
+OPENDAYLIGHT = 'opendaylight'
+SERVICES = (OPENSTACK, OPENDAYLIGHT)
+
+
+class SnapshotDeployment:
+ def __init__(self, deploy_settings, snap_cache_dir, fetch=True,
+ all_in_one=False):
+ self.id_rsa = None
+ self.fetch = fetch
+ ds_opts = deploy_settings['deploy_options']
+ self.os_version = ds_opts['os_version']
+ self.ha_enabled = deploy_settings['global_params']['ha_enabled']
+ if self.ha_enabled:
+ self.ha_ext = 'ha'
+ elif all_in_one:
+ self.ha_ext = 'noha-allinone'
+ else:
+ self.ha_ext = 'noha'
+ self.snap_cache_dir = os.path.join(snap_cache_dir,
+ "{}/{}".format(self.os_version,
+ self.ha_ext))
+ self.networks = []
+ self.oc_nodes = []
+ self.properties_url = "{}/apex/{}/{}".format(con.OPNFV_ARTIFACTS,
+ self.os_version,
+ self.ha_ext)
+ self.conn = libvirt.open('qemu:///system')
+ if not self.conn:
+ raise exc.SnapshotDeployException(
+ 'Unable to open libvirt connection')
+ if self.fetch:
+ self.pull_snapshot(self.properties_url, self.snap_cache_dir)
+ else:
+ logging.info('Fetch disabled. Will not attempt to pull latest '
+ 'snapshot')
+ self.deploy_snapshot()
+
+ @staticmethod
+ def pull_snapshot(url_path, snap_cache_dir):
+ """
+ Compare opnfv properties file and download and unpack snapshot if
+ necessary
+ :param url_path: path of latest snap info
+ :param snap_cache_dir: local directory for snap cache
+ :return: None
+ """
+ full_url = os.path.join(url_path, SNAP_FILE)
+ upstream_props = utils.fetch_properties(full_url)
+ logging.debug("Upstream properties are: {}".format(upstream_props))
+ try:
+ upstream_sha = upstream_props[CHECKSUM]
+ except KeyError:
+ logging.error('Unable to find {} for upstream properties: '
+ '{}'.format(CHECKSUM, upstream_props))
+ raise exc.SnapshotDeployException('Unable to find upstream '
+ 'properties checksum value')
+ local_prop_file = os.path.join(snap_cache_dir, SNAP_FILE)
+ try:
+ local_props = utils.fetch_properties(local_prop_file)
+ local_sha = local_props[CHECKSUM]
+ pull_snap = local_sha != upstream_sha
+ except (exc.FetchException, KeyError):
+ logging.info("No locally cached properties found, will pull "
+ "latest")
+ local_sha = None
+ pull_snap = True
+ logging.debug('Local sha: {}, Upstream sha: {}'.format(local_sha,
+ upstream_sha))
+ if pull_snap:
+ logging.info('SHA mismatch, will download latest snapshot')
+ full_snap_url = upstream_props['OPNFV_SNAP_URL']
+ snap_file = os.path.basename(full_snap_url)
+ snap_url = full_snap_url.replace(snap_file, '')
+ if not snap_url.startswith('http://'):
+ snap_url = 'http://' + snap_url
+ utils.fetch_upstream_and_unpack(dest=snap_cache_dir,
+ url=snap_url,
+ targets=[SNAP_FILE, snap_file]
+ )
+ else:
+ logging.info('SHA match, artifacts in cache are already latest. '
+ 'Will not download.')
+
+ def create_networks(self):
+ logging.info("Detecting snapshot networks")
+ try:
+ xmls = fnmatch.filter(os.listdir(self.snap_cache_dir), '*.xml')
+ except FileNotFoundError:
+ raise exc.SnapshotDeployException(
+ 'No XML files found in snap cache directory: {}'.format(
+ self.snap_cache_dir))
+ net_xmls = list()
+ for xml in xmls:
+ if xml.startswith('baremetal'):
+ continue
+ net_xmls.append(os.path.join(self.snap_cache_dir, xml))
+ if not net_xmls:
+ raise exc.SnapshotDeployException(
+ 'No network XML files detected in snap cache, '
+ 'please check local snap cache contents')
+ logging.info('Snapshot networks found: {}'.format(net_xmls))
+ for xml in net_xmls:
+ logging.debug('Creating network from {}'.format(xml))
+ with open(xml, 'r') as fh:
+ net_xml = fh.read()
+ net = self.conn.networkCreateXML(net_xml)
+ self.networks.append(net)
+ logging.info('Network started: {}'.format(net.name()))
+
+ def parse_and_create_nodes(self):
+ """
+ Parse snapshot node.yaml config file and create overcloud nodes
+ :return: None
+ """
+ node_file = os.path.join(self.snap_cache_dir, 'node.yaml')
+ if not os.path.isfile(node_file):
+ raise exc.SnapshotDeployException('Missing node definitions file: '
+ '{}'.format(node_file))
+ node_data = utils.parse_yaml(node_file)
+ if 'servers' not in node_data:
+ raise exc.SnapshotDeployException('Invalid node.yaml format')
+ for node, data in node_data['servers'].items():
+ logging.info('Creating node: {}'.format(node))
+ logging.debug('Node data is:\n{}'.format(pprint.pformat(data)))
+ node_xml = os.path.join(self.snap_cache_dir,
+ '{}.xml'.format(data['vNode-name']))
+ node_qcow = os.path.join(self.snap_cache_dir,
+ '{}.qcow2'.format(data['vNode-name']))
+ self.oc_nodes.append(
+ OvercloudNode(ip=data['address'],
+ ovs_ctrlrs=data['ovs-controller'],
+ ovs_mgrs=data['ovs-managers'],
+ role=data['type'],
+ name=node,
+ node_xml=node_xml,
+ disk_img=node_qcow)
+ )
+ logging.info('Node Created')
+ logging.info('Starting nodes')
+ for node in self.oc_nodes:
+ node.start()
+
+ def get_controllers(self):
+ controllers = []
+ for node in self.oc_nodes:
+ if node.role == 'controller':
+ controllers.append(node)
+ return controllers
+
+ def is_service_up(self, service):
+ assert service in SERVICES
+ if service == OPENSTACK:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(5)
+ controllers = self.get_controllers()
+ if not controllers:
+ raise exc.SnapshotDeployException('No OpenStack controllers found')
+
+ for node in controllers:
+ logging.info('Waiting until {} is up on controller: '
+ '{}'.format(service, node.name))
+ for x in range(10):
+ logging.debug('Checking {} is up attempt {}'.format(service,
+ str(x + 1)))
+ if service == OPENSTACK:
+ # Check if Neutron is up
+ if sock.connect_ex((node.ip, 9696)) == 0:
+ logging.info('{} is up on controller {}'.format(
+ service, node.name))
+ break
+ elif service == OPENDAYLIGHT:
+ url = 'http://{}:8081/diagstatus'.format(node.ip)
+ try:
+ utils.open_webpage(url)
+ logging.info('{} is up on controller {}'.format(
+ service, node.name))
+ break
+ except Exception as e:
+ logging.debug('Cannot contact ODL. Reason: '
+ '{}'.format(e))
+ time.sleep(60)
+ else:
+ logging.error('{} is not running after 10 attempts'.format(
+ service))
+ return False
+ return True
+
+ def deploy_snapshot(self):
+ # bring up networks
+ self.create_networks()
+ # check overcloudrc exists, id_rsa
+ for snap_file in (OVERCLOUD_RC, SSH_KEY):
+ if not os.path.isfile(os.path.join(self.snap_cache_dir,
+ snap_file)):
+ logging.warning('File is missing from snap cache: '
+ '{}'.format(snap_file))
+ # create nodes
+ self.parse_and_create_nodes()
+ # validate deployment
+ if self.is_service_up(OPENSTACK):
+ logging.info('OpenStack is up')
+ else:
+ raise exc.SnapshotDeployException('OpenStack is not alive')
+ if self.is_service_up(OPENDAYLIGHT):
+ logging.info('OpenDaylight is up')
+ else:
+ raise exc.SnapshotDeployException(
+ 'OpenDaylight is not reporting diag status')
+ # TODO(trozet): recreate external network/subnet if missing
+ logging.info('Snapshot deployment complete. Please use the {} file '
+ 'in {} to interact with '
+ 'OpenStack'.format(OVERCLOUD_RC, self.snap_cache_dir))
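A hypothetical usage sketch for the class above; note that the constructor both pulls the snapshot (when fetch=True) and deploys it. This assumes DeploySettings accepts a deploy-settings file path; the paths shown are illustrative:

    from apex.deployment.snapshot import SnapshotDeployment
    from apex.settings.deploy_settings import DeploySettings

    ds = DeploySettings('/etc/opnfv-apex/os-odl-nofeature-noha.yaml')
    # pulls the latest snapshot into the cache (if needed) and deploys it
    SnapshotDeployment(deploy_settings=ds,
                       snap_cache_dir='/var/cache/apex/snap_cache',
                       fetch=True,
                       all_in_one=False)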
diff --git a/apex/deployment/tripleo.py b/apex/deployment/tripleo.py
new file mode 100644
index 00000000..c131e161
--- /dev/null
+++ b/apex/deployment/tripleo.py
@@ -0,0 +1,60 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# TODO(trozet): this will serve as the deployment class as we migrate logic out
+# of deploy.py
+import logging
+import os
+import pprint
+
+from apex.common.exceptions import ApexDeployException
+from apex.common import utils
+
+
+class ApexDeployment:
+ def __init__(self, deploy_settings, patch_file, ds_file):
+ self.ds = deploy_settings
+ # TODO(trozet): remove ds_file from args and have this class inherit
+ # super deployment class init which does all the settings
+ self.ds_file = ds_file
+ self.ds_globals = self.ds['global_params']
+ self.p_file = patch_file
+
+ def determine_patches(self):
+ patches = self.ds_globals['patches']
+ if not os.path.isfile(self.p_file):
+ new_file = os.path.join(os.path.dirname(self.ds_file),
+ 'common-patches.yaml')
+ if os.path.isfile(new_file):
+ logging.warning('Patch file {} not found, falling back to '
+ '{}'.format(self.p_file, new_file))
+ self.p_file = new_file
+ else:
+ logging.error('Unable to find common patch file: '
+ '{}'.format(self.p_file))
+ raise ApexDeployException(
+ 'Specified common patch file not found: {}'.format(
+ self.p_file))
+ logging.info('Loading patches from common patch file {}'.format(
+ self.p_file))
+ common_patches = utils.parse_yaml(self.p_file)
+ logging.debug('Content from common patch file is: {}'.format(
+ pprint.pformat(common_patches)))
+ os_version = self.ds['deploy_options']['os_version']
+ try:
+ common_patches = common_patches['patches'][os_version]
+ except KeyError:
+ logging.error('Error parsing common patches file, wrong format.')
+ raise ApexDeployException('Invalid format of common patch file')
+
+ for ptype in ('undercloud', 'overcloud'):
+ if ptype in common_patches:
+ patches[ptype] = utils.unique(patches[ptype] +
+ common_patches[ptype])
+ return patches
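determine_patches() implies a common-patches.yaml shaped as below. This is a sketch with hypothetical values; the change-id/project keys mirror how Apex patch lists are typically specified:

    import yaml

    sample = """
    patches:
      master:
        undercloud:
          - change-id: I0123456789abcdef0123456789abcdef01234567
            project: openstack/tripleo-heat-templates
        overcloud: []
    """
    common = yaml.safe_load(sample)
    assert set(common['patches']['master']) <= {'undercloud', 'overcloud'}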
diff --git a/apex/inventory/inventory.py b/apex/inventory/inventory.py
index b5ffd2f8..0546fe9f 100644
--- a/apex/inventory/inventory.py
+++ b/apex/inventory/inventory.py
@@ -67,9 +67,12 @@ class Inventory(dict):
if ha and len(self['nodes']) < 5:
raise ApexInventoryException('You must provide at least 5 '
'nodes for HA deployment')
- elif len(self['nodes']) < 2:
- raise ApexInventoryException('You must provide at least 2 nodes '
+ elif len(self['nodes']) < 1:
+ raise ApexInventoryException('You must provide at least 1 node '
'for non-HA deployment')
+ elif list(self.get_node_counts())[0] < 1:
+ raise ApexInventoryException('You must provide at least 1 '
+ 'control node for deployment')
if virtual:
self['host-ip'] = '192.168.122.1'
diff --git a/apex/network/jumphost.py b/apex/network/jumphost.py
index c28c105e..86556659 100644
--- a/apex/network/jumphost.py
+++ b/apex/network/jumphost.py
@@ -53,12 +53,8 @@ def configure_bridges(ns):
if cidr.version == 6:
ipv6_br_path = "/proc/sys/net/ipv6/conf/{}/disable_" \
"ipv6".format(NET_MAP[network])
- try:
- subprocess.check_call('echo', 0, '>', ipv6_br_path)
- except subprocess.CalledProcessError:
- logging.error("Unable to enable ipv6 on "
- "bridge {}".format(NET_MAP[network]))
- raise
+ with open(ipv6_br_path, 'w') as f:
+ print(0, file=f)
try:
ip_prefix = "{}/{}".format(ovs_ip, cidr.prefixlen)
subprocess.check_call(['ip', 'addr', 'add', ip_prefix, 'dev',
diff --git a/apex/network/network_data.py b/apex/network/network_data.py
index 1177af09..6f330c50 100644
--- a/apex/network/network_data.py
+++ b/apex/network/network_data.py
@@ -83,7 +83,7 @@ def create_network_data(ns, target=None):
"{}".format(net))
raise NetworkDataException("cidr is null for network {}".format(
net))
-
+ tmp_net['mtu'] = network.get('mtu', 1500)
network_data.append(copy.deepcopy(tmp_net))
# have to do this due to the aforementioned bug
diff --git a/apex/network/network_environment.py b/apex/network/network_environment.py
index ea71e0f3..52b4452a 100644
--- a/apex/network/network_environment.py
+++ b/apex/network/network_environment.py
@@ -82,7 +82,7 @@ class NetworkEnvironment(dict):
admin_prefix = str(admin_cidr.prefixlen)
self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
self[param_def]['ControlPlaneDefaultRoute'] = \
- nets[ADMIN_NETWORK]['installer_vm']['ip']
+ nets[ADMIN_NETWORK]['gateway']
self[param_def]['EC2MetadataIp'] = \
nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['DnsServers'] = net_settings['dns_servers']
@@ -186,6 +186,8 @@ class NetworkEnvironment(dict):
for flag in IPV6_FLAGS:
self[param_def][flag] = True
+ self._update_service_netmap(net_settings.enabled_network_list)
+
def _get_vlan(self, network):
if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
return network['nic_mapping'][CONTROLLER]['vlan']
@@ -218,6 +220,13 @@ class NetworkEnvironment(dict):
prefix = ''
self[reg][key] = self.tht_dir + prefix + postfix
+ def _update_service_netmap(self, network_list):
+ if 'ServiceNetMap' not in self[param_def]:
+ return
+ for service, network in self[param_def]['ServiceNetMap'].items():
+ if network not in network_list:
+ self[param_def]['ServiceNetMap'][service] = 'ctlplane'
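A minimal standalone sketch of the fallback above: any service mapped to a network that is not being deployed gets pinned to ctlplane (names are illustrative):

    service_net_map = {'NeutronApiNetwork': 'internal_api',
                       'SwiftStorageNetwork': 'storage'}
    enabled_networks = ['admin', 'tenant', 'external']
    for service, network in service_net_map.items():
        if network not in enabled_networks:
            service_net_map[service] = 'ctlplane'
    # both services now map to 'ctlplane'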
+
class NetworkEnvException(Exception):
def __init__(self, value):
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
index a7f7d848..e8d8fbb0 100644
--- a/apex/overcloud/config.py
+++ b/apex/overcloud/config.py
@@ -52,6 +52,9 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
if ds.get('dvr') is True:
nets['admin']['nic_mapping'][role]['phys_type'] = \
'linux_bridge'
+ else:
+ nets['external'][0]['nic_mapping'][role]['phys_type'] = \
+ 'linux_bridge'
elif ds['dataplane'] == 'ovs_dpdk':
ovs_dpdk_br = 'br-phy'
if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
@@ -66,9 +69,6 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
nets['tenant']['nic_mapping'][role]['interface-options'] =\
ds['performance'][role.title()]['vpp']['interface-options']
- if role == 'controller' and ds.get('sfc', None):
- ext_net = 'interface'
-
template_output = template.render(
nets=nets,
role=role,
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 809afc13..538f50a4 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -11,14 +11,20 @@ import base64
import fileinput
import logging
import os
+import platform
+import pprint
import shutil
import uuid
import struct
import time
+import yaml
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
+from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
crypto_serialization
@@ -37,6 +43,8 @@ SDN_FILE_MAP = {
'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
},
+ 'l2gw': 'neutron-l2gw-opendaylight.yaml',
+ 'sriov': 'neutron-opendaylight-sriov.yaml',
'default': 'neutron-opendaylight.yaml',
},
'onos': {
@@ -64,24 +72,75 @@ OVS_PERF_MAP = {
'NeutronDpdkMemoryChannels': 'memory_channels'
}
-OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
-OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
".noarch.rpm"
+LOOP_DEVICE_SIZE = "10G"
+
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+DUPLICATE_COMPUTE_SERVICES = [
+ 'OS::TripleO::Services::ComputeNeutronCorePlugin',
+ 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+ 'OS::TripleO::Services::ComputeNeutronOvsAgent',
+ 'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
+NFS_VARS = [
+ 'NovaNfsEnabled',
+ 'GlanceNfsEnabled',
+ 'CinderNfsEnabledBackend'
+]
+
def build_sdn_env_list(ds, sdn_map, env_list=None):
+ """
+ Builds a list of SDN environment files to be used in the deploy cmd.
+
+ This function recursively searches an sdn_map. The SDN controller is
+ matched first, and then the function looks for enabled features of
+ that controller to determine which environment files should be used.
+ A feature is added to the list when its deploy setting is truthy. If
+ a feature maps to a (value, env_file) tuple rather than a single
+ file, the env file is added only when the deploy setting equals that
+ value.
+
+ :param ds: deploy settings
+ :param sdn_map: SDN map to recursively search
+ :param env_list: recursive var to hold previously found env_list
+ :return: A list of env files
+ """
if env_list is None:
env_list = list()
for k, v in sdn_map.items():
- if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+ if ds['sdn_controller'] == k or (k in ds and ds[k]):
if isinstance(v, dict):
+ # Append default SDN env file first
+ # The assumption is that feature-enabled SDN env files
+ # override and do not conflict with previously set default
+ # settings
+ if ds['sdn_controller'] == k and 'default' in v:
+ env_list.append(os.path.join(con.THT_ENV_DIR,
+ v['default']))
env_list.extend(build_sdn_env_list(ds, v))
+ # check if the value is not a boolean
+ elif isinstance(v, tuple):
+ if ds[k] == v[0]:
+ env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
else:
env_list.append(os.path.join(con.THT_ENV_DIR, v))
- elif isinstance(v, tuple):
- if ds[k] == v[0]:
- env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
if len(env_list) == 0:
try:
env_list.append(os.path.join(
@@ -92,6 +151,26 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
return env_list
+def get_docker_sdn_files(ds_opts):
+ """
+ Returns docker env file for detected SDN
+ :param ds_opts: deploy options
+ :return: list of docker THT env files for an SDN
+ """
+ docker_services = con.VALID_DOCKER_SERVICES
+ tht_dir = con.THT_DOCKER_ENV_DIR
+ sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+ for i, sdn_file in enumerate(sdn_env_list):
+ sdn_base = os.path.basename(sdn_file)
+ if sdn_base in docker_services:
+ if docker_services[sdn_base] is not None:
+ sdn_env_list[i] = \
+ os.path.join(tht_dir, docker_services[sdn_base])
+ else:
+ sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+ return sdn_env_list
+
+
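A worked example of build_sdn_env_list() against the map above: with the truthiness check introduced here (plain ds[k] instead of ds[k] is True), an interface-valued feature such as sriov now triggers its env file:

    ds_opts = {'sdn_controller': 'opendaylight', 'sriov': 'eth1'}
    # build_sdn_env_list(ds_opts, SDN_FILE_MAP) appends the controller's
    # 'default' file first, then the enabled feature's file:
    #   [<THT_ENV_DIR>/neutron-opendaylight.yaml,
    #    <THT_ENV_DIR>/neutron-opendaylight-sriov.yaml]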
def create_deploy_cmd(ds, ns, inv, tmp_dir,
virtual, env_file='opnfv-environment.yaml',
net_data=False):
@@ -99,22 +178,52 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
logging.info("Creating deployment command")
deploy_options = ['network-environment.yaml']
+ ds_opts = ds['deploy_options']
+
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'docker.yaml'))
+
+ if ds['global_params']['ha_enabled']:
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'docker-ha.yaml'))
+ else:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'puppet-pacemaker.yaml'))
+
if env_file:
deploy_options.append(env_file)
- ds_opts = ds['deploy_options']
- deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+ if ds_opts['containers']:
+ deploy_options.append('docker-images.yaml')
+ sdn_docker_files = get_docker_sdn_files(ds_opts)
+ for sdn_docker_file in sdn_docker_files:
+ deploy_options.append(sdn_docker_file)
+ else:
+ deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
for k, v in OTHER_FILE_MAP.items():
if k in ds_opts and ds_opts[k]:
- deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
+ "{}.yaml".format(k)))
+ else:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
- if ds_opts['ceph']:
- prep_storage_env(ds, tmp_dir)
+ # TODO(trozet): fix this check to look at whether ceph is in the
+ # controller services rather than relying on the env file name
+ if ds_opts['ceph'] and 'csit' not in env_file:
+ prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
'storage-environment.yaml'))
- if ds['global_params']['ha_enabled']:
- deploy_options.append(os.path.join(con.THT_ENV_DIR,
- 'puppet-pacemaker.yaml'))
+ if ds_opts['sriov']:
+ prep_sriov_env(ds, tmp_dir)
+
+ # Check for 'k8s' here intentionally, as we may support other values
+ # such as openstack/openshift for 'vim' option.
+ if ds_opts['vim'] == 'k8s':
+ deploy_options.append('kubernetes-environment.yaml')
if virtual:
deploy_options.append('virtual-environment.yaml')
@@ -122,12 +231,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
deploy_options.append('baremetal-environment.yaml')
num_control, num_compute = inv.get_node_counts()
- if num_control == 0 or num_compute == 0:
- logging.error("Detected 0 control or compute nodes. Control nodes: "
- "{}, compute nodes{}".format(num_control, num_compute))
- raise ApexDeployException("Invalid number of control or computes")
- elif num_control > 1 and not ds['global_params']['ha_enabled']:
+ if num_control > 1 and not ds['global_params']['ha_enabled']:
num_control = 1
+ if platform.machine() == 'aarch64':
+ # aarch64 deploys were not completing in the default 90 mins.
+ # It is unclear whether this is related to the hardware the OOO
+ # support was developed on or to the virtualization support in
+ # CentOS. Either way it should improve as aarch64 support matures
+ # in CentOS; deploy time should be re-tested in the future so this
+ # multiplier can be removed.
+ con.DEPLOY_TIMEOUT *= 2
cmd = "openstack overcloud deploy --templates --timeout {} " \
.format(con.DEPLOY_TIMEOUT)
# build cmd env args
@@ -140,12 +253,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
if net_data:
cmd += ' --networks-file network_data.yaml'
libvirt_type = 'kvm'
- if virtual:
+ if virtual and (platform.machine() != 'aarch64'):
with open('/sys/module/kvm_intel/parameters/nested') as f:
nested_kvm = f.read().strip()
if nested_kvm != 'Y':
libvirt_type = 'qemu'
+ elif virtual and (platform.machine() == 'aarch64'):
+ libvirt_type = 'qemu'
cmd += ' --libvirt-type {}'.format(libvirt_type)
+ if platform.machine() == 'aarch64':
+ cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
logging.info("Deploy command set: {}".format(cmd))
with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -153,13 +270,17 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
return cmd
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+ patches=None):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
+ :param ns: network settings
:param img: sdn image
:param tmp_dir: dir to store modified sdn image
:param root_pw: password to configure for overcloud image
+ :param docker_tag: Docker image tag for RDO version (default None)
+ :param patches: List of patches to apply to overcloud image
:return: None
"""
# TODO(trozet): Come up with a better way to organize this logic in this
@@ -172,6 +293,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
ds_opts = ds['deploy_options']
virt_cmds = list()
sdn = ds_opts['sdn_controller']
+ patched_containers = set()
# we need this due to rhbz #1436021
# fixed in systemd-219-37.el7
if sdn is not False:
@@ -186,7 +308,25 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
".service"
}])
+ if ns.get('http_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'http_proxy={}' >> /etc/environment".format(
+ ns['http_proxy'])})
+
+ if ns.get('https_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'https_proxy={}' >> /etc/environment".format(
+ ns['https_proxy'])})
+
+ tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+ shutil.copyfile(img, tmp_oc_image)
+ logging.debug("Temporary overcloud image stored as: {}".format(
+ tmp_oc_image))
+
if ds_opts['vpn']:
+ oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
virt_cmds.append({
con.VIRT_RUN_CMD:
@@ -226,15 +366,14 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
if root_pw:
pw_op = "password:{}".format(root_pw)
virt_cmds.append({con.VIRT_PW: pw_op})
- if ds_opts['sfc'] and dataplane == 'ovs':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install "
- "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
- "{}".format(OVS_NSH_KMOD_RPM)},
- {con.VIRT_RUN_CMD: "yum downgrade -y "
- "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
- "{}".format(OVS_NSH_RPM)}
- ])
+
+ # FIXME(trozet) ovs build is failing in CentOS 7.6
+ # if dataplane == 'ovs':
+ # FIXME(trozet) remove this after RDO is updated with fix for
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+ # https://review.rdoproject.org/r/#/c/13839/
+ # oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
+
if dataplane == 'fdio':
# Patch neutron with using OVS external interface for router
# and add generic linux NS interface driver
@@ -248,48 +387,77 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
"/root/nosdn_vpp_rpms/*.rpm"}
])
+ undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+ 'installer_vm']['ip']
if sdn == 'opendaylight':
- if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
- {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
- "/root/puppet-opendaylight-"
- "{}.tar.gz".format(ds_opts['odl_version'])}
- ])
- if ds_opts['odl_version'] == 'master':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
- else:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
-
- elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
- and ds_opts['odl_vpp_netvirt']:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ODL_NETVIRT_VPP_RPM)}
- ])
-
- if sdn == 'ovn':
+ oc_builder.inject_opendaylight(
+ odl_version=ds_opts['odl_version'],
+ image=tmp_oc_image,
+ tmp_dir=tmp_dir,
+ uc_ip=undercloud_admin_ip,
+ os_version=ds_opts['os_version'],
+ docker_tag=docker_tag,
+ )
+ if docker_tag:
+ patched_containers = patched_containers.union({'opendaylight'})
+
+ if patches:
+ if ds_opts['os_version'] == 'master':
+ branch = ds_opts['os_version']
+ else:
+ branch = "stable/{}".format(ds_opts['os_version'])
+ logging.info('Adding patches to overcloud')
+ patched_containers = patched_containers.union(
+ c_builder.add_upstream_patches(patches,
+ tmp_oc_image, tmp_dir,
+ branch,
+ uc_ip=undercloud_admin_ip,
+ docker_tag=docker_tag))
+ # for containerized deployments with Ceph and no dedicated Ceph
+ # device, a persistent loop device is needed for the Ceph OSDs
+ if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
+ tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+ with open(tmp_losetup, 'w') as fh:
+ fh.write(LOSETUP_SERVICE)
virt_cmds.extend([
- {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
- "*openvswitch*"},
- {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
- "*openvswitch*"}
+ {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
+ },
+ {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+ .format(LOOP_DEVICE_SIZE)},
+ {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+ {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
-
- tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
- shutil.copyfile(img, tmp_oc_image)
- logging.debug("Temporary overcloud image stored as: {}".format(
- tmp_oc_image))
+ # TODO(trozet) remove this after LP#173474 is fixed
+ dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+ virt_cmds.append(
+ {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+ "ConditionPathExists".format(dhcp_unit)})
+ # Prep for NFS
+ virt_cmds.extend([
+ {con.VIRT_INSTALL: "nfs-utils"},
+ {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+ "/etc/systemd/system/multi-user.target.wants/"
+ "nfs-server.service"},
+ {con.VIRT_RUN_CMD: "mkdir -p /glance"},
+ {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
+ {con.VIRT_RUN_CMD: "mkdir -p /nova"},
+ {con.VIRT_RUN_CMD: "chmod 777 /glance"},
+ {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
+ {con.VIRT_RUN_CMD: "chmod 777 /nova"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
+ {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
+ "no_root_squash,no_acl)' > /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "exportfs -avr"},
+ ])
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
+ return patched_containers
def make_ssh_key():
@@ -341,6 +509,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
# SSH keys
private_key, public_key = make_ssh_key()
+ num_control, num_compute = inv.get_node_counts()
+ if num_control > 1 and not ds['global_params']['ha_enabled']:
+ num_control = 1
+
# Make easier/faster variables to index in the file editor
if 'performance' in ds_opts:
perf = True
@@ -368,6 +540,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
else:
perf = False
+ tenant_settings = ns['networks']['tenant']
+ tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+ ns['networks']['tenant'].get('segmentation_type') == 'vlan'
+
# Modify OPNFV environment
# TODO: Change to build a dict and outputting yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
@@ -391,6 +567,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
ds_opts['dataplane'] == 'ovs_dpdk':
output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
'./ovs-dpdk-preconfig.yaml'
+ elif 'NeutronNetworkVLANRanges' in line:
+ vlan_setting = ''
+ if tenant_vlan_enabled:
+ if ns['networks']['tenant']['overlay_id_range']:
+ vlan_setting = ns['networks']['tenant']['overlay_id_range']
+ if 'datacentre' not in vlan_setting:
+ vlan_setting += ',datacentre:1:1000'
+ # SRIOV networks are VLAN based provider networks. In order to
+ # simplify the deployment, nfv_sriov will be the default physnet.
+ # VLANs do not need to be allocated in advance; the user creates the
+ # network and specifies the segmentation ID.
+ if ds_opts['sriov']:
+ if vlan_setting:
+ vlan_setting += ",nfv_sriov"
+ else:
+ vlan_setting = "datacentre:1:1000,nfv_sriov"
+ if vlan_setting:
+ output_line = " NeutronNetworkVLANRanges: " + vlan_setting
+ elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " NeutronBridgeMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+ and ds_opts['sdn_controller'] == 'opendaylight':
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " OpenDaylightProviderMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+ output_line = " NeutronNetworkType: vlan\n" \
+ " NeutronTunnelTypes: ''"
if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts:
@@ -400,16 +616,22 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
ns['domain_name']))
elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
if 'NeutronVPPAgentPhysnets' in line:
- output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
- format(tenant_nic['Controller']))
+ # VPP interface tap0 will be used for external network
+ # connectivity.
+ output_line = (" NeutronVPPAgentPhysnets: "
+ "'datacentre:{},external:tap0'"
+ .format(tenant_nic['Controller']))
elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
'dvr') is True:
if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
output_line = ''
elif 'NeutronDhcpAgentsPerNetwork' in line:
- num_control, num_compute = inv.get_node_counts()
+ if num_compute == 0:
+ num_dhcp_agents = num_control
+ else:
+ num_dhcp_agents = num_compute
output_line = (" NeutronDhcpAgentsPerNetwork: {}"
- .format(num_compute))
+ .format(num_dhcp_agents))
elif 'ComputeServices' in line:
output_line = (" ComputeServices:\n"
" - OS::TripleO::Services::NeutronDhcpAgent")
@@ -475,7 +697,50 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
print(output_line)
+ # Merge compute services into control services if this is a
+ # single-node deployment
+ if num_compute == 0:
+ with open(tmp_opnfv_env, 'r') as fh:
+ data = yaml.safe_load(fh)
+ param_data = data['parameter_defaults']
+ logging.info("All in one deployment detected")
+ logging.info("Disabling NFS in env file")
+ # Check to see if any parameters are set for Compute
+ for param in param_data.keys():
+ if param != 'ComputeServices' and param.startswith('Compute'):
+ logging.warning("Compute parameter set, but will not be used "
+ "in deployment: {}. Please use Controller "
+ "based parameters when using All-in-one "
+ "deployments".format(param))
+ if param in NFS_VARS:
+ param_data[param] = False
+ logging.info("Checking if service merging required into "
+ "control services")
+ if ('ControllerServices' in param_data and 'ComputeServices' in
+ param_data):
+ logging.info("Services detected in environment file. Merging...")
+ ctrl_services = param_data['ControllerServices']
+ cmp_services = param_data['ComputeServices']
+ param_data['ControllerServices'] = list(set().union(
+ ctrl_services, cmp_services))
+ for dup_service in DUPLICATE_COMPUTE_SERVICES:
+ if dup_service in param_data['ControllerServices']:
+ param_data['ControllerServices'].remove(dup_service)
+ param_data.pop('ComputeServices')
+ logging.debug("Merged controller services: {}".format(
+ pprint.pformat(param_data['ControllerServices'])
+ ))
+ else:
+ logging.info("No services detected in env file, not merging "
+ "services")
+ with open(tmp_opnfv_env, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
+
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+ with open(tmp_opnfv_env, 'r') as fh:
+ logging.debug("opnfv-environment content is : {}".format(
+ pprint.pformat(yaml.safe_load(fh.read()))
+ ))
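To make the tenant-VLAN handling above concrete, here is the bridge-mapping derivation in isolation, using a hypothetical overlay_id_range:

    overlay_id_range = 'datacentre:4000:4100,physnet1:500:600'
    line = "  NeutronBridgeMappings: "
    for physnet in overlay_id_range.split(','):
        name = physnet.split(':')[0]
        if name != 'datacentre':
            line += "{}:br-vlan,".format(name)
    line += "datacentre:br-ex"
    # -> "  NeutronBridgeMappings: physnet1:br-vlan,datacentre:br-ex"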
def generate_ceph_key():
@@ -484,11 +749,13 @@ def generate_ceph_key():
return base64.b64encode(header + key)
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
"""
Creates storage environment file for deployment. Source file is copied by
undercloud playbook to host.
:param ds:
+ :param ns:
+ :param virtual:
:param tmp_dir:
:return:
"""
@@ -510,9 +777,35 @@ def prep_storage_env(ds, tmp_dir):
elif 'CephAdminKey' in line:
print(" CephAdminKey: {}".format(generate_ceph_key().decode(
'utf-8')))
+ elif 'CephClientKey' in line:
+ print(" CephClientKey: {}".format(generate_ceph_key().decode(
+ 'utf-8')))
else:
print(line)
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+ if ds_opts['containers']:
+ ceph_params = {}
+
+ # Ceph allows at most num_mons * 200 placement groups, so pick the
+ # pool size and PG count such that
+ # num_pgs * num_pools * num_osds stays below that limit:
+ ceph_params['CephPoolDefaultSize'] = 2
+ ceph_params['CephPoolDefaultPgNum'] = 32
+ if virtual:
+ ceph_params['CephAnsibleExtraConfig'] = {
+ 'centos_package_dependencies': [],
+ 'ceph_osd_docker_memory_limit': '1g',
+ 'ceph_mds_docker_memory_limit': '1g',
+ }
+ ceph_device = ds_opts['ceph_device']
+ ceph_params['CephAnsibleDisksConfig'] = {
+ 'devices': [ceph_device],
+ 'journal_size': 512,
+ 'osd_scenario': 'collocated'
+ }
+ utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ # TODO(trozet): remove following block as we only support containers now
+ elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')
fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
@@ -520,12 +813,58 @@ def prep_storage_env(ds, tmp_dir):
))
-def external_network_cmds(ns):
+def prep_sriov_env(ds, tmp_dir):
+ """
+ Creates SRIOV environment file for deployment. Source file is copied by
+ undercloud playbook to host.
+ :param ds:
+ :param tmp_dir:
+ :return:
+ """
+ ds_opts = ds['deploy_options']
+ sriov_iface = ds_opts['sriov']
+ sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
+ if not os.path.isfile(sriov_file):
+ logging.error("sriov-environment file is not in tmp directory: {}. "
+ "Check if file was copied from "
+ "undercloud".format(tmp_dir))
+ raise ApexDeployException("sriov-environment file not copied from "
+ "undercloud")
+ # TODO(rnoriega): Instead of line editing, refactor this code to load
+ # yaml file into a dict, edit it and write the file back.
+ for line in fileinput.input(sriov_file, inplace=True):
+ line = line.strip('\n')
+ if 'NovaSchedulerDefaultFilters' in line:
+ print(" {}".format(line[3:]))
+ elif 'NovaSchedulerAvailableFilters' in line:
+ print(" {}".format(line[3:]))
+ elif 'NeutronPhysicalDevMappings' in line:
+ print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
+ .format(sriov_iface))
+ elif 'NeutronSriovNumVFs' in line:
+ print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
+ elif 'NovaPCIPassthrough' in line:
+ print(" NovaPCIPassthrough:")
+ elif 'devname' in line:
+ print(" - devname: \"{}\"".format(sriov_iface))
+ elif 'physical_network' in line:
+ print(" physical_network: \"nfv_sriov\"")
+ else:
+ print(line)
+
+
+def external_network_cmds(ns, ds):
"""
Generates external network openstack commands
:param ns: network settings
+ :param ds: deploy settings
:return: list of commands to configure external network
"""
+ ds_opts = ds['deploy_options']
+ external_physnet = 'datacentre'
+ if ds_opts['dataplane'] == 'fdio' and \
+ ds_opts['sdn_controller'] != 'opendaylight':
+ external_physnet = 'external'
if 'external' in ns.enabled_network_list:
net_config = ns['networks']['external'][0]
external = True
@@ -546,7 +885,8 @@ def external_network_cmds(ns):
'compute']['vlan'])
cmds.append("openstack network create external --project service "
"--external --provider-network-type {} "
- "--provider-physical-network datacentre".format(ext_type))
+ "--provider-physical-network {}"
+ .format(ext_type, external_physnet))
# create subnet command
cidr = net_config['cidr']
subnet_cmd = "openstack subnet create external-subnet --project " \
@@ -554,8 +894,7 @@ def external_network_cmds(ns):
"--allocation-pool start={},end={} --subnet-range " \
"{}".format(gateway, pool_start, pool_end, str(cidr))
if external and cidr.version == 6:
- subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
- '--ipv6-address-mode slaac'
+ subnet_cmd += ' --ip-version 6'
cmds.append(subnet_cmd)
logging.debug("Neutron external network commands determined "
"as: {}".format(cmds))
diff --git a/apex/overcloud/node.py b/apex/overcloud/node.py
new file mode 100644
index 00000000..622d1fd1
--- /dev/null
+++ b/apex/overcloud/node.py
@@ -0,0 +1,147 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+import shutil
+import xml.etree.ElementTree as ET
+
+import distro
+import libvirt
+
+from apex.common.exceptions import OvercloudNodeException
+
+
+class OvercloudNode:
+ """
+ Overcloud server
+ """
+ def __init__(self, role, ip, ovs_ctrlrs, ovs_mgrs, name, node_xml,
+ disk_img):
+ self.role = role
+ self.ip = ip
+ self.ovs_ctrlrs = ovs_ctrlrs
+ self.ovs_mgrs = ovs_mgrs
+ self.name = name
+ self.node_xml_file = node_xml
+ self.node_xml = None
+ self.vm = None
+ self.disk_img = None
+ if not os.path.isfile(self.node_xml_file):
+ raise OvercloudNodeException('XML definition file not found: '
+ '{}'.format(self.node_xml_file))
+ if not os.path.isfile(disk_img):
+ raise OvercloudNodeException('Disk image file not found: '
+ '{}'.format(disk_img))
+ self.conn = libvirt.open('qemu:///system')
+ if not self.conn:
+ raise OvercloudNodeException('Unable to open libvirt connection')
+
+ self.create(src_disk=disk_img)
+
+ def _configure_disk(self, disk):
+ # find default storage pool path
+ pool = self.conn.storagePoolLookupByName('default')
+ if pool is None:
+ raise OvercloudNodeException('Cannot find default storage pool')
+ pool_xml = pool.XMLDesc()
+ logging.debug('Default storage pool xml: {}'.format(pool_xml))
+ etree = ET.fromstring(pool_xml)
+ try:
+ path = etree.find('target').find('path').text
+ logging.info('System libvirt default pool path: {}'.format(path))
+ except AttributeError as e:
+ logging.error('Failure to find libvirt storage path: {}'.format(
+ e))
+ raise OvercloudNodeException('Cannot find default storage path')
+ # copy disk to system path
+ self.disk_img = os.path.join(path, os.path.basename(disk))
+ logging.info('Copying disk image to: {}. This may take some '
+ 'time...'.format(self.disk_img))
+ shutil.copyfile(disk, self.disk_img)
+
+ @staticmethod
+ def _update_xml(xml, disk_path=None):
+ """
+ Updates a libvirt XML file for the current architecture and OS of this
+ machine
+ :param xml: XML string of Libvirt domain definition
+ :param disk_path: Optional file path to update for the backing disk
+ image
+ :return: Updated XML
+ """
+ logging.debug('Parsing xml')
+ try:
+ etree = ET.fromstring(xml)
+ except ET.ParseError:
+ logging.error('Unable to parse node XML: {}'.format(xml))
+ raise OvercloudNodeException('Unable to parse node XML')
+
+ try:
+ type_element = etree.find('os').find('type')
+ if 'machine' in type_element.keys():
+ type_element.set('machine', 'pc')
+ logging.debug('XML updated with machine "pc"')
+ except AttributeError:
+ logging.warning('Failure to set XML machine type')
+
+ # qemu-kvm path may differ per system, need to detect it and update xml
+ linux_ver = distro.linux_distribution()[0]
+ if linux_ver == 'Fedora':
+ qemu_path = '/usr/bin/qemu-kvm'
+ else:
+ qemu_path = '/usr/libexec/qemu-kvm'
+
+ try:
+ etree.find('devices').find('emulator').text = qemu_path
+ logging.debug('XML updated with emulator location: '
+ '{}'.format(qemu_path))
+ xml = ET.tostring(etree).decode('utf-8')
+ except AttributeError:
+ logging.warning('Failure to update XML qemu path')
+
+ if disk_path:
+ try:
+ disk_element = etree.find('devices').find('disk').find(
+ 'source')
+ disk_element.set('file', disk_path)
+ logging.debug('XML updated with file path: {}'.format(
+ disk_path))
+ except AttributeError:
+ logging.error('Failure to parse XML and set disk type')
+ raise OvercloudNodeException(
+ 'Unable to set new disk path in xml {}'.format(xml))
+
+ return ET.tostring(etree).decode('utf-8')
+
+ def create(self, src_disk):
+ # copy disk to pool and get new disk location
+ logging.debug('Preparing disk image')
+ self._configure_disk(src_disk)
+ logging.debug('Parsing node XML from {}'.format(self.node_xml_file))
+ with open(self.node_xml_file, 'r') as fh:
+ self.node_xml = fh.read()
+ # set the machine type to 'pc' if it is not already, and update the
+ # qemu-kvm emulator path and disk location for this host
+ self.node_xml = self._update_xml(self.node_xml, self.disk_img)
+ logging.info('Creating node {} in libvirt'.format(self.name))
+ self.vm = self.conn.defineXML(self.node_xml)
+
+ def start(self):
+ """
+ Boot node in libvirt
+ :return:
+ """
+ try:
+ self.vm.create()
+ logging.info('Node {} started'.format(self.name))
+ except libvirt.libvirtError as e:
+ logging.error('Failed to start domain: {}'.format(self.name))
+ raise OvercloudNodeException('Failed to start VM. Reason: '
+ '{}'.format(e))
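_update_xml() above can be illustrated standalone with a toy domain definition (the real XML comes from the snapshot artifacts):

    import xml.etree.ElementTree as ET

    xml = ('<domain><os><type machine="q35">hvm</type></os>'
           '<devices><emulator>/usr/bin/kvm</emulator>'
           '<disk><source file="/old/disk.qcow2"/></disk></devices>'
           '</domain>')
    tree = ET.fromstring(xml)
    tree.find('os').find('type').set('machine', 'pc')
    tree.find('devices').find('emulator').text = '/usr/libexec/qemu-kvm'
    tree.find('devices').find('disk').find('source').set(
        'file', '/var/lib/libvirt/images/disk.qcow2')
    print(ET.tostring(tree).decode('utf-8'))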
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index f2012b24..9f8a6f18 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -23,11 +23,15 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
'ceph',
'gluon',
'rt_kvm',
- 'os_version']
+ 'os_version',
+ 'l2gw',
+ 'sriov',
+ 'containers',
+ 'ceph_device',
+ 'vim']
OPT_DEPLOY_SETTINGS = ['performance',
'vsperf',
- 'ceph_device',
'yardstick',
'dovetail',
'odl_vpp_routing_node',
@@ -102,12 +106,16 @@ class DeploySettings(dict):
self['deploy_options'][req_set] = 'ovs'
elif req_set == 'ceph':
self['deploy_options'][req_set] = True
+ elif req_set == 'ceph_device':
+ self['deploy_options'][req_set] = '/dev/loop3'
elif req_set == 'odl_version':
self['deploy_options'][req_set] = \
constants.DEFAULT_ODL_VERSION
elif req_set == 'os_version':
self['deploy_options'][req_set] = \
constants.DEFAULT_OS_VERSION
+ elif req_set == 'vim':
+ self['deploy_options'][req_set] = 'openstack'
else:
self['deploy_options'][req_set] = False
elif req_set == 'odl_version' and self['deploy_options'][
@@ -115,9 +123,11 @@ class DeploySettings(dict):
raise DeploySettingsException(
"Invalid ODL version: {}".format(self[deploy_options][
'odl_version']))
-
- if self['deploy_options']['odl_version'] == 'oxygen':
- self['deploy_options']['odl_version'] = 'master'
+ elif req_set == 'sriov':
+ if self['deploy_options'][req_set] is True:
+ raise DeploySettingsException(
+ "Invalid SRIOV interface name: {}".format(
+ self['deploy_options']['sriov']))
if 'performance' in deploy_options:
if not isinstance(deploy_options['performance'], dict):
diff --git a/apex/settings/network_settings.py b/apex/settings/network_settings.py
index f6566834..36d143cb 100644
--- a/apex/settings/network_settings.py
+++ b/apex/settings/network_settings.py
@@ -167,10 +167,13 @@ class NetworkSettings(dict):
"""
_network = self.get_network(network)
# if vlan not defined then default it to native
- if network is not ADMIN_NETWORK:
- for role in ROLES:
+ for role in ROLES:
+ if network is not ADMIN_NETWORK:
if 'vlan' not in _network['nic_mapping'][role]:
_network['nic_mapping'][role]['vlan'] = 'native'
+ else:
+ # ctlplane network must be native
+ _network['nic_mapping'][role]['vlan'] = 'native'
cidr = _network.get('cidr')
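
The vlan defaulting above, reduced to a runnable sketch (ROLES and the
admin network name are assumptions, and != stands in for the module's
identity comparison):

    ROLES = ['compute', 'controller']

    def default_vlans(network, _network, admin_network='admin'):
        for role in ROLES:
            mapping = _network['nic_mapping'][role]
            if network != admin_network:
                mapping.setdefault('vlan', 'native')  # default only if unset
            else:
                mapping['vlan'] = 'native'            # ctlplane must be untagged
        return _network

    tenant = {'nic_mapping': {'compute': {}, 'controller': {'vlan': 101}}}
    # compute gets 'native'; controller keeps vlan 101
    print(default_vlans('tenant', tenant))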
diff --git a/apex/tests/config/98faaca.diff b/apex/tests/config/98faaca.diff
new file mode 100644
index 00000000..96462d5f
--- /dev/null
+++ b/apex/tests/config/98faaca.diff
@@ -0,0 +1,331 @@
+From 98faacad44e39a456d9fe1a1d21f5a65e8de4fc1 Mon Sep 17 00:00:00 2001
+From: Janki Chhatbar <jchhatba@redhat.com>
+Date: Tue, 23 Jan 2018 22:43:49 +0530
+Subject: [PATCH] Minor update steps for ODL
+
+Updating OpenStack (within a release) means updating ODL from v1 to v1.1.
+This is done by "openstack overcloud update" which collects
+update_tasks. ODL needs 2 different steps to achieve this
+minor update. These are called Level 1 and Level 2. L1 is
+simple - stop ODL, update, start. This is taken care of by paunch
+and no separate implementation is needed. L2 has extra steps
+which are implemented in update_tasks and post_update_tasks.
+
+Updating ODL within the same major release (1->1.1) consists of either
+L1 or L2 steps. These steps are decided from ODLUpdateLevel parameter
+specified in environments/services-docker/update-odl.yaml.
+
+Upgrading ODL to the next major release (1.1->2) requires
+only the L2 steps. These are implemented as upgrade_tasks and
+post_upgrade_tasks in https://review.opendev.org/489201.
+
+Steps involved in level 2 update are
+ 1. Block OVS instances to connect to ODL
+ 2. Set ODL upgrade flag to True
+ 3. Start ODL
+ 4. Start Neutron re-sync and wait for it to finish
+ 5. Delete OVS groups and ports
+ 6. Stop OVS
+ 7. Unblock OVS ports
+ 8. Start OVS
+ 9. Unset ODL upgrade flag
+
+These steps are exactly the same as upgrade_tasks.
+The logic implemented is:
+follow upgrade_tasks; when update_level == 2
+
+Change-Id: Ie532800663dd24313a7350b5583a5080ddb796e7
+---
+
+diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2
+index 595e16c..c4fb05f 100644
+--- a/common/deploy-steps.j2
++++ b/common/deploy-steps.j2
+@@ -23,6 +23,7 @@
+ {% set post_upgrade_steps_max = 4 -%}
+ {% set fast_forward_upgrade_steps_max = 9 -%}
+ {% set fast_forward_upgrade_prep_steps_max = 3 -%}
++{% set post_update_steps_max = 4 -%}
+
+ heat_template_version: queens
+
+@@ -590,3 +591,15 @@
+ - include_tasks: {{role.name}}/fast_forward_upgrade_tasks.yaml
+ when: role_name == '{{role.name}}' and ansible_hostname == {{role.name}}[0]
+ {%- endfor %}
++ post_update_steps_tasks: |
++{%- for role in roles %}
++ - include: {{role.name}}/post_update_tasks.yaml
++ when: role_name == '{{role.name}}'
++{%- endfor %}
++ post_update_steps_playbook: |
++ - hosts: overcloud
++ tasks:
++ - include: post_update_steps_tasks.yaml
++ with_sequence: start=0 end={{post_update_steps_max-1}}
++ loop_control:
++ loop_var: step
+diff --git a/common/services.yaml b/common/services.yaml
+index 2a62c1b..c197b05 100644
+--- a/common/services.yaml
++++ b/common/services.yaml
+@@ -283,6 +283,16 @@
+ expression: coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+
++ PostUpdateTasks:
++ type: OS::Heat::Value
++ properties:
++ type: comma_delimited_list
++ value:
++ yaql:
++ # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
++ expression: coalesce($.data, []).where($ != null).select($.get('post_update_tasks')).where($ != null).flatten().distinct()
++ data: {get_attr: [ServiceChain, role_data]}
++
+ UpgradeBatchTasks:
+ type: OS::Heat::Value
+ properties:
+@@ -349,6 +359,7 @@
+ upgrade_tasks: {get_attr: [UpgradeTasks, value]}
+ post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]}
+ update_tasks: {get_attr: [UpdateTasks, value]}
++ post_update_tasks: {get_attr: [PostUpdateTasks, value]}
+ upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]}
+ service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+
+diff --git a/docker/services/opendaylight-api.yaml b/docker/services/opendaylight-api.yaml
+index 6175db9..3cafe53 100644
+--- a/docker/services/opendaylight-api.yaml
++++ b/docker/services/opendaylight-api.yaml
+@@ -44,6 +44,14 @@
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
++ ODLUpdateLevel:
++ default: 1
++ description: Specify the level of update
++ type: number
++ constraints:
++ - allowed_values:
++ - 1
++ - 2
+
+ conditions:
+
+@@ -167,23 +175,25 @@
+ - opendaylight_enabled.rc == 0
+ service: name=opendaylight state=stopped enabled=no
+ # Containerised deployment upgrade steps
+- - name: remove journal and snapshots
+- when: step|int == 0
+- file:
+- path: /var/lib/opendaylight/{{item}}
+- state: absent
+- with_items:
+- - snapshots
+- - journal
+- - name: Set ODL upgrade flag to True
+- copy:
+- dest: /var/lib/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
+- content: |
+- <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
+- <upgradeInProgress>true</upgradeInProgress>
+- </config>
+- when: step|int == 1
+- post_upgrade_tasks:
++ - name: ODL container L2 update and upgrade tasks
++ block: &odl_container_upgrade_tasks
++ - name: remove journal and snapshots
++ when: step|int == 0
++ file:
++ path: /var/lib/opendaylight/{{item}}
++ state: absent
++ with_items:
++ - snapshots
++ - journal
++ - name: Set ODL upgrade flag to True
++ copy:
++ dest: /var/lib/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
++ content: |
++ <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
++ <upgradeInProgress>true</upgradeInProgress>
++ </config>
++ when: step|int == 1
++ post_upgrade_tasks: &odl_container_post_upgrade_tasks
+ - name: Unset upgrade flag in ODL
+ shell:
+ str_replace:
+@@ -192,7 +202,20 @@
+ -H "Content-Type: application/json" \
+ $ODL_URI/restconf/config/genius-mdsalutil:config'
+ params:
+- $ODL_USERNAME: {get_param: [OpenDaylightBase, OpenDaylightUsername]}
+- $ODL_PASSWORD: {get_param: [OpenDaylightBase, OpenDaylightPassword]}
++ $ODL_USERNAME: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::username']}
++ $ODL_PASSWORD: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::password']}
+ $ODL_URI: {get_param: [EndpointMap, OpenDaylightInternal, uri]}
+ when: step|int == 0
++ update_tasks:
++ - name: Get ODL update level
++ block: &get_odl_update_level
++ - name: store update level to update_level variable
++ set_fact:
++ odl_update_level: {get_param: ODLUpdateLevel}
++ - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
++ block: *odl_container_upgrade_tasks
++ when: odl_update_level == 2
++ post_update_tasks:
++ - block: *get_odl_update_level
++ - block: *odl_container_post_upgrade_tasks
++ when: odl_update_level == 2
+\ No newline at end of file
+diff --git a/environments/services-docker/update-odl.yaml b/environments/services-docker/update-odl.yaml
+new file mode 100644
+index 0000000..87d74ef
+--- /dev/null
++++ b/environments/services-docker/update-odl.yaml
+@@ -0,0 +1,11 @@
++# This file describes parameters needed for ODL update.
++# This file is to be used along with other env files during
++# level 2 minor update.
++# Level 2 update involves yang changes in ODL within the same ODL release
++# and hence needs a DB wipe and resync.
++# Level 1 is a simple update - stop ODL, pull new image, start ODL.
++# This file is not to be used during a level 1 update or major upgrade.
++# In case of doubt, please reach out to ODL developers on #tripleo IRC channel
++
++parameter_defaults:
++ ODLUpdateLevel: 2
+\ No newline at end of file
+diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
+index 3390645..958e1bb 100644
+--- a/puppet/services/opendaylight-ovs.yaml
++++ b/puppet/services/opendaylight-ovs.yaml
+@@ -104,6 +104,14 @@
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
++ ODLUpdateLevel:
++ default: 1
++ description: Specify the level of update
++ type: number
++ constraints:
++ - allowed_values:
++ - 1
++ - 2
+
+ parameter_groups:
+ - label: deprecated
+@@ -230,14 +238,16 @@
+ - openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
+ # Container upgrade steps.
+- - name: Block connections to ODL. #This rule will be inserted at the top.
+- iptables: chain=OUTPUT action=insert protocol=tcp destination_port={{ item }} jump=DROP
+- when: step|int == 0
+- with_items:
+- - 6640
+- - 6653
+- - 6633
+- post_upgrade_tasks:
++ - name: ODL container L2 update and upgrade tasks
++ block: &odl_container_upgrade_tasks
++ - name: Block connections to ODL. #This rule will be inserted at the top.
++ iptables: chain=OUTPUT action=insert protocol=tcp destination_port={{ item }} jump=DROP
++ when: step|int == 0
++ with_items:
++ - 6640
++ - 6653
++ - 6633
++ post_upgrade_tasks: &odl_container_post_upgrade_tasks
+ - name: Check service openvswitch is running
+ command: systemctl is-active --quiet openvswitch
+ tags: common
+@@ -260,6 +270,20 @@
+ - name: start openvswitch service
+ when: step|int == 3
+ service : name=openvswitch state=started
++ update_tasks:
++ - name: Get ODL update level
++ block: &get_odl_update_level
++ - name: store update level to update_level variable
++ set_fact:
++ odl_update_level: {get_param: ODLUpdateLevel}
++ - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
++ block: *odl_container_upgrade_tasks
++ when: odl_update_level == 2
++ post_update_tasks:
++ - block: *get_odl_update_level
++ - block: *odl_container_post_upgrade_tasks
++ when: odl_update_level == 2
++
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+@@ -267,4 +291,4 @@
+ - service: ovs
+ network: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+ type: node
+- - null
++ - null
+\ No newline at end of file
+diff --git a/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml b/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
+index 45703d0..e2943de 100644
+--- a/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
++++ b/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
+@@ -1,6 +1,6 @@
+ ---
+
+-features:
++upgrade:
+ - Add ODL upgradability
+ Steps of upgrade are as follows
+ 1. Block OVS instances to connect to ODL done in upgrade_tasks
+diff --git a/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml b/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml
+new file mode 100644
+index 0000000..1bcf8ed
+--- /dev/null
++++ b/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml
+@@ -0,0 +1,19 @@
++---
++features:
++ - Minor update ODL steps are added. ODL minor update (within the same
++ ODL release) can have 2 different workflows. These are called level 1
++ and level 2. Level 1 is simple - stop, update and start ODL. Level 2 is
++ complex and involves yang model changes. This requires wiping of
++ the DB and a resync to repopulate the data.
++ Steps involved in level 2 update are
++ 1. Block OVS instances to connect to ODL
++ 2. Set ODL upgrade flag to True
++ 3. Start ODL
++ 4. Start Neutron re-sync and wait for it to finish
++ 5. Delete OVS groups and ports
++ 6. Stop OVS
++ 7. Unblock OVS ports
++ 8. Start OVS
++ 9. Unset ODL upgrade flag
++ To achieve an L2 update, pass "-e environments/services-docker/
++ update-odl.yaml" along with the other env files to the update command.
+\ No newline at end of file
+diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
+index 59473f5..9ab6a87 100755
+--- a/tools/yaml-validate.py
++++ b/tools/yaml-validate.py
+@@ -46,11 +46,11 @@
+ OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+ 'fast_forward_upgrade_tasks',
+ 'post_upgrade_tasks', 'update_tasks',
+- 'service_config_settings', 'host_prep_tasks',
+- 'metadata_settings', 'kolla_config',
+- 'global_config_settings', 'logging_source',
+- 'logging_groups', 'external_deploy_tasks',
+- 'external_post_deploy_tasks',
++ 'post_update_tasks', 'service_config_settings',
++ 'host_prep_tasks', 'metadata_settings',
++ 'kolla_config', 'global_config_settings',
++ 'logging_source', 'logging_groups',
++ 'external_deploy_tasks', 'external_post_deploy_tasks',
+ 'docker_config_scripts', 'step_config']
+ REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
+ 'config_image']
diff --git a/apex/tests/config/admin.xml b/apex/tests/config/admin.xml
new file mode 100644
index 00000000..69b15b1f
--- /dev/null
+++ b/apex/tests/config/admin.xml
@@ -0,0 +1,7 @@
+<network connections='1' ipv6='yes'>
+ <name>admin</name>
+ <uuid>761c34f8-2a72-4205-8e69-5ed6626c6efa</uuid>
+ <forward mode='bridge'/>
+ <bridge name='br-admin'/>
+ <virtualport type='openvswitch'/>
+</network>
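
A hedged sketch of defining this network through libvirt-python, as the
snapshot-deployment tests later in this diff expect (requires a running
libvirtd, sufficient privileges, and an Apex checkout for the path):

    import libvirt

    with open('apex/tests/config/admin.xml') as fh:
        net_xml = fh.read()

    conn = libvirt.open('qemu:///system')
    if 'admin' not in conn.listNetworks():
        conn.networkCreateXML(net_xml)  # transient network, gone after libvirtd restart
    conn.close()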
diff --git a/apex/tests/config/baremetal0.xml b/apex/tests/config/baremetal0.xml
new file mode 100644
index 00000000..4ff8f65a
--- /dev/null
+++ b/apex/tests/config/baremetal0.xml
@@ -0,0 +1,73 @@
+<domain type='kvm'>
+ <name>baremetal0</name>
+ <uuid>25bf15b6-130c-4bca-87af-e5cbc14bb454</uuid>
+ <memory unit='KiB'>12582912</memory>
+ <currentMemory unit='KiB'>12582912</currentMemory>
+ <vcpu placement='static'>4</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
+ <boot dev='hd'/>
+ <bootmenu enable='no'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='host-passthrough'/>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/libexec/qemu-kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='unsafe'/>
+ <source file='/home/images/baremetal0.qcow2'/>
+ <target dev='sda' bus='sata'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+ </disk>
+ <controller type='scsi' index='0' model='virtio-scsi'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </controller>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <controller type='sata' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </controller>
+ <interface type='bridge'>
+ <mac address='00:5b:06:25:0c:dc'/>
+ <source bridge='br-admin'/>
+ <virtualport type='openvswitch'>
+ <parameters interfaceid='04b63cb9-21a9-4385-bbd6-df677a5eeecf'/>
+ </virtualport>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'>
+ <listen type='address' address='127.0.0.1'/>
+ </graphics>
+ <video>
+ <model type='cirrus' vram='16384' heads='1' primary='yes'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </memballoon>
+ </devices>
+ <seclabel type='dynamic' model='selinux' relabel='yes'/>
+ <seclabel type='dynamic' model='dac' relabel='yes'/>
+</domain>
diff --git a/apex/tests/config/common-patches.yaml b/apex/tests/config/common-patches.yaml
new file mode 100644
index 00000000..fef8fcd0
--- /dev/null
+++ b/apex/tests/config/common-patches.yaml
@@ -0,0 +1,6 @@
+---
+patches:
+ queens:
+ undercloud:
+ - change-id: I2e0a40d7902f592e4b7bd727f57048111e0bea36
+ project: openstack/tripleo-common
diff --git a/apex/tests/config/dummy-deploy-settings.yaml b/apex/tests/config/dummy-deploy-settings.yaml
new file mode 100644
index 00000000..54890f38
--- /dev/null
+++ b/apex/tests/config/dummy-deploy-settings.yaml
@@ -0,0 +1,19 @@
+---
+global_params:
+ ha_enabled: false
+ patches:
+ undercloud:
+ - change-id: I2e0a40d7902f592e4b7bd727f57048111e0bea36
+ project: openstack/tripleo-common
+ overcloud:
+ - change-id: Ie988ba6a2d444a614e97c0edf5fce24b23970310
+ project: openstack/puppet-tripleo
+deploy_options:
+ containers: true
+ os_version: queens
+ sdn_controller: opendaylight
+ odl_version: oxygen
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/apex/tests/config/inventory-virt-1-compute-node.yaml b/apex/tests/config/inventory-virt-1-compute-node.yaml
new file mode 100644
index 00000000..4c2dc5d4
--- /dev/null
+++ b/apex/tests/config/inventory-virt-1-compute-node.yaml
@@ -0,0 +1,14 @@
+---
+nodes:
+ node0:
+ arch: x86_64
+ capabilities: profile:compute
+ cpu: 4
+ disk: 41
+ ipmi_ip: 192.168.122.1
+ ipmi_pass: password
+ ipmi_user: admin
+ mac_address: 00:a8:58:29:f9:99
+ memory: 10240
+ pm_port: 6230
+ pm_type: pxe_ipmitool
diff --git a/apex/tests/config/node.yaml b/apex/tests/config/node.yaml
new file mode 100644
index 00000000..e05644c9
--- /dev/null
+++ b/apex/tests/config/node.yaml
@@ -0,0 +1,12 @@
+---
+servers:
+ overcloud-controller-0.opnfvlf.org:
+ address: 192.0.2.28
+ orig-ctl-mac: 00:5b:06:25:0c:dc
+ ovs-controller: tcp:192.0.2.28:6653
+ ovs-managers:
+ - ptcp:6639:127.0.0.1
+ - tcp:192.0.2.28:6640
+ type: controller
+ user: heat-admin
+ vNode-name: baremetal0
diff --git a/apex/tests/config/snapshot.properties b/apex/tests/config/snapshot.properties
new file mode 100644
index 00000000..64c149e2
--- /dev/null
+++ b/apex/tests/config/snapshot.properties
@@ -0,0 +1,2 @@
+OPNFV_SNAP_URL=artifacts.opnfv.org/apex/master/noha/apex-csit-snap-2018-08-05.tar.gz
+OPNFV_SNAP_SHA512SUM=bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc236e36a74d05ee813584f3e5bb92aa23dec775846317b75d574f8c86186c666f78a299c24fb68849897bdd4bc
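
A sketch of reading this properties file into a dict;
apex.common.utils.fetch_properties is assumed to perform a similar
KEY=VALUE parse:

    def parse_properties(path):
        props = {}
        with open(path) as fh:
            for line in fh:
                if '=' in line:
                    key, value = line.strip().split('=', 1)
                    props[key] = value
        return props

    props = parse_properties('apex/tests/config/snapshot.properties')
    print(props['OPNFV_SNAP_URL'])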
diff --git a/apex/tests/test_apex_build_utils.py b/apex/tests/test_apex_build_utils.py
index d9d542d6..36caaf1f 100644
--- a/apex/tests/test_apex_build_utils.py
+++ b/apex/tests/test_apex_build_utils.py
@@ -9,17 +9,20 @@
import argparse
import git
+import os
+import unittest
from mock import patch
from apex import build_utils
+from apex.tests import constants as con
from nose.tools import (
assert_is_instance,
assert_raises)
-class TestBuildUtils(object):
+class TestBuildUtils(unittest.TestCase):
@classmethod
def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@@ -165,3 +168,25 @@ class TestBuildUtils(object):
def test_main_debug(self, mock_get_parser):
with patch.object(build_utils.sys, 'argv', self.sys_argv_debug):
build_utils.main()
+
+ def test_strip_patch_sections(self):
+ with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+ dummy_patch = fh.read()
+ tmp_patch = build_utils.strip_patch_sections(dummy_patch)
+ self.assertNotRegex(tmp_patch, 'releasenotes')
+ self.assertNotRegex(tmp_patch, 'Minor update ODL steps')
+ self.assertNotRegex(tmp_patch, 'Steps of upgrade are as follows')
+ self.assertNotRegex(tmp_patch, 'Steps involved in level 2 update')
+
+ def test_is_path_in_patch(self):
+ with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+ dummy_patch = fh.read()
+ self.assertTrue(build_utils.is_path_in_patch(dummy_patch,
+ 'releasenotes/'))
+
+ def test_strip_no_patch_sections(self):
+ with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+ dummy_patch = fh.read()
+ tmp_patch = build_utils.strip_patch_sections(dummy_patch,
+ sections=[])
+ self.assertEqual(dummy_patch, tmp_patch)
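
A hedged usage sketch for the helpers these tests cover, run from an
Apex checkout (the default stripped sections are assumed to include
releasenotes, per the assertions above):

    from apex import build_utils

    with open('apex/tests/config/98faaca.diff') as fh:
        patch = fh.read()

    stripped = build_utils.strip_patch_sections(patch)
    assert 'releasenotes' not in stripped
    assert build_utils.is_path_in_patch(patch, 'releasenotes/')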
diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py
index c32f72c9..3ff95bb5 100644
--- a/apex/tests/test_apex_common_builder.py
+++ b/apex/tests/test_apex_common_builder.py
@@ -10,11 +10,22 @@
import unittest
from apex.builders import common_builder as c_builder
+from apex.builders import exceptions
from apex.common import constants as con
from mock import patch
from mock import mock_open
from mock import MagicMock
+DOCKER_YAML = {
+ 'resource_registry': {
+ 'OS::TripleO::Services::NovaApi': '../docker/services/nova-api.yaml',
+ 'OS::TripleO::Services::NovaConductor':
+ '../docker/services/nova-conductor.yaml'
+ }
+}
+
+a_mock_open = mock_open(read_data=None)
+
class TestCommonBuilder(unittest.TestCase):
@classmethod
@@ -39,13 +50,55 @@ class TestCommonBuilder(unittest.TestCase):
path = '/etc/puppet/modules/tripleo'
self.assertEquals(c_builder.project_to_path(project), path)
project = 'openstack/nova'
- path = '/usr/lib/python2.7/site-packages/nova'
+ path = '/usr/lib/python2.7/site-packages/'
self.assertEquals(c_builder.project_to_path(project), path)
+ def test_is_patch_promoted(self):
+ dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ self.assertTrue(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ con.DOCKERHUB_OOO))
+
+ def test_is_patch_promoted_docker(self):
+ dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ dummy_image = 'centos-binary-opendaylight'
+ self.assertTrue(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ con.DOCKERHUB_OOO,
+ docker_image=dummy_image))
+
+ def test_patch_not_promoted(self):
+ dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ self.assertFalse(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ con.DOCKERHUB_OOO))
+
+ def test_patch_not_promoted_docker(self):
+ dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ dummy_image = 'centos-binary-opendaylight'
+ self.assertFalse(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ con.DOCKERHUB_OOO,
+ docker_image=dummy_image))
+
+ def test_patch_not_promoted_and_not_merged(self):
+ dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
+ 'status': 'BLAH'}
+ self.assertFalse(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ con.DOCKERHUB_OOO))
+
@patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
@patch('apex.build_utils.get_patch')
@patch('apex.virtual.utils.virt_customize')
- def test_add_upstream_patches(self, mock_customize, mock_get_patch):
+ def test_add_upstream_patches(self, mock_customize, mock_get_patch,
+ mock_get_change, mock_is_patch_promoted):
mock_get_patch.return_value = None
change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
patches = [{
@@ -64,10 +117,116 @@ class TestCommonBuilder(unittest.TestCase):
{con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
project_path, patch_file)}]
mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = False
c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/')
mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
@patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_add_upstream_patches_docker_puppet(
+ self, mock_customize, mock_get_patch, mock_get_change,
+ mock_is_patch_promoted):
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/puppet-tripleo'
+ }]
+ project_path = '/etc/puppet/modules/tripleo'
+ patch_file = "{}.patch".format(change_id)
+ patch_file_path = "/dummytmp/{}".format(patch_file)
+ test_virt_ops = [
+ {con.VIRT_INSTALL: 'patch'},
+ {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+ project_path)},
+ {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+ project_path, patch_file)}]
+ mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = False
+ c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+ @patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
+ @patch('apex.builders.common_builder.project_to_docker_image')
+ @patch('apex.builders.overcloud_builder.build_dockerfile')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_add_upstream_patches_docker_python(
+ self, mock_customize, mock_get_patch, mock_build_docker_file,
+ mock_project2docker, mock_get_change, mock_is_patch_promoted):
+ mock_project2docker.return_value = ['NovaApi']
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/nova'
+ }]
+ mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = False
+ services = c_builder.add_upstream_patches(patches, 'dummy.qcow2',
+ '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ mock_customize.assert_not_called()
+ assert mock_build_docker_file.called
+ self.assertSetEqual(services, {'NovaApi'})
+
+ @patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
+ @patch('apex.builders.common_builder.project_to_docker_image')
+ @patch('apex.builders.overcloud_builder.build_dockerfile')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_not_add_upstream_patches_docker_python(
+ self, mock_customize, mock_get_patch, mock_build_docker_file,
+ mock_project2docker, mock_get_change, mock_is_patch_promoted):
+ # Test that the calls are not made when the patch is already merged and
+ # promoted
+ mock_project2docker.return_value = ['NovaApi']
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/nova'
+ }]
+ mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = True
+ services = c_builder.add_upstream_patches(patches, 'dummy.qcow2',
+ '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ mock_customize.assert_not_called()
+ mock_build_docker_file.assert_not_called()
+ assert len(services) == 0
+
+ @patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_not_upstream_patches_docker_puppet(
+ self, mock_customize, mock_get_patch, mock_get_change,
+ mock_is_patch_promoted):
+ # Test that the calls are not made when the patch is already merged and
+ # promoted
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/puppet-tripleo'
+ }]
+ mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = True
+ c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ mock_customize.assert_not_called()
+
+ @patch('builtins.open', mock_open())
@patch('apex.virtual.utils.virt_customize')
def test_add_repo(self, mock_customize):
c_builder.add_repo('fake/url', 'dummyrepo', 'dummy.qcow2',
@@ -85,3 +244,67 @@ class TestCommonBuilder(unittest.TestCase):
self.assertEqual(c_builder.create_git_archive('fake/url', 'dummyrepo',
'/dummytmp/'),
'/dummytmp/dummyrepo.tar')
+
+ def test_project_to_docker_image(self):
+ found_services = c_builder.project_to_docker_image('nova',
+ con.DOCKERHUB_OOO)
+ assert 'nova-api' in found_services
+
+ @patch('apex.common.utils.open_webpage')
+ def test_project_to_docker_image_bad_web_content(
+ self, mock_open_web):
+ mock_open_web.return_value = b'{"blah": "blah"}'
+ self.assertRaises(exceptions.ApexCommonBuilderException,
+ c_builder.project_to_docker_image,
+ 'nova',
+ con.DOCKERHUB_OOO)
+
+ def test_get_neutron_driver(self):
+ ds_opts = {'dataplane': 'fdio',
+ 'sdn_controller': 'opendaylight',
+ 'odl_version': 'master',
+ 'vpn': False,
+ 'sriov': False}
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'odl')
+ ds_opts['sdn_controller'] = None
+ ds_opts['vpp'] = True
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'vpp')
+ ds_opts['sdn_controller'] = 'ovn'
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'ovn')
+
+ @patch('apex.builders.common_builder.yaml')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', a_mock_open, create=True)
+ def test_prepare_container_images(self, mock_is_file, mock_yaml):
+ mock_yaml.safe_load.return_value = {
+ 'parameter_defaults': {
+ 'ContainerImagePrepare': [
+ {'set':
+ {'namespace': 'blah',
+ 'neutron_driver': 'null',
+ }
+ }
+ ]
+ }
+ }
+ expected_output = {
+ 'parameter_defaults': {
+ 'ContainerImagePrepare': [
+ {'set':
+ {'namespace': 'docker.io/tripleoqueens',
+ 'neutron_driver': 'odl',
+ }
+ }
+ ]
+ }
+ }
+
+ c_builder.prepare_container_images('dummy.yaml', 'queens',
+ 'odl')
+ mock_yaml.safe_dump.assert_called_with(
+ expected_output,
+ a_mock_open.return_value,
+ default_flow_style=False)
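
A sketch of the transformation test_prepare_container_images expects,
assuming the real helper does an equivalent YAML round-trip on the
prep environment file:

    import yaml

    def prepare_container_images(env_file, os_version, neutron_driver):
        with open(env_file) as fh:
            data = yaml.safe_load(fh)
        for entry in data['parameter_defaults']['ContainerImagePrepare']:
            entry['set']['namespace'] = 'docker.io/tripleo{}'.format(os_version)
            entry['set']['neutron_driver'] = neutron_driver
        with open(env_file, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)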
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index 6f2a9476..1ecb7df6 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -12,12 +12,14 @@ import os
import shutil
import urllib.error
+from apex.common import exceptions
from apex.common import utils
from apex.settings.network_settings import NetworkSettings
from apex.tests.constants import (
TEST_CONFIG_DIR,
TEST_PLAYBOOK_DIR)
+from mock import patch, mock_open
from nose.tools import (
assert_equal,
assert_is_instance,
@@ -25,6 +27,7 @@ from nose.tools import (
assert_raises)
NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
+a_mock_open = mock_open(read_data=None)
class TestCommonUtils:
@@ -61,8 +64,11 @@ class TestCommonUtils:
def test_run_ansible(self):
playbook = 'apex/tests/playbooks/test_playbook.yaml'
+ extra_vars = [{'testvar1': 'value1', 'testvar2': 'value2'}]
assert_equal(utils.run_ansible(None, os.path.join(playbook),
dry_run=True), None)
+ assert_equal(utils.run_ansible(extra_vars, os.path.join(playbook),
+ dry_run=True, host='1.1.1.1'), None)
def test_failed_run_ansible(self):
playbook = 'apex/tests/playbooks/test_failed_playbook.yaml'
@@ -78,7 +84,7 @@ class TestCommonUtils:
def test_fetch_upstream_previous_file(self):
test_file = 'overcloud-full.tar.md5'
- url = 'https://images.rdoproject.org/master/delorean/' \
+ url = 'https://images.rdoproject.org/master/rdo_trunk/' \
'current-tripleo/stable/'
os.makedirs('/tmp/fetch_test', exist_ok=True)
open("/tmp/fetch_test/{}".format(test_file), 'w').close()
@@ -100,3 +106,57 @@ class TestCommonUtils:
url, ['dummy_test.tar'])
assert os.path.isfile('/tmp/fetch_test/test.txt')
shutil.rmtree('/tmp/fetch_test')
+
+ def test_nofetch_upstream_and_unpack(self):
+ test_file = 'overcloud-full.tar.md5'
+ url = 'https://images.rdoproject.org/master/rdo_trunk/' \
+ 'current-tripleo/stable/'
+ os.makedirs('/tmp/fetch_test', exist_ok=True)
+ target = "/tmp/fetch_test/{}".format(test_file)
+ open(target, 'w').close()
+ target_mtime = os.path.getmtime(target)
+ utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+ url, [test_file], fetch=False)
+ post_target_mtime = os.path.getmtime(target)
+ shutil.rmtree('/tmp/fetch_test')
+ assert_equal(target_mtime, post_target_mtime)
+
+ def test_nofetch_upstream_and_unpack_no_target(self):
+ test_file = 'overcloud-full.tar.md5'
+ url = 'https://images.rdoproject.org/master/rdo_trunk/' \
+ 'current-tripleo/stable/'
+ utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+ url, [test_file])
+ assert os.path.isfile("/tmp/fetch_test/{}".format(test_file))
+ shutil.rmtree('/tmp/fetch_test')
+
+ def test_open_webpage(self):
+ output = utils.open_webpage('http://opnfv.org')
+ assert output is not None
+
+ def test_open_invalid_webpage(self):
+ assert_raises(exceptions.FetchException, utils.open_webpage,
+ 'http://inv4lIdweb-page.com')
+
+ @patch('builtins.open', a_mock_open)
+ @patch('yaml.safe_dump')
+ @patch('yaml.safe_load')
+ def test_edit_tht_env(self, mock_yaml_load, mock_yaml_dump):
+ settings = {'SomeParameter': 'some_value'}
+ mock_yaml_load.return_value = {
+ 'parameter_defaults': {'SomeParameter': 'dummy'}
+ }
+ utils.edit_tht_env('/dummy-environment.yaml', 'parameter_defaults',
+ settings)
+ new_data = {'parameter_defaults': settings}
+ mock_yaml_dump.assert_called_once_with(new_data, a_mock_open(),
+ default_flow_style=False)
+
+ def test_unique(self):
+ dummy_list = [1, 2, 1, 3, 4, 5, 5]
+ assert_equal(utils.unique(dummy_list), [1, 2, 3, 4, 5])
+
+ def test_find_container_client(self):
+ for version in 'rocky', 'queens':
+ assert_equal(utils.find_container_client(version), 'docker')
+ assert_equal(utils.find_container_client('master'), 'podman')
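
A sketch of the version-to-client mapping the last test implies (the
master-to-podman cutover is inferred from the assertions, not from the
implementation):

    def find_container_client(os_version):
        if os_version == 'master':
            return 'podman'
        return 'docker'

    assert find_container_client('queens') == 'docker'
    assert find_container_client('rocky') == 'docker'
    assert find_container_client('master') == 'podman'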
diff --git a/apex/tests/test_apex_deploy.py b/apex/tests/test_apex_deploy.py
index 403b7099..004c21c1 100644
--- a/apex/tests/test_apex_deploy.py
+++ b/apex/tests/test_apex_deploy.py
@@ -8,6 +8,7 @@
##############################################################################
import argparse
+import os
import unittest
from mock import patch
@@ -17,12 +18,12 @@ from mock import mock_open
from apex.common.exceptions import ApexDeployException
from apex.common.constants import DEFAULT_OS_VERSION
-from apex.deploy import deploy_quickstart
from apex.deploy import validate_cross_settings
from apex.deploy import build_vms
from apex.deploy import create_deploy_parser
from apex.deploy import validate_deploy_args
from apex.deploy import main
+from apex.tests.constants import TEST_DUMMY_CONFIG
from nose.tools import (
assert_is_instance,
@@ -48,9 +49,6 @@ class TestDeploy(unittest.TestCase):
def teardown(self):
"""This method is run once after _each_ test method is executed"""
- def test_deloy_quickstart(self):
- deploy_quickstart(None, None, None)
-
def test_validate_cross_settings(self):
deploy_settings = {'deploy_options': {'dataplane': 'ovs'}}
net_settings = Mock()
@@ -85,12 +83,23 @@ class TestDeploy(unittest.TestCase):
args = Mock()
args.inventory_file = None
args.virtual = True
+ args.snapshot = False
+ validate_deploy_args(args)
+
+ def test_validate_snapshot_deploy_args(self):
+ args = Mock()
+ args.deploy_settings_file = os.path.join(TEST_DUMMY_CONFIG,
+ 'dummy-deploy-settings.yaml')
+ args.inventory_file = None
+ args.virtual = True
+ args.snapshot = True
validate_deploy_args(args)
def test_validate_deploy_args_no_virt_no_inv(self):
args = Mock()
args.inventory_file = 'file_name'
args.virtual = False
+ args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
@patch('apex.deploy.os.path')
@@ -99,14 +108,19 @@ class TestDeploy(unittest.TestCase):
args = Mock()
args.inventory_file = None
args.virtual = True
+ args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
def test_validate_deploy_args_virt_and_inv_file(self):
args = Mock()
args.inventory_file = 'file_name'
args.virtual = True
+ args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
+ @patch('apex.deploy.c_builder')
+ @patch('apex.deploy.ApexDeployment')
+ @patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@patch('apex.deploy.shutil')
@patch('apex.deploy.oc_deploy')
@@ -132,7 +146,8 @@ class TestDeploy(unittest.TestCase):
mock_deploy_sets, mock_net_sets, mock_net_env,
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
- mock_oc_deploy, mock_shutil, mock_network_data):
+ mock_oc_deploy, mock_shutil, mock_network_data,
+ mock_uc_builder, mock_deployment, mock_c_builder):
net_sets_dict = {'networks': MagicMock(),
'dns_servers': 'test'}
ds_opts_dict = {'global_params': MagicMock(),
@@ -142,13 +157,16 @@ class TestDeploy(unittest.TestCase):
'dataplane': 'ovs',
'sfc': False,
'vpn': False,
+ 'vim': 'openstack',
'yardstick': 'test',
- 'os_version': DEFAULT_OS_VERSION}}
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': False}}
args = mock_parser.return_value.parse_args.return_value
args.virtual = False
args.quickstart = False
args.debug = False
- args.upstream = False
+ args.snapshot = False
+ args.upstream = True
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['external']
net_sets.__getitem__.side_effect = net_sets_dict.__getitem__
@@ -159,6 +177,7 @@ class TestDeploy(unittest.TestCase):
mock_parsers.parse_nova_output.return_value = {'testnode1': 'test'}
main()
+ @patch('apex.deploy.SnapshotDeployment')
@patch('apex.deploy.validate_cross_settings')
@patch('apex.deploy.virt_utils')
@patch('apex.deploy.utils')
@@ -169,15 +188,19 @@ class TestDeploy(unittest.TestCase):
@patch('apex.deploy.os')
@patch('apex.deploy.create_deploy_parser')
@patch('builtins.open', a_mock_open, create=True)
- def test_main_qs(self, mock_parser, mock_os, mock_deploy,
- mock_net_sets, mock_net_env, mock_inv, mock_utils,
- mock_virt_utils, mock_cross):
+ def test_main_snapshot(self, mock_parser, mock_os, mock_deploy,
+ mock_net_sets, mock_net_env, mock_inv, mock_utils,
+ mock_virt_utils, mock_cross, mock_snap_deployment):
args = mock_parser.return_value.parse_args.return_value
args.virtual = False
- args.quickstart = True
+ args.snapshot = True
args.debug = True
main()
+ mock_snap_deployment.assert_called()
+ @patch('apex.deploy.c_builder')
+ @patch('apex.deploy.ApexDeployment')
+ @patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@patch('apex.deploy.shutil')
@patch('apex.deploy.oc_deploy')
@@ -203,7 +226,8 @@ class TestDeploy(unittest.TestCase):
mock_deploy_sets, mock_net_sets, mock_net_env,
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
- mock_oc_deploy, mock_shutil, mock_network_data):
+ mock_oc_deploy, mock_shutil, mock_network_data,
+ mock_uc_builder, mock_deployment, mock_c_builder):
# didn't work yet line 412
# net_sets_dict = {'networks': {'admin': {'cidr': MagicMock()}},
# 'dns_servers': 'test'}
@@ -215,8 +239,10 @@ class TestDeploy(unittest.TestCase):
'dataplane': 'ovs',
'sfc': False,
'vpn': False,
+ 'vim': 'openstack',
'yardstick': 'test',
- 'os_version': DEFAULT_OS_VERSION}}
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': False}}
args = mock_parser.return_value.parse_args.return_value
args.virtual = True
args.quickstart = False
@@ -226,7 +252,72 @@ class TestDeploy(unittest.TestCase):
args.virt_compute_nodes = 1
args.virt_compute_ram = None
args.virt_default_ram = 12
- args.upstream = False
+ args.upstream = True
+ args.snapshot = False
+ net_sets = mock_net_sets.return_value
+ net_sets.enabled_network_list = ['admin']
+ deploy_sets = mock_deploy_sets.return_value
+ deploy_sets.__getitem__.side_effect = ds_opts_dict.__getitem__
+ deploy_sets.__contains__.side_effect = ds_opts_dict.__contains__
+ main()
+ args.virt_compute_ram = 16
+ args.virt_default_ram = 10
+ main()
+
+ @patch('apex.deploy.ApexDeployment')
+ @patch('apex.deploy.c_builder')
+ @patch('apex.deploy.uc_builder')
+ @patch('apex.deploy.oc_builder')
+ @patch('apex.deploy.network_data.create_network_data')
+ @patch('apex.deploy.shutil')
+ @patch('apex.deploy.oc_deploy')
+ @patch('apex.deploy.uc_lib')
+ @patch('apex.deploy.build_vms')
+ @patch('apex.deploy.Inventory')
+ @patch('apex.deploy.virt_utils')
+ @patch('apex.deploy.oc_cfg')
+ @patch('apex.deploy.parsers')
+ @patch('apex.deploy.utils')
+ @patch('apex.deploy.NetworkEnvironment')
+ @patch('apex.deploy.NetworkSettings')
+ @patch('apex.deploy.DeploySettings')
+ @patch('apex.deploy.os')
+ @patch('apex.deploy.json')
+ @patch('apex.deploy.jumphost')
+ @patch('apex.deploy.validate_cross_settings')
+ @patch('apex.deploy.validate_deploy_args')
+ @patch('apex.deploy.create_deploy_parser')
+ @patch('builtins.open', a_mock_open, create=True)
+ def test_main_virt_containers_upstream(
+ self, mock_parser, mock_val_args, mock_cross_sets, mock_jumphost,
+ mock_json, mock_os, mock_deploy_sets, mock_net_sets, mock_net_env,
+ mock_utils, mock_parsers, mock_oc_cfg, mock_virt_utils,
+ mock_inv, mock_build_vms, mock_uc_lib, mock_oc_deploy,
+ mock_shutil, mock_network_data, mock_oc_builder,
+ mock_uc_builder, mock_c_builder, mock_deployment):
+
+ ds_opts_dict = {'global_params': MagicMock(),
+ 'deploy_options': {'gluon': False,
+ 'congress': False,
+ 'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs',
+ 'sfc': False,
+ 'vpn': False,
+ 'vim': 'openstack',
+ 'yardstick': 'test',
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': True}}
+ args = mock_parser.return_value.parse_args.return_value
+ args.virtual = True
+ args.quickstart = False
+ args.debug = True
+ args.virt_default_ram = 10
+ args.ha_enabled = True
+ args.virt_compute_nodes = 1
+ args.virt_compute_ram = None
+ args.virt_default_ram = 12
+ args.upstream = True
+ args.snapshot = False
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['admin']
deploy_sets = mock_deploy_sets.return_value
@@ -236,3 +327,67 @@ class TestDeploy(unittest.TestCase):
args.virt_compute_ram = 16
args.virt_default_ram = 10
main()
+ mock_oc_deploy.prep_image.assert_called()
+ # TODO(trozet) add assertions here with arguments for functions in
+ # deploy main
+
+ @patch('apex.deploy.c_builder')
+ @patch('apex.deploy.ApexDeployment')
+ @patch('apex.deploy.uc_builder')
+ @patch('apex.deploy.network_data.create_network_data')
+ @patch('apex.deploy.shutil')
+ @patch('apex.deploy.git')
+ @patch('apex.deploy.oc_deploy')
+ @patch('apex.deploy.uc_lib')
+ @patch('apex.deploy.build_vms')
+ @patch('apex.deploy.Inventory')
+ @patch('apex.deploy.virt_utils')
+ @patch('apex.deploy.oc_cfg')
+ @patch('apex.deploy.parsers')
+ @patch('apex.deploy.utils')
+ @patch('apex.deploy.NetworkEnvironment')
+ @patch('apex.deploy.NetworkSettings')
+ @patch('apex.deploy.DeploySettings')
+ @patch('apex.deploy.os')
+ @patch('apex.deploy.json')
+ @patch('apex.deploy.jumphost')
+ @patch('apex.deploy.validate_cross_settings')
+ @patch('apex.deploy.validate_deploy_args')
+ @patch('apex.deploy.create_deploy_parser')
+ @patch('builtins.open', a_mock_open, create=True)
+ def test_main_k8s(self, mock_parser, mock_val_args, mock_cross_sets,
+ mock_jumphost, mock_json, mock_os,
+ mock_deploy_sets, mock_net_sets, mock_net_env,
+ mock_utils, mock_parsers, mock_oc_cfg,
+ mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
+ mock_oc_deploy, mock_git, mock_shutil,
+ mock_network_data, mock_uc_builder, mock_deployment,
+ mock_c_builder):
+ net_sets_dict = {'networks': MagicMock(),
+ 'dns_servers': 'test'}
+ ds_opts_dict = {'global_params': MagicMock(),
+ 'deploy_options': {'gluon': False,
+ 'congress': True,
+ 'sdn_controller': False,
+ 'dataplane': 'ovs',
+ 'sfc': False,
+ 'vpn': False,
+ 'vim': 'k8s',
+ 'yardstick': 'test',
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': False}}
+ args = mock_parser.return_value.parse_args.return_value
+ args.virtual = False
+ args.quickstart = False
+ args.debug = False
+ args.upstream = False
+ args.snapshot = False
+ net_sets = mock_net_sets.return_value
+ net_sets.enabled_network_list = ['external']
+ net_sets.__getitem__.side_effect = net_sets_dict.__getitem__
+ net_sets.__contains__.side_effect = net_sets_dict.__contains__
+ deploy_sets = mock_deploy_sets.return_value
+ deploy_sets.__getitem__.side_effect = ds_opts_dict.__getitem__
+ deploy_sets.__contains__.side_effect = ds_opts_dict.__contains__
+ mock_parsers.parse_nova_output.return_value = {'testnode1': 'test'}
+ main()
diff --git a/apex/tests/test_apex_deployment_snapshot.py b/apex/tests/test_apex_deployment_snapshot.py
new file mode 100644
index 00000000..d7542585
--- /dev/null
+++ b/apex/tests/test_apex_deployment_snapshot.py
@@ -0,0 +1,374 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from mock import patch
+import os
+import unittest
+import urllib.request
+
+from apex.common import exceptions as exc
+from apex.deployment.snapshot import SnapshotDeployment
+from apex.settings.deploy_settings import DeploySettings
+from apex.tests.constants import TEST_DUMMY_CONFIG
+
+DUMMY_SNAP_DIR = '/tmp/dummy_cache'
+
+
+class TestSnapshotDeployment(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_init(self, mock_deploy_snap, mock_libvirt_open, mock_pull_snap):
+
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=True, all_in_one=False)
+ snap_dir = os.path.join(DUMMY_SNAP_DIR, 'queens', 'noha')
+ self.assertEqual(d.snap_cache_dir, snap_dir)
+ mock_pull_snap.assert_called()
+ mock_deploy_snap.assert_called()
+ self.assertEqual(d.ha_ext, 'noha')
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_init_allinone_no_fetch(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap):
+
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=True)
+ snap_dir = os.path.join(DUMMY_SNAP_DIR, 'queens', 'noha-allinone')
+ self.assertEqual(d.snap_cache_dir, snap_dir)
+ mock_pull_snap.assert_not_called()
+ mock_deploy_snap.assert_called()
+ self.assertEqual(d.ha_ext, 'noha-allinone')
+
+ @patch('apex.deployment.snapshot.utils.fetch_upstream_and_unpack')
+ @patch('apex.deployment.snapshot.utils.fetch_properties')
+ def test_pull_snapshot_is_latest(self, mock_fetch_props,
+ mock_fetch_artifact):
+ mock_fetch_props.return_value = {
+ 'OPNFV_SNAP_URL': 'artifacts.opnfv.org/apex/master/noha/'
+ 'apex-csit-snap-2018-08-05.tar.gz',
+ 'OPNFV_SNAP_SHA512SUM': 'bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
+ '6e36a74d05ee813584f3e5bb92aa23dec77584631'
+ '7b75d574f8c86186c666f78a299c24fb68849897b'
+ 'dd4bc'
+ }
+ SnapshotDeployment.pull_snapshot('http://dummy_url',
+ TEST_DUMMY_CONFIG)
+ mock_fetch_artifact.assert_not_called()
+
+ @patch('apex.deployment.snapshot.utils.fetch_upstream_and_unpack')
+ @patch('apex.deployment.snapshot.utils.fetch_properties')
+ def test_pull_snapshot_fetch_props_failure(self, mock_fetch_props,
+ mock_fetch_artifact):
+ mock_fetch_props.side_effect = exc.FetchException
+ self.assertRaises(exc.FetchException,
+ SnapshotDeployment.pull_snapshot,
+ 'http://dummy_url', TEST_DUMMY_CONFIG)
+
+ @patch('apex.deployment.snapshot.utils.fetch_upstream_and_unpack')
+ @patch('apex.deployment.snapshot.utils.fetch_properties')
+ def test_pull_snapshot_is_not_latest(self, mock_fetch_props,
+ mock_fetch_artifact):
+ mock_fetch_props.side_effect = [{
+ 'OPNFV_SNAP_URL': 'artifacts.opnfv.org/apex/master/noha/'
+ 'apex-csit-snap-2018-08-05.tar.gz',
+ 'OPNFV_SNAP_SHA512SUM': '123c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
+ '6e36a74d05ee813584f3e5bb92aa23dec77584631'
+ '7b75d574f8c86186c666f78a299c24fb68849897b'
+ 'dd4bc'},
+ {
+ 'OPNFV_SNAP_URL': 'artifacts.opnfv.org/apex/master/noha/'
+ 'apex-csit-snap-2018-08-05.tar.gz',
+ 'OPNFV_SNAP_SHA512SUM': 'bb0c6fa0e675dcb39cfad11d81bb99f309d5cfc23'
+ '6e36a74d05ee813584f3e5bb92aa23dec77584631'
+ '7b75d574f8c86186c666f78a299c24fb68849897b'
+ 'dd4bc'}]
+ SnapshotDeployment.pull_snapshot('http://dummy_url',
+ TEST_DUMMY_CONFIG)
+ mock_fetch_artifact.assert_called()
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_create_networks(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ conn = mock_libvirt_open('qemu:///system')
+ d.create_networks()
+ conn.networkCreateXML.assert_called()
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_create_networks_invalid_cache(self, mock_deploy_snap,
+ mock_libvirt_open, mock_pull_snap,
+ mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = '/doesnotexist/'
+ self.assertRaises(exc.SnapshotDeployException, d.create_networks)
+
+ @patch('apex.deployment.snapshot.fnmatch')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_create_networks_no_net_xmls(self, mock_deploy_snap,
+ mock_libvirt_open, mock_pull_snap,
+ mock_oc_node, mock_fnmatch):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = '/doesnotexist/'
+ mock_fnmatch.filter.return_value = []
+ self.assertRaises(exc.SnapshotDeployException, d.create_networks)
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_parse_and_create_nodes(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ d.parse_and_create_nodes()
+ node.start.assert_called()
+ self.assertListEqual([node], d.oc_nodes)
+
+ @patch('apex.deployment.snapshot.utils.parse_yaml')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_parse_and_create_nodes_invalid_node_yaml(
+ self, mock_deploy_snap, mock_libvirt_open, mock_pull_snap,
+ mock_oc_node, mock_parse_yaml):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ mock_parse_yaml.return_value = {'blah': 'dummy'}
+ self.assertRaises(exc.SnapshotDeployException,
+ d.parse_and_create_nodes)
+ node.start.assert_not_called()
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_get_controllers(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.role = 'controller'
+ d.oc_nodes = [node]
+ self.assertListEqual(d.get_controllers(), [node])
+
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_get_controllers_none(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.role = 'compute'
+ d.oc_nodes = [node]
+ self.assertListEqual(d.get_controllers(), [])
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.socket')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_openstack_up(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node, mock_socket,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ sock = mock_socket.socket(mock_socket.AF_INET, mock_socket.SOCK_STREAM)
+ sock.connect_ex.return_value = 0
+ self.assertTrue(d.is_service_up('openstack'))
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.socket')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_openstack_up_false(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node, mock_socket,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ sock = mock_socket.socket(mock_socket.AF_INET, mock_socket.SOCK_STREAM)
+ sock.connect_ex.return_value = 1
+ self.assertFalse(d.is_service_up('openstack'))
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.utils')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_opendaylight_up(self, mock_deploy_snap, mock_libvirt_open,
+ mock_pull_snap, mock_oc_node, mock_utils,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ mock_utils.open_webpage.return_value = 0
+ self.assertTrue(d.is_service_up('opendaylight'))
+
+ @patch('apex.deployment.snapshot.SnapshotDeployment.get_controllers')
+ @patch('apex.deployment.snapshot.time')
+ @patch('apex.deployment.snapshot.utils')
+ @patch('apex.deployment.snapshot.OvercloudNode')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.deploy_snapshot')
+ def test_is_opendaylight_up_false(self, mock_deploy_snap,
+ mock_libvirt_open, mock_pull_snap,
+ mock_oc_node, mock_utils,
+ mock_time, mock_get_ctrls):
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ d = SnapshotDeployment(deploy_settings=ds,
+ snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ d.snap_cache_dir = TEST_DUMMY_CONFIG
+ node = mock_oc_node()
+ node.ip = '123.123.123.123'
+ node.name = 'dummy-controller-0'
+ mock_get_ctrls.return_value = [node]
+ mock_utils.open_webpage.side_effect = urllib.request.URLError(
+ reason='blah')
+ self.assertFalse(d.is_service_up('opendaylight'))
+
+ @patch('apex.deployment.snapshot.os.path.isfile')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.is_service_up')
+ @patch('apex.deployment.snapshot.SnapshotDeployment'
+ '.parse_and_create_nodes')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.create_networks')
+ def test_deploy_snapshot(self, mock_create_networks, mock_libvirt_open,
+ mock_pull_snap, mock_parse_create,
+ mock_service_up, mock_is_file):
+ mock_is_file.return_value = True
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ SnapshotDeployment(deploy_settings=ds, snap_cache_dir=DUMMY_SNAP_DIR,
+ fetch=False, all_in_one=False)
+ mock_parse_create.assert_called()
+ mock_create_networks.assert_called()
+ mock_service_up.assert_called()
+
+ @patch('apex.deployment.snapshot.os.path.isfile')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.is_service_up')
+ @patch('apex.deployment.snapshot.SnapshotDeployment'
+ '.parse_and_create_nodes')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.pull_snapshot')
+ @patch('apex.deployment.snapshot.libvirt.open')
+ @patch('apex.deployment.snapshot.SnapshotDeployment.create_networks')
+ def test_deploy_snapshot_services_down(self, mock_create_networks,
+ mock_libvirt_open,
+ mock_pull_snap, mock_parse_create,
+ mock_service_up, mock_is_file):
+ mock_is_file.return_value = True
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ mock_service_up.return_value = False
+ self.assertRaises(exc.SnapshotDeployException,
+ SnapshotDeployment,
+ ds, DUMMY_SNAP_DIR, False, False)
+
+ mock_service_up.side_effect = [True, False]
+ self.assertRaises(exc.SnapshotDeployException,
+ SnapshotDeployment,
+ ds, DUMMY_SNAP_DIR, False, False)
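
The tests above drive SnapshotDeployment.is_service_up through both probe paths: a raw TCP check for 'openstack' (socket.connect_ex returning 0) and an HTTP check for 'opendaylight' (a URLError marking failure). As a reading aid only, here is a minimal sketch of the logic the mocks imply; the port numbers, URL path, and function name are assumptions, not Apex source.

    import socket
    import urllib.error
    import urllib.request

    def is_service_up_sketch(controllers, service):
        """Probe each controller for `service`; ports/URLs are guesses."""
        for node in controllers:
            if service == 'openstack':
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(5)
                # connect_ex returns 0 when the TCP port accepts connections
                if sock.connect_ex((node.ip, 5000)) != 0:
                    return False
            elif service == 'opendaylight':
                try:
                    # an unreachable REST endpoint raises URLError
                    urllib.request.urlopen(
                        'http://{}:8081'.format(node.ip), timeout=5)
                except urllib.error.URLError:
                    return False
        return True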
diff --git a/apex/tests/test_apex_deployment_tripleo.py b/apex/tests/test_apex_deployment_tripleo.py
new file mode 100644
index 00000000..912fe104
--- /dev/null
+++ b/apex/tests/test_apex_deployment_tripleo.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import unittest
+
+from apex.deployment.tripleo import ApexDeployment
+from apex.settings.deploy_settings import DeploySettings
+from apex.tests.constants import TEST_DUMMY_CONFIG
+
+
+class TestApexDeployment(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_determine_patches(self):
+ self.maxDiff = None
+ ds_file = os.path.join(TEST_DUMMY_CONFIG, 'dummy-deploy-settings.yaml')
+ ds = DeploySettings(ds_file)
+ patches_file = os.path.join(TEST_DUMMY_CONFIG, 'common-patches.yaml')
+ d = ApexDeployment(deploy_settings=ds, patch_file=patches_file,
+ ds_file=ds_file)
+ patches = d.determine_patches()
+ test_patches = {
+ 'undercloud':
+ [{'change-id': 'I2e0a40d7902f592e4b7bd727f57048111e0bea36',
+ 'project': 'openstack/tripleo-common'}],
+ 'overcloud':
+ [{'change-id': 'Ie988ba6a2d444a614e97c0edf5fce24b23970310',
+ 'project': 'openstack/puppet-tripleo'}]
+ }
+ self.assertDictEqual(patches, test_patches)
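
test_determine_patches expects a dict keyed by 'undercloud' and 'overcloud', each holding {'change-id', 'project'} entries read from common-patches.yaml. A hedged sketch of that shape, assuming the patches file maps the two roles directly to those lists (the real ApexDeployment may also merge deploy-settings overrides):

    import yaml

    def determine_patches_sketch(patch_file):
        # load the common patches file and guarantee both roles are present
        with open(patch_file) as fh:
            patches = yaml.safe_load(fh) or {}
        return {role: patches.get(role, [])
                for role in ('undercloud', 'overcloud')}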
diff --git a/apex/tests/test_apex_inventory.py b/apex/tests/test_apex_inventory.py
index 71979465..38a4271a 100644
--- a/apex/tests/test_apex_inventory.py
+++ b/apex/tests/test_apex_inventory.py
@@ -56,10 +56,15 @@ class TestInventory:
os.path.join(TEST_DUMMY_CONFIG, 'inventory-virt.yaml'),
virtual=True, ha=True)
+ def test_inventory_valid_allinone_count(self):
+ i = Inventory(os.path.join(TEST_DUMMY_CONFIG,
+ 'inventory-virt-1-node.yaml'), ha=False)
+ assert_equal(list(i.get_node_counts()), [1, 0])
+
def test_inventory_invalid_noha_count(self):
assert_raises(ApexInventoryException, Inventory,
os.path.join(TEST_DUMMY_CONFIG,
- 'inventory-virt-1-node.yaml'),
+ 'inventory-virt-1-compute-node.yaml'),
virtual=True, ha=False)
def test_inventory_virtual(self):
diff --git a/apex/tests/test_apex_network_environment.py b/apex/tests/test_apex_network_environment.py
index 79a72a55..7aa6ef15 100644
--- a/apex/tests/test_apex_network_environment.py
+++ b/apex/tests/test_apex_network_environment.py
@@ -165,3 +165,10 @@ class TestNetworkEnvironment:
e = NetworkEnvException("test")
print(e)
assert_is_instance(e, NetworkEnvException)
+
+ def test_service_netmap(self):
+ ns = copy(self.ns)
+ ns.enabled_network_list = ['admin']
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ for network in ne['parameter_defaults']['ServiceNetMap'].values():
+ assert_equal(network, 'ctlplane')
diff --git a/apex/tests/test_apex_network_settings.py b/apex/tests/test_apex_network_settings.py
index 5e2fa072..764c9ef4 100644
--- a/apex/tests/test_apex_network_settings.py
+++ b/apex/tests/test_apex_network_settings.py
@@ -112,6 +112,9 @@ class TestNetworkSettings:
# remove vlan from storage net
storage_net_nicmap['compute'].pop('vlan', None)
assert_is_instance(NetworkSettings(ns), NetworkSettings)
+ for role in ('compute', 'controller'):
+ assert_equal(ns['networks'][ADMIN_NETWORK]['nic_mapping'][
+ role]['vlan'], 'native')
# TODO
# need to manipulate interfaces some how
diff --git a/apex/tests/test_apex_overcloud_builder.py b/apex/tests/test_apex_overcloud_builder.py
index e9a6e6cf..8bed3d70 100644
--- a/apex/tests/test_apex_overcloud_builder.py
+++ b/apex/tests/test_apex_overcloud_builder.py
@@ -11,7 +11,9 @@ import unittest
from apex.builders import overcloud_builder as oc_builder
from apex.common import constants as con
-from mock import patch
+from mock import patch, mock_open
+
+a_mock_open = mock_open(read_data=None)
class TestOvercloudBuilder(unittest.TestCase):
@@ -37,14 +39,71 @@ class TestOvercloudBuilder(unittest.TestCase):
mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
archive = '/dummytmp/puppet-opendaylight.tar'
test_virt_ops = [
- {con.VIRT_INSTALL: 'opendaylight'},
{con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
{con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
- "puppet-opendaylight.tar"}
+ "puppet-opendaylight.tar"},
+ {con.VIRT_INSTALL: "java-1.8.0-openjdk"},
+ {con.VIRT_INSTALL: 'opendaylight'}
]
oc_builder.inject_opendaylight(con.DEFAULT_ODL_VERSION, 'dummy.qcow2',
- '/dummytmp/')
+ '/dummytmp/', uc_ip='192.0.2.2',
+ os_version=con.DEFAULT_OS_VERSION)
+ assert mock_git_archive.called
+ assert mock_add_repo.called
+ mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+ @patch('apex.builders.overcloud_builder.build_dockerfile')
+ @patch('apex.builders.common_builder.create_git_archive')
+ @patch('apex.builders.common_builder.add_repo')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_inject_opendaylight_docker(self, mock_customize, mock_add_repo,
+ mock_git_archive, mock_build_docker):
+ mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
+ archive = '/dummytmp/puppet-opendaylight.tar'
+ test_virt_ops = [
+ {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
+ {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
+ {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
+ "puppet-opendaylight.tar"},
+ {con.VIRT_INSTALL: "java-1.8.0-openjdk"},
+ ]
+ oc_builder.inject_opendaylight('oxygen', 'dummy.qcow2',
+ '/dummytmp/', uc_ip='192.0.2.2',
+ os_version=con.DEFAULT_OS_VERSION,
+ docker_tag='latest')
+ odl_url = "https://nexus.opendaylight.org/content/repositories" \
+ "/opendaylight-oxygen-epel-7-x86_64-devel/"
+ docker_cmds = [
+ "RUN yum remove opendaylight -y",
+ "RUN echo $'[opendaylight]\\n\\",
+ "baseurl={}\\n\\".format(odl_url),
+ "gpgcheck=0\\n\\",
+ "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
+ "RUN yum -y install opendaylight"
+ ]
+ src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
assert mock_git_archive.called
assert mock_add_repo.called
+        mock_build_docker.assert_called_once_with(
+            'opendaylight', '/dummytmp', docker_cmds, src_img_uri
+        )
mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+ @patch('builtins.open', a_mock_open)
+ @patch('os.makedirs')
+ @patch('os.path.isfile')
+ @patch('os.path.isdir')
+ def test_build_dockerfile(self, mock_isdir, mock_isfile, mock_makedirs):
+ src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
+ oc_builder.build_dockerfile('nova-api', '/tmpdummy/', ['RUN dummy'],
+ src_img_uri)
+ a_mock_open.assert_called_with(
+ '/tmpdummy/containers/nova-api/Dockerfile', 'a+')
+ a_mock_open().write.assert_called_once_with('RUN dummy')
+
+ @patch('tarfile.open')
+ @patch('os.path.isdir')
+ def test_archive_docker_patches(self, mock_isdir, mock_tarfile):
+ oc_builder.archive_docker_patches('/tmpdummy/')
+        assert mock_tarfile.called
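
test_build_dockerfile pins down the observable contract: the Dockerfile lives at <tmp_dir>/containers/<image>/Dockerfile, is opened with 'a+', and each command string is written to it. A minimal sketch consistent with those assertions; src_img_uri handling (e.g. a FROM line) is deliberately omitted because the test does not constrain it:

    import os

    def build_dockerfile_sketch(image, tmp_dir, docker_cmds, src_img_uri):
        dest_dir = os.path.join(tmp_dir, 'containers', image)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        # 'a+' so repeated calls for the same image append further lines;
        # the test only verifies each command string is written
        with open(os.path.join(dest_dir, 'Dockerfile'), 'a+') as fh:
            for cmd in docker_cmds:
                fh.write(cmd)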
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index 59e9048f..79dbf54b 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -7,6 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import mock
+import os
import sys
import unittest
@@ -24,8 +26,11 @@ from apex.overcloud.deploy import make_ssh_key
from apex.overcloud.deploy import prep_env
from apex.overcloud.deploy import generate_ceph_key
from apex.overcloud.deploy import prep_storage_env
+from apex.overcloud.deploy import prep_sriov_env
from apex.overcloud.deploy import external_network_cmds
from apex.overcloud.deploy import create_congress_cmds
+from apex.overcloud.deploy import SDN_FILE_MAP
+from apex.overcloud.deploy import get_docker_sdn_files
from nose.tools import (
assert_regexp_matches,
@@ -70,19 +75,41 @@ class TestOvercloudDeploy(unittest.TestCase):
res = '/usr/share/openstack-tripleo-heat-templates/environments/test'
assert_equal(build_sdn_env_list(ds, sdn_map), [res])
+ def test_build_sdn_env_list_with_string(self):
+ ds = {'sdn_controller': 'opendaylight',
+ 'sriov': 'xxx'}
+ prefix = '/usr/share/openstack-tripleo-heat-templates/environments'
+ res = [os.path.join(prefix, 'neutron-opendaylight.yaml'),
+ os.path.join(prefix, 'neutron-opendaylight-sriov.yaml')]
+ assert_equal(build_sdn_env_list(ds, SDN_FILE_MAP), res)
+
+ def test_build_sdn_env_list_with_default(self):
+ ds = {'sdn_controller': 'opendaylight',
+ 'vpn': True}
+ prefix = '/usr/share/openstack-tripleo-heat-templates/environments'
+ res = [os.path.join(prefix, 'neutron-opendaylight.yaml'),
+ os.path.join(prefix, 'neutron-bgpvpn-opendaylight.yaml')]
+ assert_equal(build_sdn_env_list(ds, SDN_FILE_MAP), res)
+
+ @patch('apex.overcloud.deploy.prep_sriov_env')
@patch('apex.overcloud.deploy.prep_storage_env')
@patch('apex.overcloud.deploy.build_sdn_env_list')
@patch('builtins.open', mock_open())
- def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage):
+ def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage,
+ mock_prep_sriov):
mock_sdn_list.return_value = []
- ds = {'deploy_options': MagicMock(),
+ ds = {'deploy_options':
+ {'ha_enabled': True,
+ 'congress': True,
+ 'tacker': True,
+ 'containers': False,
+ 'barometer': True,
+ 'ceph': False,
+ 'sriov': False,
+ 'vim': 'openstack'
+ },
'global_params': MagicMock()}
- ds['global_params'].__getitem__.side_effect = \
- lambda i: True if i == 'ha_enabled' else MagicMock()
- ds['deploy_options'].__getitem__.side_effect = \
- lambda i: True if i == 'congress' else MagicMock()
- ds['deploy_options'].__contains__.side_effect = \
- lambda i: True if i == 'congress' else MagicMock()
+
ns = {'ntp': ['ntp']}
inv = MagicMock()
inv.get_node_counts.return_value = (3, 2)
@@ -96,16 +123,63 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_in('--control-scale 3', result_cmd)
assert_in('--compute-scale 2', result_cmd)
+ @patch('apex.overcloud.deploy.prep_sriov_env')
+ @patch('apex.overcloud.deploy.prep_storage_env')
+ @patch('builtins.open', mock_open())
+ def test_create_deploy_cmd_containers_sdn(self, mock_prep_storage,
+ mock_prep_sriov):
+ ds = {'deploy_options':
+ {'ha_enabled': True,
+ 'congress': False,
+ 'tacker': False,
+ 'containers': True,
+ 'barometer': False,
+ 'vpn': False,
+ 'ceph': True,
+ 'sdn_controller': 'opendaylight',
+ 'sriov': False,
+ 'os_version': 'queens',
+ 'vim': 'openstack'
+ },
+ 'global_params': MagicMock()}
+
+ ns = {'ntp': ['ntp']}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
+ virt = True
+ result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
+ assert_in('--ntp-server ntp', result_cmd)
+ assert_not_in('enable_tacker.yaml', result_cmd)
+ assert_not_in('enable_congress.yaml', result_cmd)
+ assert_not_in('enable_barometer.yaml', result_cmd)
+ assert_in('virtual-environment.yaml', result_cmd)
+ assert_in('--control-scale 3', result_cmd)
+ assert_in('--compute-scale 2', result_cmd)
+ assert_in('docker-images.yaml', result_cmd)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+ '/docker.yaml', result_cmd)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments/'
+ 'storage-environment.yaml', result_cmd)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+ '/services/neutron-opendaylight.yaml', result_cmd)
+ ds['deploy_options']['os_version'] = 'master'
+ result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+ '/services/neutron-opendaylight.yaml', result_cmd)
+
+ @patch('apex.overcloud.deploy.prep_sriov_env')
@patch('apex.overcloud.deploy.prep_storage_env')
@patch('apex.overcloud.deploy.build_sdn_env_list')
@patch('builtins.open', mock_open())
def test_create_deploy_cmd_no_ha_bm(self, mock_sdn_list,
- mock_prep_storage):
+ mock_prep_storage, mock_prep_sriov):
mock_sdn_list.return_value = []
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
ds['global_params'].__getitem__.side_effect = \
lambda i: False if i == 'ha_enabled' else MagicMock()
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: 'master' if i == 'os_version' else MagicMock()
ns = {'ntp': ['ntp']}
inv = MagicMock()
inv.get_node_counts.return_value = (3, 2)
@@ -119,57 +193,61 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_not_in('enable_congress.yaml', result_cmd)
assert_not_in('enable_barometer.yaml', result_cmd)
- @patch('apex.overcloud.deploy.prep_storage_env')
- @patch('apex.overcloud.deploy.build_sdn_env_list')
- def test_create_deploy_cmd_raises(self, mock_sdn_list, mock_prep_storage):
- mock_sdn_list.return_value = []
- ds = {'deploy_options': MagicMock(),
- 'global_params': MagicMock()}
- ns = {}
- inv = MagicMock()
- inv.get_node_counts.return_value = (0, 0)
- virt = False
- assert_raises(ApexDeployException, create_deploy_cmd,
- ds, ns, inv, '/tmp', virt)
-
+ @patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
- @patch('apex.overcloud.deploy.os.path')
+ @patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
- def test_prep_image(self, mock_os_path, mock_shutil, mock_virt_utils):
+ def test_prep_image(self, mock_is_file, mock_shutil, mock_virt_utils,
+ mock_inject_odl):
+ mock_is_file.return_value = True
ds_opts = {'dataplane': 'fdio',
'sdn_controller': 'opendaylight',
- 'odl_version': 'master'}
+ 'odl_version': 'master',
+ 'vpn': False,
+ 'sriov': False}
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ mock_inject_odl.assert_called()
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
- @patch('apex.overcloud.deploy.os.path')
+ @patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
- def test_prep_image_sdn_false(self, mock_os_path, mock_shutil,
+ def test_prep_image_sdn_false(self, mock_is_file, mock_shutil,
mock_virt_utils):
+ mock_is_file.return_value = True
ds_opts = {'dataplane': 'fdio',
+ 'vpn': False,
'sdn_controller': False}
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
+ @patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
+ @patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
- @patch('apex.overcloud.deploy.os.path')
+ @patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
- def test_prep_image_sdn_odl(self, mock_os_path, mock_shutil,
- mock_virt_utils):
+ def test_prep_image_sdn_odl(self, mock_is_file, mock_shutil,
+ mock_virt_utils, mock_inject_odl,
+ mock_fetch, mock_ovs_nsh):
+ mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
+ 'vpn': False,
+ 'sfc': False,
'odl_version': con.DEFAULT_ODL_VERSION,
'odl_vpp_netvirt': True}
ds = {'deploy_options': MagicMock(),
@@ -178,15 +256,80 @@ class TestOvercloudDeploy(unittest.TestCase):
lambda i: ds_opts.get(i, MagicMock())
ds['deploy_options'].__contains__.side_effect = \
lambda i: True if i in ds_opts else MagicMock()
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ mock_inject_odl.assert_called()
+ # mock_ovs_nsh.assert_called()
+ @patch('apex.overcloud.deploy.c_builder')
+ @patch('apex.overcloud.deploy.oc_builder')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
- @patch('apex.overcloud.deploy.os.path')
+ @patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
- def test_prep_image_sdn_odl_not_def(self, mock_os_path,
- mock_shutil, mock_virt_utils):
+ def test_prep_image_sdn_odl_upstream_containers_patches(
+ self, mock_is_file, mock_shutil, mock_virt_utils,
+ mock_oc_builder, mock_c_builder):
+ mock_is_file.return_value = True
+ ds_opts = {'dataplane': 'ovs',
+ 'sdn_controller': 'opendaylight',
+ 'odl_version': con.DEFAULT_ODL_VERSION,
+ 'odl_vpp_netvirt': True}
+ ds = {'deploy_options': MagicMock(),
+ 'global_params': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ ds['deploy_options'].__contains__.side_effect = \
+ lambda i: True if i in ds_opts else MagicMock()
+ ns = MagicMock()
+ mock_c_builder.add_upstream_patches.return_value = ['nova-api']
+ patches = ['dummy_nova_patch']
+ rv = prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test',
+ docker_tag='latest', patches=patches)
+ mock_oc_builder.inject_opendaylight.assert_called()
+ mock_virt_utils.virt_customize.assert_called()
+ mock_c_builder.add_upstream_patches.assert_called()
+ self.assertListEqual(sorted(rv), ['nova-api', 'opendaylight'])
+
+ @patch('apex.overcloud.deploy.c_builder')
+ @patch('apex.overcloud.deploy.oc_builder')
+ @patch('apex.overcloud.deploy.virt_utils')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_image_nosdn_upstream_containers_patches(
+ self, mock_is_file, mock_shutil, mock_virt_utils,
+ mock_oc_builder, mock_c_builder):
+ mock_is_file.return_value = True
+ ds_opts = {'dataplane': 'ovs',
+ 'sdn_controller': False,
+ 'odl_version': con.DEFAULT_ODL_VERSION,
+ 'odl_vpp_netvirt': False}
+ ds = {'deploy_options': MagicMock(),
+ 'global_params': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ ds['deploy_options'].__contains__.side_effect = \
+ lambda i: True if i in ds_opts else MagicMock()
+ ns = MagicMock()
+ mock_c_builder.add_upstream_patches.return_value = ['nova-api']
+ patches = ['dummy_nova_patch']
+ rv = prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test',
+ docker_tag='latest', patches=patches)
+ mock_virt_utils.virt_customize.assert_called()
+ mock_c_builder.add_upstream_patches.assert_called()
+ self.assertListEqual(sorted(rv), ['nova-api'])
+
+ @patch('apex.overcloud.deploy.oc_builder')
+ @patch('apex.overcloud.deploy.virt_utils')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_image_sdn_odl_not_def(self, mock_is_file,
+ mock_shutil, mock_virt_utils,
+ mock_oc_builder):
+ mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
'odl_version': 'uncommon'}
@@ -194,71 +337,152 @@ class TestOvercloudDeploy(unittest.TestCase):
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ mock_oc_builder.inject_opendaylight.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
- @patch('apex.overcloud.deploy.os.path')
+ @patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
- def test_prep_image_sdn_ovn(self, mock_os_path, mock_shutil,
- mock_virt_utils):
+ def test_prep_image_sdn_ovn(self, mock_is_file, mock_shutil,
+ mock_virt_utils, mock_ovs_nsh):
+ mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
+ 'vpn': False,
+ 'sfc': False,
'sdn_controller': 'ovn'}
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ # mock_ovs_nsh.assert_called()
+
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
+ @patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
+ @patch('apex.builders.overcloud_builder.inject_quagga')
+ @patch('apex.builders.overcloud_builder.inject_opendaylight')
+ @patch('apex.overcloud.deploy.virt_utils')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_image_sdn_odl_vpn(self, mock_is_file, mock_shutil,
+ mock_virt_utils, mock_inject_odl,
+ mock_inject_quagga, mock_fetch,
+ mock_ovs_nsh):
+ mock_is_file.return_value = True
+ ds_opts = {'dataplane': 'ovs',
+ 'sdn_controller': 'opendaylight',
+ 'vpn': True,
+ 'sfc': False,
+ 'odl_version': con.DEFAULT_ODL_VERSION,
+ 'odl_vpp_netvirt': True}
+ ds = {'deploy_options': MagicMock(),
+ 'global_params': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ ds['deploy_options'].__contains__.side_effect = \
+ lambda i: True if i in ds_opts else MagicMock()
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
+ mock_virt_utils.virt_customize.assert_called()
+ mock_inject_odl.assert_called()
+ mock_inject_quagga.assert_called()
+ # mock_ovs_nsh.assert_called()
+
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
+ @patch('apex.builders.overcloud_builder.inject_opendaylight')
+ @patch('apex.overcloud.deploy.virt_utils')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_image_sdn_odl_sfc(self, mock_is_file, mock_shutil,
+ mock_virt_utils, mock_inject_odl,
+ mock_inject_ovs_nsh):
+ mock_is_file.return_value = True
+ ds_opts = {'dataplane': 'ovs',
+ 'sdn_controller': 'opendaylight',
+ 'vpn': False,
+ 'sfc': True,
+ 'odl_version': con.DEFAULT_ODL_VERSION,
+ 'odl_vpp_netvirt': True}
+ ds = {'deploy_options': MagicMock(),
+ 'global_params': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ ds['deploy_options'].__contains__.side_effect = \
+ lambda i: True if i in ds_opts else MagicMock()
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
+ mock_virt_utils.virt_customize.assert_called()
+ mock_inject_odl.assert_called()
+ # mock_inject_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.os.path.isfile')
def test_prep_image_no_image(self, mock_isfile):
mock_isfile.return_value = False
assert_raises(ApexDeployException, prep_image,
- {}, 'undercloud.qcow2', '/tmp')
+ {}, {}, 'undercloud.qcow2', '/tmp')
def test_make_ssh_key(self):
priv, pub = make_ssh_key()
assert_in('-----BEGIN PRIVATE KEY-----', priv)
assert_in('ssh-rsa', pub)
+ @patch('apex.overcloud.deploy.yaml')
@patch('apex.overcloud.deploy.fileinput')
@patch('apex.overcloud.deploy.shutil')
- def test_prep_env(self, mock_shutil, mock_fileinput):
+ @patch('builtins.open', mock_open())
+ def test_prep_env(self, mock_shutil, mock_fileinput, mock_yaml):
mock_fileinput.input.return_value = \
['CloudDomain', 'replace_private_key', 'replace_public_key',
'opendaylight::vpp_routing_node', 'ControllerExtraConfig',
'NovaComputeExtraConfig', 'ComputeKernelArgs', 'HostCpusList',
'ComputeExtraConfigPre', 'resource_registry',
'NovaSchedulerDefaultFilters']
- ds = {'deploy_options':
+ mock_yaml.safe_load.return_value = {
+ 'parameter_defaults': {
+ 'ControllerServices': [1, 2, 3],
+ 'ComputeServices': [3, 4, 5]
+ }}
+ ds = {'global_params': {'ha_enabled': False},
+ 'deploy_options':
{'sdn_controller': 'opendaylight',
'odl_vpp_routing_node': 'test',
'dataplane': 'ovs_dpdk',
+ 'sriov': 'xxx',
'performance': {'Compute': {'vpp': {'main-core': 'test',
'corelist-workers': 'test'},
'ovs': {'dpdk_cores': 'test'},
'kernel': {'test': 'test'}},
'Controller': {'vpp': 'test'}}}}
- ns = {'domain_name': 'test.domain',
- 'networks':
- {'tenant':
- {'nic_mapping': {'controller':
- {'members': ['tenant_nic']},
- 'compute':
- {'members': ['tenant_nic']}}},
- 'external':
- [{'nic_mapping': {'controller':
- {'members': ['ext_nic']},
- 'compute':
- {'members': ['ext_nic']}}}]}}
- inv = None
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}}},
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (1, 0)
try:
# Swap stdout
saved_stdout = sys.stdout
out = StringIO()
sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
@@ -266,42 +490,56 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_in('ssh-rsa', output)
assert_in('ComputeKernelArgs: \'test=test \'', output)
assert_in('fdio::vpp_cpu_main_core: \'test\'', output)
+ mock_yaml.safe_dump.assert_called_with(
+ {'parameter_defaults': {
+ 'ControllerServices': [1, 2, 3, 4, 5],
+ }},
+ mock.ANY, default_flow_style=False
+ )
finally:
# put stdout back
sys.stdout = saved_stdout
@patch('apex.overcloud.deploy.fileinput')
@patch('apex.overcloud.deploy.shutil')
+ @patch('builtins.open', mock_open())
def test_prep_env_round_two(self, mock_shutil, mock_fileinput):
mock_fileinput.input.return_value = \
['NeutronVPPAgentPhysnets']
- ds = {'deploy_options':
+ ds = {'global_params': {'ha_enabled': False},
+ 'deploy_options':
{'sdn_controller': False,
'dataplane': 'fdio',
+ 'sriov': 'xxx',
'performance': {'Compute': {},
'Controller': {}}}}
- ns = {'domain_name': 'test.domain',
- 'networks':
- {'tenant':
- {'nic_mapping': {'controller':
- {'members': ['tenant_nic']},
- 'compute':
- {'members': ['tenant_nic']}}},
- 'external':
- [{'nic_mapping': {'controller':
- {'members': ['ext_nic']},
- 'compute':
- {'members': ['ext_nic']}}}]}}
- inv = None
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}}},
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
try:
# Swap stdout
saved_stdout = sys.stdout
out = StringIO()
sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
- assert_in('NeutronVPPAgentPhysnets: \'datacentre:tenant_nic\'',
+ assert_in('NeutronVPPAgentPhysnets: '
+ '\'datacentre:tenant_nic,external:tap0\'',
output)
assert_in('NeutronVPPAgentPhysnets', output)
finally:
@@ -310,26 +548,29 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('apex.overcloud.deploy.fileinput')
@patch('apex.overcloud.deploy.shutil')
+ @patch('builtins.open', mock_open())
def test_prep_env_round_three(self, mock_shutil, mock_fileinput):
mock_fileinput.input.return_value = \
['OS::TripleO::Services::NeutronDhcpAgent',
'NeutronDhcpAgentsPerNetwork', 'ComputeServices']
- ds = {'deploy_options':
+ ds = {'global_params': {'ha_enabled': False},
+ 'deploy_options':
{'sdn_controller': 'opendaylight',
'dataplane': 'fdio',
+ 'sriov': 'xxx',
'dvr': True}}
- ns = {'domain_name': 'test.domain',
- 'networks':
- {'tenant':
- {'nic_mapping': {'controller':
- {'members': ['tenant_nic']},
- 'compute':
- {'members': ['tenant_nic']}}},
- 'external':
- [{'nic_mapping': {'controller':
- {'members': ['ext_nic']},
- 'compute':
- {'members': ['ext_nic']}}}]}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}}},
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
inv = MagicMock()
inv.get_node_counts.return_value = (3, 2)
try:
@@ -337,6 +578,9 @@ class TestOvercloudDeploy(unittest.TestCase):
saved_stdout = sys.stdout
out = StringIO()
sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
@@ -345,6 +589,111 @@ class TestOvercloudDeploy(unittest.TestCase):
# put stdout back
sys.stdout = saved_stdout
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('builtins.open', mock_open())
+ def test_prep_env_tenant_vlan(self, mock_shutil, mock_fileinput):
+ mock_fileinput.input.return_value = \
+ ['NeutronNetworkVLANRanges',
+ 'NeutronNetworkType', 'NeutronBridgeMappings']
+ ds = {'global_params': {'ha_enabled': False},
+ 'deploy_options':
+ {'sdn_controller': False,
+ 'dataplane': 'ovs',
+ 'sriov': 'xxx',
+ 'dvr': True}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}},
+ 'segmentation_type': 'vlan',
+ 'overlay_id_range': 'vlan:500:600'
+ },
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
+ try:
+ # Swap stdout
+ saved_stdout = sys.stdout
+ out = StringIO()
+ sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
+ # run test
+ prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
+ output = out.getvalue().strip()
+ assert_in('NeutronNetworkVLANRanges: '
+ 'vlan:500:600,datacentre:1:1000', output)
+ assert_in('NeutronNetworkType: vlan', output)
+ assert_in('NeutronBridgeMappings: '
+ 'vlan:br-vlan,datacentre:br-ex', output)
+ assert_not_in('OpenDaylightProviderMappings', output)
+ finally:
+ # put stdout back
+ sys.stdout = saved_stdout
+
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('builtins.open', mock_open())
+ def test_prep_env_tenant_vlan_odl(self, mock_shutil, mock_fileinput):
+ mock_fileinput.input.return_value = \
+ ['NeutronNetworkVLANRanges',
+ 'NeutronNetworkType',
+ 'NeutronBridgeMappings',
+ 'OpenDaylightProviderMappings']
+ ds = {'global_params': {'ha_enabled': False},
+ 'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs',
+ 'sriov': 'xxx',
+ 'dvr': True}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}},
+ 'segmentation_type': 'vlan',
+ 'overlay_id_range': 'vlan:500:600'
+ },
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
+ try:
+ # Swap stdout
+ saved_stdout = sys.stdout
+ out = StringIO()
+ sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
+ # run test
+ prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
+ output = out.getvalue().strip()
+ assert_in('NeutronNetworkVLANRanges: '
+ 'vlan:500:600,datacentre:1:1000', output)
+ assert_in('NeutronNetworkType: vlan', output)
+ assert_in('NeutronBridgeMappings: '
+ 'vlan:br-vlan,datacentre:br-ex', output)
+ assert_in('OpenDaylightProviderMappings: '
+ 'vlan:br-vlan,datacentre:br-ex', output)
+ finally:
+ # put stdout back
+ sys.stdout = saved_stdout
+
def test_generate_ceph_key(self):
assert_equal(len(generate_ceph_key()), 40)
@@ -356,21 +705,109 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_ceph_key):
mock_fileinput.input.return_value = \
['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
- ds = {'deploy_options': MagicMock()}
- ds['deploy_options'].__getitem__.side_effect = \
- lambda i: '/dev/sdx' if i == 'ceph_device' else MagicMock()
- ds['deploy_options'].__contains__.side_effect = \
- lambda i: True if i == 'ceph_device' else MagicMock()
- prep_storage_env(ds, '/tmp')
+ ds = {'deploy_options': {
+ 'ceph_device': '/dev/sdx',
+ 'containers': False
+ }}
+ ns = {}
+ prep_storage_env(ds, ns, virtual=False, tmp_dir='/tmp')
+
+ @patch('apex.overcloud.deploy.utils.edit_tht_env')
+ @patch('apex.overcloud.deploy.generate_ceph_key')
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_storage_env_containers(self, mock_isfile, mock_fileinput,
+ mock_ceph_key, mock_edit_tht):
+ mock_fileinput.input.return_value = \
+ ['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
+ ds = {'deploy_options': {
+ 'ceph_device': '/dev/sdx',
+ 'containers': True,
+ 'os_version': 'master'
+ }, 'global_params': {'ha_enabled': False}}
+ ns = {'networks': {con.ADMIN_NETWORK: {'installer_vm':
+ {'ip': '192.0.2.1'}}}
+ }
+ prep_storage_env(ds, ns, virtual=True, tmp_dir='/tmp')
+ ceph_params = {
+ 'CephPoolDefaultSize': 2,
+ 'CephAnsibleExtraConfig': {
+ 'centos_package_dependencies': [],
+ 'ceph_osd_docker_memory_limit': '1g',
+ 'ceph_mds_docker_memory_limit': '1g'
+ },
+ 'CephPoolDefaultPgNum': 32,
+ 'CephAnsibleDisksConfig': {
+ 'devices': ['/dev/sdx'],
+ 'journal_size': 512,
+ 'osd_scenario': 'collocated'
+ }
+ }
+ mock_edit_tht.assert_called_with('/tmp/storage-environment.yaml',
+ 'parameter_defaults',
+ ceph_params)
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_storage_env_raises(self, mock_isfile):
mock_isfile.return_value = False
ds = {'deploy_options': MagicMock()}
- assert_raises(ApexDeployException, prep_storage_env, ds, '/tmp')
+ ns = {}
+ assert_raises(ApexDeployException, prep_storage_env, ds,
+ ns, virtual=False, tmp_dir='/tmp')
+
+ @patch('apex.overcloud.deploy.generate_ceph_key')
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_sriov_env(self, mock_isfile, mock_fileinput, mock_ceph_key):
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'sriov': 'xxx'}}
+ try:
+ # Swap stdout
+ saved_stdout = sys.stdout
+ out = StringIO()
+ sys.stdout = out
+ # Run tests
+ mock_fileinput.input.return_value = \
+ ['# NovaSchedulerDefaultFilters',
+ '# NovaSchedulerAvailableFilters',
+ '#NeutronPhysicalDevMappings: "datacentre:ens20f2"',
+ '#NeutronSriovNumVFs: \"ens20f2:5\"',
+ '#NovaPCIPassthrough:',
+ '# - devname: \"ens20f2\"',
+ '# physical_network: \"datacentre\"']
+ prep_sriov_env(ds, '/tmp')
+ output = out.getvalue().strip()
+ assert_in('NovaSchedulerDefaultFilters', output)
+ assert_in('NovaSchedulerAvailableFilters', output)
+ assert_in('NeutronPhysicalDevMappings: \"nfv_sriov:xxx\"', output)
+ assert_in('NeutronSriovNumVFs: \"xxx:8\"', output)
+ assert_in('NovaPCIPassthrough:', output)
+ assert_in('- devname: \"xxx\"', output)
+ assert_in('physical_network: \"nfv_sriov\"', output)
+ finally:
+ # put stdout back
+ sys.stdout = saved_stdout
+
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_sriov_env_raises(self, mock_isfile):
+ ds_opts = {'sriov': True}
+ ds = {'deploy_options': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ mock_isfile.return_value = False
+ assert_raises(ApexDeployException, prep_sriov_env, ds, '/tmp')
def test_external_network_cmds(self):
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs'}}
+
cidr = MagicMock()
cidr.version = 6
ns_dict = {'networks':
@@ -382,13 +819,41 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
ns.enabled_network_list = ['external']
ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
- cmds = ' '.join(external_network_cmds(ns))
+ cmds = ' '.join(external_network_cmds(ns, ds))
assert_in('--external', cmds)
assert_in('--allocation-pool start=0,end=1', cmds)
assert_in('--gateway gw', cmds)
assert_in('--network external', cmds)
+ assert_in('--provider-physical-network datacentre', cmds)
+
+ def test_external_network_cmds_nosdn_fdio(self):
+ ds = {'deploy_options':
+ {'sdn_controller': False,
+ 'dataplane': 'fdio'}}
+
+ cidr = MagicMock()
+ cidr.version = 6
+ ns_dict = {'networks':
+ {'external': [{'floating_ip_range': (0, 1),
+ 'nic_mapping':
+ {'compute': {'vlan': 'native'}},
+ 'gateway': 'gw',
+ 'cidr': cidr}]}}
+ ns = MagicMock()
+ ns.enabled_network_list = ['external']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
+ cmds = ' '.join(external_network_cmds(ns, ds))
+ assert_in('--external', cmds)
+ assert_in('--allocation-pool start=0,end=1', cmds)
+ assert_in('--gateway gw', cmds)
+ assert_in('--network external', cmds)
+ assert_in('--provider-physical-network external', cmds)
def test_external_network_cmds_no_ext(self):
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs'}}
+
cidr = MagicMock()
cidr.version = 6
ns_dict = {'apex':
@@ -402,8 +867,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
ns.enabled_network_list = ['admin']
ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
- external_network_cmds(ns)
- cmds = ' '.join(external_network_cmds(ns))
+ cmds = ' '.join(external_network_cmds(ns, ds))
assert_in('--external', cmds)
assert_in('--allocation-pool start=0,end=1', cmds)
assert_in('--network external', cmds)
@@ -417,3 +881,21 @@ class TestOvercloudDeploy(unittest.TestCase):
def test_create_congress_cmds_raises(self, mock_parsers):
mock_parsers.return_value.__getitem__.side_effect = KeyError()
assert_raises(KeyError, create_congress_cmds, 'overcloud_file')
+
+ def test_get_docker_sdn_files(self):
+ ds_opts = {'ha_enabled': True,
+ 'congress': True,
+ 'tacker': True,
+ 'containers': False,
+ 'barometer': True,
+ 'ceph': False,
+ 'vpn': True,
+ 'sdn_controller': 'opendaylight',
+ 'os_version': 'queens'
+ }
+ output = get_docker_sdn_files(ds_opts)
+ compare = ['/usr/share/openstack-tripleo-heat-templates/'
+ 'environments/services/neutron-opendaylight.yaml',
+ '/usr/share/openstack-tripleo-heat-templates/environments'
+ '/services/neutron-bgpvpn-opendaylight.yaml']
+ self.assertEqual(output, compare)
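
test_get_docker_sdn_files shows the input/output pair directly: an OpenDaylight deployment with vpn enabled on queens maps to the two environments/services files above. A sketch of that mapping, covering only the cases this test exercises (the real SDN_FILE_MAP is richer):

    import os

    THT_SERVICES = ('/usr/share/openstack-tripleo-heat-templates/'
                    'environments/services')

    def get_docker_sdn_files_sketch(ds_opts):
        files = []
        if ds_opts.get('sdn_controller') == 'opendaylight':
            files.append(os.path.join(THT_SERVICES,
                                      'neutron-opendaylight.yaml'))
            if ds_opts.get('vpn'):
                files.append(os.path.join(
                    THT_SERVICES, 'neutron-bgpvpn-opendaylight.yaml'))
        return files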
diff --git a/apex/tests/test_apex_overcloud_node.py b/apex/tests/test_apex_overcloud_node.py
new file mode 100644
index 00000000..4c67b1d8
--- /dev/null
+++ b/apex/tests/test_apex_overcloud_node.py
@@ -0,0 +1,191 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from libvirt import libvirtError
+from mock import patch
+from mock import MagicMock
+import os
+import unittest
+import urllib.request
+
+from apex.common import exceptions as exc
+from apex.overcloud.node import OvercloudNode
+from apex.settings.deploy_settings import DeploySettings
+from apex.tests.constants import TEST_DUMMY_CONFIG
+
+DUMMY_SNAP_DIR = '/tmp/dummy_cache'
+
+
+class TestOvercloudNode(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_init(self, mock_libvirt_open, mock_is_file, mock_node_create):
+ mock_is_file.return_value = True
+ OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0', node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ mock_node_create.assert_called()
+
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_init_invalid_files(self, mock_libvirt_open, mock_node_create):
+ self.assertRaises(exc.OvercloudNodeException,
+ OvercloudNode, 'controller', '123.123.123',
+ None, None, 'dummy-controller-0', 'dummynode.xml',
+ 'dummy.qcow2')
+
+ @patch('apex.overcloud.node.shutil.copyfile')
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_configure_disk(self, mock_libvirt_open, mock_is_file,
+ mock_node_create, mock_copy):
+ mock_is_file.return_value = True
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value.XMLDesc.return_value = """
+ <pool type='dir'>
+ <target>
+ <path>/var/lib/libvirt/images</path>
+ </target>
+ </pool>
+ """
+ node._configure_disk('dummy.qcow2')
+ mock_copy.assert_called()
+ self.assertEqual(node.disk_img, '/var/lib/libvirt/images/dummy.qcow2')
+
+ @patch('apex.overcloud.node.shutil.copyfile')
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_configure_disk_bad_path(self, mock_libvirt_open, mock_is_file,
+ mock_node_create, mock_copy):
+ mock_is_file.return_value = True
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value.XMLDesc.return_value = """
+ <pool type='dir'>
+ <target>
+ </target>
+ </pool>
+ """
+ self.assertRaises(exc.OvercloudNodeException,
+ node._configure_disk, 'dummy.qcow2')
+
+ @patch('apex.overcloud.node.shutil.copyfile')
+ @patch('apex.overcloud.node.OvercloudNode.create')
+ @patch('apex.overcloud.node.os.path.isfile')
+ @patch('apex.overcloud.node.libvirt.open')
+ def test_configure_disk_no_pool(self, mock_libvirt_open, mock_is_file,
+ mock_node_create, mock_copy):
+ mock_is_file.return_value = True
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml='dummynode.xml',
+ disk_img='dummy.qcow2')
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value = None
+ self.assertRaises(exc.OvercloudNodeException,
+ node._configure_disk, 'dummy.qcow2')
+
+ @patch('apex.overcloud.node.distro.linux_distribution')
+ def test_update_xml(self, mock_linux_distro):
+ mock_linux_distro.return_value = ['Fedora']
+ xml_file = os.path.join(TEST_DUMMY_CONFIG, 'baremetal0.xml')
+ with open(xml_file, 'r') as fh:
+ xml = fh.read()
+ new_xml = OvercloudNode._update_xml(
+ xml=xml, disk_path='/dummy/disk/path/blah.qcow2')
+ self.assertIn('/dummy/disk/path/blah.qcow2', new_xml)
+ self.assertIn('/usr/bin/qemu-kvm', new_xml)
+
+ @patch('apex.overcloud.node.distro.linux_distribution')
+ def test_update_xml_no_disk(self, mock_linux_distro):
+ mock_linux_distro.return_value = ['Fedora']
+ xml_file = os.path.join(TEST_DUMMY_CONFIG, 'baremetal0.xml')
+ with open(xml_file, 'r') as fh:
+ xml = fh.read()
+ new_xml = OvercloudNode._update_xml(xml=xml)
+ self.assertIn('/home/images/baremetal0.qcow2', new_xml)
+ self.assertIn('/usr/bin/qemu-kvm', new_xml)
+
+ @patch('apex.overcloud.node.OvercloudNode._update_xml')
+ @patch('apex.overcloud.node.OvercloudNode._configure_disk')
+ @patch('apex.overcloud.node.libvirt.open')
+ @patch('apex.overcloud.node.os.path.isfile')
+ def test_create(self, mock_isfile, mock_libvirt_conn, mock_configure_disk,
+ mock_update_xml):
+ mock_isfile.return_value = True
+ domain = mock_libvirt_conn.return_value.defineXML.return_value
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml=os.path.join(TEST_DUMMY_CONFIG,
+ 'baremetal0.xml'),
+ disk_img='dummy.qcow2')
+ self.assertIs(node.vm, domain)
+
+ @patch('apex.overcloud.node.OvercloudNode._update_xml')
+ @patch('apex.overcloud.node.OvercloudNode._configure_disk')
+ @patch('apex.overcloud.node.libvirt.open')
+ @patch('apex.overcloud.node.os.path.isfile')
+ def test_start(self, mock_isfile, mock_libvirt_conn, mock_configure_disk,
+ mock_update_xml):
+ mock_isfile.return_value = True
+ domain = mock_libvirt_conn.return_value.defineXML.return_value
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml=os.path.join(TEST_DUMMY_CONFIG,
+ 'baremetal0.xml'),
+ disk_img='dummy.qcow2')
+ node.start()
+ domain.create.assert_called()
+
+ @patch('apex.overcloud.node.OvercloudNode._update_xml')
+ @patch('apex.overcloud.node.OvercloudNode._configure_disk')
+ @patch('apex.overcloud.node.libvirt.open')
+ @patch('apex.overcloud.node.os.path.isfile')
+ def test_start_fail(self, mock_isfile, mock_libvirt_conn,
+ mock_configure_disk, mock_update_xml):
+ mock_isfile.return_value = True
+ domain = mock_libvirt_conn.return_value.defineXML.return_value
+ domain.create.side_effect = libvirtError('blah')
+ node = OvercloudNode(role='controller', ip='123.123.123.123',
+ ovs_ctrlrs=None, ovs_mgrs=None,
+ name='dummy-controller-0',
+ node_xml=os.path.join(TEST_DUMMY_CONFIG,
+ 'baremetal0.xml'),
+ disk_img='dummy.qcow2')
+ self.assertRaises(exc.OvercloudNodeException, node.start)
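
The _configure_disk tests above encode a three-step flow: look up the libvirt storage pool, read its target path out of the pool XML, and copy the snapshot disk into it, raising when either lookup fails. A hedged reconstruction of that flow; the pool name and exception type are assumptions:

    import os
    import shutil
    import xml.etree.ElementTree as ET

    def configure_disk_sketch(conn, disk_img):
        pool = conn.storagePoolLookupByName('default')
        if pool is None:
            raise RuntimeError('Cannot find default storage pool')
        # the pool XML carries the directory the image must live in
        path = ET.fromstring(pool.XMLDesc(0)).findtext('.//target/path')
        if not path:
            raise RuntimeError('Storage pool has no target path')
        dest = os.path.join(path, os.path.basename(disk_img))
        shutil.copyfile(disk_img, dest)
        return dest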
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index 9458bf9f..14586528 100644
--- a/apex/tests/test_apex_undercloud.py
+++ b/apex/tests/test_apex_undercloud.py
@@ -7,8 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import ipaddress
import libvirt
import os
+import platform
import subprocess
import unittest
@@ -23,6 +25,7 @@ from nose.tools import (
assert_regexp_matches,
assert_raises,
assert_true,
+ assert_false,
assert_equal)
@@ -117,11 +120,111 @@ class TestUndercloud(unittest.TestCase):
@patch.object(Undercloud, 'generate_config', return_value={})
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
+ def test_detect_nat_with_external(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30'],
+ 'gateway': '192.1.1.1',
+ },
+ 'external':
+ [{'enabled': True,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'},
+ 'gateway': '192.168.0.1'
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ assert_true(uc.detect_nat(ns))
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_detect_nat_no_external(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30'],
+ 'gateway': '192.0.2.1',
+ },
+ 'external':
+ [{'enabled': False,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'},
+ 'gateway': '192.168.1.1'
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+
+ uc = Undercloud('img_path', 'tplt_path', external_network=False)
+ assert_true(uc.detect_nat(ns))
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_detect_no_nat_no_external(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30'],
+ 'gateway': '192.0.2.3',
+ },
+ 'external':
+ [{'enabled': False,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'},
+ 'gateway': '192.168.1.1'
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+
+ uc = Undercloud('img_path', 'tplt_path', external_network=False)
+ assert_false(uc.detect_nat(ns))
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
def test_configure(self, mock_create, mock_get_vm,
mock_generate_config, mock_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
ns = MagicMock()
- uc.configure(ns, 'playbook', '/tmp/dir')
+ ds = MagicMock()
+ uc.configure(ns, ds, 'playbook', '/tmp/dir')
@patch('apex.undercloud.undercloud.utils')
@patch.object(Undercloud, 'generate_config', return_value={})
@@ -131,18 +234,22 @@ class TestUndercloud(unittest.TestCase):
mock_generate_config, mock_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
ns = MagicMock()
+ ds = MagicMock()
subps_err = subprocess.CalledProcessError(1, 'cmd')
mock_utils.run_ansible.side_effect = subps_err
assert_raises(ApexUndercloudException,
- uc.configure, ns, 'playbook', '/tmp/dir')
+ uc.configure, ns, ds, 'playbook', '/tmp/dir')
+ @patch('apex.undercloud.undercloud.virt_utils')
+ @patch('apex.undercloud.undercloud.uc_builder')
@patch('apex.undercloud.undercloud.os.remove')
@patch('apex.undercloud.undercloud.os.path')
@patch('apex.undercloud.undercloud.shutil')
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
def test_setup_vols(self, mock_get_vm, mock_create,
- mock_shutil, mock_os_path, mock_os_remove):
+ mock_shutil, mock_os_path, mock_os_remove,
+ mock_uc_builder, mock_virt_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
mock_os_path.isfile.return_value = True
mock_os_path.exists.return_value = True
@@ -152,6 +259,9 @@ class TestUndercloud(unittest.TestCase):
src_img = os.path.join(uc.image_path, img_file)
dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
mock_shutil.copyfile.assert_called_with(src_img, dest_img)
+ if platform.machine() != 'aarch64':
+ mock_uc_builder.expand_disk.assert_called()
+ mock_virt_utils.virt_customize.assert_called()
@patch('apex.undercloud.undercloud.os.path')
@patch.object(Undercloud, '_get_vm', return_value=None)
@@ -173,24 +283,64 @@ class TestUndercloud(unittest.TestCase):
{'--upload':
'/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'},
{'--run-command': 'chmod 600 /root/.ssh/authorized_keys'},
- {'--run-command': 'restorecon /root/.ssh/authorized_keys'},
+ {'--run-command': 'restorecon '
+ '-R -v /root/.ssh'},
+ {'--run-command': 'id -u stack || useradd -m stack'},
+ {'--run-command': 'mkdir -p /home/stack/.ssh'},
+ {'--run-command': 'chown stack:stack /home/stack/.ssh'},
{'--run-command':
'cp /root/.ssh/authorized_keys /home/stack/.ssh/'},
{'--run-command':
'chown stack:stack /home/stack/.ssh/authorized_keys'},
{'--run-command':
- 'chmod 600 /home/stack/.ssh/authorized_keys'}]
+ 'chmod 600 /home/stack/.ssh/authorized_keys'},
+ {'--run-command':
+ 'echo "stack ALL = (ALL) NOPASSWD: ALL" >> '
+ '/etc/sudoers'},
+ {'--run-command': 'touch /etc/cloud/cloud-init.disabled'}]
mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
def test_generate_config(self, mock_get_vm, mock_create):
- ns_net = MagicMock()
- ns_net.__getitem__.side_effect = \
- lambda i: '1234/24' if i is 'cidr' else MagicMock()
- ns = {'apex': MagicMock(),
- 'dns-domain': 'dns',
- 'networks': {'admin': ns_net,
- 'external': [ns_net]}}
-
- Undercloud('img_path', 'tplt_path').generate_config(ns)
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'ntp': 'pool.ntp.org',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30']
+ },
+ 'external':
+ [{'enabled': True,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'}
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+ ds = {'global_params': {},
+ 'deploy_options': {}}
+
+ Undercloud('img_path', 'tplt_path').generate_config(ns, ds)
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ @patch('apex.undercloud.undercloud.virt_utils')
+ def test_update_delorean(self, mock_vutils, mock_uc_create, mock_get_vm):
+ uc = Undercloud('img_path', 'tmplt_path', external_network=True)
+ uc._update_delorean_repo()
+ download_cmd = (
+ "curl -L -f -o "
+            "/etc/yum.repos.d/delorean.repo "
+ "https://trunk.rdoproject.org/centos7-{}"
+ "/current-tripleo/delorean.repo".format(
+ constants.DEFAULT_OS_VERSION))
+ test_ops = [{'--run-command': download_cmd}]
+ mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
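
These undercloud tests pin down a convention used throughout this change: image modifications are expressed as a list of single-key dicts, each mapping a virt-customize flag to its argument, which is then handed to virt_utils.virt_customize along with the target volume. As a rough, hypothetical sketch (the real flattening lives in apex.virtual.utils and is not shown in full in this diff), such an ops list could translate to a command line as follows:

    import subprocess

    def flatten_virt_ops(ops, target):
        # ops: e.g. [{'--run-command': 'id -u stack || useradd -m stack'}]
        cmd = ['virt-customize', '-a', target]
        for op in ops:
            for flag, arg in op.items():
                cmd.extend([flag, arg])
        return cmd

    # e.g. subprocess.check_call(flatten_virt_ops(ops, uc.volume))
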
diff --git a/apex/tests/test_apex_virtual_utils.py b/apex/tests/test_apex_virtual_utils.py
index 643069f3..a9eb78dd 100644
--- a/apex/tests/test_apex_virtual_utils.py
+++ b/apex/tests/test_apex_virtual_utils.py
@@ -12,6 +12,7 @@ import unittest
from mock import patch
+from apex.virtual.exceptions import ApexVirtualException
from apex.virtual.utils import DEFAULT_VIRT_IP
from apex.virtual.utils import get_virt_ip
from apex.virtual.utils import generate_inventory
@@ -66,13 +67,30 @@ class TestVirtualUtils(unittest.TestCase):
assert_is_instance(generate_inventory('target_file', ha_enabled=True),
dict)
+ @patch('apex.virtual.utils.get_virt_ip')
+ @patch('apex.virtual.utils.subprocess.check_output')
@patch('apex.virtual.utils.iptc')
@patch('apex.virtual.utils.subprocess.check_call')
@patch('apex.virtual.utils.vbmc_lib')
- def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc):
+ def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc,
+ mock_check_output, mock_get_virt_ip):
+ mock_get_virt_ip.return_value = '192.168.122.1'
+ mock_check_output.return_value = b'blah |dummy \nstatus | running'
host_setup({'test': 2468})
mock_subprocess.assert_called_with(['vbmc', 'start', 'test'])
+ @patch('apex.virtual.utils.get_virt_ip')
+ @patch('apex.virtual.utils.subprocess.check_output')
+ @patch('apex.virtual.utils.iptc')
+ @patch('apex.virtual.utils.subprocess.check_call')
+ @patch('apex.virtual.utils.vbmc_lib')
+ def test_host_setup_vbmc_fails(self, mock_vbmc_lib, mock_subprocess,
+ mock_iptc, mock_check_output,
+ mock_get_virt_ip):
+ mock_get_virt_ip.return_value = '192.168.122.1'
+ mock_check_output.return_value = b'blah |dummy \nstatus | stopped'
+ assert_raises(ApexVirtualException, host_setup, {'test': 2468})
+
@patch('apex.virtual.utils.iptc')
@patch('apex.virtual.utils.subprocess.check_call')
@patch('apex.virtual.utils.vbmc_lib')
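
One detail these tests rely on: subprocess.check_output returns bytes, so the stubbed return value must be a bytes literal for the decode('utf-8') in host_setup to behave as it does in production. A minimal, self-contained sketch of the same stubbing pattern against a hypothetical function:

    import subprocess
    from unittest import mock

    def service_running(name):
        out = subprocess.check_output(['vbmc', 'show', name])
        return 'running' in out.decode('utf-8')

    with mock.patch('subprocess.check_output',
                    return_value=b'status | running'):
        assert service_running('test')
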
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 013570d3..5ee487c2 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -15,6 +15,7 @@ import shutil
import subprocess
import time
+from apex.builders import undercloud_builder as uc_builder
from apex.virtual import utils as virt_utils
from apex.virtual import configure_vm as vm_lib
from apex.common import constants
@@ -31,8 +32,10 @@ class Undercloud:
"""
def __init__(self, image_path, template_path,
root_pw=None, external_network=False,
- image_name='undercloud.qcow2'):
+ image_name='undercloud.qcow2',
+ os_version=constants.DEFAULT_OS_VERSION):
self.ip = None
+ self.os_version = os_version
self.root_pw = root_pw
self.external_net = external_network
self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
@@ -61,27 +64,40 @@ class Undercloud:
if self.external_net:
networks.append('external')
console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
+ root = 'vda2' if platform.machine() == 'aarch64' else 'sda'
self.vm = vm_lib.create_vm(name='undercloud',
image=self.volume,
baremetal_interfaces=networks,
direct_boot='overcloud-full',
kernel_args=['console={}'.format(console),
- 'root=/dev/sda'],
+ 'root=/dev/{}'.format(root)],
default_network=True,
- template_dir=self.template_path)
+ template_dir=self.template_path,
+ memory=10240)
self.setup_volumes()
self.inject_auth()
- def _set_ip(self):
- ip_out = self.vm.interfaceAddresses(
+ @staticmethod
+ def _get_ip(vm):
+ ip_out = vm.interfaceAddresses(
libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0)
if ip_out:
for (name, val) in ip_out.items():
for ipaddr in val['addrs']:
if ipaddr['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
- self.ip = ipaddr['addr']
- return True
+ return ipaddr['addr']
+
+ def _set_ip(self):
+ ip = self._get_ip(self.vm)
+ if ip:
+ self.ip = ip
+ return True
+
+ @staticmethod
+ def get_ip():
+ vm = Undercloud._get_vm()
+ return Undercloud._get_ip(vm)
def start(self):
"""
@@ -96,7 +112,7 @@ class Undercloud:
# give 10 seconds to come up
time.sleep(10)
# set IP
- for x in range(5):
+ for x in range(10):
if self._set_ip():
logging.info("Undercloud started. IP Address: {}".format(
self.ip))
@@ -110,19 +126,37 @@ class Undercloud:
"Unable to find IP for undercloud. Check if VM booted "
"correctly")
- def configure(self, net_settings, playbook, apex_temp_dir):
+ def detect_nat(self, net_settings):
+ if self.external_net:
+ net = net_settings['networks'][constants.EXTERNAL_NETWORK][0]
+ else:
+ net = net_settings['networks'][constants.ADMIN_NETWORK]
+        # NATed when the installer VM itself acts as the network gateway
+        return net['gateway'] == net['installer_vm']['ip']
+
+ def configure(self, net_settings, deploy_settings,
+ playbook, apex_temp_dir, virtual_oc=False):
"""
Configures undercloud VM
- :param net_setings: Network settings for deployment
+ :param net_settings: Network settings for deployment
+ :param deploy_settings: Deployment settings for deployment
:param playbook: playbook to use to configure undercloud
:param apex_temp_dir: temporary apex directory to hold configs/logs
+        :param virtual_oc: Boolean indicating whether the overcloud is virtual
:return: None
"""
logging.info("Configuring Undercloud...")
# run ansible
- ansible_vars = Undercloud.generate_config(net_settings)
+ ansible_vars = Undercloud.generate_config(net_settings,
+ deploy_settings)
ansible_vars['apex_temp_dir'] = apex_temp_dir
+
+ ansible_vars['nat'] = self.detect_nat(net_settings)
+ ansible_vars['container_client'] = utils.find_container_client(
+ self.os_version)
try:
utils.run_ansible(ansible_vars, playbook, host=self.ip,
user='stack')
@@ -150,11 +184,19 @@ class Undercloud:
if os.path.exists(dest_img):
os.remove(dest_img)
shutil.copyfile(src_img, dest_img)
+ if img_file == self.image_name and platform.machine() != 'aarch64':
+ uc_builder.expand_disk(dest_img)
+ self.expand_root_fs()
+
shutil.chown(dest_img, user='qemu', group='qemu')
os.chmod(dest_img, 0o0744)
- # TODO(trozet):check if resize needed right now size is 50gb
+
+ def expand_root_fs(self):
# there is a lib called vminspect which has some dependencies and is
# not yet available in pip. Consider switching to this lib later.
+ logging.debug("Expanding root filesystem on /dev/sda partition")
+ virt_ops = [{constants.VIRT_RUN_CMD: 'xfs_growfs /dev/sda'}]
+ virt_utils.virt_customize(virt_ops, self.volume)
def inject_auth(self):
virt_ops = list()
@@ -169,39 +211,59 @@ class Undercloud:
'/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'})
run_cmds = [
'chmod 600 /root/.ssh/authorized_keys',
- 'restorecon /root/.ssh/authorized_keys',
+ 'restorecon -R -v /root/.ssh',
+ 'id -u stack || useradd -m stack',
+ 'mkdir -p /home/stack/.ssh',
+ 'chown stack:stack /home/stack/.ssh',
'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
'chown stack:stack /home/stack/.ssh/authorized_keys',
- 'chmod 600 /home/stack/.ssh/authorized_keys'
+ 'chmod 600 /home/stack/.ssh/authorized_keys',
+ 'echo "stack ALL = (ALL) NOPASSWD: ALL" >> /etc/sudoers',
+ 'touch /etc/cloud/cloud-init.disabled'
]
for cmd in run_cmds:
virt_ops.append({constants.VIRT_RUN_CMD: cmd})
virt_utils.virt_customize(virt_ops, self.volume)
@staticmethod
- def generate_config(ns):
+ def generate_config(ns, ds):
"""
Generates a dictionary of settings for configuring undercloud
:param ns: network settings to derive undercloud settings
+ :param ds: deploy settings to derive undercloud settings
:return: dictionary of settings
"""
ns_admin = ns['networks']['admin']
intro_range = ns['apex']['networks']['admin']['introspection_range']
config = dict()
+ # Check if this is an ARM deployment
+ config['aarch64'] = platform.machine() == 'aarch64'
+ # Configuration for undercloud.conf
config['undercloud_config'] = [
"enable_ui false",
"undercloud_update_packages false",
"undercloud_debug false",
"inspection_extras false",
+ "ipxe_enabled {}".format(
+ str(ds['global_params'].get('ipxe', True) and
+ not config['aarch64'])),
"undercloud_hostname undercloud.{}".format(ns['dns-domain']),
"local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
str(ns_admin['cidr']).split('/')[1]),
- "network_gateway {}".format(str(ns_admin['installer_vm']['ip'])),
- "network_cidr {}".format(str(ns_admin['cidr'])),
+ "generate_service_certificate false",
+ "undercloud_ntp_servers {}".format(str(ns['ntp'][0])),
+ "container_images_file "
+ "/home/stack/containers-prepare-parameter.yaml",
+ "undercloud_enable_selinux false"
+ ]
+
+ config['undercloud_network_config'] = [
+ "gateway {}".format(str(ns_admin['installer_vm']['ip'])),
+ "cidr {}".format(str(ns_admin['cidr'])),
"dhcp_start {}".format(str(ns_admin['dhcp_range'][0])),
"dhcp_end {}".format(str(ns_admin['dhcp_range'][1])),
- "inspection_iprange {}".format(','.join(intro_range))
+ "inspection_iprange {}".format(','.join(intro_range)),
]
config['ironic_config'] = [
@@ -225,8 +287,35 @@ class Undercloud:
"prefix": str(ns_external['cidr']).split('/')[1],
"enabled": ns_external['enabled']
}
-
- # Check if this is an ARM deployment
- config['aarch64'] = platform.machine() == 'aarch64'
+        # NAT the external network when it is enabled and IPv4. If the
+        # external network is IPv6, NAT the admin network instead so IPv4
+        # connectivity (e.g. for a DNS server) is still available.
+ if 'external' in ns.enabled_network_list and \
+ ns_external['cidr'].version == 4:
+ nat_cidr = ns_external['cidr']
+ else:
+ nat_cidr = ns['networks']['admin']['cidr']
+ config['nat_cidr'] = str(nat_cidr)
+ if nat_cidr.version == 6:
+ config['nat_network_ipv6'] = True
+ else:
+ config['nat_network_ipv6'] = False
+ config['http_proxy'] = ns.get('http_proxy', '')
+ config['https_proxy'] = ns.get('https_proxy', '')
return config
+
+ def _update_delorean_repo(self):
+ if utils.internet_connectivity():
+ logging.info('Updating delorean repo on Undercloud')
+ delorean_repo = (
+ "https://trunk.rdoproject.org/centos7-{}"
+ "/current-tripleo/delorean.repo".format(self.os_version))
+ cmd = ("curl -L -f -o "
+               "/etc/yum.repos.d/delorean.repo {}".format(delorean_repo))
+ try:
+ virt_utils.virt_customize([{constants.VIRT_RUN_CMD: cmd}],
+ self.volume)
+ except Exception:
+ logging.warning("Failed to download and update delorean repo "
+ "for Undercloud")
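
Two NAT-related rules land in this file: detect_nat reports the undercloud as NATed when the relevant network's gateway is the installer VM itself, and generate_config NATs the external network unless it is disabled or IPv6, falling back to the admin network. A standalone sketch of that CIDR selection, assuming the nested settings shape the tests build (pick_nat_cidr is a hypothetical helper, not Apex API):

    import ipaddress

    def pick_nat_cidr(networks, external_enabled):
        # Prefer NATing the external network; fall back to admin when the
        # external network is disabled or IPv6-only.
        external = networks['external'][0]
        if external_enabled and external['cidr'].version == 4:
            return external['cidr']
        return networks['admin']['cidr']

    networks = {
        'admin': {'cidr': ipaddress.ip_network('192.0.2.0/24')},
        'external': [{'cidr': ipaddress.ip_network('2001:db8::/64')}],
    }
    # IPv6 external network -> NAT the admin network for IPv4 reachability
    assert pick_nat_cidr(networks, True) == ipaddress.ip_network('192.0.2.0/24')
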
diff --git a/apex/utils.py b/apex/utils.py
new file mode 100644
index 00000000..f7914613
--- /dev/null
+++ b/apex/utils.py
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# TODO(trozet) migrate rest of utils.sh here
+
+import argparse
+import datetime
+import logging
+import os
+import sys
+import tempfile
+
+from apex.common import constants
+from apex.common import parsers
+from apex.undercloud import undercloud as uc_lib
+from apex.common import utils
+
+VALID_UTILS = ['fetch_logs']
+START_TIME = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M")
+APEX_TEMP_DIR = tempfile.mkdtemp(prefix="apex-logs-{}-".format(START_TIME))
+
+
+def fetch_logs(args):
+ uc_ip = uc_lib.Undercloud.get_ip()
+ if not uc_ip:
+ raise Exception('No Undercloud IP found')
+ logging.info("Undercloud IP is: {}".format(uc_ip))
+ fetch_vars = dict()
+ fetch_vars['stackrc'] = 'source /home/stack/stackrc'
+ fetch_vars['apex_temp_dir'] = APEX_TEMP_DIR
+ fetch_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'fetch_overcloud_nodes.yml')
+ try:
+ utils.run_ansible(fetch_vars, fetch_playbook, host=uc_ip,
+ user='stack', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Retrieved overcloud nodes info")
+ except Exception:
+ logging.error("Failed to retrieve overcloud nodes. Please check log")
+ raise
+ nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
+ fetch_vars['overcloud_nodes'] = parsers.parse_nova_output(nova_output)
+ fetch_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
+ 'GlobalKnownHostsFile=/dev/null -o ' \
+ 'UserKnownHostsFile=/dev/null -o ' \
+ 'LogLevel=error'
+ fetch_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'fetch_overcloud_logs.yml')
+ # Run per overcloud node
+ for node, ip in fetch_vars['overcloud_nodes'].items():
+ logging.info("Executing fetch logs overcloud playbook on "
+ "node {}".format(node))
+ try:
+ utils.run_ansible(fetch_vars, fetch_playbook, host=ip,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Logs retrieved for node {}".format(node))
+ except Exception:
+ logging.error("Log retrieval failed "
+ "for node {}. Please check log".format(node))
+ raise
+ logging.info("Log retrieval complete and stored in {}".format(
+ APEX_TEMP_DIR))
+
+
+def execute_actions(args):
+ for action in VALID_UTILS:
+ if hasattr(args, action) and getattr(args, action):
+ util_module = __import__('apex').utils
+ func = getattr(util_module, action)
+ logging.info("Executing action: {}".format(action))
+ func(args)
+
+
+def main():
+ util_parser = argparse.ArgumentParser()
+ util_parser.add_argument('-f', '--fetch-logs',
+ dest='fetch_logs',
+ required=False,
+ default=False,
+ action='store_true',
+ help='Fetch all overcloud logs')
+ util_parser.add_argument('--lib-dir',
+ default='/usr/share/opnfv-apex',
+ help='Directory path for apex ansible '
+ 'and third party libs')
+ args = util_parser.parse_args(sys.argv[1:])
+ os.makedirs(os.path.dirname('./apex_util.log'), exist_ok=True)
+ formatter = '%(asctime)s %(levelname)s: %(message)s'
+ logging.basicConfig(filename='./apex_util.log',
+ format=formatter,
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=logging.DEBUG)
+ console = logging.StreamHandler()
+ console.setLevel(logging.DEBUG)
+ console.setFormatter(logging.Formatter(formatter))
+ logging.getLogger('').addHandler(console)
+
+ execute_actions(args)
+
+
+if __name__ == '__main__':
+ main()
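
execute_actions resolves each enabled CLI flag to a same-named function by attribute lookup on the apex.utils module, so adding a new utility only requires a new entry in VALID_UTILS plus a matching function and argparse flag. A minimal sketch of that dispatch pattern, with hypothetical names standing in for the Apex ones:

    import argparse

    VALID_UTILS = ['fetch_logs']  # names double as argparse dests and functions

    def fetch_logs(args):
        print('would fetch logs using libs in', args.lib_dir)

    def execute_actions(args):
        for action in VALID_UTILS:
            if getattr(args, action, False):
                globals()[action](args)  # resolve the handler by name

    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fetch-logs', dest='fetch_logs',
                        action='store_true')
    parser.add_argument('--lib-dir', default='/usr/share/opnfv-apex')
    execute_actions(parser.parse_args(['-f']))
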
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
index 3b2c4462..9d47bf03 100755
--- a/apex/virtual/configure_vm.py
+++ b/apex/virtual/configure_vm.py
@@ -102,6 +102,10 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
with open(os.path.join(template_dir, 'domain.xml'), 'r') as f:
source_template = f.read()
imagefile = os.path.realpath(image)
+
+ if arch == 'aarch64' and diskbus == 'sata':
+ diskbus = 'virtio'
+
memory = int(memory) * 1024
params = {
'name': name,
@@ -118,9 +122,6 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
'user_interface': '',
}
- # assign scsi as default for aarch64
- if arch == 'aarch64' and diskbus == 'sata':
- diskbus = 'scsi'
# Configure the bus type for the target disk device
params['diskbus'] = diskbus
nicparams = {
@@ -171,7 +172,7 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
"""
params['user_interface'] = """
<controller type='virtio-serial' index='0'>
- <address type='virtio-mmio'/>
+ <address type='pci'/>
</controller>
<serial type='pty'>
<target port='0'/>
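
The aarch64 default disk bus moving from scsi to virtio pairs with the root=/dev/vda2 kernel argument added in undercloud.py: the bus type determines the device prefix the guest kernel assigns to the disk. A rough illustrative mapping (not Apex code; virtio-scsi and AHCI devices both surface as sdX):

    BUS_TO_PREFIX = {'virtio': 'vd', 'scsi': 'sd', 'sata': 'sd', 'ide': 'hd'}

    def first_disk(diskbus):
        # first disk on the given bus, as seen inside the guest
        return '/dev/{}a'.format(BUS_TO_PREFIX[diskbus])

    assert first_disk('virtio') == '/dev/vda'  # aarch64 default after this change
    assert first_disk('sata') == '/dev/sda'    # x86_64 default
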
diff --git a/apex/virtual/exceptions.py b/apex/virtual/exceptions.py
new file mode 100644
index 00000000..e3dff51a
--- /dev/null
+++ b/apex/virtual/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexVirtualException(Exception):
+ pass
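
Having a dedicated exception type lets callers separate virtualization failures from generic errors. A hedged usage sketch at an assumed call site (host_setup is provided by apex.virtual.utils per this diff; the node/port mapping is hypothetical):

    import logging

    from apex.virtual.exceptions import ApexVirtualException
    from apex.virtual.utils import host_setup

    try:
        host_setup({'undercloud': 6230})  # hypothetical vbmc name/port
    except ApexVirtualException:
        logging.error("VBMC never reached 'running'; aborting deploy")
        raise
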
diff --git a/apex/virtual/utils.py b/apex/virtual/utils.py
index 226af1b5..8b24bc40 100644
--- a/apex/virtual/utils.py
+++ b/apex/virtual/utils.py
@@ -18,6 +18,8 @@ import xml.etree.ElementTree as ET
from apex.common import utils as common_utils
from apex.virtual import configure_vm as vm_lib
+from apex.virtual import exceptions as exc
+from time import sleep
from virtualbmc import manager as vbmc_lib
DEFAULT_RAM = 8192
@@ -131,11 +133,39 @@ def host_setup(node):
chain.insert_rule(rule)
try:
subprocess.check_call(['vbmc', 'start', name])
- logging.debug("Started vbmc for domain {}".format(name))
+ logging.debug("Started VBMC for domain {}".format(name))
except subprocess.CalledProcessError:
- logging.error("Failed to start vbmc for {}".format(name))
+ logging.error("Failed to start VBMC for {}".format(name))
raise
- logging.debug('vmbcs setup: {}'.format(vbmc_manager.list()))
+
+ logging.info("Checking VBMC {} is up".format(name))
+ is_running = False
+ for x in range(0, 4):
+ logging.debug("Polling to see if VBMC is up, attempt {}".format(x))
+ try:
+ output = subprocess.check_output(['vbmc', 'show', name],
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ logging.warning('Unable to issue "vbmc show" cmd')
+ continue
+ for line in output.decode('utf-8').split('\n'):
+ if 'status' in line:
+ if 'running' in line:
+ is_running = True
+ break
+ else:
+ logging.debug('VBMC status is not "running"')
+ break
+ if is_running:
+ break
+ sleep(1)
+ if is_running:
+ logging.info("VBMC {} is up and running".format(name))
+ else:
+ logging.error("Failed to verify VBMC is running")
+        raise exc.ApexVirtualException("Failed to bring up VBMC "
+                                       "{}".format(name))
+ logging.debug('VBMCs setup: {}'.format(vbmc_manager.list()))
def virt_customize(ops, target):
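
The new check is a bounded poll: up to four attempts, one second apart, scanning the "vbmc show" table for a running status before giving up. The same retry pattern, extracted into a generic, hypothetical helper:

    import subprocess
    from time import sleep

    def wait_for_status(cmd, wanted='running', attempts=4, delay=1):
        # Poll a "... status | <state> ..." style table until the wanted
        # state appears or attempts are exhausted.
        for _ in range(attempts):
            try:
                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError:
                sleep(delay)
                continue
            for line in output.decode('utf-8').splitlines():
                if 'status' in line and wanted in line:
                    return True
            sleep(delay)
        return False

    # e.g. wait_for_status(['vbmc', 'show', 'undercloud'])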