Diffstat (limited to 'apex')
-rw-r--r--  apex/build.py                             |  17
-rw-r--r--  apex/build_utils.py                       |  10
-rw-r--r--  apex/builders/common_builder.py           | 107
-rw-r--r--  apex/builders/overcloud_builder.py        |   2
-rw-r--r--  apex/builders/undercloud_builder.py       |   9
-rw-r--r--  apex/clean.py                             |   8
-rw-r--r--  apex/common/constants.py                  |  16
-rw-r--r--  apex/common/utils.py                      |  42
-rw-r--r--  apex/deploy.py                            |  98
-rw-r--r--  apex/network/network_environment.py       |   2
-rw-r--r--  apex/overcloud/deploy.py                  |  59
-rw-r--r--  apex/settings/deploy_settings.py          |   6
-rw-r--r--  apex/settings/network_settings.py         |   7
-rw-r--r--  apex/tests/test_apex_common_builder.py    | 103
-rw-r--r--  apex/tests/test_apex_deploy.py            |  12
-rw-r--r--  apex/tests/test_apex_network_settings.py  |   3
-rw-r--r--  apex/tests/test_apex_overcloud_deploy.py  |  36
-rw-r--r--  apex/tests/test_apex_undercloud.py        | 100
-rw-r--r--  apex/undercloud/undercloud.py             |  17
19 files changed, 470 insertions, 184 deletions
diff --git a/apex/build.py b/apex/build.py
index dff25ac8..6e903814 100644
--- a/apex/build.py
+++ b/apex/build.py
@@ -109,11 +109,15 @@ def unpack_cache(cache_dest, cache_dir=None):
def build(build_root, version, iso=False, rpms=False):
if iso:
- make_targets = ['iso']
+ logging.warning("iso is deprecated. Will not build iso and build rpm "
+ "instead.")
+ make_targets = ['rpm']
elif rpms:
- make_targets = ['rpms']
+ make_targets = ['rpm']
else:
- make_targets = ['images', 'rpms-check']
+ logging.warning("Nothing specified to build, and images are no "
+ "longer supported in Apex. Will only run rpm check")
+ make_targets = ['rpm-check']
if version is not None:
make_args = ['RELEASE={}'.format(version)]
else:
@@ -234,9 +238,7 @@ def main():
logging.error("Must be in an Apex git repo to execute build")
raise
apex_build_root = os.path.join(apex_root, BUILD_ROOT)
- if os.path.isdir(apex_build_root):
- cache_tmp_dir = os.path.join(apex_root, TMP_CACHE)
- else:
+ if not os.path.isdir(apex_build_root):
logging.error("You must execute this script inside of the Apex "
"local code repository")
raise ApexBuildException("Invalid path for apex root: {}. Must be "
@@ -245,10 +247,7 @@ def main():
dep_playbook = os.path.join(apex_root,
'lib/ansible/playbooks/build_dependencies.yml')
utils.run_ansible(None, dep_playbook)
- unpack_cache(cache_tmp_dir, args.cache_dir)
build(apex_build_root, args.build_version, args.iso, args.rpms)
- build_cache(cache_tmp_dir, args.cache_dir)
- prune_cache(args.cache_dir)
if __name__ == '__main__':
diff --git a/apex/build_utils.py b/apex/build_utils.py
index 1c413dfd..213ae115 100644
--- a/apex/build_utils.py
+++ b/apex/build_utils.py
@@ -27,7 +27,7 @@ def get_change(url, repo, branch, change_id):
:param repo: name of repo
:param branch: branch of repo
:param change_id: SHA change id
- :return: change if found and not abandoned, closed, or merged
+ :return: change if found and not abandoned or closed
"""
rest = GerritRestAPI(url=url)
change_path = "{}~{}~{}".format(quote_plus(repo), quote_plus(branch),
@@ -37,12 +37,8 @@ def get_change(url, repo, branch, change_id):
try:
assert change['status'] not in 'ABANDONED' 'CLOSED', \
'Change {} is in {} state'.format(change_id, change['status'])
- if change['status'] == 'MERGED':
- logging.info('Change {} is merged, ignoring...'
- .format(change_id))
- return None
- else:
- return change
+ logging.debug('Change found: {}'.format(change))
+ return change
except KeyError:
logging.error('Failed to get valid change data structure from url '
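With the MERGED short-circuit removed, get_change now returns merged changes as well and leaves it to callers to decide whether such a patch still needs to be applied. A minimal usage sketch (illustrative only; the change-id is the dummy value used in the unit tests later in this diff):

    change = get_change('https://review.openstack.org', 'openstack/nova',
                        'stable/queens',
                        'I301370fbf47a71291614dd60e4c64adc7b5ebb42')
    if change is not None:
        # status may now be 'MERGED' as well as an open state; callers such as
        # is_patch_promoted() in common_builder decide what to do with it
        print(change['status'])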
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index 05a81efe..10fab9b6 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -9,11 +9,13 @@
# Common building utilities for undercloud and overcloud
+import datetime
import git
import json
import logging
import os
import re
+import urllib.parse
import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
@@ -25,7 +27,7 @@ from apex.virtual import utils as virt_utils
def project_to_path(project):
"""
- Translates project to absolute file path
+ Translates project to absolute file path to use in patching
:param project: name of project
:return: File path
"""
@@ -36,8 +38,12 @@ def project_to_path(project):
elif 'tripleo-heat-templates' in project:
return "/usr/share/openstack-tripleo-heat-templates"
else:
- # assume python
- return "/usr/lib/python2.7/site-packages/{}".format(project)
+ # assume python. python patches will apply to a project name subdir.
+ # For example, python-tripleoclient patch will apply to the
+ # tripleoclient directory, which is the directory extracted during
+ # python install into the PYTHONPATH. Therefore we need to just be
+ # in the PYTHONPATH directory to apply a patch
+ return "/usr/lib/python2.7/site-packages/"
def project_to_docker_image(project):
@@ -48,7 +54,9 @@ def project_to_docker_image(project):
"""
# Fetch all docker containers in docker hub with tripleo and filter
# based on project
- hub_output = utils.open_webpage(con.DOCKERHUB_OOO, timeout=10)
+
+ hub_output = utils.open_webpage(
+ urllib.parse.urljoin(con.DOCKERHUB_OOO, '?page_size=1024'), timeout=10)
try:
results = json.loads(hub_output.decode())['results']
except Exception as e:
@@ -68,6 +76,60 @@ def project_to_docker_image(project):
return docker_images
+def is_patch_promoted(change, branch, docker_image=None):
+ """
+ Checks to see if a patch that is merged exists in either the docker
+ container or the promoted tripleo images
+ :param change: gerrit change json output
+ :param branch: branch to use when polling artifacts (does not include
+ stable prefix)
+ :param docker_image: container this patch applies to, if any (defaults to None)
+ :return: True if the patch exists in a promoted artifact upstream
+ """
+ assert isinstance(change, dict)
+ assert 'status' in change
+
+ # if not merged we already know this is not closed/abandoned, so we know
+ # this is not promoted
+ if change['status'] != 'MERGED':
+ return False
+ assert 'submitted' in change
+ # drop microseconds cause who cares
+ stime = re.sub(r'\..*$', '', change['submitted'])
+ submitted_date = datetime.datetime.strptime(stime, "%Y-%m-%d %H:%M:%S")
+ # Patch applies to overcloud/undercloud
+ if docker_image is None:
+ oc_url = urllib.parse.urljoin(
+ con.UPSTREAM_RDO.replace('master', branch), 'overcloud-full.tar')
+ oc_mtime = utils.get_url_modified_date(oc_url)
+ if oc_mtime > submitted_date:
+ logging.debug("oc image was last modified at {}, which is"
+ "newer than merge date: {}".format(oc_mtime,
+ submitted_date))
+ return True
+ else:
+ # must be a docker patch, check docker tag modified time
+ docker_url = con.DOCKERHUB_OOO.replace('tripleomaster',
+ "tripleo{}".format(branch))
+ url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
+ docker_url = urllib.parse.urljoin(docker_url, url_path)
+ logging.debug("docker url is: {}".format(docker_url))
+ docker_output = utils.open_webpage(docker_url, 10)
+ logging.debug('Docker web output: {}'.format(docker_output))
+ hub_mtime = json.loads(docker_output.decode())['last_updated']
+ hub_mtime = re.sub(r'\..*$', '', hub_mtime)
+ # docker modified time is in this format '2018-06-11T15:23:55.135744Z'
+ # and we drop microseconds
+ hub_dtime = datetime.datetime.strptime(hub_mtime, "%Y-%m-%dT%H:%M:%S")
+ if hub_dtime > submitted_date:
+ logging.debug("docker image: {} was last modified at {}, which is"
+ "newer than merge date: {}".format(docker_image,
+ hub_dtime,
+ submitted_date))
+ return True
+ return False
+
+
def add_upstream_patches(patches, image, tmp_dir,
default_branch=os.path.join('stable',
con.DEFAULT_OS_VERSION),
@@ -95,20 +157,29 @@ def add_upstream_patches(patches, image, tmp_dir,
branch = default_branch
patch_diff = build_utils.get_patch(patch['change-id'],
patch['project'], branch)
- if patch_diff:
+ project_path = project_to_path(patch['project'])
+ # If docker tag and python we know this patch belongs on docker
+ # container for a docker service. Therefore we build the dockerfile
+ # and move the patch into the containers directory. We also assume
+ # this builder call is for overcloud, because we do not support
+ # undercloud containers
+ if docker_tag and 'python' in project_path:
+ # Projects map to multiple THT services, need to check which
+ # are supported
+ ooo_docker_services = project_to_docker_image(patch['project'])
+ docker_img = ooo_docker_services[0]
+ else:
+ ooo_docker_services = []
+ docker_img = None
+ change = build_utils.get_change(con.OPENSTACK_GERRIT,
+ patch['project'], branch,
+ patch['change-id'])
+ patch_promoted = is_patch_promoted(change,
+ branch.replace('stable/', ''),
+ docker_img)
+
+ if patch_diff and not patch_promoted:
patch_file = "{}.patch".format(patch['change-id'])
- project_path = project_to_path(patch['project'])
- # If docker tag and python we know this patch belongs on docker
- # container for a docker service. Therefore we build the dockerfile
- # and move the patch into the containers directory. We also assume
- # this builder call is for overcloud, because we do not support
- # undercloud containers
- if docker_tag and 'python' in project_path:
- # Projects map to multiple THT services, need to check which
- # are supported
- ooo_docker_services = project_to_docker_image(patch['project'])
- else:
- ooo_docker_services = []
# If we found services, then we treat the patch like it applies to
# docker only
if ooo_docker_services:
@@ -120,7 +191,7 @@ def add_upstream_patches(patches, image, tmp_dir,
"ADD {} {}".format(patch_file, project_path),
"RUN patch -p1 < {}".format(patch_file)
]
- src_img_uri = "{}:8787/{}/centos-binary-{}:" \
+ src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
"{}".format(uc_ip, os_version, service,
docker_tag)
oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
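Taken together, add_upstream_patches now applies a patch only when Gerrit returns a diff and the change has not already landed in a promoted artifact. A condensed sketch of that decision using the names from this change (the final apply step is a hypothetical stand-in for the virt-customize/dockerfile handling above):

    change = build_utils.get_change(con.OPENSTACK_GERRIT, patch['project'],
                                    branch, patch['change-id'])
    promoted = is_patch_promoted(change, branch.replace('stable/', ''),
                                 docker_img)
    if patch_diff and not promoted:
        apply_patch_to_image_or_container()  # hypothetical helper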
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
index a84d100b..d2a31001 100644
--- a/apex/builders/overcloud_builder.py
+++ b/apex/builders/overcloud_builder.py
@@ -53,7 +53,7 @@ def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
"enabled=1' > /etc/yum.repos.d/opendaylight.repo",
"RUN yum -y install opendaylight"
]
- src_img_uri = "{}:8787/{}/centos-binary-{}:" \
+ src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
"{}".format(uc_ip, os_version, 'opendaylight',
docker_tag)
build_dockerfile('opendaylight', tmp_dir, docker_cmds, src_img_uri)
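The source image URI now includes the tripleo<release> namespace rather than the bare release name. A worked example of the format string above with illustrative values (undercloud IP 192.0.2.1, os_version 'master', the default docker tag):

    src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:{}".format(
        '192.0.2.1', 'master', 'opendaylight', 'current-tripleo-rdo')
    # -> 192.0.2.1:8787/tripleomaster/centos-binary-opendaylight:current-tripleo-rdo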
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index 268bad7f..f82e79d1 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -20,11 +20,6 @@ def add_upstream_packages(image):
:return: None
"""
virt_ops = list()
- # FIXME(trozet): we have to lock to this beta ceph ansible package because
- # the current RPM versioning is wrong and an older package has a higher
- # version than this package. We should change to just 'ceph-ansible'
- # once the package/repo has been fixed. Note: luminous is fine here
- # because Apex will only support container deployment for Queens and later
pkgs = [
'openstack-utils',
'ceph-common',
@@ -34,8 +29,8 @@ def add_upstream_packages(image):
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
- 'http://mirror.centos.org/centos/7/storage/x86_64/ceph-luminous' +
- '/ceph-ansible-3.1.0-0.beta3.1.el7.noarch.rpm'
+ 'ceph-ansible',
+ 'python-tripleoclient'
]
for pkg in pkgs:
diff --git a/apex/clean.py b/apex/clean.py
index f56287e1..3e33c8e4 100644
--- a/apex/clean.py
+++ b/apex/clean.py
@@ -114,7 +114,13 @@ def clean_networks():
logging.debug("Destroying virsh network: {}".format(network))
if virsh_net.isActive():
virsh_net.destroy()
- virsh_net.undefine()
+ try:
+ virsh_net.undefine()
+ except libvirt.libvirtError as e:
+ if 'Network not found' in e.get_error_message():
+ logging.debug('Network already undefined')
+ else:
+ raise
def main():
diff --git a/apex/common/constants.py b/apex/common/constants.py
index 4f72b082..89c3e6e1 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -39,10 +39,13 @@ VIRT_PW = '--root-password'
THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
-THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services-docker')
+THT_DOCKER_ENV_DIR = {
+ 'master': os.path.join(THT_ENV_DIR, 'services'),
+ 'queens': os.path.join(THT_ENV_DIR, 'services-docker')
+}
-DEFAULT_OS_VERSION = 'pike'
-DEFAULT_ODL_VERSION = 'nitrogen'
+DEFAULT_OS_VERSION = 'master'
+DEFAULT_ODL_VERSION = 'oxygen'
VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
CEPH_VERSION_MAP = {'pike': 'jewel',
'queens': 'luminous',
@@ -52,7 +55,8 @@ PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
DEBUG_OVERCLOUD_PW = 'opnfvapex'
NET_ENV_FILE = 'network-environment.yaml'
DEPLOY_TIMEOUT = 90
-UPSTREAM_RDO = 'https://images.rdoproject.org/pike/delorean/current-tripleo/'
+UPSTREAM_RDO = 'https://images.rdoproject.org/master/delorean/current' \
+ '-tripleo-rdo/'
OPENSTACK_GERRIT = 'https://review.openstack.org'
DOCKER_TAG = 'current-tripleo-rdo'
@@ -64,5 +68,5 @@ VALID_DOCKER_SERVICES = {
'neutron-opendaylight-sriov.yaml': None,
'neutron-ml2-ovn.yaml': 'neutron-ovn.yaml'
}
-DOCKERHUB_OOO = ('https://registry.hub.docker.com/v2/repositories'
- '/tripleoupstream/?page_size=1024')
+DOCKERHUB_OOO = 'https://registry.hub.docker.com/v2/repositories' \
+ '/tripleomaster/'
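THT_DOCKER_ENV_DIR becomes a mapping keyed by OpenStack release because the docker service environments moved from environments/services-docker to environments/services on master. Consumers such as get_docker_sdn_file (changed later in this diff) look the directory up by os_version; a small illustrative lookup:

    import os
    from apex.common import constants as con

    tht_dir = con.THT_DOCKER_ENV_DIR['master']
    print(os.path.join(tht_dir, 'neutron-opendaylight.yaml'))
    # /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-opendaylight.yaml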
diff --git a/apex/common/utils.py b/apex/common/utils.py
index cb7cbe13..2ac900a3 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -141,6 +141,28 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
raise Exception(e)
+def get_url_modified_date(url):
+ """
+ Returns the last modified date for a TripleO image artifact
+ :param url: URL to examine
+ :return: datetime object of when artifact was last modified
+ """
+ try:
+ u = urllib.request.urlopen(url)
+ except urllib.error.URLError as e:
+ logging.error("Failed to fetch target url. Error: {}".format(
+ e.reason))
+ raise
+
+ metadata = u.info()
+ headers = metadata.items()
+ for header in headers:
+ if isinstance(header, tuple) and len(header) == 2:
+ if header[0] == 'Last-Modified':
+ return datetime.datetime.strptime(header[1],
+ "%a, %d %b %Y %X GMT")
+
+
def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
"""
Fetches targets from a url destination and downloads them if they are
@@ -171,30 +193,14 @@ def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
if download_target:
logging.debug("Fetching and comparing upstream"
" target: \n{}".format(target_url))
- try:
- u = urllib.request.urlopen(target_url)
- except urllib.error.URLError as e:
- logging.error("Failed to fetch target url. Error: {}".format(
- e.reason))
- raise
# Check if previous file and fetch we need to compare files to
# determine if download is necessary
if target_exists and download_target:
logging.debug("Previous file found: {}".format(target_dest))
- metadata = u.info()
- headers = metadata.items()
- target_url_date = None
- for header in headers:
- if isinstance(header, tuple) and len(header) == 2:
- if header[0] == 'Last-Modified':
- target_url_date = header[1]
- break
+ target_url_date = get_url_modified_date(target_url)
if target_url_date is not None:
target_dest_mtime = os.path.getmtime(target_dest)
- target_url_mtime = time.mktime(
- datetime.datetime.strptime(target_url_date,
- "%a, %d %b %Y %X "
- "GMT").timetuple())
+ target_url_mtime = time.mktime(target_url_date.timetuple())
if target_url_mtime > target_dest_mtime:
logging.debug('URL target is newer than disk...will '
'download')
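get_url_modified_date factors the Last-Modified parsing out of fetch_upstream_and_unpack so it can also be used by is_patch_promoted in common_builder. A minimal sketch of the same comparison the fetch code performs (URL and local path are placeholders):

    import os
    import time
    from apex.common.utils import get_url_modified_date

    url_date = get_url_modified_date(
        'https://images.rdoproject.org/master/delorean/current-tripleo-rdo/'
        'undercloud.qcow2')
    local_mtime = os.path.getmtime('/var/opt/opnfv/images/undercloud.qcow2')
    if time.mktime(url_date.timetuple()) > local_mtime:
        print('URL target is newer than disk...will download')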
diff --git a/apex/deploy.py b/apex/deploy.py
index bc4d0789..635a5d07 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -177,9 +177,11 @@ def create_deploy_parser():
default=False,
help='Use tripleo-quickstart to deploy')
deploy_parser.add_argument('--upstream', action='store_true',
- default=False,
+ default=True,
help='Force deployment to use upstream '
- 'artifacts')
+ 'artifacts. This option is now '
+ 'deprecated and only upstream '
+ 'deployments are supported.')
deploy_parser.add_argument('--no-fetch', action='store_true',
default=False,
help='Ignore fetching latest upstream and '
@@ -341,39 +343,36 @@ def main():
else:
root_pw = None
- upstream = (os_version != constants.DEFAULT_OS_VERSION or
- args.upstream)
+ if not args.upstream:
+ logging.warning("Using upstream is now required for Apex. "
+ "Forcing upstream to true")
if os_version == 'master':
branch = 'master'
else:
branch = "stable/{}".format(os_version)
- if upstream:
- logging.info("Deploying with upstream artifacts for OpenStack "
- "{}".format(os_version))
- args.image_dir = os.path.join(args.image_dir, os_version)
- upstream_url = constants.UPSTREAM_RDO.replace(
- constants.DEFAULT_OS_VERSION, os_version)
- upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
- utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
- upstream_targets,
- fetch=not args.no_fetch)
- sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
- # copy undercloud so we don't taint upstream fetch
- uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
- uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
- shutil.copyfile(uc_fetch_img, uc_image)
- # prep undercloud with required packages
- uc_builder.add_upstream_packages(uc_image)
- # add patches from upstream to undercloud and overcloud
- logging.info('Adding patches to undercloud')
- patches = deploy_settings['global_params']['patches']
- c_builder.add_upstream_patches(patches['undercloud'], uc_image,
- APEX_TEMP_DIR, branch)
- else:
- sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
- uc_image = 'undercloud.qcow2'
- # patches are ignored in non-upstream deployments
- patches = {'overcloud': [], 'undercloud': []}
+
+ logging.info("Deploying with upstream artifacts for OpenStack "
+ "{}".format(os_version))
+ args.image_dir = os.path.join(args.image_dir, os_version)
+ upstream_url = constants.UPSTREAM_RDO.replace(
+ constants.DEFAULT_OS_VERSION, os_version)
+ upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
+ utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+ upstream_targets,
+ fetch=not args.no_fetch)
+ sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ # copy undercloud so we don't taint upstream fetch
+ uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
+ uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
+ shutil.copyfile(uc_fetch_img, uc_image)
+ # prep undercloud with required packages
+ uc_builder.add_upstream_packages(uc_image)
+ # add patches from upstream to undercloud and overcloud
+ logging.info('Adding patches to undercloud')
+ patches = deploy_settings['global_params']['patches']
+ c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+ APEX_TEMP_DIR, branch)
+
# Create/Start Undercloud VM
undercloud = uc_lib.Undercloud(args.image_dir,
args.deploy_dir,
@@ -385,7 +384,7 @@ def main():
undercloud_admin_ip = net_settings['networks'][
constants.ADMIN_NETWORK]['installer_vm']['ip']
- if upstream and ds_opts['containers']:
+ if ds_opts['containers']:
tag = constants.DOCKER_TAG
else:
tag = None
@@ -408,24 +407,25 @@ def main():
net_data_file)
else:
net_data = False
- if upstream and args.env_file == 'opnfv-environment.yaml':
+
+ # TODO(trozet): Either fix opnfv env or default to use upstream env
+ if args.env_file == 'opnfv-environment.yaml':
# Override the env_file if it is defaulted to opnfv
# opnfv env file will not work with upstream
args.env_file = 'upstream-environment.yaml'
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
- if not upstream:
- # TODO(trozet): Invoke with containers after Fraser migration
- oc_deploy.prep_env(deploy_settings, net_settings, inventory,
- opnfv_env, net_env_target, APEX_TEMP_DIR)
- else:
- shutil.copyfile(
- opnfv_env,
- os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
- )
+
+ # TODO(trozet): Invoke with containers after Fraser migration
+ # oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+ # opnfv_env, net_env_target, APEX_TEMP_DIR)
+
+ shutil.copyfile(
+ opnfv_env,
+ os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
+ )
patched_containers = oc_deploy.prep_image(
deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
- root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'],
- upstream=upstream)
+ root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
APEX_TEMP_DIR, args.virtual,
@@ -447,7 +447,6 @@ def main():
patched_containers)
container_vars['container_tag'] = constants.DOCKER_TAG
container_vars['stackrc'] = 'source /home/stack/stackrc'
- container_vars['upstream'] = upstream
container_vars['sdn'] = ds_opts['sdn_controller']
container_vars['undercloud_ip'] = undercloud_admin_ip
container_vars['os_version'] = os_version
@@ -487,7 +486,8 @@ def main():
deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
deploy_vars['stackrc'] = 'source /home/stack/stackrc'
deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
- deploy_vars['upstream'] = upstream
+ deploy_vars['undercloud_ip'] = undercloud_admin_ip
+ deploy_vars['ha_enabled'] = ha_enabled
deploy_vars['os_version'] = os_version
deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
@@ -499,7 +499,10 @@ def main():
user='stack', tmp_dir=APEX_TEMP_DIR)
logging.info("Overcloud deployment complete")
except Exception:
- logging.error("Deployment Failed. Please check log")
+ logging.error("Deployment Failed. Please check deploy log as "
+ "well as mistral logs in "
+ "{}".format(os.path.join(APEX_TEMP_DIR,
+ 'mistral_logs.tar.gz')))
raise
finally:
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
@@ -558,6 +561,7 @@ def main():
deploy_vars['vpn'] = ds_opts['vpn']
deploy_vars['l2gw'] = ds_opts.get('l2gw')
deploy_vars['sriov'] = ds_opts.get('sriov')
+ deploy_vars['tacker'] = ds_opts.get('tacker')
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
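With DEFAULT_OS_VERSION now 'master', the upstream artifact URL is derived by substituting the requested release into UPSTREAM_RDO, mirroring the fetch code above. A worked example of that substitution for a hypothetical queens deployment:

    upstream_url = constants.UPSTREAM_RDO.replace(
        constants.DEFAULT_OS_VERSION, 'queens')
    # https://images.rdoproject.org/master/delorean/current-tripleo-rdo/
    # becomes
    # https://images.rdoproject.org/queens/delorean/current-tripleo-rdo/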
diff --git a/apex/network/network_environment.py b/apex/network/network_environment.py
index ea71e0f3..0a4d1036 100644
--- a/apex/network/network_environment.py
+++ b/apex/network/network_environment.py
@@ -82,7 +82,7 @@ class NetworkEnvironment(dict):
admin_prefix = str(admin_cidr.prefixlen)
self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
self[param_def]['ControlPlaneDefaultRoute'] = \
- nets[ADMIN_NETWORK]['installer_vm']['ip']
+ nets[ADMIN_NETWORK]['gateway']
self[param_def]['EC2MetadataIp'] = \
nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['DnsServers'] = net_settings['dns_servers']
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 90c5cd4b..c7a8e407 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -144,15 +144,16 @@ def get_docker_sdn_file(ds_opts):
"""
# FIXME(trozet): We assume right now there is only one docker SDN file
docker_services = con.VALID_DOCKER_SERVICES
+ tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
for sdn_file in sdn_env_list:
sdn_base = os.path.basename(sdn_file)
if sdn_base in docker_services:
if docker_services[sdn_base] is not None:
- return os.path.join(con.THT_DOCKER_ENV_DIR,
+ return os.path.join(tht_dir,
docker_services[sdn_base])
else:
- return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)
+ return os.path.join(tht_dir, sdn_base)
def create_deploy_cmd(ds, ns, inv, tmp_dir,
@@ -196,7 +197,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
else:
deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
- if ds_opts['ceph']:
+ if ds_opts['ceph'] and 'csit' not in env_file:
prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
'storage-environment.yaml'))
@@ -249,7 +250,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
- patches=None, upstream=False):
+ patches=None):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
@@ -259,7 +260,6 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
:param root_pw: password to configure for overcloud image
:param docker_tag: Docker image tag for RDO version (default None)
:param patches: List of patches to apply to overcloud image
- :param upstream: (boolean) Indicates if upstream deployment or not
:return: None
"""
# TODO(trozet): Come up with a better way to organize this logic in this
@@ -366,35 +366,7 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
logging.debug("Temporary overcloud image stored as: {}".format(
tmp_oc_image))
- # TODO (trozet): remove this if block after Fraser
- if sdn == 'opendaylight' and not upstream:
- if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
- {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
- "/root/puppet-opendaylight-"
- "{}.tar.gz".format(ds_opts['odl_version'])}
- ])
- if ds_opts['odl_version'] == 'master':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
- else:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
-
- elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
- and ds_opts['odl_vpp_netvirt']:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ODL_NETVIRT_VPP_RPM)}
- ])
- elif sdn == 'opendaylight':
+ if sdn == 'opendaylight':
undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
'installer_vm']['ip']
oc_builder.inject_opendaylight(
@@ -422,7 +394,7 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
docker_tag=docker_tag))
# if containers with ceph, and no ceph device we need to use a
# persistent loop device for Ceph OSDs
- if docker_tag and not ds_opts.get('ceph_device', None):
+ if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
with open(tmp_losetup, 'w') as fh:
fh.write(LOSETUP_SERVICE)
@@ -430,7 +402,6 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
{con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
},
{con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
- {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
{con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
{con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
@@ -690,28 +661,26 @@ def prep_storage_env(ds, ns, virtual, tmp_dir):
ceph_params = {
'DockerCephDaemonImage': docker_image,
}
- if not ds['global_params']['ha_enabled']:
- ceph_params['CephPoolDefaultSize'] = 1
+ # max pgs allowed are calculated as num_mons * 200. Therefore we
+ # set the number of pgs and pools so that the total
+ # (num_pgs * num_pools * num_osds) stays below that limit
+ ceph_params['CephPoolDefaultSize'] = 2
+ ceph_params['CephPoolDefaultPgNum'] = 32
if virtual:
ceph_params['CephAnsibleExtraConfig'] = {
'centos_package_dependencies': [],
'ceph_osd_docker_memory_limit': '1g',
'ceph_mds_docker_memory_limit': '1g',
}
- ceph_params['CephPoolDefaultPgNum'] = 32
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
- ceph_device = ds_opts['ceph_device']
- else:
- # TODO(trozet): make this DS default after Fraser
- ceph_device = '/dev/loop3'
-
+ ceph_device = ds_opts['ceph_device']
ceph_params['CephAnsibleDisksConfig'] = {
'devices': [ceph_device],
'journal_size': 512,
'osd_scenario': 'collocated'
}
utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ # TODO(trozet): remove following block as we only support containers now
elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index 4f887ed0..29fe64fb 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -26,11 +26,11 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
'os_version',
'l2gw',
'sriov',
- 'containers']
+ 'containers',
+ 'ceph_device']
OPT_DEPLOY_SETTINGS = ['performance',
'vsperf',
- 'ceph_device',
'yardstick',
'dovetail',
'odl_vpp_routing_node',
@@ -105,6 +105,8 @@ class DeploySettings(dict):
self['deploy_options'][req_set] = 'ovs'
elif req_set == 'ceph':
self['deploy_options'][req_set] = True
+ elif req_set == 'ceph_device':
+ self['deploy_options'][req_set] = '/dev/loop3'
elif req_set == 'odl_version':
self['deploy_options'][req_set] = \
constants.DEFAULT_ODL_VERSION
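Moving ceph_device from the optional to the required deploy settings means it is always populated after parsing, defaulting to /dev/loop3, which is the value prep_image now checks before installing the losetup service. A small illustrative read (the settings file name is hypothetical):

    from apex.settings.deploy_settings import DeploySettings

    ds = DeploySettings('os-odl-nofeature-noha.yaml')
    print(ds['deploy_options']['ceph_device'])  # '/dev/loop3' unless overridden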
diff --git a/apex/settings/network_settings.py b/apex/settings/network_settings.py
index f6566834..36d143cb 100644
--- a/apex/settings/network_settings.py
+++ b/apex/settings/network_settings.py
@@ -167,10 +167,13 @@ class NetworkSettings(dict):
"""
_network = self.get_network(network)
# if vlan not defined then default it to native
- if network is not ADMIN_NETWORK:
- for role in ROLES:
+ for role in ROLES:
+ if network is not ADMIN_NETWORK:
if 'vlan' not in _network['nic_mapping'][role]:
_network['nic_mapping'][role]['vlan'] = 'native'
+ else:
+ # ctlplane network must be native
+ _network['nic_mapping'][role]['vlan'] = 'native'
cidr = _network.get('cidr')
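With this change the admin (ctlplane) network always ends up with a native vlan in its nic_mapping, regardless of what the settings file declares, while other networks still only default to native when no vlan is given. The new unit test below exercises exactly this; a condensed sketch of the resulting check (the settings file name is hypothetical):

    from apex.settings.network_settings import NetworkSettings

    ns = NetworkSettings('network_settings.yaml')
    for role in ('compute', 'controller'):
        assert ns['networks']['admin']['nic_mapping'][role]['vlan'] == 'native'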
diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py
index d501746c..09bd2545 100644
--- a/apex/tests/test_apex_common_builder.py
+++ b/apex/tests/test_apex_common_builder.py
@@ -48,13 +48,50 @@ class TestCommonBuilder(unittest.TestCase):
path = '/etc/puppet/modules/tripleo'
self.assertEquals(c_builder.project_to_path(project), path)
project = 'openstack/nova'
- path = '/usr/lib/python2.7/site-packages/nova'
+ path = '/usr/lib/python2.7/site-packages/'
self.assertEquals(c_builder.project_to_path(project), path)
+ def test_is_patch_promoted(self):
+ dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ self.assertTrue(c_builder.is_patch_promoted(dummy_change,
+ 'master'))
+
+ def test_is_patch_promoted_docker(self):
+ dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ dummy_image = 'centos-binary-opendaylight'
+ self.assertTrue(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ docker_image=dummy_image))
+
+ def test_patch_not_promoted(self):
+ dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ self.assertFalse(c_builder.is_patch_promoted(dummy_change,
+ 'master'))
+
+ def test_patch_not_promoted_docker(self):
+ dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
+ 'status': 'MERGED'}
+ dummy_image = 'centos-binary-opendaylight'
+ self.assertFalse(c_builder.is_patch_promoted(dummy_change,
+ 'master',
+ docker_image=dummy_image))
+
+ def test_patch_not_promoted_and_not_merged(self):
+ dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
+ 'status': 'BLAH'}
+ self.assertFalse(c_builder.is_patch_promoted(dummy_change,
+ 'master'))
+
@patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
@patch('apex.build_utils.get_patch')
@patch('apex.virtual.utils.virt_customize')
- def test_add_upstream_patches(self, mock_customize, mock_get_patch):
+ def test_add_upstream_patches(self, mock_customize, mock_get_patch,
+ mock_get_change, mock_is_patch_promoted):
mock_get_patch.return_value = None
change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
patches = [{
@@ -73,14 +110,18 @@ class TestCommonBuilder(unittest.TestCase):
{con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
project_path, patch_file)}]
mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = False
c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/')
mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
@patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
@patch('apex.build_utils.get_patch')
@patch('apex.virtual.utils.virt_customize')
def test_add_upstream_patches_docker_puppet(
- self, mock_customize, mock_get_patch):
+ self, mock_customize, mock_get_patch, mock_get_change,
+ mock_is_patch_promoted):
change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
patches = [{
'change-id': change_id,
@@ -96,19 +137,22 @@ class TestCommonBuilder(unittest.TestCase):
{con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
project_path, patch_file)}]
mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = False
c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/',
uc_ip='192.0.2.1',
docker_tag='latest')
mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
@patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
@patch('apex.builders.common_builder.project_to_docker_image')
@patch('apex.builders.overcloud_builder.build_dockerfile')
@patch('apex.build_utils.get_patch')
@patch('apex.virtual.utils.virt_customize')
def test_add_upstream_patches_docker_python(
self, mock_customize, mock_get_patch, mock_build_docker_file,
- mock_project2docker):
+ mock_project2docker, mock_get_change, mock_is_patch_promoted):
mock_project2docker.return_value = ['NovaApi']
change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
patches = [{
@@ -116,6 +160,7 @@ class TestCommonBuilder(unittest.TestCase):
'project': 'openstack/nova'
}]
mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = False
services = c_builder.add_upstream_patches(patches, 'dummy.qcow2',
'/dummytmp/',
uc_ip='192.0.2.1',
@@ -125,6 +170,56 @@ class TestCommonBuilder(unittest.TestCase):
self.assertSetEqual(services, {'NovaApi'})
@patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
+ @patch('apex.builders.common_builder.project_to_docker_image')
+ @patch('apex.builders.overcloud_builder.build_dockerfile')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_not_add_upstream_patches_docker_python(
+ self, mock_customize, mock_get_patch, mock_build_docker_file,
+ mock_project2docker, mock_get_change, mock_is_patch_promoted):
+ # Test that the calls are not made when the patch is already merged and
+ # promoted
+ mock_project2docker.return_value = ['NovaApi']
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/nova'
+ }]
+ mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = True
+ services = c_builder.add_upstream_patches(patches, 'dummy.qcow2',
+ '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ assert mock_customize.not_called
+ assert mock_build_docker_file.not_called
+ assert len(services) == 0
+
+ @patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.is_patch_promoted')
+ @patch('apex.build_utils.get_change')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_not_upstream_patches_docker_puppet(
+ self, mock_customize, mock_get_patch, mock_get_change,
+ mock_is_patch_promoted):
+ # Test that the calls are not made when the patch is already merged and
+ # promoted
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/puppet-tripleo'
+ }]
+ mock_get_patch.return_value = 'some random diff'
+ mock_is_patch_promoted.return_value = True
+ c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ assert mock_customize.not_called
+
+ @patch('builtins.open', mock_open())
@patch('apex.virtual.utils.virt_customize')
def test_add_repo(self, mock_customize):
c_builder.add_repo('fake/url', 'dummyrepo', 'dummy.qcow2',
diff --git a/apex/tests/test_apex_deploy.py b/apex/tests/test_apex_deploy.py
index 6c2a185d..b7941f6f 100644
--- a/apex/tests/test_apex_deploy.py
+++ b/apex/tests/test_apex_deploy.py
@@ -107,6 +107,7 @@ class TestDeploy(unittest.TestCase):
args.virtual = True
assert_raises(ApexDeployException, validate_deploy_args, args)
+ @patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@patch('apex.deploy.shutil')
@patch('apex.deploy.oc_deploy')
@@ -132,7 +133,8 @@ class TestDeploy(unittest.TestCase):
mock_deploy_sets, mock_net_sets, mock_net_env,
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
- mock_oc_deploy, mock_shutil, mock_network_data):
+ mock_oc_deploy, mock_shutil, mock_network_data,
+ mock_uc_builder):
net_sets_dict = {'networks': MagicMock(),
'dns_servers': 'test'}
ds_opts_dict = {'global_params': MagicMock(),
@@ -149,7 +151,7 @@ class TestDeploy(unittest.TestCase):
args.virtual = False
args.quickstart = False
args.debug = False
- args.upstream = False
+ args.upstream = True
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['external']
net_sets.__getitem__.side_effect = net_sets_dict.__getitem__
@@ -179,6 +181,7 @@ class TestDeploy(unittest.TestCase):
args.debug = True
main()
+ @patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@patch('apex.deploy.shutil')
@patch('apex.deploy.oc_deploy')
@@ -204,7 +207,8 @@ class TestDeploy(unittest.TestCase):
mock_deploy_sets, mock_net_sets, mock_net_env,
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
- mock_oc_deploy, mock_shutil, mock_network_data):
+ mock_oc_deploy, mock_shutil, mock_network_data,
+ mock_uc_builder):
# didn't work yet line 412
# net_sets_dict = {'networks': {'admin': {'cidr': MagicMock()}},
# 'dns_servers': 'test'}
@@ -228,7 +232,7 @@ class TestDeploy(unittest.TestCase):
args.virt_compute_nodes = 1
args.virt_compute_ram = None
args.virt_default_ram = 12
- args.upstream = False
+ args.upstream = True
net_sets = mock_net_sets.return_value
net_sets.enabled_network_list = ['admin']
deploy_sets = mock_deploy_sets.return_value
diff --git a/apex/tests/test_apex_network_settings.py b/apex/tests/test_apex_network_settings.py
index 5e2fa072..764c9ef4 100644
--- a/apex/tests/test_apex_network_settings.py
+++ b/apex/tests/test_apex_network_settings.py
@@ -112,6 +112,9 @@ class TestNetworkSettings:
# remove vlan from storage net
storage_net_nicmap['compute'].pop('vlan', None)
assert_is_instance(NetworkSettings(ns), NetworkSettings)
+ for role in ('compute', 'controller'):
+ assert_equal(ns['networks'][ADMIN_NETWORK]['nic_mapping'][
+ role]['vlan'], 'native')
# TODO
# need to manipulate interfaces some how
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index ae2e8f0b..83e2b02d 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -134,7 +134,8 @@ class TestOvercloudDeploy(unittest.TestCase):
'barometer': False,
'ceph': True,
'sdn_controller': 'opendaylight',
- 'sriov': False
+ 'sriov': False,
+ 'os_version': 'queens'
},
'global_params': MagicMock()}
@@ -158,6 +159,10 @@ class TestOvercloudDeploy(unittest.TestCase):
'storage-environment.yaml', result_cmd)
assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
'/services-docker/neutron-opendaylight.yaml', result_cmd)
+ ds['deploy_options']['os_version'] = 'master'
+ result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+ '/services/neutron-opendaylight.yaml', result_cmd)
@patch('apex.overcloud.deploy.prep_sriov_env')
@patch('apex.overcloud.deploy.prep_storage_env')
@@ -170,6 +175,8 @@ class TestOvercloudDeploy(unittest.TestCase):
'global_params': MagicMock()}
ds['global_params'].__getitem__.side_effect = \
lambda i: False if i == 'ha_enabled' else MagicMock()
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: 'master' if i == 'os_version' else MagicMock()
ns = {'ntp': ['ntp']}
inv = MagicMock()
inv.get_node_counts.return_value = (3, 2)
@@ -191,6 +198,8 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_sdn_list.return_value = []
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: 'master' if i == 'os_version' else MagicMock()
ns = {}
inv = MagicMock()
inv.get_node_counts.return_value = (0, 0)
@@ -198,11 +207,13 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_raises(ApexDeployException, create_deploy_cmd,
ds, ns, inv, '/tmp', virt)
+ @patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@patch('apex.overcloud.deploy.os.path')
@patch('builtins.open', mock_open())
- def test_prep_image(self, mock_os_path, mock_shutil, mock_virt_utils):
+ def test_prep_image(self, mock_os_path, mock_shutil, mock_virt_utils,
+ mock_inject_odl):
ds_opts = {'dataplane': 'fdio',
'sdn_controller': 'opendaylight',
'odl_version': 'master',
@@ -214,6 +225,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ mock_inject_odl.assert_called()
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@@ -231,12 +243,13 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@patch('apex.overcloud.deploy.os.path')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl(self, mock_os_path, mock_shutil,
- mock_virt_utils):
+ mock_virt_utils, mock_inject_odl):
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
'odl_version': con.DEFAULT_ODL_VERSION,
@@ -250,6 +263,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ mock_inject_odl.assert_called()
@patch('apex.overcloud.deploy.c_builder')
@patch('apex.overcloud.deploy.oc_builder')
@@ -274,18 +288,20 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_c_builder.add_upstream_patches.return_value = ['nova-api']
patches = ['dummy_nova_patch']
rv = prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test',
- docker_tag='latest', patches=patches, upstream=True)
- mock_oc_builder.inject_opendaylight.assert_called
+ docker_tag='latest', patches=patches)
+ mock_oc_builder.inject_opendaylight.assert_called()
mock_virt_utils.virt_customize.assert_called()
- mock_c_builder.add_upstream_patches.assert_called
+ mock_c_builder.add_upstream_patches.assert_called()
self.assertListEqual(sorted(rv), ['nova-api', 'opendaylight'])
+ @patch('apex.overcloud.deploy.oc_builder')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@patch('apex.overcloud.deploy.os.path')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl_not_def(self, mock_os_path,
- mock_shutil, mock_virt_utils):
+ mock_shutil, mock_virt_utils,
+ mock_oc_builder):
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
'odl_version': 'uncommon'}
@@ -296,6 +312,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ mock_oc_builder.inject_opendaylight.assert_called()
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@@ -490,7 +507,7 @@ class TestOvercloudDeploy(unittest.TestCase):
'DockerCephDaemonImage':
'192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos'
'-7',
- 'CephPoolDefaultSize': 1,
+ 'CephPoolDefaultSize': 2,
'CephAnsibleExtraConfig': {
'centos_package_dependencies': [],
'ceph_osd_docker_memory_limit': '1g',
@@ -648,7 +665,8 @@ class TestOvercloudDeploy(unittest.TestCase):
'containers': False,
'barometer': True,
'ceph': False,
- 'sdn_controller': 'opendaylight'
+ 'sdn_controller': 'opendaylight',
+ 'os_version': 'queens'
}
output = get_docker_sdn_file(ds_opts)
self.assertEqual(output,
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index fce7a557..9bc91e51 100644
--- a/apex/tests/test_apex_undercloud.py
+++ b/apex/tests/test_apex_undercloud.py
@@ -24,6 +24,7 @@ from nose.tools import (
assert_regexp_matches,
assert_raises,
assert_true,
+ assert_false,
assert_equal)
@@ -118,6 +119,105 @@ class TestUndercloud(unittest.TestCase):
@patch.object(Undercloud, 'generate_config', return_value={})
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
+ def test_detect_nat_with_external(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30'],
+ 'gateway': '192.1.1.1',
+ },
+ 'external':
+ [{'enabled': True,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'},
+ 'gateway': '192.168.0.1'
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ assert_true(uc.detect_nat(ns))
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_detect_nat_no_external(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30'],
+ 'gateway': '192.0.2.1',
+ },
+ 'external':
+ [{'enabled': False,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'},
+ 'gateway': '192.168.1.1'
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+
+ uc = Undercloud('img_path', 'tplt_path', external_network=False)
+ assert_true(uc.detect_nat(ns))
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_detect_no_nat_no_external(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30'],
+ 'gateway': '192.0.2.3',
+ },
+ 'external':
+ [{'enabled': False,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'},
+ 'gateway': '192.168.1.1'
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+
+ uc = Undercloud('img_path', 'tplt_path', external_network=False)
+ assert_false(uc.detect_nat(ns))
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
def test_configure(self, mock_create, mock_get_vm,
mock_generate_config, mock_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index d76174b0..d2de2de1 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -75,7 +75,6 @@ class Undercloud:
template_dir=self.template_path)
self.setup_volumes()
self.inject_auth()
- self._update_delorean_repo()
@staticmethod
def _get_ip(vm):
@@ -125,6 +124,16 @@ class Undercloud:
"Unable to find IP for undercloud. Check if VM booted "
"correctly")
+ def detect_nat(self, net_settings):
+ if self.external_net:
+ net = net_settings['networks'][constants.EXTERNAL_NETWORK][0]
+ else:
+ net = net_settings['networks'][constants.ADMIN_NETWORK]
+ if net['gateway'] == net['installer_vm']['ip']:
+ return True
+ else:
+ return False
+
def configure(self, net_settings, deploy_settings,
playbook, apex_temp_dir, virtual_oc=False):
"""
@@ -142,7 +151,8 @@ class Undercloud:
ansible_vars = Undercloud.generate_config(net_settings,
deploy_settings)
ansible_vars['apex_temp_dir'] = apex_temp_dir
- ansible_vars['virtual_overcloud'] = virtual_oc
+
+ ansible_vars['nat'] = self.detect_nat(net_settings)
try:
utils.run_ansible(ansible_vars, playbook, host=self.ip,
user='stack')
@@ -228,7 +238,8 @@ class Undercloud:
"network_cidr {}".format(str(ns_admin['cidr'])),
"dhcp_start {}".format(str(ns_admin['dhcp_range'][0])),
"dhcp_end {}".format(str(ns_admin['dhcp_range'][1])),
- "inspection_iprange {}".format(','.join(intro_range))
+ "inspection_iprange {}".format(','.join(intro_range)),
+ "generate_service_certificate false"
]
config['ironic_config'] = [
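detect_nat treats the deployment as NATed when the gateway of the routable network (the external network if enabled, otherwise admin) is the installer VM itself, and that result is what the new 'nat' ansible variable carries into the undercloud playbook. A condensed sketch of the check against a minimal settings fragment (values taken from the new unit tests above):

    net = {'gateway': '192.168.0.1', 'installer_vm': {'ip': '192.168.0.1'}}
    nat = net['gateway'] == net['installer_vm']['ip']  # True -> undercloud provides NAT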