Diffstat (limited to 'apex/builders')
-rw-r--r--  apex/builders/common_builder.py     | 101
-rw-r--r--  apex/builders/overcloud_builder.py  |   5
-rw-r--r--  apex/builders/undercloud_builder.py |  52
3 files changed, 142 insertions(+), 16 deletions(-)
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index b8894ec1..59af94cd 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -14,8 +14,11 @@ import git
import json
import logging
import os
+import platform
+import pprint
import re
import urllib.parse
+import yaml
import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
@@ -56,17 +59,19 @@ def project_to_path(project, patch=None):
return "/usr/lib/python2.7/site-packages/"
-def project_to_docker_image(project):
+def project_to_docker_image(project, docker_url):
"""
Translates OpenStack project to OOO services that are containerized
-    :param project: name of OpenStack project
+    :param project: short name of OpenStack project
+    :param docker_url: docker hub URL to search for OOO container images
:return: List of OOO docker service names
"""
# Fetch all docker containers in docker hub with tripleo and filter
# based on project
-
+ logging.info("Checking for docker images matching project: {}".format(
+ project))
hub_output = utils.open_webpage(
- urllib.parse.urljoin(con.DOCKERHUB_OOO, '?page_size=1024'), timeout=10)
+ urllib.parse.urljoin(docker_url,
+ '?page_size=1024'), timeout=10)
try:
results = json.loads(hub_output.decode())['results']
except Exception as e:
@@ -81,12 +86,14 @@ def project_to_docker_image(project):
for result in results:
if result['name'].startswith("centos-binary-{}".format(project)):
# add as docker image shortname (just service name)
+ logging.debug("Adding docker image {} for project {} for "
+ "patching".format(result['name'], project))
docker_images.append(result['name'].replace('centos-binary-', ''))
return docker_images
-def is_patch_promoted(change, branch, docker_image=None):
+def is_patch_promoted(change, branch, docker_url, docker_image=None):
"""
Checks to see if a merged patch exists in either the docker
container or the promoted tripleo images
@@ -119,8 +126,8 @@ def is_patch_promoted(change, branch, docker_image=None):
return True
else:
# must be a docker patch, check docker tag modified time
- docker_url = con.DOCKERHUB_OOO.replace('tripleomaster',
- "tripleo{}".format(branch))
+ docker_url = docker_url.replace('tripleomaster',
+ "tripleo{}".format(branch))
url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
docker_url = urllib.parse.urljoin(docker_url, url_path)
logging.debug("docker url is: {}".format(docker_url))
@@ -173,10 +180,23 @@ def add_upstream_patches(patches, image, tmp_dir,
# and move the patch into the containers directory. We also assume
# this builder call is for overcloud, because we do not support
# undercloud containers
+ if platform.machine() == 'aarch64':
+ docker_url = con.DOCKERHUB_AARCH64
+ else:
+ docker_url = con.DOCKERHUB_OOO
if docker_tag and 'python' in project_path:
# Projects map to multiple THT services, need to check which
# are supported
- ooo_docker_services = project_to_docker_image(patch['project'])
+ project_short_name = os.path.basename(patch['project'])
+ ooo_docker_services = project_to_docker_image(project_short_name,
+ docker_url)
+ if not ooo_docker_services:
+ logging.error("Did not find any matching docker containers "
+ "for project: {}".format(project_short_name))
+ raise exc.ApexCommonBuilderException(
+ 'Unable to find docker services for python project in '
+ 'patch')
+ # Just use the first image to see if patch was promoted into it
docker_img = ooo_docker_services[0]
else:
ooo_docker_services = []
@@ -186,28 +206,43 @@ def add_upstream_patches(patches, image, tmp_dir,
patch['change-id'])
patch_promoted = is_patch_promoted(change,
branch.replace('stable/', ''),
+ docker_url,
docker_img)
if patch_diff and not patch_promoted:
patch_file = "{}.patch".format(patch['change-id'])
+ patch_file_paths = []
# If we found services, then we treat the patch like it applies to
# docker only
if ooo_docker_services:
os_version = default_branch.replace('stable/', '')
for service in ooo_docker_services:
docker_services = docker_services.union({service})
+                    # We need to become root to be able to install the
+                    # patch and then switch back to the previous user.
+                    # Some containers that have the same name as the
+                    # project do not necessarily contain the project code.
+                    # For example novajoin-notifier does not contain nova
+                    # package code. Therefore we must try to patch and
+                    # unfortunately ignore failures until we have a better
+                    # way of checking this.
docker_cmds = [
"WORKDIR {}".format(project_path),
+ "USER root",
+ "ARG REAL_USER",
+ "RUN yum -y install patch",
"ADD {} {}".format(patch_file, project_path),
- "RUN patch -p1 < {}".format(patch_file)
+ "RUN patch -p1 < {} || echo "
+ "'Patching failed'".format(patch_file),
+ "USER $REAL_USER"
]
src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
"{}".format(uc_ip, os_version, service,
docker_tag)
oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
src_img_uri)
- patch_file_path = os.path.join(tmp_dir, 'containers',
- patch_file)
+ patch_file_paths.append(os.path.join(
+ tmp_dir, "containers/{}".format(service), patch_file))
else:
patch_file_path = os.path.join(tmp_dir, patch_file)
virt_ops.extend([
@@ -217,8 +252,10 @@ def add_upstream_patches(patches, image, tmp_dir,
project_path, patch_file)}])
logging.info("Adding patch {} to {}".format(patch_file,
image))
- with open(patch_file_path, 'w') as fh:
- fh.write(patch_diff)
+ patch_file_paths.append(patch_file_path)
+ for patch_fp in patch_file_paths:
+ with open(patch_fp, 'w') as fh:
+ fh.write(patch_diff)
else:
logging.info("Ignoring patch:\n{}".format(patch))
if len(virt_ops) > 1:
@@ -258,3 +295,41 @@ def create_git_archive(repo_url, repo_name, tmp_dir,
repo.archive(fh, prefix=prefix)
logging.debug("Wrote archive file: {}".format(archive_path))
return archive_path
+
+
+def get_neutron_driver(ds_opts):
+ sdn = ds_opts.get('sdn_controller', None)
+
+ if sdn == 'opendaylight':
+ return 'odl'
+ elif sdn == 'ovn':
+ return sdn
+ elif ds_opts.get('vpp', False):
+ return 'vpp'
+ else:
+ return None
+
+
+def prepare_container_images(prep_file, branch='master', neutron_driver=None):
+ if not os.path.isfile(prep_file):
+ raise exc.ApexCommonBuilderException("Prep file does not exist: "
+ "{}".format(prep_file))
+ with open(prep_file) as fh:
+ data = yaml.safe_load(fh)
+ try:
+ p_set = data['parameter_defaults']['ContainerImagePrepare'][0]['set']
+ if neutron_driver:
+ p_set['neutron_driver'] = neutron_driver
+ p_set['namespace'] = "docker.io/tripleo{}".format(branch)
+ if platform.machine() == 'aarch64':
+ p_set['namespace'] = "docker.io/armbandapex"
+ p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64'
+
+ except KeyError:
+ logging.error("Invalid prep file format: {}".format(prep_file))
+ raise exc.ApexCommonBuilderException("Invalid format for prep file")
+
+ logging.debug("Writing new container prep file:\n{}".format(
+ pprint.pformat(data)))
+ with open(prep_file, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
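For reference, a minimal usage sketch of the new common_builder helpers. The deploy settings values, branch, and prep file path below are illustrative assumptions, not part of this change:

import apex.builders.common_builder as c_builder

# hypothetical deploy settings; 'sdn_controller' drives the neutron driver
ds_opts = {'sdn_controller': 'opendaylight', 'vpp': False}
neutron_driver = c_builder.get_neutron_driver(ds_opts)  # returns 'odl'

# rewrites the ContainerImagePrepare 'set' section in place: namespace,
# neutron_driver and, on aarch64, the ceph_tag are updated
c_builder.prepare_container_images('/tmp/container-image-prepare.yaml',
                                   branch='queens',
                                   neutron_driver=neutron_driver)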
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
index a74ec252..eab8fb66 100644
--- a/apex/builders/overcloud_builder.py
+++ b/apex/builders/overcloud_builder.py
@@ -25,7 +25,12 @@ def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
assert odl_version in con.VALID_ODL_VERSIONS
# add repo
if odl_version == 'master':
+        # The last entry in VALID_ODL_VERSIONS is "master", but there is no
+        # "master" ODL package version, so select the second-to-last entry
odl_pkg_version = con.VALID_ODL_VERSIONS[-2]
+        # branch will be used to pull puppet-opendaylight. Since
+        # puppet-opendaylight is not pulled until later, we need to use its
+        # master version when the master ODL version is specified
branch = odl_version
else:
odl_pkg_version = odl_version
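A small sketch of the version selection above; the VALID_ODL_VERSIONS list shown here is an assumed example, not the real constant:

VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']  # assumed

odl_version = 'master'
if odl_version == 'master':
    # no 'master' package version exists, so fall back to the newest release
    odl_pkg_version = VALID_ODL_VERSIONS[-2]  # 'oxygen'
    # puppet-opendaylight is still pulled from its master branch
    branch = odl_version
else:
    odl_pkg_version = odl_version
    branch = odl_version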
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index 4efd00d5..47d2568d 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -9,7 +9,9 @@
# Used to modify undercloud qcow2 image
import logging
+import json
import os
+import subprocess
from apex.common import constants as con
from apex.common import utils
@@ -26,16 +28,17 @@ def add_upstream_packages(image):
pkgs = [
'epel-release',
'openstack-utils',
- 'ceph-common',
'python2-networking-sfc',
'openstack-ironic-inspector',
'subunit-filters',
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
- 'ceph-ansible',
- 'python-tripleoclient'
+ 'python-tripleoclient',
+ 'openstack-tripleo-heat-templates'
]
+ # Remove incompatible python-docker version
+ virt_ops.append({con.VIRT_RUN_CMD: "yum remove -y python-docker-py"})
for pkg in pkgs:
virt_ops.append({con.VIRT_INSTALL: pkg})
@@ -59,3 +62,46 @@ def inject_calipso_installer(tmp_dir, image):
# TODO(trozet): add unit testing for calipso injector
# TODO(trozet): add rest of build for undercloud here as well
+
+
+def update_repos(image, branch):
+ virt_ops = [
+ {con.VIRT_RUN_CMD: "rm -f /etc/yum.repos.d/delorean*"},
+ {con.VIRT_RUN_CMD: "yum-config-manager --add-repo "
+ "https://trunk.rdoproject.org/centos7/{}"
+ "/delorean.repo".format(con.RDO_TAG)},
+ {con.VIRT_RUN_CMD: "yum clean all"},
+ {con.VIRT_INSTALL: "python2-tripleo-repos"},
+ {con.VIRT_RUN_CMD: "tripleo-repos -b {} {} ceph".format(branch,
+ con.RDO_TAG)}
+ ]
+ virt_utils.virt_customize(virt_ops, image)
+
+
+def expand_disk(image, desired_size=50):
+ """
+    Expands a disk image to desired_size (in gigabytes)
+ :param image: image to resize
+ :param desired_size: desired size in GB
+ :return: None
+ """
+ # there is a lib called vminspect which has some dependencies and is
+ # not yet available in pip. Consider switching to this lib later.
+ try:
+ img_out = json.loads(subprocess.check_output(
+ ['qemu-img', 'info', '--output=json', image],
+ stderr=subprocess.STDOUT).decode())
+ disk_gb_size = int(img_out['virtual-size'] / 1000000000)
+ if disk_gb_size < desired_size:
+            logging.info("Expanding disk image: {}. Current size: {} is less "
+                         "than required size: {}".format(image, disk_gb_size,
+                                                         desired_size))
+ diff_size = desired_size - disk_gb_size
+ subprocess.check_call(['qemu-img', 'resize', image,
+ "+{}G".format(diff_size)],
+ stderr=subprocess.STDOUT)
+
+ except (subprocess.CalledProcessError, json.JSONDecodeError, KeyError) \
+ as e:
+ logging.warning("Unable to resize disk, disk may not be large "
+ "enough: {}".format(e))
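As a usage sketch of the new undercloud builder helpers; the image path and branch are assumptions for illustration:

import apex.builders.undercloud_builder as uc_builder

image = '/var/lib/libvirt/images/undercloud.qcow2'  # assumed path

# point the image at the RDO trunk repos for the given branch
uc_builder.update_repos(image, branch='queens')

# grow the qcow2 virtual size to 50 GB (the default) if it is smaller,
# roughly equivalent to: qemu-img resize <image> +<diff>G
uc_builder.expand_disk(image, desired_size=50)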