Diffstat (limited to 'apex')
 apex/builders/common_builder.py             | 101
 apex/builders/overcloud_builder.py          |   5
 apex/builders/undercloud_builder.py         |  52
 apex/common/constants.py                    |  16
 apex/common/utils.py                        |  12
 apex/deploy.py                              |  86
 apex/network/network_data.py                |   2
 apex/network/network_environment.py         |   9
 apex/overcloud/deploy.py                    |  73
 apex/settings/deploy_settings.py            |   3
 apex/tests/config/98faaca.diff              |   2
 apex/tests/test_apex_common_builder.py      |  69
 apex/tests/test_apex_common_utils.py        |   7
 apex/tests/test_apex_deploy.py              |  10
 apex/tests/test_apex_network_environment.py |   7
 apex/tests/test_apex_overcloud_deploy.py    |  17
 apex/tests/test_apex_undercloud.py          |  22
 apex/undercloud/undercloud.py               |  42
 apex/virtual/configure_vm.py                |   9
 19 files changed, 454 insertions(+), 90 deletions(-)
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index b8894ec1..59af94cd 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -14,8 +14,11 @@ import git
import json
import logging
import os
+import platform
+import pprint
import re
import urllib.parse
+import yaml
import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
@@ -56,17 +59,19 @@ def project_to_path(project, patch=None):
return "/usr/lib/python2.7/site-packages/"
-def project_to_docker_image(project):
+def project_to_docker_image(project, docker_url):
"""
Translates OpenStack project to OOO services that are containerized
- :param project: name of OpenStack project
+ :param project: short name of OpenStack project
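+    :param docker_url: docker hub url to use when searching for images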
:return: List of OOO docker service names
"""
# Fetch all docker containers in docker hub with tripleo and filter
# based on project
-
+ logging.info("Checking for docker images matching project: {}".format(
+ project))
hub_output = utils.open_webpage(
- urllib.parse.urljoin(con.DOCKERHUB_OOO, '?page_size=1024'), timeout=10)
+ urllib.parse.urljoin(docker_url,
+ '?page_size=1024'), timeout=10)
try:
results = json.loads(hub_output.decode())['results']
except Exception as e:
@@ -81,12 +86,14 @@ def project_to_docker_image(project):
for result in results:
if result['name'].startswith("centos-binary-{}".format(project)):
# add as docker image shortname (just service name)
+ logging.debug("Adding docker image {} for project {} for "
+ "patching".format(result['name'], project))
docker_images.append(result['name'].replace('centos-binary-', ''))
return docker_images
-def is_patch_promoted(change, branch, docker_image=None):
+def is_patch_promoted(change, branch, docker_url, docker_image=None):
"""
Checks to see if a merged patch exists in either the docker
container or the promoted tripleo images
@@ -119,8 +126,8 @@ def is_patch_promoted(change, branch, docker_image=None):
return True
else:
# must be a docker patch, check docker tag modified time
- docker_url = con.DOCKERHUB_OOO.replace('tripleomaster',
- "tripleo{}".format(branch))
+ docker_url = docker_url.replace('tripleomaster',
+ "tripleo{}".format(branch))
url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
docker_url = urllib.parse.urljoin(docker_url, url_path)
logging.debug("docker url is: {}".format(docker_url))
@@ -173,10 +180,23 @@ def add_upstream_patches(patches, image, tmp_dir,
# and move the patch into the containers directory. We also assume
# this builder call is for overcloud, because we do not support
# undercloud containers
+ if platform.machine() == 'aarch64':
+ docker_url = con.DOCKERHUB_AARCH64
+ else:
+ docker_url = con.DOCKERHUB_OOO
if docker_tag and 'python' in project_path:
# Projects map to multiple THT services, need to check which
# are supported
- ooo_docker_services = project_to_docker_image(patch['project'])
+ project_short_name = os.path.basename(patch['project'])
+ ooo_docker_services = project_to_docker_image(project_short_name,
+ docker_url)
+ if not ooo_docker_services:
+ logging.error("Did not find any matching docker containers "
+ "for project: {}".format(project_short_name))
+ raise exc.ApexCommonBuilderException(
+ 'Unable to find docker services for python project in '
+ 'patch')
+ # Just use the first image to see if patch was promoted into it
docker_img = ooo_docker_services[0]
else:
ooo_docker_services = []
@@ -186,28 +206,43 @@ def add_upstream_patches(patches, image, tmp_dir,
patch['change-id'])
patch_promoted = is_patch_promoted(change,
branch.replace('stable/', ''),
+ docker_url,
docker_img)
if patch_diff and not patch_promoted:
patch_file = "{}.patch".format(patch['change-id'])
+ patch_file_paths = []
# If we found services, then we treat the patch like it applies to
# docker only
if ooo_docker_services:
os_version = default_branch.replace('stable/', '')
for service in ooo_docker_services:
docker_services = docker_services.union({service})
+                    # We need to become root to install the patch and
+                    # then switch back to the previous user. Some
+                    # containers that share a name with the project do
+                    # not necessarily contain the project code. For
+                    # example, novajoin-notifier does not contain nova
+                    # package code. Therefore we must try to patch and
+                    # unfortunately ignore failures until we have a
+                    # better way of checking this
docker_cmds = [
"WORKDIR {}".format(project_path),
+ "USER root",
+ "ARG REAL_USER",
+ "RUN yum -y install patch",
"ADD {} {}".format(patch_file, project_path),
- "RUN patch -p1 < {}".format(patch_file)
+ "RUN patch -p1 < {} || echo "
+ "'Patching failed'".format(patch_file),
+ "USER $REAL_USER"
]
src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
"{}".format(uc_ip, os_version, service,
docker_tag)
oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
src_img_uri)
- patch_file_path = os.path.join(tmp_dir, 'containers',
- patch_file)
+ patch_file_paths.append(os.path.join(
+ tmp_dir, "containers/{}".format(service), patch_file))
else:
patch_file_path = os.path.join(tmp_dir, patch_file)
virt_ops.extend([
@@ -217,8 +252,10 @@ def add_upstream_patches(patches, image, tmp_dir,
project_path, patch_file)}])
logging.info("Adding patch {} to {}".format(patch_file,
image))
- with open(patch_file_path, 'w') as fh:
- fh.write(patch_diff)
+ patch_file_paths.append(patch_file_path)
+ for patch_fp in patch_file_paths:
+ with open(patch_fp, 'w') as fh:
+ fh.write(patch_diff)
else:
logging.info("Ignoring patch:\n{}".format(patch))
if len(virt_ops) > 1:
@@ -258,3 +295,41 @@ def create_git_archive(repo_url, repo_name, tmp_dir,
repo.archive(fh, prefix=prefix)
logging.debug("Wrote archive file: {}".format(archive_path))
return archive_path
+
+
+def get_neutron_driver(ds_opts):
+ sdn = ds_opts.get('sdn_controller', None)
+
+ if sdn == 'opendaylight':
+ return 'odl'
+ elif sdn == 'ovn':
+ return sdn
+ elif ds_opts.get('vpp', False):
+ return 'vpp'
+ else:
+ return None
+
+
+def prepare_container_images(prep_file, branch='master', neutron_driver=None):
+ if not os.path.isfile(prep_file):
+ raise exc.ApexCommonBuilderException("Prep file does not exist: "
+ "{}".format(prep_file))
+ with open(prep_file) as fh:
+ data = yaml.safe_load(fh)
+ try:
+ p_set = data['parameter_defaults']['ContainerImagePrepare'][0]['set']
+ if neutron_driver:
+ p_set['neutron_driver'] = neutron_driver
+ p_set['namespace'] = "docker.io/tripleo{}".format(branch)
+ if platform.machine() == 'aarch64':
+ p_set['namespace'] = "docker.io/armbandapex"
+ p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64'
+
+ except KeyError:
+ logging.error("Invalid prep file format: {}".format(prep_file))
+ raise exc.ApexCommonBuilderException("Invalid format for prep file")
+
+ logging.debug("Writing new container prep file:\n{}".format(
+ pprint.pformat(data)))
+ with open(prep_file, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
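A minimal usage sketch of the two helpers added above (not part of the
commit; the prep file path and deploy settings are hypothetical):

    from apex.builders import common_builder as c_builder

    ds_opts = {'sdn_controller': 'opendaylight'}
    # maps deploy settings to a neutron_driver value: 'odl' here,
    # 'ovn' when sdn_controller is 'ovn', 'vpp' when vpp is enabled
    driver = c_builder.get_neutron_driver(ds_opts)
    # rewrites parameter_defaults/ContainerImagePrepare[0]/set in place,
    # pointing namespace at docker.io/tripleoqueens and setting the driver
    c_builder.prepare_container_images(
        '/tmp/containers-prepare-parameter.yaml',
        branch='queens', neutron_driver=driver)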
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
index a74ec252..eab8fb66 100644
--- a/apex/builders/overcloud_builder.py
+++ b/apex/builders/overcloud_builder.py
@@ -25,7 +25,12 @@ def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
assert odl_version in con.VALID_ODL_VERSIONS
# add repo
if odl_version == 'master':
+        # the last version in the constants list is "master", so select the
+        # 2nd to last; the ODL package has no "master" version
odl_pkg_version = con.VALID_ODL_VERSIONS[-2]
+        # branch is used to pull puppet-opendaylight. Since puppet-odl is
+        # not pulled until later, we need to use the master branch of it
+        # when the master ODL version is specified
branch = odl_version
else:
odl_pkg_version = odl_version
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index 4efd00d5..47d2568d 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -9,7 +9,9 @@
# Used to modify undercloud qcow2 image
import logging
+import json
import os
+import subprocess
from apex.common import constants as con
from apex.common import utils
@@ -26,16 +28,17 @@ def add_upstream_packages(image):
pkgs = [
'epel-release',
'openstack-utils',
- 'ceph-common',
'python2-networking-sfc',
'openstack-ironic-inspector',
'subunit-filters',
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
- 'ceph-ansible',
- 'python-tripleoclient'
+ 'python-tripleoclient',
+ 'openstack-tripleo-heat-templates'
]
+ # Remove incompatible python-docker version
+ virt_ops.append({con.VIRT_RUN_CMD: "yum remove -y python-docker-py"})
for pkg in pkgs:
virt_ops.append({con.VIRT_INSTALL: pkg})
@@ -59,3 +62,46 @@ def inject_calipso_installer(tmp_dir, image):
# TODO(trozet): add unit testing for calipso injector
# TODO(trozet): add rest of build for undercloud here as well
+
+
+def update_repos(image, branch):
+ virt_ops = [
+ {con.VIRT_RUN_CMD: "rm -f /etc/yum.repos.d/delorean*"},
+ {con.VIRT_RUN_CMD: "yum-config-manager --add-repo "
+ "https://trunk.rdoproject.org/centos7/{}"
+ "/delorean.repo".format(con.RDO_TAG)},
+ {con.VIRT_RUN_CMD: "yum clean all"},
+ {con.VIRT_INSTALL: "python2-tripleo-repos"},
+ {con.VIRT_RUN_CMD: "tripleo-repos -b {} {} ceph".format(branch,
+ con.RDO_TAG)}
+ ]
+ virt_utils.virt_customize(virt_ops, image)
+
+
+def expand_disk(image, desired_size=50):
+ """
+    Expands a disk image to desired_size, specified in gigabytes
+ :param image: image to resize
+ :param desired_size: desired size in GB
+ :return: None
+ """
+ # there is a lib called vminspect which has some dependencies and is
+ # not yet available in pip. Consider switching to this lib later.
+ try:
+ img_out = json.loads(subprocess.check_output(
+ ['qemu-img', 'info', '--output=json', image],
+ stderr=subprocess.STDOUT).decode())
+ disk_gb_size = int(img_out['virtual-size'] / 1000000000)
+ if disk_gb_size < desired_size:
+ logging.info("Expanding disk image: {}. Current size: {} is less"
+ "than require size: {}".format(image, disk_gb_size,
+ desired_size))
+ diff_size = desired_size - disk_gb_size
+ subprocess.check_call(['qemu-img', 'resize', image,
+ "+{}G".format(diff_size)],
+ stderr=subprocess.STDOUT)
+
+ except (subprocess.CalledProcessError, json.JSONDecodeError, KeyError) \
+ as e:
+ logging.warning("Unable to resize disk, disk may not be large "
+ "enough: {}".format(e))
diff --git a/apex/common/constants.py b/apex/common/constants.py
index 5a2b7f98..59988f74 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -43,17 +43,19 @@ THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services')
DEFAULT_OS_VERSION = 'master'
DEFAULT_ODL_VERSION = 'oxygen'
-VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'fluorine',
+ 'neon', 'master']
PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
'/puppet-opendaylight'
DEBUG_OVERCLOUD_PW = 'opnfvapex'
NET_ENV_FILE = 'network-environment.yaml'
DEPLOY_TIMEOUT = 120
-UPSTREAM_RDO = 'https://images.rdoproject.org/master/delorean/current' \
- '-tripleo-rdo/'
-OPENSTACK_GERRIT = 'https://review.openstack.org'
+RDO_TAG = 'current-tripleo'
+UPSTREAM_RDO = "https://images.rdoproject.org/master/rdo_trunk/{}/".format(
+ RDO_TAG)
+OPENSTACK_GERRIT = 'https://review.opendev.org'
-DOCKER_TAG = 'current-tripleo-rdo'
+DOCKER_TAG = RDO_TAG
# Maps regular service files to docker versions
# None value means mapping is same as key
VALID_DOCKER_SERVICES = {
@@ -66,8 +68,10 @@ VALID_DOCKER_SERVICES = {
}
DOCKERHUB_OOO = 'https://registry.hub.docker.com/v2/repositories' \
'/tripleomaster/'
+DOCKERHUB_AARCH64 = 'https://registry.hub.docker.com/v2/repositories' \
+ '/armbandapex/'
KUBESPRAY_URL = 'https://github.com/kubernetes-incubator/kubespray.git'
-OPNFV_ARTIFACTS = 'http://artifacts.opnfv.org'
+OPNFV_ARTIFACTS = 'http://storage.googleapis.com/artifacts.opnfv.org'
CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \
'rpm'.format(OPNFV_ARTIFACTS)
diff --git a/apex/common/utils.py b/apex/common/utils.py
index aae821ef..72a66d10 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -310,3 +310,15 @@ def fetch_properties(url):
logging.warning('Unable to fetch properties for: {}'.format(url))
raise exc.FetchException('Unable to determine properties location: '
'{}'.format(url))
+
+
+def find_container_client(os_version):
+ """
+ Determines whether to use docker or podman client
+ :param os_version: openstack version
+ :return: client name as string
+ """
+ if os_version == 'rocky' or os_version == 'queens':
+ return 'docker'
+ else:
+ return 'podman'
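A quick sketch of how the new helper resolves the client (this mirrors the
unit test added in test_apex_common_utils.py below):

    from apex.common import utils

    utils.find_container_client('queens')  # returns 'docker'
    utils.find_container_client('rocky')   # returns 'docker'
    utils.find_container_client('master')  # returns 'podman'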
diff --git a/apex/deploy.py b/apex/deploy.py
index dab6bd1e..d0c2b208 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -44,6 +44,12 @@ from apex.overcloud import deploy as oc_deploy
APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
+UC_DISK_FILES = [
+ 'overcloud-full.vmlinuz',
+ 'overcloud-full.initrd',
+ 'ironic-python-agent.initramfs',
+ 'ironic-python-agent.kernel'
+]
def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -287,12 +293,24 @@ def main():
'requires at least 12GB per controller.')
logging.info('Increasing RAM per controller to 12GB')
elif args.virt_default_ram < 10:
- control_ram = 10
- logging.warning('RAM per controller is too low. nosdn '
- 'requires at least 10GB per controller.')
- logging.info('Increasing RAM per controller to 10GB')
+ if platform.machine() == 'aarch64':
+ control_ram = 16
+ logging.warning('RAM per controller is too low for '
+                                    'aarch64')
+ logging.info('Increasing RAM per controller to 16GB')
+ else:
+ control_ram = 10
+ logging.warning('RAM per controller is too low. nosdn '
+ 'requires at least 10GB per controller.')
+ logging.info('Increasing RAM per controller to 10GB')
else:
control_ram = args.virt_default_ram
+ if platform.machine() == 'aarch64' and args.virt_cpus < 16:
+ vcpus = 16
+ logging.warning('aarch64 requires at least 16 vCPUS per '
+ 'target VM. Increasing to 16.')
+ else:
+ vcpus = args.virt_cpus
if ha_enabled and args.virt_compute_nodes < 2:
logging.debug(
'HA enabled, bumping number of compute nodes to 2')
@@ -301,7 +319,7 @@ def main():
num_computes=args.virt_compute_nodes,
controller_ram=control_ram * 1024,
compute_ram=compute_ram * 1024,
- vcpus=args.virt_cpus
+ vcpus=vcpus
)
inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
logging.info("Inventory is:\n {}".format(pprint.pformat(
@@ -320,13 +338,14 @@ def main():
utils.run_ansible(ansible_args,
os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'deploy_dependencies.yml'))
+ all_in_one = not bool(args.virt_compute_nodes)
if args.snapshot:
# Start snapshot Deployment
logging.info('Executing Snapshot Deployment...')
SnapshotDeployment(deploy_settings=deploy_settings,
snap_cache_dir=args.snap_cache,
fetch=not args.no_fetch,
- all_in_one=not bool(args.virt_compute_nodes))
+ all_in_one=all_in_one)
else:
# Start Standard TripleO Deployment
deployment = ApexDeployment(deploy_settings, args.patches_file,
@@ -377,16 +396,32 @@ def main():
args.image_dir = os.path.join(args.image_dir, os_version)
upstream_url = constants.UPSTREAM_RDO.replace(
constants.DEFAULT_OS_VERSION, os_version)
- upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
+
+ upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
+ if platform.machine() == 'aarch64':
+ upstream_targets.append('undercloud.qcow2')
utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
upstream_targets,
fetch=not args.no_fetch)
- sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ # Copy ironic files and overcloud ramdisk and kernel into temp dir
+ # to be copied by ansible into undercloud /home/stack
+ # Note the overcloud disk does not need to be copied here as it will
+ # be modified and copied later
+ for tmp_file in UC_DISK_FILES:
+ shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+ os.path.join(APEX_TEMP_DIR, tmp_file))
+ if platform.machine() == 'aarch64':
+ sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
+ else:
+ sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
# copy undercloud so we don't taint upstream fetch
uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
- uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
+ uc_fetch_img = sdn_image
shutil.copyfile(uc_fetch_img, uc_image)
# prep undercloud with required packages
+ if platform.machine() != 'aarch64':
+ uc_builder.update_repos(image=uc_image,
+ branch=branch.replace('stable/', ''))
uc_builder.add_upstream_packages(uc_image)
uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
# add patches from upstream to undercloud and overcloud
@@ -415,6 +450,21 @@ def main():
for role in 'compute', 'controller':
oc_cfg.create_nic_template(net_settings, deploy_settings, role,
args.deploy_dir, APEX_TEMP_DIR)
+ # Prepare/Upload docker images
+ docker_env = 'containers-prepare-parameter.yaml'
+ shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+ os.path.join(APEX_TEMP_DIR, docker_env))
+ # Upload extra ansible.cfg
+ if platform.machine() == 'aarch64':
+ ansible_env = 'ansible.cfg'
+ shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
+ os.path.join(APEX_TEMP_DIR, ansible_env))
+
+ c_builder.prepare_container_images(
+ os.path.join(APEX_TEMP_DIR, docker_env),
+ branch=branch.replace('stable/', ''),
+ neutron_driver=c_builder.get_neutron_driver(ds_opts)
+ )
# Install Undercloud
undercloud.configure(net_settings, deploy_settings,
os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
@@ -443,8 +493,12 @@ def main():
opnfv_env, net_env_target, APEX_TEMP_DIR)
if not args.virtual:
oc_deploy.LOOP_DEVICE_SIZE = "50G"
+ if platform.machine() == 'aarch64':
+ oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ else:
+ oc_image = sdn_image
patched_containers = oc_deploy.prep_image(
- deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
+ deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
@@ -473,6 +527,8 @@ def main():
container_vars['os_version'] = os_version
container_vars['aarch64'] = platform.machine() == 'aarch64'
container_vars['sdn_env_file'] = sdn_env_files
+ container_vars['container_client'] = utils.find_container_client(
+ os_version)
try:
utils.run_ansible(container_vars, docker_playbook,
host=undercloud.ip, user='stack',
@@ -481,6 +537,8 @@ def main():
except Exception:
logging.error("Unable to complete container prep on "
"Undercloud")
+ for tmp_file in UC_DISK_FILES:
+ os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
raise
@@ -513,6 +571,8 @@ def main():
deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
deploy_vars['vim'] = ds_opts['vim']
+ deploy_vars['container_client'] = utils.find_container_client(
+ os_version)
for dns_server in net_settings['dns_servers']:
deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
dns_server)
@@ -528,6 +588,8 @@ def main():
raise
finally:
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+ for tmp_file in UC_DISK_FILES:
+ os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
# Post install
logging.info("Executing post deploy configuration")
@@ -674,6 +736,10 @@ def main():
deploy_vars['l2gw'] = ds_opts.get('l2gw')
deploy_vars['sriov'] = ds_opts.get('sriov')
deploy_vars['tacker'] = ds_opts.get('tacker')
+ deploy_vars['all_in_one'] = all_in_one
+ # TODO(trozet): need to set container client to docker until OOO
+ # migrates OC to podman. Remove this later.
+ deploy_vars['container_client'] = 'docker'
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
diff --git a/apex/network/network_data.py b/apex/network/network_data.py
index 1177af09..6f330c50 100644
--- a/apex/network/network_data.py
+++ b/apex/network/network_data.py
@@ -83,7 +83,7 @@ def create_network_data(ns, target=None):
"{}".format(net))
raise NetworkDataException("cidr is null for network {}".format(
net))
-
+ tmp_net['mtu'] = network.get('mtu', 1500)
network_data.append(copy.deepcopy(tmp_net))
# have to do this due to the aforementioned bug
diff --git a/apex/network/network_environment.py b/apex/network/network_environment.py
index 0a4d1036..52b4452a 100644
--- a/apex/network/network_environment.py
+++ b/apex/network/network_environment.py
@@ -186,6 +186,8 @@ class NetworkEnvironment(dict):
for flag in IPV6_FLAGS:
self[param_def][flag] = True
+ self._update_service_netmap(net_settings.enabled_network_list)
+
def _get_vlan(self, network):
if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
return network['nic_mapping'][CONTROLLER]['vlan']
@@ -218,6 +220,13 @@ class NetworkEnvironment(dict):
prefix = ''
self[reg][key] = self.tht_dir + prefix + postfix
+ def _update_service_netmap(self, network_list):
+ if 'ServiceNetMap' not in self[param_def]:
+ return
+ for service, network in self[param_def]['ServiceNetMap'].items():
+ if network not in network_list:
+ self[param_def]['ServiceNetMap'][service] = 'ctlplane'
+
class NetworkEnvException(Exception):
def __init__(self, value):
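To illustrate the ServiceNetMap fallback above (a sketch mirroring the new
test in test_apex_network_environment.py; ns and net_env_file are assumed
to be a NetworkSettings object and an environment file path):

    ns.enabled_network_list = ['admin']
    ne = NetworkEnvironment(ns, net_env_file)
    # every service mapped to a disabled network now uses 'ctlplane'
    assert all(net == 'ctlplane'
               for net in ne['parameter_defaults']['ServiceNetMap'].values())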
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 4deeabff..538f50a4 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -99,6 +99,12 @@ DUPLICATE_COMPUTE_SERVICES = [
'OS::TripleO::Services::ComputeNeutronL3Agent'
]
+NFS_VARS = [
+ 'NovaNfsEnabled',
+ 'GlanceNfsEnabled',
+ 'CinderNfsEnabledBackend'
+]
+
def build_sdn_env_list(ds, sdn_map, env_list=None):
"""
@@ -194,8 +200,6 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
sdn_docker_files = get_docker_sdn_files(ds_opts)
for sdn_docker_file in sdn_docker_files:
deploy_options.append(sdn_docker_file)
- if sdn_docker_files:
- deploy_options.append('sdn-images.yaml')
else:
deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
@@ -207,6 +211,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
else:
deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+    # TODO(trozet): fix this check to look for whether ceph is in the
+    # controller services rather than relying on the env file name
if ds_opts['ceph'] and 'csit' not in env_file:
prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
@@ -247,12 +253,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
if net_data:
cmd += ' --networks-file network_data.yaml'
libvirt_type = 'kvm'
- if virtual:
+ if virtual and (platform.machine() != 'aarch64'):
with open('/sys/module/kvm_intel/parameters/nested') as f:
nested_kvm = f.read().strip()
if nested_kvm != 'Y':
libvirt_type = 'qemu'
+ elif virtual and (platform.machine() == 'aarch64'):
+ libvirt_type = 'qemu'
cmd += ' --libvirt-type {}'.format(libvirt_type)
+ if platform.machine() == 'aarch64':
+ cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
logging.info("Deploy command set: {}".format(cmd))
with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -357,22 +367,12 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
pw_op = "password:{}".format(root_pw)
virt_cmds.append({con.VIRT_PW: pw_op})
- if dataplane == 'ovs':
- if ds_opts['sfc']:
- oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
- elif sdn == 'opendaylight':
- # FIXME(trozet) remove this after RDO is updated with fix for
- # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
- ovs_file = os.path.basename(con.CUSTOM_OVS)
- ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
- utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
- targets=[ovs_file])
- virt_cmds.extend([
- {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
- ovs_file))},
- {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
- ovs_file)}
- ])
+ # FIXME(trozet) ovs build is failing in CentOS 7.6
+ # if dataplane == 'ovs':
+ # FIXME(trozet) remove this after RDO is updated with fix for
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+ # https://review.rdoproject.org/r/#/c/13839/
+ # oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
if dataplane == 'fdio':
# Patch neutron with using OVS external interface for router
@@ -432,6 +432,29 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
virt_cmds.append(
{con.VIRT_RUN_CMD: "crudini --del {} Unit "
"ConditionPathExists".format(dhcp_unit)})
+ # Prep for NFS
+ virt_cmds.extend([
+ {con.VIRT_INSTALL: "nfs-utils"},
+ {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+ "/etc/systemd/system/multi-user.target.wants/"
+ "nfs-server.service"},
+ {con.VIRT_RUN_CMD: "mkdir -p /glance"},
+ {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
+ {con.VIRT_RUN_CMD: "mkdir -p /nova"},
+ {con.VIRT_RUN_CMD: "chmod 777 /glance"},
+ {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
+ {con.VIRT_RUN_CMD: "chmod 777 /nova"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
+ {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
+ "no_root_squash,no_acl)' > /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "exportfs -avr"},
+ ])
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
return patched_containers
@@ -677,11 +700,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
# Merge compute services into control services if only a single
# node deployment
if num_compute == 0:
- logging.info("All in one deployment. Checking if service merging "
- "required into control services")
with open(tmp_opnfv_env, 'r') as fh:
data = yaml.safe_load(fh)
param_data = data['parameter_defaults']
+ logging.info("All in one deployment detected")
+ logging.info("Disabling NFS in env file")
# Check to see if any parameters are set for Compute
for param in param_data.keys():
if param != 'ComputeServices' and param.startswith('Compute'):
@@ -689,6 +712,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
"in deployment: {}. Please use Controller "
"based parameters when using All-in-one "
"deployments".format(param))
+ if param in NFS_VARS:
+ param_data[param] = False
+ logging.info("Checking if service merging required into "
+ "control services")
if ('ControllerServices' in param_data and 'ComputeServices' in
param_data):
logging.info("Services detected in environment file. Merging...")
@@ -703,11 +730,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
logging.debug("Merged controller services: {}".format(
pprint.pformat(param_data['ControllerServices'])
))
- with open(tmp_opnfv_env, 'w') as fh:
- yaml.safe_dump(data, fh, default_flow_style=False)
else:
logging.info("No services detected in env file, not merging "
"services")
+ with open(tmp_opnfv_env, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
with open(tmp_opnfv_env, 'r') as fh:
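For context, the NFS prep commands added to prep_image leave the overcloud
image exporting three world-writable shares, so /etc/exports ends up as:

    /glance *(rw,sync,no_root_squash,no_acl)
    /cinder *(rw,sync,no_root_squash,no_acl)
    /nova *(rw,sync,no_root_squash,no_acl)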
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index 00e6d6c0..9f8a6f18 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -129,9 +129,6 @@ class DeploySettings(dict):
"Invalid SRIOV interface name: {}".format(
self['deploy_options']['sriov']))
- if self['deploy_options']['odl_version'] == 'oxygen':
- self['deploy_options']['odl_version'] = 'master'
-
if 'performance' in deploy_options:
if not isinstance(deploy_options['performance'], dict):
raise DeploySettingsException("Performance deploy_option"
diff --git a/apex/tests/config/98faaca.diff b/apex/tests/config/98faaca.diff
index 68a66fbc..96462d5f 100644
--- a/apex/tests/config/98faaca.diff
+++ b/apex/tests/config/98faaca.diff
@@ -17,7 +17,7 @@ specified in environments/services-docker/update-odl.yaml.
Upgrading ODL to the next major release (1.1->2) requires
only the L2 steps. These are implemented as upgrade_tasks and
-post_upgrade_tasks in https://review.openstack.org/489201.
+post_upgrade_tasks in https://review.opendev.org/489201.
Steps involved in level 2 update are
1. Block OVS instances to connect to ODL
diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py
index 09bd2545..3ff95bb5 100644
--- a/apex/tests/test_apex_common_builder.py
+++ b/apex/tests/test_apex_common_builder.py
@@ -24,6 +24,8 @@ DOCKER_YAML = {
}
}
+a_mock_open = mock_open(read_data=None)
+
class TestCommonBuilder(unittest.TestCase):
@classmethod
@@ -55,7 +57,8 @@ class TestCommonBuilder(unittest.TestCase):
dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
'status': 'MERGED'}
self.assertTrue(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
def test_is_patch_promoted_docker(self):
dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
@@ -63,13 +66,15 @@ class TestCommonBuilder(unittest.TestCase):
dummy_image = 'centos-binary-opendaylight'
self.assertTrue(c_builder.is_patch_promoted(dummy_change,
'master',
+ con.DOCKERHUB_OOO,
docker_image=dummy_image))
def test_patch_not_promoted(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
'status': 'MERGED'}
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
def test_patch_not_promoted_docker(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
@@ -77,13 +82,15 @@ class TestCommonBuilder(unittest.TestCase):
dummy_image = 'centos-binary-opendaylight'
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
'master',
+ con.DOCKERHUB_OOO,
docker_image=dummy_image))
def test_patch_not_promoted_and_not_merged(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
'status': 'BLAH'}
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
@patch('builtins.open', mock_open())
@patch('apex.builders.common_builder.is_patch_promoted')
@@ -239,7 +246,8 @@ class TestCommonBuilder(unittest.TestCase):
'/dummytmp/dummyrepo.tar')
def test_project_to_docker_image(self):
- found_services = c_builder.project_to_docker_image(project='nova')
+ found_services = c_builder.project_to_docker_image('nova',
+ con.DOCKERHUB_OOO)
assert 'nova-api' in found_services
@patch('apex.common.utils.open_webpage')
@@ -248,4 +256,55 @@ class TestCommonBuilder(unittest.TestCase):
mock_open_web.return_value = b'{"blah": "blah"}'
self.assertRaises(exceptions.ApexCommonBuilderException,
c_builder.project_to_docker_image,
- 'nova')
+ 'nova',
+ con.DOCKERHUB_OOO)
+
+ def test_get_neutron_driver(self):
+ ds_opts = {'dataplane': 'fdio',
+ 'sdn_controller': 'opendaylight',
+ 'odl_version': 'master',
+ 'vpn': False,
+ 'sriov': False}
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'odl')
+ ds_opts['sdn_controller'] = None
+ ds_opts['vpp'] = True
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'vpp')
+ ds_opts['sdn_controller'] = 'ovn'
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'ovn')
+
+ @patch('apex.builders.common_builder.yaml')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', a_mock_open, create=True)
+ def test_prepare_container_images(self, mock_is_file, mock_yaml):
+ mock_yaml.safe_load.return_value = {
+ 'parameter_defaults': {
+ 'ContainerImagePrepare': [
+ {'set':
+ {'namespace': 'blah',
+ 'neutron_driver': 'null',
+ }
+ }
+ ]
+ }
+ }
+ expected_output = {
+ 'parameter_defaults': {
+ 'ContainerImagePrepare': [
+ {'set':
+ {'namespace': 'docker.io/tripleoqueens',
+ 'neutron_driver': 'odl',
+ }
+ }
+ ]
+ }
+ }
+
+ c_builder.prepare_container_images('dummy.yaml', 'queens',
+ 'odl')
+ mock_yaml.safe_dump.assert_called_with(
+ expected_output,
+ a_mock_open.return_value,
+ default_flow_style=False)
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index 4c250117..1ecb7df6 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -84,7 +84,7 @@ class TestCommonUtils:
def test_fetch_upstream_previous_file(self):
test_file = 'overcloud-full.tar.md5'
- url = 'https://images.rdoproject.org/master/delorean/' \
+ url = 'https://images.rdoproject.org/master/rdo_trunk/' \
'current-tripleo/stable/'
os.makedirs('/tmp/fetch_test', exist_ok=True)
open("/tmp/fetch_test/{}".format(test_file), 'w').close()
@@ -155,3 +155,8 @@ class TestCommonUtils:
def test_unique(self):
dummy_list = [1, 2, 1, 3, 4, 5, 5]
assert_equal(utils.unique(dummy_list), [1, 2, 3, 4, 5])
+
+ def test_find_container_client(self):
+ for version in 'rocky', 'queens':
+ assert_equal(utils.find_container_client(version), 'docker')
+ assert_equal(utils.find_container_client('master'), 'podman')
diff --git a/apex/tests/test_apex_deploy.py b/apex/tests/test_apex_deploy.py
index be52c276..004c21c1 100644
--- a/apex/tests/test_apex_deploy.py
+++ b/apex/tests/test_apex_deploy.py
@@ -118,6 +118,7 @@ class TestDeploy(unittest.TestCase):
args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
+ @patch('apex.deploy.c_builder')
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@@ -146,7 +147,7 @@ class TestDeploy(unittest.TestCase):
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
mock_oc_deploy, mock_shutil, mock_network_data,
- mock_uc_builder, mock_deployment):
+ mock_uc_builder, mock_deployment, mock_c_builder):
net_sets_dict = {'networks': MagicMock(),
'dns_servers': 'test'}
ds_opts_dict = {'global_params': MagicMock(),
@@ -197,6 +198,7 @@ class TestDeploy(unittest.TestCase):
main()
mock_snap_deployment.assert_called()
+ @patch('apex.deploy.c_builder')
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@@ -225,7 +227,7 @@ class TestDeploy(unittest.TestCase):
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
mock_oc_deploy, mock_shutil, mock_network_data,
- mock_uc_builder, mock_deployment):
+ mock_uc_builder, mock_deployment, mock_c_builder):
# didn't work yet line 412
# net_sets_dict = {'networks': {'admin': {'cidr': MagicMock()}},
# 'dns_servers': 'test'}
@@ -329,6 +331,7 @@ class TestDeploy(unittest.TestCase):
# TODO(trozet) add assertions here with arguments for functions in
# deploy main
+ @patch('apex.deploy.c_builder')
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@@ -358,7 +361,8 @@ class TestDeploy(unittest.TestCase):
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
mock_oc_deploy, mock_git, mock_shutil,
- mock_network_data, mock_uc_builder, mock_deployment):
+ mock_network_data, mock_uc_builder, mock_deployment,
+ mock_c_builder):
net_sets_dict = {'networks': MagicMock(),
'dns_servers': 'test'}
ds_opts_dict = {'global_params': MagicMock(),
diff --git a/apex/tests/test_apex_network_environment.py b/apex/tests/test_apex_network_environment.py
index 79a72a55..7aa6ef15 100644
--- a/apex/tests/test_apex_network_environment.py
+++ b/apex/tests/test_apex_network_environment.py
@@ -165,3 +165,10 @@ class TestNetworkEnvironment:
e = NetworkEnvException("test")
print(e)
assert_is_instance(e, NetworkEnvException)
+
+ def test_service_netmap(self):
+ ns = copy(self.ns)
+ ns.enabled_network_list = ['admin']
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ for network in ne['parameter_defaults']['ServiceNetMap'].values():
+ assert_equal(network, 'ctlplane')
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index a70057b9..79dbf54b 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -156,7 +156,6 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_in('--control-scale 3', result_cmd)
assert_in('--compute-scale 2', result_cmd)
assert_in('docker-images.yaml', result_cmd)
- assert_in('sdn-images.yaml', result_cmd)
assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
'/docker.yaml', result_cmd)
assert_in('/usr/share/openstack-tripleo-heat-templates/environments/'
@@ -234,6 +233,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@@ -241,7 +241,8 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl(self, mock_is_file, mock_shutil,
- mock_virt_utils, mock_inject_odl, mock_fetch):
+ mock_virt_utils, mock_inject_odl,
+ mock_fetch, mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
@@ -259,6 +260,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
+ # mock_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.c_builder')
@patch('apex.overcloud.deploy.oc_builder')
@@ -340,12 +342,13 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_virt_utils.virt_customize.assert_called()
mock_oc_builder.inject_opendaylight.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_ovn(self, mock_is_file, mock_shutil,
- mock_virt_utils):
+ mock_virt_utils, mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'vpn': False,
@@ -358,7 +361,9 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ # mock_ovs_nsh.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
@patch('apex.builders.overcloud_builder.inject_quagga')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@@ -368,7 +373,8 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl_vpn(self, mock_is_file, mock_shutil,
mock_virt_utils, mock_inject_odl,
- mock_inject_quagga, mock_fetch):
+ mock_inject_quagga, mock_fetch,
+ mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
@@ -387,6 +393,7 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
mock_inject_quagga.assert_called()
+ # mock_ovs_nsh.assert_called()
@patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@@ -414,7 +421,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
- mock_inject_ovs_nsh.assert_called()
+ # mock_inject_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.os.path.isfile')
def test_prep_image_no_image(self, mock_isfile):
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index 5c33bf03..14586528 100644
--- a/apex/tests/test_apex_undercloud.py
+++ b/apex/tests/test_apex_undercloud.py
@@ -10,6 +10,7 @@
import ipaddress
import libvirt
import os
+import platform
import subprocess
import unittest
@@ -239,13 +240,16 @@ class TestUndercloud(unittest.TestCase):
assert_raises(ApexUndercloudException,
uc.configure, ns, ds, 'playbook', '/tmp/dir')
+ @patch('apex.undercloud.undercloud.virt_utils')
+ @patch('apex.undercloud.undercloud.uc_builder')
@patch('apex.undercloud.undercloud.os.remove')
@patch('apex.undercloud.undercloud.os.path')
@patch('apex.undercloud.undercloud.shutil')
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
def test_setup_vols(self, mock_get_vm, mock_create,
- mock_shutil, mock_os_path, mock_os_remove):
+ mock_shutil, mock_os_path, mock_os_remove,
+ mock_uc_builder, mock_virt_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
mock_os_path.isfile.return_value = True
mock_os_path.exists.return_value = True
@@ -255,6 +259,9 @@ class TestUndercloud(unittest.TestCase):
src_img = os.path.join(uc.image_path, img_file)
dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
mock_shutil.copyfile.assert_called_with(src_img, dest_img)
+ if platform.machine() != 'aarch64':
+ mock_uc_builder.expand_disk.assert_called()
+ mock_virt_utils.virt_customize.assert_called()
@patch('apex.undercloud.undercloud.os.path')
@patch.object(Undercloud, '_get_vm', return_value=None)
@@ -276,13 +283,21 @@ class TestUndercloud(unittest.TestCase):
{'--upload':
'/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'},
{'--run-command': 'chmod 600 /root/.ssh/authorized_keys'},
- {'--run-command': 'restorecon /root/.ssh/authorized_keys'},
+ {'--run-command': 'restorecon '
+ '-R -v /root/.ssh'},
+ {'--run-command': 'id -u stack || useradd -m stack'},
+ {'--run-command': 'mkdir -p /home/stack/.ssh'},
+ {'--run-command': 'chown stack:stack /home/stack/.ssh'},
{'--run-command':
'cp /root/.ssh/authorized_keys /home/stack/.ssh/'},
{'--run-command':
'chown stack:stack /home/stack/.ssh/authorized_keys'},
{'--run-command':
- 'chmod 600 /home/stack/.ssh/authorized_keys'}]
+ 'chmod 600 /home/stack/.ssh/authorized_keys'},
+ {'--run-command':
+ 'echo "stack ALL = (ALL) NOPASSWD: ALL" >> '
+ '/etc/sudoers'},
+ {'--run-command': 'touch /etc/cloud/cloud-init.disabled'}]
mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
@patch.object(Undercloud, '_get_vm', return_value=None)
@@ -293,6 +308,7 @@ class TestUndercloud(unittest.TestCase):
ns_dict = {
'apex': MagicMock(),
'dns-domain': 'dns',
+ 'ntp': 'pool.ntp.org',
'networks': {'admin':
{'cidr': ipaddress.ip_network('192.0.2.0/24'),
'installer_vm': {'ip': '192.0.2.1',
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 56087695..5ee487c2 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -15,6 +15,7 @@ import shutil
import subprocess
import time
+from apex.builders import undercloud_builder as uc_builder
from apex.virtual import utils as virt_utils
from apex.virtual import configure_vm as vm_lib
from apex.common import constants
@@ -63,7 +64,7 @@ class Undercloud:
if self.external_net:
networks.append('external')
console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
- root = 'vda' if platform.machine() == 'aarch64' else 'sda'
+ root = 'vda2' if platform.machine() == 'aarch64' else 'sda'
self.vm = vm_lib.create_vm(name='undercloud',
image=self.volume,
@@ -72,7 +73,8 @@ class Undercloud:
kernel_args=['console={}'.format(console),
'root=/dev/{}'.format(root)],
default_network=True,
- template_dir=self.template_path)
+ template_dir=self.template_path,
+ memory=10240)
self.setup_volumes()
self.inject_auth()
@@ -110,7 +112,7 @@ class Undercloud:
# give 10 seconds to come up
time.sleep(10)
# set IP
- for x in range(5):
+ for x in range(10):
if self._set_ip():
logging.info("Undercloud started. IP Address: {}".format(
self.ip))
@@ -153,6 +155,8 @@ class Undercloud:
ansible_vars['apex_temp_dir'] = apex_temp_dir
ansible_vars['nat'] = self.detect_nat(net_settings)
+ ansible_vars['container_client'] = utils.find_container_client(
+ self.os_version)
try:
utils.run_ansible(ansible_vars, playbook, host=self.ip,
user='stack')
@@ -180,11 +184,19 @@ class Undercloud:
if os.path.exists(dest_img):
os.remove(dest_img)
shutil.copyfile(src_img, dest_img)
+ if img_file == self.image_name and platform.machine() != 'aarch64':
+ uc_builder.expand_disk(dest_img)
+ self.expand_root_fs()
+
shutil.chown(dest_img, user='qemu', group='qemu')
os.chmod(dest_img, 0o0744)
- # TODO(trozet):check if resize needed right now size is 50gb
+
+ def expand_root_fs(self):
# there is a lib called vminspect which has some dependencies and is
# not yet available in pip. Consider switching to this lib later.
+ logging.debug("Expanding root filesystem on /dev/sda partition")
+ virt_ops = [{constants.VIRT_RUN_CMD: 'xfs_growfs /dev/sda'}]
+ virt_utils.virt_customize(virt_ops, self.volume)
def inject_auth(self):
virt_ops = list()
@@ -199,10 +211,15 @@ class Undercloud:
'/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'})
run_cmds = [
'chmod 600 /root/.ssh/authorized_keys',
- 'restorecon /root/.ssh/authorized_keys',
+ 'restorecon -R -v /root/.ssh',
+ 'id -u stack || useradd -m stack',
+ 'mkdir -p /home/stack/.ssh',
+ 'chown stack:stack /home/stack/.ssh',
'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
'chown stack:stack /home/stack/.ssh/authorized_keys',
- 'chmod 600 /home/stack/.ssh/authorized_keys'
+ 'chmod 600 /home/stack/.ssh/authorized_keys',
+ 'echo "stack ALL = (ALL) NOPASSWD: ALL" >> /etc/sudoers',
+ 'touch /etc/cloud/cloud-init.disabled'
]
for cmd in run_cmds:
virt_ops.append({constants.VIRT_RUN_CMD: cmd})
@@ -234,12 +251,19 @@ class Undercloud:
"undercloud_hostname undercloud.{}".format(ns['dns-domain']),
"local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
str(ns_admin['cidr']).split('/')[1]),
- "network_gateway {}".format(str(ns_admin['installer_vm']['ip'])),
- "network_cidr {}".format(str(ns_admin['cidr'])),
+ "generate_service_certificate false",
+ "undercloud_ntp_servers {}".format(str(ns['ntp'][0])),
+ "container_images_file "
+ "/home/stack/containers-prepare-parameter.yaml",
+ "undercloud_enable_selinux false"
+ ]
+
+ config['undercloud_network_config'] = [
+ "gateway {}".format(str(ns_admin['installer_vm']['ip'])),
+ "cidr {}".format(str(ns_admin['cidr'])),
"dhcp_start {}".format(str(ns_admin['dhcp_range'][0])),
"dhcp_end {}".format(str(ns_admin['dhcp_range'][1])),
"inspection_iprange {}".format(','.join(intro_range)),
- "generate_service_certificate false"
]
config['ironic_config'] = [
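Assuming each entry in these config lists is rendered as a "key = value"
line in undercloud.conf (a hypothetical rendering; addresses are
illustrative, taken from the test fixture's 192.0.2.0/24 admin network):

    [DEFAULT]
    local_ip = 192.0.2.1/24
    generate_service_certificate = false
    undercloud_ntp_servers = pool.ntp.org
    container_images_file = /home/stack/containers-prepare-parameter.yaml
    undercloud_enable_selinux = false

    [ctlplane-subnet]
    gateway = 192.0.2.1
    cidr = 192.0.2.0/24
    dhcp_start = 192.0.2.5
    dhcp_end = 192.0.2.24
    inspection_iprange = 192.0.2.100,192.0.2.120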
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
index ba0398bb..9d47bf03 100755
--- a/apex/virtual/configure_vm.py
+++ b/apex/virtual/configure_vm.py
@@ -102,6 +102,10 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
with open(os.path.join(template_dir, 'domain.xml'), 'r') as f:
source_template = f.read()
imagefile = os.path.realpath(image)
+
+ if arch == 'aarch64' and diskbus == 'sata':
+ diskbus = 'virtio'
+
memory = int(memory) * 1024
params = {
'name': name,
@@ -118,9 +122,6 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
'user_interface': '',
}
- # assign virtio as default for aarch64
- if arch == 'aarch64' and diskbus == 'sata':
- diskbus = 'virtio'
# Configure the bus type for the target disk device
params['diskbus'] = diskbus
nicparams = {
@@ -171,7 +172,7 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
"""
params['user_interface'] = """
<controller type='virtio-serial' index='0'>
- <address type='virtio-mmio'/>
+ <address type='pci'/>
</controller>
<serial type='pty'>
<target port='0'/>