-rw-r--r--  .gitignore | 3
-rw-r--r--  apex/build_utils.py | 13
-rw-r--r--  apex/builders/common_builder.py | 115
-rw-r--r--  apex/builders/overcloud_builder.py | 5
-rw-r--r--  apex/builders/undercloud_builder.py | 52
-rw-r--r--  apex/common/constants.py | 23
-rw-r--r--  apex/common/utils.py | 12
-rw-r--r--  apex/deploy.py | 86
-rw-r--r--  apex/network/network_data.py | 2
-rw-r--r--  apex/network/network_environment.py | 9
-rw-r--r--  apex/overcloud/deploy.py | 75
-rw-r--r--  apex/settings/deploy_settings.py | 3
-rw-r--r--  apex/tests/config/98faaca.diff | 2
-rw-r--r--  apex/tests/test_apex_build_utils.py | 6
-rw-r--r--  apex/tests/test_apex_common_builder.py | 69
-rw-r--r--  apex/tests/test_apex_common_utils.py | 7
-rw-r--r--  apex/tests/test_apex_deploy.py | 10
-rw-r--r--  apex/tests/test_apex_network_environment.py | 7
-rw-r--r--  apex/tests/test_apex_overcloud_deploy.py | 17
-rw-r--r--  apex/tests/test_apex_undercloud.py | 18
-rw-r--r--  apex/undercloud/undercloud.py | 32
-rwxr-xr-x  apex/virtual/configure_vm.py | 9
-rw-r--r--  build/ansible.cfg | 11
-rw-r--r--  build/containers-prepare-parameter.yaml | 26
-rw-r--r--  build/csit-environment.yaml | 23
-rw-r--r--  build/csit-queens-environment.yaml | 23
-rw-r--r--  build/csit-rocky-environment.yaml | 116
-rw-r--r--  build/network-environment.yaml | 22
-rw-r--r--  build/nics-template.yaml.jinja2 | 8
-rw-r--r--  build/patches/neutron-patch-NSDriver.patch | 2
-rw-r--r--  build/rpm_specs/opnfv-apex.spec | 23
-rwxr-xr-x  ci/util.sh | 2
-rw-r--r--  config/deploy/common-patches.yaml | 23
-rw-r--r--  config/deploy/os-nosdn-calipso_rocky-noha.yaml (renamed from config/deploy/os-nosdn-calipso_queens-noha.yaml) | 2
-rw-r--r--  config/deploy/os-nosdn-rocky-ha.yaml (renamed from config/deploy/os-nosdn-queens-ha.yaml) | 2
-rw-r--r--  config/deploy/os-nosdn-rocky-noha.yaml (renamed from config/deploy/os-nosdn-queens-noha.yaml) | 2
-rw-r--r--  config/deploy/os-odl-bgpvpn_rocky-ha.yaml (renamed from config/deploy/os-odl-bgpvpn_queens-ha.yaml) | 2
-rw-r--r--  config/deploy/os-odl-bgpvpn_rocky-noha.yaml (renamed from config/deploy/os-odl-bgpvpn_queens-noha.yaml) | 2
-rw-r--r--  config/deploy/os-odl-rocky-ha.yaml | 13
-rw-r--r--  config/deploy/os-odl-rocky-noha.yaml | 13
-rw-r--r--  config/deploy/os-odl-sfc_rocky-ha.yaml (renamed from config/deploy/os-odl-sfc_queens-ha.yaml) | 2
-rw-r--r--  config/deploy/os-odl-sfc_rocky-noha.yaml (renamed from config/deploy/os-odl-sfc_queens-noha.yaml) | 2
-rw-r--r--  config/deploy/os-ovn-nofeature-ha.yaml | 15
-rw-r--r--  config/deploy/os-ovn-nofeature-noha.yaml | 10
-rw-r--r--  config/deploy/os-ovn-rocky-ha.yaml | 17
-rw-r--r--  contrib/aarch64/overcloud-full-rootfs.yaml | 54
-rw-r--r--  contrib/aarch64/undercloud-full.yaml | 87
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/contributor/APEX-on-aarch64.rst | 146
-rw-r--r--  docs/index.rst | 23
-rw-r--r--  docs/release/installation/abstract.rst | 6
-rw-r--r--  docs/release/installation/architecture.rst | 17
-rw-r--r--  docs/release/installation/baremetal.rst | 42
-rw-r--r--  docs/release/installation/index.rst | 2
-rw-r--r--  docs/release/installation/introduction.rst | 20
-rw-r--r--  docs/release/installation/references.rst | 2
-rw-r--r--  docs/release/installation/requirements.rst | 4
-rw-r--r--  docs/release/installation/upstream.rst | 40
-rw-r--r--  docs/release/release-notes/release-notes.rst | 51
-rw-r--r--  docs/release/scenarios/k8s-nosdn-nofeature-noha/k8s-nosdn-nofeature-noha.rst | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst | 8
-rw-r--r--  docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst | 8
-rw-r--r--  docs/release/scenarios/os-nosdn-performance-ha/index.rst | 15
-rw-r--r--  docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst | 54
-rw-r--r--  docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst | 14
-rw-r--r--  docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst | 14
-rw-r--r--  docs/release/scenarios/os-ovn-nofeature-ha/index.rst (renamed from docs/release/scenarios/os-ovn-nofeature-noha/index.rst) | 4
-rw-r--r--  docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst (renamed from docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst) | 11
-rw-r--r--  docs/requirements.txt | 2
-rw-r--r--  lib/ansible/playbooks/configure_undercloud.yml | 58
-rw-r--r--  lib/ansible/playbooks/deploy_overcloud.yml | 45
-rw-r--r--  lib/ansible/playbooks/patch_containers.yml | 13
-rw-r--r--  lib/ansible/playbooks/post_deploy_overcloud.yml | 17
-rw-r--r--  lib/ansible/playbooks/post_deploy_undercloud.yml | 11
-rw-r--r--  lib/ansible/playbooks/prepare_overcloud_containers.yml | 64
-rw-r--r--  lib/ansible/playbooks/undercloud_aarch64.yml | 38
-rw-r--r--  setup.cfg | 2
-rw-r--r--  tox.ini | 12
79 files changed, 1385 insertions(+), 445 deletions(-)
diff --git a/.gitignore b/.gitignore
index 7bb5fbcb..6bc2461f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
*~
*.pyc
-/docs_build/
/docs_output/
/releng/
venv/
@@ -10,3 +9,5 @@ nosetests.xml
.*
*.log
*.retry
+.tox
+docs/_build/*
diff --git a/apex/build_utils.py b/apex/build_utils.py
index 78467875..7457e561 100644
--- a/apex/build_utils.py
+++ b/apex/build_utils.py
@@ -111,6 +111,19 @@ def strip_patch_sections(patch, sections=['releasenotes', 'tests']):
return '\n'.join(tmp_patch)
+def is_path_in_patch(patch, path):
+ """
+ Checks if a particular path is modified in a patch diff
+ :param patch: patch diff
+ :param path: path to check for in diff
+ :return: Boolean
+ """
+ for line in patch.split("\n"):
+ if re.match('^diff.*{}'.format(path), line):
+ return True
+ return False
+
+
def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
logging.info("Fetching patch for change id {}".format(change_id))
change = get_change(url, repo, branch, change_id)
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index a5f301b8..59af94cd 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -14,8 +14,11 @@ import git
import json
import logging
import os
+import platform
+import pprint
import re
import urllib.parse
+import yaml
import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
@@ -25,10 +28,11 @@ from apex.common import utils
from apex.virtual import utils as virt_utils
-def project_to_path(project):
+def project_to_path(project, patch=None):
"""
Translates project to absolute file path to use in patching
:param project: name of project
+ :param patch: the patch to applied to the project
:return: File path
"""
if project.startswith('openstack/'):
@@ -37,6 +41,15 @@ def project_to_path(project):
return "/etc/puppet/modules/{}".format(project.replace('puppet-', ''))
elif 'tripleo-heat-templates' in project:
return "/usr/share/openstack-tripleo-heat-templates"
+ elif ('tripleo-common' in project and
+ build_utils.is_path_in_patch(patch, 'container-images/')):
+ # tripleo-common has python and another component to it
+ # here we detect if there is a change to the yaml component and if so
+ # treat it like it is not python. This has the caveat of if there
+ # is a patch to both python and yaml this will not work
+ # FIXME(trozet): add ability to split tripleo-common patches that
+ # modify both python and yaml
+ return "/usr/share/openstack-tripleo-common-containers/"
else:
# assume python. python patches will apply to a project name subdir.
# For example, python-tripleoclient patch will apply to the
@@ -46,17 +59,19 @@ def project_to_path(project):
return "/usr/lib/python2.7/site-packages/"
-def project_to_docker_image(project):
+def project_to_docker_image(project, docker_url):
"""
Translates OpenStack project to OOO services that are containerized
- :param project: name of OpenStack project
+ :param project: short name of OpenStack project
:return: List of OOO docker service names
"""
# Fetch all docker containers in docker hub with tripleo and filter
# based on project
-
+ logging.info("Checking for docker images matching project: {}".format(
+ project))
hub_output = utils.open_webpage(
- urllib.parse.urljoin(con.DOCKERHUB_OOO, '?page_size=1024'), timeout=10)
+ urllib.parse.urljoin(docker_url,
+ '?page_size=1024'), timeout=10)
try:
results = json.loads(hub_output.decode())['results']
except Exception as e:
@@ -71,12 +86,14 @@ def project_to_docker_image(project):
for result in results:
if result['name'].startswith("centos-binary-{}".format(project)):
# add as docker image shortname (just service name)
+ logging.debug("Adding docker image {} for project {} for "
+ "patching".format(result['name'], project))
docker_images.append(result['name'].replace('centos-binary-', ''))
return docker_images
-def is_patch_promoted(change, branch, docker_image=None):
+def is_patch_promoted(change, branch, docker_url, docker_image=None):
"""
Checks to see if a patch that is in merged exists in either the docker
container or the promoted tripleo images
@@ -109,8 +126,8 @@ def is_patch_promoted(change, branch, docker_image=None):
return True
else:
# must be a docker patch, check docker tag modified time
- docker_url = con.DOCKERHUB_OOO.replace('tripleomaster',
- "tripleo{}".format(branch))
+ docker_url = docker_url.replace('tripleomaster',
+ "tripleo{}".format(branch))
url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
docker_url = urllib.parse.urljoin(docker_url, url_path)
logging.debug("docker url is: {}".format(docker_url))
@@ -157,16 +174,29 @@ def add_upstream_patches(patches, image, tmp_dir,
branch = default_branch
patch_diff = build_utils.get_patch(patch['change-id'],
patch['project'], branch)
- project_path = project_to_path(patch['project'])
+ project_path = project_to_path(patch['project'], patch_diff)
# If docker tag and python we know this patch belongs on docker
# container for a docker service. Therefore we build the dockerfile
# and move the patch into the containers directory. We also assume
# this builder call is for overcloud, because we do not support
# undercloud containers
+ if platform.machine() == 'aarch64':
+ docker_url = con.DOCKERHUB_AARCH64
+ else:
+ docker_url = con.DOCKERHUB_OOO
if docker_tag and 'python' in project_path:
# Projects map to multiple THT services, need to check which
# are supported
- ooo_docker_services = project_to_docker_image(patch['project'])
+ project_short_name = os.path.basename(patch['project'])
+ ooo_docker_services = project_to_docker_image(project_short_name,
+ docker_url)
+ if not ooo_docker_services:
+ logging.error("Did not find any matching docker containers "
+ "for project: {}".format(project_short_name))
+ raise exc.ApexCommonBuilderException(
+ 'Unable to find docker services for python project in '
+ 'patch')
+ # Just use the first image to see if patch was promoted into it
docker_img = ooo_docker_services[0]
else:
ooo_docker_services = []
@@ -176,28 +206,43 @@ def add_upstream_patches(patches, image, tmp_dir,
patch['change-id'])
patch_promoted = is_patch_promoted(change,
branch.replace('stable/', ''),
+ docker_url,
docker_img)
if patch_diff and not patch_promoted:
patch_file = "{}.patch".format(patch['change-id'])
+ patch_file_paths = []
# If we found services, then we treat the patch like it applies to
# docker only
if ooo_docker_services:
os_version = default_branch.replace('stable/', '')
for service in ooo_docker_services:
docker_services = docker_services.union({service})
+ # We need to go root to be able to install patch and then
+ # switch back to previous user. Some containers that
+ # have the same name as the project do not necessarily
+ # contain the project code. For example
+ # novajoin-notifier does not contain nova package code.
+ # Therefore we must try to patch and unfortunately
+ # ignore failures until we have a better way of checking
+ # this
docker_cmds = [
"WORKDIR {}".format(project_path),
+ "USER root",
+ "ARG REAL_USER",
+ "RUN yum -y install patch",
"ADD {} {}".format(patch_file, project_path),
- "RUN patch -p1 < {}".format(patch_file)
+ "RUN patch -p1 < {} || echo "
+ "'Patching failed'".format(patch_file),
+ "USER $REAL_USER"
]
src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
"{}".format(uc_ip, os_version, service,
docker_tag)
oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
src_img_uri)
- patch_file_path = os.path.join(tmp_dir, 'containers',
- patch_file)
+ patch_file_paths.append(os.path.join(
+ tmp_dir, "containers/{}".format(service), patch_file))
else:
patch_file_path = os.path.join(tmp_dir, patch_file)
virt_ops.extend([
@@ -207,8 +252,10 @@ def add_upstream_patches(patches, image, tmp_dir,
project_path, patch_file)}])
logging.info("Adding patch {} to {}".format(patch_file,
image))
- with open(patch_file_path, 'w') as fh:
- fh.write(patch_diff)
+ patch_file_paths.append(patch_file_path)
+ for patch_fp in patch_file_paths:
+ with open(patch_fp, 'w') as fh:
+ fh.write(patch_diff)
else:
logging.info("Ignoring patch:\n{}".format(patch))
if len(virt_ops) > 1:
@@ -248,3 +295,41 @@ def create_git_archive(repo_url, repo_name, tmp_dir,
repo.archive(fh, prefix=prefix)
logging.debug("Wrote archive file: {}".format(archive_path))
return archive_path
+
+
+def get_neutron_driver(ds_opts):
+ sdn = ds_opts.get('sdn_controller', None)
+
+ if sdn == 'opendaylight':
+ return 'odl'
+ elif sdn == 'ovn':
+ return sdn
+ elif ds_opts.get('vpp', False):
+ return 'vpp'
+ else:
+ return None
+
+
+def prepare_container_images(prep_file, branch='master', neutron_driver=None):
+ if not os.path.isfile(prep_file):
+ raise exc.ApexCommonBuilderException("Prep file does not exist: "
+ "{}".format(prep_file))
+ with open(prep_file) as fh:
+ data = yaml.safe_load(fh)
+ try:
+ p_set = data['parameter_defaults']['ContainerImagePrepare'][0]['set']
+ if neutron_driver:
+ p_set['neutron_driver'] = neutron_driver
+ p_set['namespace'] = "docker.io/tripleo{}".format(branch)
+ if platform.machine() == 'aarch64':
+ p_set['namespace'] = "docker.io/armbandapex"
+ p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64'
+
+ except KeyError:
+ logging.error("Invalid prep file format: {}".format(prep_file))
+ raise exc.ApexCommonBuilderException("Invalid format for prep file")
+
+ logging.debug("Writing new container prep file:\n{}".format(
+ pprint.pformat(data)))
+ with open(prep_file, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
index a74ec252..eab8fb66 100644
--- a/apex/builders/overcloud_builder.py
+++ b/apex/builders/overcloud_builder.py
@@ -25,7 +25,12 @@ def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
assert odl_version in con.VALID_ODL_VERSIONS
# add repo
if odl_version == 'master':
+ # last version in the constants is "master" so select 2nd to last
+ # odl package version has no "master" version
odl_pkg_version = con.VALID_ODL_VERSIONS[-2]
+ # branch will be used to pull puppet-opendaylight. Since puppet-odl
+ # does not pull branch until later, we need to use master version of
+ # that if master ODL version is specified
branch = odl_version
else:
odl_pkg_version = odl_version
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index 4efd00d5..47d2568d 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -9,7 +9,9 @@
# Used to modify undercloud qcow2 image
import logging
+import json
import os
+import subprocess
from apex.common import constants as con
from apex.common import utils
@@ -26,16 +28,17 @@ def add_upstream_packages(image):
pkgs = [
'epel-release',
'openstack-utils',
- 'ceph-common',
'python2-networking-sfc',
'openstack-ironic-inspector',
'subunit-filters',
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
- 'ceph-ansible',
- 'python-tripleoclient'
+ 'python-tripleoclient',
+ 'openstack-tripleo-heat-templates'
]
+ # Remove incompatible python-docker version
+ virt_ops.append({con.VIRT_RUN_CMD: "yum remove -y python-docker-py"})
for pkg in pkgs:
virt_ops.append({con.VIRT_INSTALL: pkg})
@@ -59,3 +62,46 @@ def inject_calipso_installer(tmp_dir, image):
# TODO(trozet): add unit testing for calipso injector
# TODO(trozet): add rest of build for undercloud here as well
+
+
+def update_repos(image, branch):
+ virt_ops = [
+ {con.VIRT_RUN_CMD: "rm -f /etc/yum.repos.d/delorean*"},
+ {con.VIRT_RUN_CMD: "yum-config-manager --add-repo "
+ "https://trunk.rdoproject.org/centos7/{}"
+ "/delorean.repo".format(con.RDO_TAG)},
+ {con.VIRT_RUN_CMD: "yum clean all"},
+ {con.VIRT_INSTALL: "python2-tripleo-repos"},
+ {con.VIRT_RUN_CMD: "tripleo-repos -b {} {} ceph".format(branch,
+ con.RDO_TAG)}
+ ]
+ virt_utils.virt_customize(virt_ops, image)
+
+
+def expand_disk(image, desired_size=50):
+ """
+ Expands a disk image to desired_size in GigaBytes
+ :param image: image to resize
+ :param desired_size: desired size in GB
+ :return: None
+ """
+ # there is a lib called vminspect which has some dependencies and is
+ # not yet available in pip. Consider switching to this lib later.
+ try:
+ img_out = json.loads(subprocess.check_output(
+ ['qemu-img', 'info', '--output=json', image],
+ stderr=subprocess.STDOUT).decode())
+ disk_gb_size = int(img_out['virtual-size'] / 1000000000)
+ if disk_gb_size < desired_size:
+ logging.info("Expanding disk image: {}. Current size: {} is less"
+ "than require size: {}".format(image, disk_gb_size,
+ desired_size))
+ diff_size = desired_size - disk_gb_size
+ subprocess.check_call(['qemu-img', 'resize', image,
+ "+{}G".format(diff_size)],
+ stderr=subprocess.STDOUT)
+
+ except (subprocess.CalledProcessError, json.JSONDecodeError, KeyError) \
+ as e:
+ logging.warning("Unable to resize disk, disk may not be large "
+ "enough: {}".format(e))
diff --git a/apex/common/constants.py b/apex/common/constants.py
index 0475615a..59988f74 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -39,24 +39,23 @@ VIRT_PW = '--root-password'
THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
-THT_DOCKER_ENV_DIR = {
- 'master': os.path.join(THT_ENV_DIR, 'services'),
- 'queens': os.path.join(THT_ENV_DIR, 'services')
-}
+THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services')
DEFAULT_OS_VERSION = 'master'
DEFAULT_ODL_VERSION = 'oxygen'
-VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'fluorine',
+ 'neon', 'master']
PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
'/puppet-opendaylight'
DEBUG_OVERCLOUD_PW = 'opnfvapex'
NET_ENV_FILE = 'network-environment.yaml'
DEPLOY_TIMEOUT = 120
-UPSTREAM_RDO = 'https://images.rdoproject.org/master/delorean/current' \
- '-tripleo-rdo/'
-OPENSTACK_GERRIT = 'https://review.openstack.org'
+RDO_TAG = 'current-tripleo'
+UPSTREAM_RDO = "https://images.rdoproject.org/master/rdo_trunk/{}/".format(
+ RDO_TAG)
+OPENSTACK_GERRIT = 'https://review.opendev.org'
-DOCKER_TAG = 'current-tripleo-rdo'
+DOCKER_TAG = RDO_TAG
# Maps regular service files to docker versions
# None value means mapping is same as key
VALID_DOCKER_SERVICES = {
@@ -65,12 +64,14 @@ VALID_DOCKER_SERVICES = {
'neutron-opendaylight-sriov.yaml': None,
'neutron-bgpvpn-opendaylight.yaml': None,
'neutron-sfc-opendaylight.yaml': None,
- 'neutron-ml2-ovn.yaml': 'neutron-ovn.yaml'
+ 'neutron-ml2-ovn.yaml': 'neutron-ovn-ha.yaml'
}
DOCKERHUB_OOO = 'https://registry.hub.docker.com/v2/repositories' \
'/tripleomaster/'
+DOCKERHUB_AARCH64 = 'https://registry.hub.docker.com/v2/repositories' \
+ '/armbandapex/'
KUBESPRAY_URL = 'https://github.com/kubernetes-incubator/kubespray.git'
-OPNFV_ARTIFACTS = 'http://artifacts.opnfv.org'
+OPNFV_ARTIFACTS = 'http://storage.googleapis.com/artifacts.opnfv.org'
CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \
'rpm'.format(OPNFV_ARTIFACTS)
diff --git a/apex/common/utils.py b/apex/common/utils.py
index aae821ef..72a66d10 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -310,3 +310,15 @@ def fetch_properties(url):
logging.warning('Unable to fetch properties for: {}'.format(url))
raise exc.FetchException('Unable determine properties location: '
'{}'.format(url))
+
+
+def find_container_client(os_version):
+ """
+ Determines whether to use docker or podman client
+ :param os_version: openstack version
+ :return: client name as string
+ """
+ if os_version == 'rocky' or os_version == 'queens':
+ return 'docker'
+ else:
+ return 'podman'
diff --git a/apex/deploy.py b/apex/deploy.py
index dab6bd1e..d0c2b208 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -44,6 +44,12 @@ from apex.overcloud import deploy as oc_deploy
APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
+UC_DISK_FILES = [
+ 'overcloud-full.vmlinuz',
+ 'overcloud-full.initrd',
+ 'ironic-python-agent.initramfs',
+ 'ironic-python-agent.kernel'
+]
def validate_cross_settings(deploy_settings, net_settings, inventory):
@@ -287,12 +293,24 @@ def main():
'requires at least 12GB per controller.')
logging.info('Increasing RAM per controller to 12GB')
elif args.virt_default_ram < 10:
- control_ram = 10
- logging.warning('RAM per controller is too low. nosdn '
- 'requires at least 10GB per controller.')
- logging.info('Increasing RAM per controller to 10GB')
+ if platform.machine() == 'aarch64':
+ control_ram = 16
+ logging.warning('RAM per controller is too low for '
+ 'aarch64 ')
+ logging.info('Increasing RAM per controller to 16GB')
+ else:
+ control_ram = 10
+ logging.warning('RAM per controller is too low. nosdn '
+ 'requires at least 10GB per controller.')
+ logging.info('Increasing RAM per controller to 10GB')
else:
control_ram = args.virt_default_ram
+ if platform.machine() == 'aarch64' and args.virt_cpus < 16:
+ vcpus = 16
+ logging.warning('aarch64 requires at least 16 vCPUS per '
+ 'target VM. Increasing to 16.')
+ else:
+ vcpus = args.virt_cpus
if ha_enabled and args.virt_compute_nodes < 2:
logging.debug(
'HA enabled, bumping number of compute nodes to 2')
@@ -301,7 +319,7 @@ def main():
num_computes=args.virt_compute_nodes,
controller_ram=control_ram * 1024,
compute_ram=compute_ram * 1024,
- vcpus=args.virt_cpus
+ vcpus=vcpus
)
inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
logging.info("Inventory is:\n {}".format(pprint.pformat(
@@ -320,13 +338,14 @@ def main():
utils.run_ansible(ansible_args,
os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'deploy_dependencies.yml'))
+ all_in_one = not bool(args.virt_compute_nodes)
if args.snapshot:
# Start snapshot Deployment
logging.info('Executing Snapshot Deployment...')
SnapshotDeployment(deploy_settings=deploy_settings,
snap_cache_dir=args.snap_cache,
fetch=not args.no_fetch,
- all_in_one=not bool(args.virt_compute_nodes))
+ all_in_one=all_in_one)
else:
# Start Standard TripleO Deployment
deployment = ApexDeployment(deploy_settings, args.patches_file,
@@ -377,16 +396,32 @@ def main():
args.image_dir = os.path.join(args.image_dir, os_version)
upstream_url = constants.UPSTREAM_RDO.replace(
constants.DEFAULT_OS_VERSION, os_version)
- upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
+
+ upstream_targets = ['overcloud-full.tar', 'ironic-python-agent.tar']
+ if platform.machine() == 'aarch64':
+ upstream_targets.append('undercloud.qcow2')
utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
upstream_targets,
fetch=not args.no_fetch)
- sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ # Copy ironic files and overcloud ramdisk and kernel into temp dir
+ # to be copied by ansible into undercloud /home/stack
+ # Note the overcloud disk does not need to be copied here as it will
+ # be modified and copied later
+ for tmp_file in UC_DISK_FILES:
+ shutil.copyfile(os.path.join(args.image_dir, tmp_file),
+ os.path.join(APEX_TEMP_DIR, tmp_file))
+ if platform.machine() == 'aarch64':
+ sdn_image = os.path.join(args.image_dir, 'undercloud.qcow2')
+ else:
+ sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
# copy undercloud so we don't taint upstream fetch
uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
- uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
+ uc_fetch_img = sdn_image
shutil.copyfile(uc_fetch_img, uc_image)
# prep undercloud with required packages
+ if platform.machine() != 'aarch64':
+ uc_builder.update_repos(image=uc_image,
+ branch=branch.replace('stable/', ''))
uc_builder.add_upstream_packages(uc_image)
uc_builder.inject_calipso_installer(APEX_TEMP_DIR, uc_image)
# add patches from upstream to undercloud and overcloud
@@ -415,6 +450,21 @@ def main():
for role in 'compute', 'controller':
oc_cfg.create_nic_template(net_settings, deploy_settings, role,
args.deploy_dir, APEX_TEMP_DIR)
+ # Prepare/Upload docker images
+ docker_env = 'containers-prepare-parameter.yaml'
+ shutil.copyfile(os.path.join(args.deploy_dir, docker_env),
+ os.path.join(APEX_TEMP_DIR, docker_env))
+ # Upload extra ansible.cfg
+ if platform.machine() == 'aarch64':
+ ansible_env = 'ansible.cfg'
+ shutil.copyfile(os.path.join(args.deploy_dir, ansible_env),
+ os.path.join(APEX_TEMP_DIR, ansible_env))
+
+ c_builder.prepare_container_images(
+ os.path.join(APEX_TEMP_DIR, docker_env),
+ branch=branch.replace('stable/', ''),
+ neutron_driver=c_builder.get_neutron_driver(ds_opts)
+ )
# Install Undercloud
undercloud.configure(net_settings, deploy_settings,
os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
@@ -443,8 +493,12 @@ def main():
opnfv_env, net_env_target, APEX_TEMP_DIR)
if not args.virtual:
oc_deploy.LOOP_DEVICE_SIZE = "50G"
+ if platform.machine() == 'aarch64':
+ oc_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+ else:
+ oc_image = sdn_image
patched_containers = oc_deploy.prep_image(
- deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
+ deploy_settings, net_settings, oc_image, APEX_TEMP_DIR,
root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
@@ -473,6 +527,8 @@ def main():
container_vars['os_version'] = os_version
container_vars['aarch64'] = platform.machine() == 'aarch64'
container_vars['sdn_env_file'] = sdn_env_files
+ container_vars['container_client'] = utils.find_container_client(
+ os_version)
try:
utils.run_ansible(container_vars, docker_playbook,
host=undercloud.ip, user='stack',
@@ -481,6 +537,8 @@ def main():
except Exception:
logging.error("Unable to complete container prep on "
"Undercloud")
+ for tmp_file in UC_DISK_FILES:
+ os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
raise
@@ -513,6 +571,8 @@ def main():
deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
deploy_vars['vim'] = ds_opts['vim']
+ deploy_vars['container_client'] = utils.find_container_client(
+ os_version)
for dns_server in net_settings['dns_servers']:
deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
dns_server)
@@ -528,6 +588,8 @@ def main():
raise
finally:
os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+ for tmp_file in UC_DISK_FILES:
+ os.remove(os.path.join(APEX_TEMP_DIR, tmp_file))
# Post install
logging.info("Executing post deploy configuration")
@@ -674,6 +736,10 @@ def main():
deploy_vars['l2gw'] = ds_opts.get('l2gw')
deploy_vars['sriov'] = ds_opts.get('sriov')
deploy_vars['tacker'] = ds_opts.get('tacker')
+ deploy_vars['all_in_one'] = all_in_one
+ # TODO(trozet): need to set container client to docker until OOO
+ # migrates OC to podman. Remove this later.
+ deploy_vars['container_client'] = 'docker'
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
diff --git a/apex/network/network_data.py b/apex/network/network_data.py
index 1177af09..6f330c50 100644
--- a/apex/network/network_data.py
+++ b/apex/network/network_data.py
@@ -83,7 +83,7 @@ def create_network_data(ns, target=None):
"{}".format(net))
raise NetworkDataException("cidr is null for network {}".format(
net))
-
+ tmp_net['mtu'] = network.get('mtu', 1500)
network_data.append(copy.deepcopy(tmp_net))
# have to do this due to the aforementioned bug
diff --git a/apex/network/network_environment.py b/apex/network/network_environment.py
index 0a4d1036..52b4452a 100644
--- a/apex/network/network_environment.py
+++ b/apex/network/network_environment.py
@@ -186,6 +186,8 @@ class NetworkEnvironment(dict):
for flag in IPV6_FLAGS:
self[param_def][flag] = True
+ self._update_service_netmap(net_settings.enabled_network_list)
+
def _get_vlan(self, network):
if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
return network['nic_mapping'][CONTROLLER]['vlan']
@@ -218,6 +220,13 @@ class NetworkEnvironment(dict):
prefix = ''
self[reg][key] = self.tht_dir + prefix + postfix
+ def _update_service_netmap(self, network_list):
+ if 'ServiceNetMap' not in self[param_def]:
+ return
+ for service, network in self[param_def]['ServiceNetMap'].items():
+ if network not in network_list:
+ self[param_def]['ServiceNetMap'][service] = 'ctlplane'
+
class NetworkEnvException(Exception):
def __init__(self, value):
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 6e1d5bb4..538f50a4 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -99,6 +99,12 @@ DUPLICATE_COMPUTE_SERVICES = [
'OS::TripleO::Services::ComputeNeutronL3Agent'
]
+NFS_VARS = [
+ 'NovaNfsEnabled',
+ 'GlanceNfsEnabled',
+ 'CinderNfsEnabledBackend'
+]
+
def build_sdn_env_list(ds, sdn_map, env_list=None):
"""
@@ -152,7 +158,7 @@ def get_docker_sdn_files(ds_opts):
:return: list of docker THT env files for an SDN
"""
docker_services = con.VALID_DOCKER_SERVICES
- tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
+ tht_dir = con.THT_DOCKER_ENV_DIR
sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
for i, sdn_file in enumerate(sdn_env_list):
sdn_base = os.path.basename(sdn_file)
@@ -194,8 +200,6 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
sdn_docker_files = get_docker_sdn_files(ds_opts)
for sdn_docker_file in sdn_docker_files:
deploy_options.append(sdn_docker_file)
- if sdn_docker_files:
- deploy_options.append('sdn-images.yaml')
else:
deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
@@ -207,6 +211,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
else:
deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+ # TODO(trozet) Fix this check to look for if ceph is in controller services
+ # and not use name of the file
if ds_opts['ceph'] and 'csit' not in env_file:
prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
@@ -247,12 +253,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
if net_data:
cmd += ' --networks-file network_data.yaml'
libvirt_type = 'kvm'
- if virtual:
+ if virtual and (platform.machine() != 'aarch64'):
with open('/sys/module/kvm_intel/parameters/nested') as f:
nested_kvm = f.read().strip()
if nested_kvm != 'Y':
libvirt_type = 'qemu'
+ elif virtual and (platform.machine() == 'aarch64'):
+ libvirt_type = 'qemu'
cmd += ' --libvirt-type {}'.format(libvirt_type)
+ if platform.machine() == 'aarch64':
+ cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
logging.info("Deploy command set: {}".format(cmd))
with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -357,22 +367,12 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
pw_op = "password:{}".format(root_pw)
virt_cmds.append({con.VIRT_PW: pw_op})
- if dataplane == 'ovs':
- if ds_opts['sfc']:
- oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
- elif sdn == 'opendaylight':
- # FIXME(trozet) remove this after RDO is updated with fix for
- # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
- ovs_file = os.path.basename(con.CUSTOM_OVS)
- ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
- utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
- targets=[ovs_file])
- virt_cmds.extend([
- {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
- ovs_file))},
- {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
- ovs_file)}
- ])
+ # FIXME(trozet) ovs build is failing in CentOS 7.6
+ # if dataplane == 'ovs':
+ # FIXME(trozet) remove this after RDO is updated with fix for
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+ # https://review.rdoproject.org/r/#/c/13839/
+ # oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
if dataplane == 'fdio':
# Patch neutron with using OVS external interface for router
@@ -432,6 +432,29 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
virt_cmds.append(
{con.VIRT_RUN_CMD: "crudini --del {} Unit "
"ConditionPathExists".format(dhcp_unit)})
+ # Prep for NFS
+ virt_cmds.extend([
+ {con.VIRT_INSTALL: "nfs-utils"},
+ {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+ "/etc/systemd/system/multi-user.target.wants/"
+ "nfs-server.service"},
+ {con.VIRT_RUN_CMD: "mkdir -p /glance"},
+ {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
+ {con.VIRT_RUN_CMD: "mkdir -p /nova"},
+ {con.VIRT_RUN_CMD: "chmod 777 /glance"},
+ {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
+ {con.VIRT_RUN_CMD: "chmod 777 /nova"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
+ {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
+ "no_root_squash,no_acl)' > /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "exportfs -avr"},
+ ])
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
return patched_containers
@@ -677,11 +700,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
# Merge compute services into control services if only a single
# node deployment
if num_compute == 0:
- logging.info("All in one deployment. Checking if service merging "
- "required into control services")
with open(tmp_opnfv_env, 'r') as fh:
data = yaml.safe_load(fh)
param_data = data['parameter_defaults']
+ logging.info("All in one deployment detected")
+ logging.info("Disabling NFS in env file")
# Check to see if any parameters are set for Compute
for param in param_data.keys():
if param != 'ComputeServices' and param.startswith('Compute'):
@@ -689,6 +712,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
"in deployment: {}. Please use Controller "
"based parameters when using All-in-one "
"deployments".format(param))
+ if param in NFS_VARS:
+ param_data[param] = False
+ logging.info("Checking if service merging required into "
+ "control services")
if ('ControllerServices' in param_data and 'ComputeServices' in
param_data):
logging.info("Services detected in environment file. Merging...")
@@ -703,11 +730,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
logging.debug("Merged controller services: {}".format(
pprint.pformat(param_data['ControllerServices'])
))
- with open(tmp_opnfv_env, 'w') as fh:
- yaml.safe_dump(data, fh, default_flow_style=False)
else:
logging.info("No services detected in env file, not merging "
"services")
+ with open(tmp_opnfv_env, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
with open(tmp_opnfv_env, 'r') as fh:
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index 00e6d6c0..9f8a6f18 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -129,9 +129,6 @@ class DeploySettings(dict):
"Invalid SRIOV interface name: {}".format(
self['deploy_options']['sriov']))
- if self['deploy_options']['odl_version'] == 'oxygen':
- self['deploy_options']['odl_version'] = 'master'
-
if 'performance' in deploy_options:
if not isinstance(deploy_options['performance'], dict):
raise DeploySettingsException("Performance deploy_option"
diff --git a/apex/tests/config/98faaca.diff b/apex/tests/config/98faaca.diff
index 68a66fbc..96462d5f 100644
--- a/apex/tests/config/98faaca.diff
+++ b/apex/tests/config/98faaca.diff
@@ -17,7 +17,7 @@ specified in environments/services-docker/update-odl.yaml.
Upgrading ODL to the next major release (1.1->2) requires
only the L2 steps. These are implemented as upgrade_tasks and
-post_upgrade_tasks in https://review.openstack.org/489201.
+post_upgrade_tasks in https://review.opendev.org/489201.
Steps involved in level 2 update are
1. Block OVS instances to connect to ODL
diff --git a/apex/tests/test_apex_build_utils.py b/apex/tests/test_apex_build_utils.py
index f18103c8..36caaf1f 100644
--- a/apex/tests/test_apex_build_utils.py
+++ b/apex/tests/test_apex_build_utils.py
@@ -178,6 +178,12 @@ class TestBuildUtils(unittest.TestCase):
self.assertNotRegex(tmp_patch, 'Steps of upgrade are as follows')
self.assertNotRegex(tmp_patch, 'Steps invlolved in level 2 update')
+ def test_is_path_in_patch(self):
+ with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+ dummy_patch = fh.read()
+ self.assertTrue(build_utils.is_path_in_patch(dummy_patch,
+ 'releasenotes/'))
+
def test_strip_no_patch_sections(self):
with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
dummy_patch = fh.read()
diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py
index 09bd2545..3ff95bb5 100644
--- a/apex/tests/test_apex_common_builder.py
+++ b/apex/tests/test_apex_common_builder.py
@@ -24,6 +24,8 @@ DOCKER_YAML = {
}
}
+a_mock_open = mock_open(read_data=None)
+
class TestCommonBuilder(unittest.TestCase):
@classmethod
@@ -55,7 +57,8 @@ class TestCommonBuilder(unittest.TestCase):
dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
'status': 'MERGED'}
self.assertTrue(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
def test_is_patch_promoted_docker(self):
dummy_change = {'submitted': '2017-06-05 20:23:09.000000000',
@@ -63,13 +66,15 @@ class TestCommonBuilder(unittest.TestCase):
dummy_image = 'centos-binary-opendaylight'
self.assertTrue(c_builder.is_patch_promoted(dummy_change,
'master',
+ con.DOCKERHUB_OOO,
docker_image=dummy_image))
def test_patch_not_promoted(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
'status': 'MERGED'}
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
def test_patch_not_promoted_docker(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
@@ -77,13 +82,15 @@ class TestCommonBuilder(unittest.TestCase):
dummy_image = 'centos-binary-opendaylight'
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
'master',
+ con.DOCKERHUB_OOO,
docker_image=dummy_image))
def test_patch_not_promoted_and_not_merged(self):
dummy_change = {'submitted': '2900-06-05 20:23:09.000000000',
'status': 'BLAH'}
self.assertFalse(c_builder.is_patch_promoted(dummy_change,
- 'master'))
+ 'master',
+ con.DOCKERHUB_OOO))
@patch('builtins.open', mock_open())
@patch('apex.builders.common_builder.is_patch_promoted')
@@ -239,7 +246,8 @@ class TestCommonBuilder(unittest.TestCase):
'/dummytmp/dummyrepo.tar')
def test_project_to_docker_image(self):
- found_services = c_builder.project_to_docker_image(project='nova')
+ found_services = c_builder.project_to_docker_image('nova',
+ con.DOCKERHUB_OOO)
assert 'nova-api' in found_services
@patch('apex.common.utils.open_webpage')
@@ -248,4 +256,55 @@ class TestCommonBuilder(unittest.TestCase):
mock_open_web.return_value = b'{"blah": "blah"}'
self.assertRaises(exceptions.ApexCommonBuilderException,
c_builder.project_to_docker_image,
- 'nova')
+ 'nova',
+ con.DOCKERHUB_OOO)
+
+ def test_get_neutron_driver(self):
+ ds_opts = {'dataplane': 'fdio',
+ 'sdn_controller': 'opendaylight',
+ 'odl_version': 'master',
+ 'vpn': False,
+ 'sriov': False}
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'odl')
+ ds_opts['sdn_controller'] = None
+ ds_opts['vpp'] = True
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'vpp')
+ ds_opts['sdn_controller'] = 'ovn'
+ self.assertEquals(c_builder.get_neutron_driver(ds_opts),
+ 'ovn')
+
+ @patch('apex.builders.common_builder.yaml')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', a_mock_open, create=True)
+ def test_prepare_container_images(self, mock_is_file, mock_yaml):
+ mock_yaml.safe_load.return_value = {
+ 'parameter_defaults': {
+ 'ContainerImagePrepare': [
+ {'set':
+ {'namespace': 'blah',
+ 'neutron_driver': 'null',
+ }
+ }
+ ]
+ }
+ }
+ expected_output = {
+ 'parameter_defaults': {
+ 'ContainerImagePrepare': [
+ {'set':
+ {'namespace': 'docker.io/tripleoqueens',
+ 'neutron_driver': 'odl',
+ }
+ }
+ ]
+ }
+ }
+
+ c_builder.prepare_container_images('dummy.yaml', 'queens',
+ 'odl')
+ mock_yaml.safe_dump.assert_called_with(
+ expected_output,
+ a_mock_open.return_value,
+ default_flow_style=False)
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index 4c250117..1ecb7df6 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -84,7 +84,7 @@ class TestCommonUtils:
def test_fetch_upstream_previous_file(self):
test_file = 'overcloud-full.tar.md5'
- url = 'https://images.rdoproject.org/master/delorean/' \
+ url = 'https://images.rdoproject.org/master/rdo_trunk/' \
'current-tripleo/stable/'
os.makedirs('/tmp/fetch_test', exist_ok=True)
open("/tmp/fetch_test/{}".format(test_file), 'w').close()
@@ -155,3 +155,8 @@ class TestCommonUtils:
def test_unique(self):
dummy_list = [1, 2, 1, 3, 4, 5, 5]
assert_equal(utils.unique(dummy_list), [1, 2, 3, 4, 5])
+
+ def test_find_container_client(self):
+ for version in 'rocky', 'queens':
+ assert_equal(utils.find_container_client(version), 'docker')
+ assert_equal(utils.find_container_client('master'), 'podman')
diff --git a/apex/tests/test_apex_deploy.py b/apex/tests/test_apex_deploy.py
index be52c276..004c21c1 100644
--- a/apex/tests/test_apex_deploy.py
+++ b/apex/tests/test_apex_deploy.py
@@ -118,6 +118,7 @@ class TestDeploy(unittest.TestCase):
args.snapshot = False
assert_raises(ApexDeployException, validate_deploy_args, args)
+ @patch('apex.deploy.c_builder')
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@@ -146,7 +147,7 @@ class TestDeploy(unittest.TestCase):
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
mock_oc_deploy, mock_shutil, mock_network_data,
- mock_uc_builder, mock_deployment):
+ mock_uc_builder, mock_deployment, mock_c_builder):
net_sets_dict = {'networks': MagicMock(),
'dns_servers': 'test'}
ds_opts_dict = {'global_params': MagicMock(),
@@ -197,6 +198,7 @@ class TestDeploy(unittest.TestCase):
main()
mock_snap_deployment.assert_called()
+ @patch('apex.deploy.c_builder')
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@@ -225,7 +227,7 @@ class TestDeploy(unittest.TestCase):
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
mock_oc_deploy, mock_shutil, mock_network_data,
- mock_uc_builder, mock_deployment):
+ mock_uc_builder, mock_deployment, mock_c_builder):
# didn't work yet line 412
# net_sets_dict = {'networks': {'admin': {'cidr': MagicMock()}},
# 'dns_servers': 'test'}
@@ -329,6 +331,7 @@ class TestDeploy(unittest.TestCase):
# TODO(trozet) add assertions here with arguments for functions in
# deploy main
+ @patch('apex.deploy.c_builder')
@patch('apex.deploy.ApexDeployment')
@patch('apex.deploy.uc_builder')
@patch('apex.deploy.network_data.create_network_data')
@@ -358,7 +361,8 @@ class TestDeploy(unittest.TestCase):
mock_utils, mock_parsers, mock_oc_cfg,
mock_virt_utils, mock_inv, mock_build_vms, mock_uc_lib,
mock_oc_deploy, mock_git, mock_shutil,
- mock_network_data, mock_uc_builder, mock_deployment):
+ mock_network_data, mock_uc_builder, mock_deployment,
+ mock_c_builder):
net_sets_dict = {'networks': MagicMock(),
'dns_servers': 'test'}
ds_opts_dict = {'global_params': MagicMock(),
diff --git a/apex/tests/test_apex_network_environment.py b/apex/tests/test_apex_network_environment.py
index 79a72a55..7aa6ef15 100644
--- a/apex/tests/test_apex_network_environment.py
+++ b/apex/tests/test_apex_network_environment.py
@@ -165,3 +165,10 @@ class TestNetworkEnvironment:
e = NetworkEnvException("test")
print(e)
assert_is_instance(e, NetworkEnvException)
+
+ def test_service_netmap(self):
+ ns = copy(self.ns)
+ ns.enabled_network_list = ['admin']
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ for network in ne['parameter_defaults']['ServiceNetMap'].values():
+ assert_equal(network, 'ctlplane')
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index a70057b9..79dbf54b 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -156,7 +156,6 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_in('--control-scale 3', result_cmd)
assert_in('--compute-scale 2', result_cmd)
assert_in('docker-images.yaml', result_cmd)
- assert_in('sdn-images.yaml', result_cmd)
assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
'/docker.yaml', result_cmd)
assert_in('/usr/share/openstack-tripleo-heat-templates/environments/'
@@ -234,6 +233,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@@ -241,7 +241,8 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl(self, mock_is_file, mock_shutil,
- mock_virt_utils, mock_inject_odl, mock_fetch):
+ mock_virt_utils, mock_inject_odl,
+ mock_fetch, mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
@@ -259,6 +260,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
+ # mock_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.c_builder')
@patch('apex.overcloud.deploy.oc_builder')
@@ -340,12 +342,13 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_virt_utils.virt_customize.assert_called()
mock_oc_builder.inject_opendaylight.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_ovn(self, mock_is_file, mock_shutil,
- mock_virt_utils):
+ mock_virt_utils, mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'vpn': False,
@@ -358,7 +361,9 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ # mock_ovs_nsh.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
@patch('apex.builders.overcloud_builder.inject_quagga')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@@ -368,7 +373,8 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl_vpn(self, mock_is_file, mock_shutil,
mock_virt_utils, mock_inject_odl,
- mock_inject_quagga, mock_fetch):
+ mock_inject_quagga, mock_fetch,
+ mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
@@ -387,6 +393,7 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
mock_inject_quagga.assert_called()
+ # mock_ovs_nsh.assert_called()
@patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@@ -414,7 +421,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
- mock_inject_ovs_nsh.assert_called()
+ # mock_inject_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.os.path.isfile')
def test_prep_image_no_image(self, mock_isfile):
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index 2d0dffcb..14586528 100644
--- a/apex/tests/test_apex_undercloud.py
+++ b/apex/tests/test_apex_undercloud.py
@@ -10,6 +10,7 @@
import ipaddress
import libvirt
import os
+import platform
import subprocess
import unittest
@@ -239,13 +240,16 @@ class TestUndercloud(unittest.TestCase):
assert_raises(ApexUndercloudException,
uc.configure, ns, ds, 'playbook', '/tmp/dir')
+ @patch('apex.undercloud.undercloud.virt_utils')
+ @patch('apex.undercloud.undercloud.uc_builder')
@patch('apex.undercloud.undercloud.os.remove')
@patch('apex.undercloud.undercloud.os.path')
@patch('apex.undercloud.undercloud.shutil')
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
def test_setup_vols(self, mock_get_vm, mock_create,
- mock_shutil, mock_os_path, mock_os_remove):
+ mock_shutil, mock_os_path, mock_os_remove,
+ mock_uc_builder, mock_virt_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
mock_os_path.isfile.return_value = True
mock_os_path.exists.return_value = True
@@ -255,6 +259,9 @@ class TestUndercloud(unittest.TestCase):
src_img = os.path.join(uc.image_path, img_file)
dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
mock_shutil.copyfile.assert_called_with(src_img, dest_img)
+ if platform.machine() != 'aarch64':
+ mock_uc_builder.expand_disk.assert_called()
+ mock_virt_utils.virt_customize.assert_called()
@patch('apex.undercloud.undercloud.os.path')
@patch.object(Undercloud, '_get_vm', return_value=None)
@@ -278,12 +285,19 @@ class TestUndercloud(unittest.TestCase):
{'--run-command': 'chmod 600 /root/.ssh/authorized_keys'},
{'--run-command': 'restorecon '
'-R -v /root/.ssh'},
+ {'--run-command': 'id -u stack || useradd -m stack'},
+ {'--run-command': 'mkdir -p /home/stack/.ssh'},
+ {'--run-command': 'chown stack:stack /home/stack/.ssh'},
{'--run-command':
'cp /root/.ssh/authorized_keys /home/stack/.ssh/'},
{'--run-command':
'chown stack:stack /home/stack/.ssh/authorized_keys'},
{'--run-command':
- 'chmod 600 /home/stack/.ssh/authorized_keys'}]
+ 'chmod 600 /home/stack/.ssh/authorized_keys'},
+ {'--run-command':
+ 'echo "stack ALL = (ALL) NOPASSWD: ALL" >> '
+ '/etc/sudoers'},
+ {'--run-command': 'touch /etc/cloud/cloud-init.disabled'}]
mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
@patch.object(Undercloud, '_get_vm', return_value=None)
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 1658801d..5ee487c2 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -15,6 +15,7 @@ import shutil
import subprocess
import time
+from apex.builders import undercloud_builder as uc_builder
from apex.virtual import utils as virt_utils
from apex.virtual import configure_vm as vm_lib
from apex.common import constants
@@ -63,7 +64,7 @@ class Undercloud:
if self.external_net:
networks.append('external')
console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
- root = 'vda' if platform.machine() == 'aarch64' else 'sda'
+ root = 'vda2' if platform.machine() == 'aarch64' else 'sda'
self.vm = vm_lib.create_vm(name='undercloud',
image=self.volume,
@@ -72,7 +73,8 @@ class Undercloud:
kernel_args=['console={}'.format(console),
'root=/dev/{}'.format(root)],
default_network=True,
- template_dir=self.template_path)
+ template_dir=self.template_path,
+ memory=10240)
self.setup_volumes()
self.inject_auth()
@@ -110,7 +112,7 @@ class Undercloud:
# give 10 seconds to come up
time.sleep(10)
# set IP
- for x in range(5):
+ for x in range(10):
if self._set_ip():
logging.info("Undercloud started. IP Address: {}".format(
self.ip))
@@ -153,6 +155,8 @@ class Undercloud:
ansible_vars['apex_temp_dir'] = apex_temp_dir
ansible_vars['nat'] = self.detect_nat(net_settings)
+ ansible_vars['container_client'] = utils.find_container_client(
+ self.os_version)
try:
utils.run_ansible(ansible_vars, playbook, host=self.ip,
user='stack')
@@ -180,11 +184,19 @@ class Undercloud:
if os.path.exists(dest_img):
os.remove(dest_img)
shutil.copyfile(src_img, dest_img)
+ if img_file == self.image_name and platform.machine() != 'aarch64':
+ uc_builder.expand_disk(dest_img)
+ self.expand_root_fs()
+
shutil.chown(dest_img, user='qemu', group='qemu')
os.chmod(dest_img, 0o0744)
- # TODO(trozet):check if resize needed right now size is 50gb
+
+ def expand_root_fs(self):
# there is a lib called vminspect which has some dependencies and is
# not yet available in pip. Consider switching to this lib later.
+ logging.debug("Expanding root filesystem on /dev/sda partition")
+ virt_ops = [{constants.VIRT_RUN_CMD: 'xfs_growfs /dev/sda'}]
+ virt_utils.virt_customize(virt_ops, self.volume)
def inject_auth(self):
virt_ops = list()
@@ -200,9 +212,14 @@ class Undercloud:
run_cmds = [
'chmod 600 /root/.ssh/authorized_keys',
'restorecon -R -v /root/.ssh',
+ 'id -u stack || useradd -m stack',
+ 'mkdir -p /home/stack/.ssh',
+ 'chown stack:stack /home/stack/.ssh',
'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
'chown stack:stack /home/stack/.ssh/authorized_keys',
- 'chmod 600 /home/stack/.ssh/authorized_keys'
+ 'chmod 600 /home/stack/.ssh/authorized_keys',
+ 'echo "stack ALL = (ALL) NOPASSWD: ALL" >> /etc/sudoers',
+ 'touch /etc/cloud/cloud-init.disabled'
]
for cmd in run_cmds:
virt_ops.append({constants.VIRT_RUN_CMD: cmd})
@@ -235,7 +252,10 @@ class Undercloud:
"local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
str(ns_admin['cidr']).split('/')[1]),
"generate_service_certificate false",
- "undercloud_ntp_servers {}".format(str(ns['ntp'][0]))
+ "undercloud_ntp_servers {}".format(str(ns['ntp'][0])),
+ "container_images_file "
+ "/home/stack/containers-prepare-parameter.yaml",
+ "undercloud_enable_selinux false"
]
config['undercloud_network_config'] = [
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
index ba0398bb..9d47bf03 100755
--- a/apex/virtual/configure_vm.py
+++ b/apex/virtual/configure_vm.py
@@ -102,6 +102,10 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
with open(os.path.join(template_dir, 'domain.xml'), 'r') as f:
source_template = f.read()
imagefile = os.path.realpath(image)
+
+ if arch == 'aarch64' and diskbus == 'sata':
+ diskbus = 'virtio'
+
memory = int(memory) * 1024
params = {
'name': name,
@@ -118,9 +122,6 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
'user_interface': '',
}
- # assign virtio as default for aarch64
- if arch == 'aarch64' and diskbus == 'sata':
- diskbus = 'virtio'
# Configure the bus type for the target disk device
params['diskbus'] = diskbus
nicparams = {
@@ -171,7 +172,7 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
"""
params['user_interface'] = """
<controller type='virtio-serial' index='0'>
- <address type='virtio-mmio'/>
+ <address type='pci'/>
</controller>
<serial type='pty'>
<target port='0'/>
diff --git a/build/ansible.cfg b/build/ansible.cfg
new file mode 100644
index 00000000..a9db58a0
--- /dev/null
+++ b/build/ansible.cfg
@@ -0,0 +1,11 @@
+[defaults]
+retry_files_enabled = False
+forks = 25
+timeout = 60
+gather_timeout = 30
+
+[ssh_connection]
+ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=30m -o ServerAliveInterval=5 -o ServerAliveCountMax=5
+retries = 8
+pipelining = True
+
diff --git a/build/containers-prepare-parameter.yaml b/build/containers-prepare-parameter.yaml
new file mode 100644
index 00000000..5089c335
--- /dev/null
+++ b/build/containers-prepare-parameter.yaml
@@ -0,0 +1,26 @@
+---
+parameter_defaults:
+ ContainerImagePrepare:
+ - push_destination: true
+ set:
+ ceph_image: daemon
+ ceph_namespace: docker.io/ceph
+ ceph_tag: v3.1.0-stable-3.1-luminous-centos-7-x86_64
+ name_prefix: centos-binary-
+ name_suffix: ''
+ namespace: docker.io/tripleomaster
+ neutron_driver: null
+ tag: current-tripleo
+ excludes:
+ - sensu
+ - manila
+ - octavia
+ - skydive
+ - drouter
+ - sahara
+ - rsys
+ - fluent
+ - designate
+ - barbican
+ - etcd
+ - ec2
diff --git a/build/csit-environment.yaml b/build/csit-environment.yaml
index 048833d1..39486d32 100644
--- a/build/csit-environment.yaml
+++ b/build/csit-environment.yaml
@@ -14,6 +14,29 @@ parameter_defaults:
ExtraConfig:
tripleo::ringbuilder::build_ring: false
nova::api::default_floating_pool: 'external'
+ ControllerExtraConfig:
+ tripleo::firewall::firewall_rules:
+ '139 allow NFS TCP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: tcp
+ action: accept
+ '140 allow NFS UDP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: udp
+ action: accept
+ GlanceNfsEnabled: true
+ GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/glance
+ GlanceNfsOptions:
+ 'rw,sync,context=system_u:object_r:glance_var_lib_t:s0'
+ NovaNfsEnabled: true
+ NovaNfsShare: overcloud-controller-0.opnfvlf.org:/nova
+ NovaNfsOptions: 'rw,sync,context=system_u:object_r:nfs_t:s0'
DockerPuppetProcessCount: 10
NeutronNetworkVLANRanges: 'datacentre:500:525'
SshServerOptions:
diff --git a/build/csit-queens-environment.yaml b/build/csit-queens-environment.yaml
index b13dd4df..12c994d1 100644
--- a/build/csit-queens-environment.yaml
+++ b/build/csit-queens-environment.yaml
@@ -14,6 +14,29 @@ parameter_defaults:
ExtraConfig:
tripleo::ringbuilder::build_ring: false
nova::api::default_floating_pool: 'external'
+ ControllerExtraConfig:
+ tripleo::firewall::firewall_rules:
+ '139 allow NFS TCP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: tcp
+ action: accept
+ '140 allow NFS UDP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: udp
+ action: accept
+ GlanceNfsEnabled: true
+ GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/glance
+ GlanceNfsOptions:
+ 'rw,sync,context=system_u:object_r:glance_var_lib_t:s0'
+ NovaNfsEnabled: true
+ NovaNfsShare: overcloud-controller-0.opnfvlf.org:/nova
+ NovaNfsOptions: 'rw,sync,context=system_u:object_r:nfs_t:s0'
DockerPuppetProcessCount: 10
NeutronNetworkVLANRanges: 'datacentre:500:525'
SshServerOptions:
diff --git a/build/csit-rocky-environment.yaml b/build/csit-rocky-environment.yaml
new file mode 100644
index 00000000..39486d32
--- /dev/null
+++ b/build/csit-rocky-environment.yaml
@@ -0,0 +1,116 @@
+---
+# Environment file used to list common parameters required for all deployment
+# types
+
+parameters:
+ CloudDomain: opnfvlf.org
+
+parameter_defaults:
+ GlanceBackend: file
+ CeilometerStoreEvents: true
+ NeutronEnableForceMetadata: true
+ NeutronEnableDHCPMetadata: true
+ NeutronEnableIsolatedMetadata: true
+ ExtraConfig:
+ tripleo::ringbuilder::build_ring: false
+ nova::api::default_floating_pool: 'external'
+ ControllerExtraConfig:
+ tripleo::firewall::firewall_rules:
+ '139 allow NFS TCP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: tcp
+ action: accept
+ '140 allow NFS UDP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: udp
+ action: accept
+ GlanceNfsEnabled: true
+ GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/glance
+ GlanceNfsOptions:
+ 'rw,sync,context=system_u:object_r:glance_var_lib_t:s0'
+ NovaNfsEnabled: true
+ NovaNfsShare: overcloud-controller-0.opnfvlf.org:/nova
+ NovaNfsOptions: 'rw,sync,context=system_u:object_r:nfs_t:s0'
+ DockerPuppetProcessCount: 10
+ NeutronNetworkVLANRanges: 'datacentre:500:525'
+ SshServerOptions:
+ HostKey:
+ - '/etc/ssh/ssh_host_rsa_key'
+ - '/etc/ssh/ssh_host_ecdsa_key'
+ - '/etc/ssh/ssh_host_ed25519_key'
+ SyslogFacility: 'AUTHPRIV'
+ AuthorizedKeysFile: '.ssh/authorized_keys'
+ PasswordAuthentication: 'no'
+ ChallengeResponseAuthentication: 'no'
+ GSSAPIAuthentication: 'no'
+ GSSAPICleanupCredentials: 'no'
+ UsePAM: 'yes'
+ X11Forwarding: 'yes'
+ UsePrivilegeSeparation: 'sandbox'
+ AcceptEnv:
+ - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+ - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+ - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+ - 'XMODIFIERS'
+ Subsystem: 'sftp /usr/libexec/openssh/sftp-server'
+ UseDNS: 'no'
+ ControllerServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Clustercheck
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::GlanceRegistry
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Redis
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::OsloMessagingRpc
+ - OS::TripleO::Services::OsloMessagingNotify
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::OpenDaylightApi
+ - OS::TripleO::Services::OpenDaylightOvs
+ ComputeServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::ComputeNeutronCorePlugin
+ - OS::TripleO::Services::ComputeNeutronMetadataAgent
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::OpenDaylightOvs
diff --git a/build/network-environment.yaml b/build/network-environment.yaml
index 3fd22e3d..1397a0c8 100644
--- a/build/network-environment.yaml
+++ b/build/network-environment.yaml
@@ -63,33 +63,53 @@ parameter_defaults:
NeutronExternalNetworkBridge: 'br-ex'
ServiceNetMap:
+ ApacheNetwork: internal_api
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
AodhApiNetwork: internal_api
+ PankoApiNetwork: internal_api
+ BarbicanApiNetwork: internal_api
+ GnocchiApiNetwork: internal_api
OpendaylightApiNetwork: internal_api
MongoDbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
GlanceApiNetwork: internal_api
GlanceRegistryNetwork: internal_api
+ IronicApiNetwork: ctlplane
+ IronicNetwork: ctlplane
+ IronicInspectorNetwork: ctlplane
KeystoneAdminApiNetwork: ctlplane
KeystonePublicApiNetwork: internal_api
NeutronApiNetwork: internal_api
HeatApiNetwork: internal_api
+ HeatApiCfnNetwork: internal_api
+ HeatApiCloudwatchNetwork: internal_api
+ ManilaApiNetwork: internal_api
+ MetricsQdrNetwork: internal_api
NovaApiNetwork: internal_api
NovaMetadataNetwork: internal_api
+ NovaPlacementNetwork: internal_api
NovaVncProxyNetwork: internal_api
+ NovaLibvirtNetwork: internal_api
+ NovajoinNetwork: internal_api
+ OctaviaApiNetwork: internal_api
SwiftMgmtNetwork: storage
SwiftProxyNetwork: storage
TackerApiNetwork: internal_api
CongressApiNetwork: internal_api
HorizonNetwork: internal_api
+ OsloMessagingRpcNetwork: internal_api
+ OsloMessagingNotifyNetwork: internal_api
MemcachedNetwork: internal_api
RabbitMqNetwork: internal_api
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage
- CephPublicNetwork: storage
+ CephMonNetwork: storage
+ PublicNetwork: external
+ OvnDbsNetwork: internal_api
+ DockerRegistryNetwork: ctlplane
# Define which network will be used for hostname resolution
ControllerHostnameResolveNetwork: internal_api
ComputeHostnameResolveNetwork: internal_api
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 189654ca..6d1fb9c0 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -9,6 +9,14 @@ parameters:
default: ''
description: IP address/subnet on the ctlplane network
type: string
+ ControlPlaneStaticRoutes:
+ default: []
+ description: >
+ Routes for the ctlplane network traffic.
+ JSON route e.g. [{'destination':'10.0.0.0/16', 'nexthop':'10.0.0.1'}]
+ Unless the default is changed, the parameter is automatically resolved
+ from the subnet host_routes attribute.
+ type: json
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/build/patches/neutron-patch-NSDriver.patch b/build/patches/neutron-patch-NSDriver.patch
index 84b4fb02..95ad58f9 100644
--- a/build/patches/neutron-patch-NSDriver.patch
+++ b/build/patches/neutron-patch-NSDriver.patch
@@ -139,7 +139,7 @@ index 88d6e67f31..c0fab604d1 100644
+
+ def _configure_mtu(self, ns_dev, mtu=None):
+ # Need to set MTU, after added to namespace. See review
-+ # https://review.openstack.org/327651
++ # https://review.opendev.org/327651
+ try:
+ # Note: network_device_mtu will be deprecated in future
+ mtu_override = self.conf.network_device_mtu
diff --git a/build/rpm_specs/opnfv-apex.spec b/build/rpm_specs/opnfv-apex.spec
index 510ce866..d05397ed 100644
--- a/build/rpm_specs/opnfv-apex.spec
+++ b/build/rpm_specs/opnfv-apex.spec
@@ -1,7 +1,7 @@
%global srcname opnfv-apex
Name: python34-%{srcname}
-Version: 7.0
+Version: 8.0
Release: %{_release}
Summary: Scripts for OPNFV deployment using Apex
@@ -72,16 +72,18 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-nosdn-bar-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-bar-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-calipso-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-calipso_queens-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-calipso_rocky-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-queens-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-queens-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-rocky-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-rocky-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-queens-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-queens-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-rocky-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-rocky-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
@@ -89,12 +91,12 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm_ovs_dpdk-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn_queens-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn_queens-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn_rocky-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-bgpvpn_rocky-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-sfc-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-sfc-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-sfc_queens-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-sfc_queens-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-sfc_rocky-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-sfc_rocky-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl_netvirt-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
@@ -109,7 +111,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-odl-sriov-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-sriov-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-gluon-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-ovn-nofeature-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-ovn-nofeature-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-ovn-rocky-ha.yaml
%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-onos-sfc-ha.yaml
%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
@@ -129,6 +132,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Thu Sep 27 2018 Tim Rozet <trozet@redhat.com> - 8.0-0
+ Updates from Queens to Rocky
* Fri Aug 24 2018 Tim Rozet <trozet@redhat.com> - 7.0-7
Add Calipso for Queens
* Tue Aug 21 2018 Ricardo Noriega <rnoriega@redhat.com> - 7.0-6
diff --git a/ci/util.sh b/ci/util.sh
index a9df0213..5172ae1b 100755
--- a/ci/util.sh
+++ b/ci/util.sh
@@ -48,7 +48,7 @@ controller<number> or compute<number>"
node_output=$(undercloud_connect "stack" "source stackrc; nova list")
node=$(echo "$1" | sed -E 's/([a-zA-Z]+)([0-9]+)/\1-\2/')
- node_ip=$(echo "$node_output" | grep "$node" | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+ node_ip=$(echo "$node_output" | grep "$node " | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
if [ "$node_ip" == "" ]; then
echo -e "Unable to find IP for ${node} in \n${node_output}"
diff --git a/config/deploy/common-patches.yaml b/config/deploy/common-patches.yaml
index ac006bdd..bac6812c 100644
--- a/config/deploy/common-patches.yaml
+++ b/config/deploy/common-patches.yaml
@@ -6,11 +6,34 @@ patches:
project: openstack/tripleo-common
- change-id: Iaa2276aadae351fbc138de258c51d786f69e4395
project: openstack/tripleo-common
+ - change-id: I8d1db69d520da069099f919f286e6a553dd645a5
+ project: openstack/tripleo-heat-templates
+ - change-id: Ia51a825d11bd9b94d0110f13cdf2a6bbcedf6194
+ project: openstack/tripleo-common
+ overcloud:
+ - change-id: Ie988ba6a2d444a614e97c0edf5fce24b23970310
+ project: openstack/puppet-tripleo
+ - change-id: I93e3d355625508fdc42f44bdd358f3ba86fbd8d7
+ project: openstack/puppet-tripleo
+ rocky:
+ undercloud:
+ - change-id: I2e0a40d7902f592e4b7bd727f57048111e0bea36
+ project: openstack/tripleo-common
+ branch: master
+ - change-id: Iaa2276aadae351fbc138de258c51d786f69e4395
+ project: openstack/tripleo-common
+ branch: master
+ - change-id: I8d1db69d520da069099f919f286e6a553dd645a5
+ project: openstack/tripleo-heat-templates
+ - change-id: Ia51a825d11bd9b94d0110f13cdf2a6bbcedf6194
+ project: openstack/tripleo-common
overcloud:
- change-id: Ie988ba6a2d444a614e97c0edf5fce24b23970310
project: openstack/puppet-tripleo
+ branch: master
- change-id: I93e3d355625508fdc42f44bdd358f3ba86fbd8d7
project: openstack/puppet-tripleo
+ branch: master
queens:
undercloud:
- change-id: I966bf7f6f8d1cbc656abfad59e8bb927e1aa53c2
diff --git a/config/deploy/os-nosdn-calipso_queens-noha.yaml b/config/deploy/os-nosdn-calipso_rocky-noha.yaml
index 48d93f49..b14ceed5 100644
--- a/config/deploy/os-nosdn-calipso_queens-noha.yaml
+++ b/config/deploy/os-nosdn-calipso_rocky-noha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: false
sfc: false
vpn: false
diff --git a/config/deploy/os-nosdn-queens-ha.yaml b/config/deploy/os-nosdn-rocky-ha.yaml
index 88a40042..152a8840 100644
--- a/config/deploy/os-nosdn-queens-ha.yaml
+++ b/config/deploy/os-nosdn-rocky-ha.yaml
@@ -3,7 +3,7 @@ global_params:
ha_enabled: true
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: false
tacker: false
congress: false
diff --git a/config/deploy/os-nosdn-queens-noha.yaml b/config/deploy/os-nosdn-rocky-noha.yaml
index efadc31e..5fecfb6d 100644
--- a/config/deploy/os-nosdn-queens-noha.yaml
+++ b/config/deploy/os-nosdn-rocky-noha.yaml
@@ -3,7 +3,7 @@ global_params:
ha_enabled: false
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: false
tacker: false
congress: false
diff --git a/config/deploy/os-odl-bgpvpn_queens-ha.yaml b/config/deploy/os-odl-bgpvpn_rocky-ha.yaml
index 27a0caf6..a0833fd4 100644
--- a/config/deploy/os-odl-bgpvpn_queens-ha.yaml
+++ b/config/deploy/os-odl-bgpvpn_rocky-ha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: opendaylight
odl_version: master
tacker: false
diff --git a/config/deploy/os-odl-bgpvpn_queens-noha.yaml b/config/deploy/os-odl-bgpvpn_rocky-noha.yaml
index 33382307..6bc5adf3 100644
--- a/config/deploy/os-odl-bgpvpn_queens-noha.yaml
+++ b/config/deploy/os-odl-bgpvpn_rocky-noha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: opendaylight
odl_version: master
tacker: false
diff --git a/config/deploy/os-odl-rocky-ha.yaml b/config/deploy/os-odl-rocky-ha.yaml
new file mode 100644
index 00000000..8728bd8d
--- /dev/null
+++ b/config/deploy/os-odl-rocky-ha.yaml
@@ -0,0 +1,13 @@
+---
+global_params:
+ ha_enabled: true
+
+deploy_options:
+ containers: true
+ os_version: rocky
+ sdn_controller: opendaylight
+ odl_version: oxygen
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-odl-rocky-noha.yaml b/config/deploy/os-odl-rocky-noha.yaml
new file mode 100644
index 00000000..f6ceb81b
--- /dev/null
+++ b/config/deploy/os-odl-rocky-noha.yaml
@@ -0,0 +1,13 @@
+---
+global_params:
+ ha_enabled: false
+
+deploy_options:
+ containers: true
+ os_version: rocky
+ sdn_controller: opendaylight
+ odl_version: oxygen
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-odl-sfc_queens-ha.yaml b/config/deploy/os-odl-sfc_rocky-ha.yaml
index 0cd01466..3a1b6c93 100644
--- a/config/deploy/os-odl-sfc_queens-ha.yaml
+++ b/config/deploy/os-odl-sfc_rocky-ha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: opendaylight
odl_version: master
tacker: false
diff --git a/config/deploy/os-odl-sfc_queens-noha.yaml b/config/deploy/os-odl-sfc_rocky-noha.yaml
index d091b8cf..8af873a5 100644
--- a/config/deploy/os-odl-sfc_queens-noha.yaml
+++ b/config/deploy/os-odl-sfc_rocky-noha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
containers: true
- os_version: queens
+ os_version: rocky
sdn_controller: opendaylight
odl_version: master
tacker: false
diff --git a/config/deploy/os-ovn-nofeature-ha.yaml b/config/deploy/os-ovn-nofeature-ha.yaml
new file mode 100644
index 00000000..6c42cad9
--- /dev/null
+++ b/config/deploy/os-ovn-nofeature-ha.yaml
@@ -0,0 +1,15 @@
+---
+global_params:
+ ha_enabled: true
+ patches:
+ undercloud:
+ - change-id: Ic08ff58b10d4fa7116163be1f7fce57879cee8c5
+ project: openstack/tripleo-common
+
+deploy_options:
+ containers: true
+ sdn_controller: ovn
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-ovn-nofeature-noha.yaml b/config/deploy/os-ovn-nofeature-noha.yaml
deleted file mode 100644
index 8c8c5f97..00000000
--- a/config/deploy/os-ovn-nofeature-noha.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-global_params:
- ha_enabled: false
-
-deploy_options:
- sdn_controller: ovn
- tacker: false
- congress: true
- sfc: false
- vpn: false
diff --git a/config/deploy/os-ovn-rocky-ha.yaml b/config/deploy/os-ovn-rocky-ha.yaml
new file mode 100644
index 00000000..a2c5a865
--- /dev/null
+++ b/config/deploy/os-ovn-rocky-ha.yaml
@@ -0,0 +1,17 @@
+---
+global_params:
+ ha_enabled: true
+ patches:
+ undercloud:
+ - change-id: Ic08ff58b10d4fa7116163be1f7fce57879cee8c5
+ project: openstack/tripleo-common
+ branch: master
+
+deploy_options:
+ containers: true
+ os_version: rocky
+ sdn_controller: ovn
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/contrib/aarch64/overcloud-full-rootfs.yaml b/contrib/aarch64/overcloud-full-rootfs.yaml
new file mode 100644
index 00000000..ad420427
--- /dev/null
+++ b/contrib/aarch64/overcloud-full-rootfs.yaml
@@ -0,0 +1,54 @@
+disk_images:
+ -
+ imagename: overcloud-full-rootfs
+ arch: aarch64
+ type: qcow2
+ distro: centos7
+ elements:
+ - baremetal
+ - dhcp-all-interfaces
+ - cloud-init
+ - openvswitch
+ - overcloud-agent
+ - overcloud-full
+ - overcloud-controller
+ - overcloud-compute
+ - overcloud-ceph-storage
+ - puppet-modules
+ - enable-serial-console
+ - stable-interface-names
+ - selinux-permissive
+ - grub2
+ - growroot
+ - devuser
+ - element-manifest
+ - dynamic-login
+ - iptables
+ - enable-packages-install
+ - pip-and-virtualenv-override
+ - dracut-regenerate
+ - remove-machine-id
+ - remove-resolvconf
+ packages:
+ - openstack-utils
+ - python-tripleoclient
+ - python-tripleoclient-heat-installer
+ - python-psutil
+ - python-debtcollector
+ - plotnetcfg
+ - sos
+ - yum-plugin-priorities
+ - ntp
+ - jq
+ - openstack-heat-agents
+ - device-mapper-multipath
+ - os-net-config
+ - grub2-efi-aa64
+ - grub2-efi-aa64-modules
+ options:
+ - " --no-tmpfs"
+ environment:
+ DIB_PYTHON_VERSION: '2'
+ DIB_DEV_USER_USERNAME: 'stack'
+ DIB_DEV_USER_PASSWORD: 'stack'
+ DIB_DEV_USER_PWDLESS_SUDO: 'Yes'
diff --git a/contrib/aarch64/undercloud-full.yaml b/contrib/aarch64/undercloud-full.yaml
new file mode 100644
index 00000000..42084c89
--- /dev/null
+++ b/contrib/aarch64/undercloud-full.yaml
@@ -0,0 +1,87 @@
+disk_images:
+ -
+ imagename: undercloud-full
+ arch: aarch64
+ type: qcow2
+ distro: centos7
+ elements:
+ - vm
+ - block-device-efi
+ - baremetal
+ - dhcp-all-interfaces
+ - disable-selinux
+ - cloud-init-nocloud
+ - openvswitch
+ - overcloud-agent
+ - overcloud-full
+ - overcloud-controller
+ - overcloud-compute
+ - overcloud-ceph-storage
+ - puppet-modules
+ - enable-serial-console
+ - stable-interface-names
+ - grub2
+ - bootloader
+ - devuser
+ - element-manifest
+ - dynamic-login
+ - iptables
+ - enable-packages-install
+ - pip-and-virtualenv-override
+ - dracut-regenerate
+ - remove-machine-id
+ - remove-resolvconf
+ packages:
+ - openstack-utils
+ - python-tripleoclient
+ - python-tripleoclient-heat-installer
+ - python-psutil
+ - python-debtcollector
+ - plotnetcfg
+ - sos
+ - yum-plugin-priorities
+ - ntp
+ - jq
+ - openstack-heat-agents
+ - device-mapper-multipath
+ - os-net-config
+ options:
+ - " --no-tmpfs"
+ environment:
+ DIB_PYTHON_VERSION: '2'
+ DIB_DEV_USER_USERNAME: 'stack'
+ DIB_DEV_USER_PASSWORD: 'stack'
+ DIB_DEV_USER_PWDLESS_SUDO: 'Yes'
+ -
+ imagename: ironic-python-agent
+ # This is bogus, but there's no initrd type in diskimage-builder
+ arch: aarch64
+ type: qcow2
+ distro: centos7
+
+ # So we just override the extension instead
+ imageext: initramfs
+ elements:
+ - ironic-agent
+ - ironic-agent-multipath
+ - dynamic-login
+ - devuser
+ - disable-selinux
+ - element-manifest
+ - network-gateway
+ - enable-packages-install
+ - pip-and-virtualenv-override
+ packages:
+ - util-linux
+ - grub2-efi-aa64
+ - grub2-efi-aa64-module
+ - python-hardware-detect
+ - yum-plugin-priorities
+ - iscsi-initiator-utils
+ options:
+ - " --no-tmpfs"
+ environment:
+ DIB_PYTHON_VERSION: '2'
+ DIB_DEV_USER_USERNAME: 'stack'
+ DIB_DEV_USER_PASSWORD: 'stack'
+ DIB_DEV_USER_PWDLESS_SUDO: 'Yes'
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..eb12e74b
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # noqa: F401,F403
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..6c76e3c0
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: APEX
diff --git a/docs/contributor/APEX-on-aarch64.rst b/docs/contributor/APEX-on-aarch64.rst
new file mode 100644
index 00000000..a2e90dd6
--- /dev/null
+++ b/docs/contributor/APEX-on-aarch64.rst
@@ -0,0 +1,146 @@
+==================================================================================
+APEX on AARCH64
+==================================================================================
+
+This document describes the changes needed to deploy OPNFV Apex on aarch64:
+ * General considerations
+ * Creating undercloud and overcloud images using DIB
+ * Creating Kolla containers
+
+General considerations
+--------------------------
+
+OPNFV Apex relies on artifacts created by the TripleO (OOO) project.
+
+Those artifacts are:
+
+1. OpenStack packages, found in delorean_.
+
+ .. _delorean: https://trunk.rdoproject.org/
+
+2. UC and OC images created by RDO and found in images_.
+
+ .. _images: https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo-internal/
+
+3. The containerized versions of the OpenStack services, found in docker.io_.
+
+ .. _docker.io: https://hub.docker.com/r/tripleomaster/
+
+All the above artifacts are x86_64 only and as a result cannot be used by Apex on aarch64.
+The user therefore needs to create the images locally before attempting to deploy.
+The only supported scenario is 'os-nosdn-rocky-ha'.
+
+Other than the aarch64 disk images and containers, no special configuration
+is required for aarch64. The only requirement is that the nodes be identified as aarch64 nodes
+in the inventory files.
+
+For example:
+
+.. code-block:: yaml
+
+ node1:
+ mac_address: "68:05:CA:68:08:CA"
+ ipmi_ip: 10.10.10.10
+ ipmi_user: user
+ ipmi_pass: pass
+ pm_type: "pxe_ipmitool"
+ cpus: 1
+ memory: 128000
+ disk: 480
+ disk_device: sda
+ arch: "aarch64"
+ capabilities: "profile:control"
+
+
+Creating undercloud and overcloud images using DIB
+--------------------------------------------------
+In order to create these images, DIB_ must be used. DIB can either be built from source or installed via yum.
+
+.. _DIB: https://github.com/openstack/diskimage-builder
+
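+If the delorean/RDO repositories are enabled on the build host, DIB can be
+installed with yum (a minimal sketch; the package name below is the one
+provided by the RDO repositories and may differ elsewhere):
+
+.. code-block:: bash
+
+   sudo yum -y install diskimage-builder
+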
+It is important to use a fairly late version of DIB to support UEFI systems. The version currently in EPEL does NOT have support for UEFI. The version on delorean (15.01) works just fine. DIB uses a YAML file from the user which describes what the
+image should look like. The original yaml from RDO is here_:
+
+
+.. _here: https://github.com/openstack/tripleo-common/blob/master/image-yaml/overcloud-images.yaml
+
+The equivalent yaml files for aarch64 are included in the apex repo in the "apex/contrib/aarch64" folder.
+The UC and OC images are very similar in terms of packages. The major difference is the EFI partition table, which for the undercloud has to be provided as an environment variable.
+
+.. code-block:: bash
+
+ export DIB_BLOCK_DEVICE_CONFIG="
+
+ - local_loop:
+ name: image0
+
+ - partitioning:
+ base: image0
+ label: gpt
+ partitions:
+ - name: ESP
+ type: 'EF00'
+ size: 64MiB
+ mkfs:
+ type: vfat
+ mount:
+ mount_point: /boot/efi
+ fstab:
+ options: "defaults"
+ fsck-passno: 1
+ - name: root
+ type: '8300'
+ size: 50GiB
+ mkfs:
+ type: ext4
+ mount:
+ mount_point: /
+ fstab:
+ options: "defaults"
+ fsck-passno: 1
+ "
+
+ export DIB_YUM_REPO_CONF+="/etc/yum.repos.d/delorean-deps-rocky.repo \
+ /etc/yum.repos.d/delorean-rocky.repo /etc/yum.repos.d/epel.repo "
+ openstack --debug overcloud image build --config-file undercloud-full.yaml --output-directory ./
+
+
+The overcloud is built in a similar way.
+
+.. code-block:: bash
+
+ export DIB_YUM_REPO_CONF+="/etc/yum.repos.d/delorean-deps-rocky.repo \
+ /etc/yum.repos.d/delorean-rocky.repo /etc/yum.repos.d/epel.repo "
+ openstack --debug overcloud image build --config-file overcloud-full-rootfs.yaml --output-directory ./
+
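+If both builds succeed, the output directory should contain the generated
+artifacts, named after the ``imagename`` entries in the yaml files (a quick
+sanity check only; the exact file names depend on the ``type`` and
+``imageext`` settings):
+
+.. code-block:: bash
+
+   ls -lh undercloud-full.qcow2 overcloud-full-rootfs.qcow2 ironic-python-agent.initramfs
+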
+
+
+Apex container deployment
+-------------------------
+Similarly, the containers provided by TripleO (OOO) are x86_64 only. Containers for Apex on aarch64 for the Rocky release can
+be found in armbandapex_.
+
+.. _armbandapex: https://registry.hub.docker.com/v2/repositories/armbandapex/
+
+A user who wishes to rebuild the containers can easily do so using Kolla. An example kolla.conf and the command to build the containers are given below.
+
+
+.. code-block:: ini
+
+ [DEFAULT]
+
+ base=centos
+ type=binary
+ namespace="private docker.io repository"
+ tag=current-tripleo-rdo
+ rpm_setup_config=ceph.repo,epel.repo,delorean-deps.repo,delorean.repo
+ push=True
+
+
+
+.. code-block:: bash
+
+ openstack overcloud container image build --config-file /usr/share/tripleo-common/container-images/overcloud_containers.yaml \
+ --kolla-config-file /etc/kolla/kolla-build.conf
+
+
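+After the build completes (and, with ``push=True`` in the kolla configuration,
+after the images have been pushed to the configured registry), the locally
+built images can be verified, for example:
+
+.. code-block:: bash
+
+   docker images | grep current-tripleo-rdo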
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..2fd1b4ab
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,23 @@
+.. _apex:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV Apex
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/scenarios/os-nosdn-nofeature-noha/index
+ release/scenarios/os-ovn-nofeature-ha/index
+ release/scenarios/os-nosdn-nofeature-ha/index
+ release/scenarios/os-odl-nofeature-noha/index
+ release/scenarios/os-odl-nofeature-ha/index
+ release/scenarios/k8s-nosdn-nofeature-noha/index
+ release/installation/index
+ release/release-notes/index
diff --git a/docs/release/installation/abstract.rst b/docs/release/installation/abstract.rst
index aeef1246..2d55c154 100644
--- a/docs/release/installation/abstract.rst
+++ b/docs/release/installation/abstract.rst
@@ -1,16 +1,16 @@
Abstract
========
-This document describes how to install the Fraser release of OPNFV when
+This document describes how to install the Gambia release of OPNFV when
using Apex as a deployment tool covering its limitations, dependencies
and required system resources.
License
=======
-Fraser release of OPNFV when using Apex as a deployment tool Docs
+Gambia release of OPNFV when using Apex as a deployment tool Docs
(c) by Tim Rozet (Red Hat)
-Fraser release of OPNFV when using Apex as a deployment tool Docs
+Gambia release of OPNFV when using Apex as a deployment tool Docs
are licensed under a Creative Commons Attribution 4.0 International License.
You should have received a copy of the license along with this.
If not, see <http://creativecommons.org/licenses/by/4.0/>.
diff --git a/docs/release/installation/architecture.rst b/docs/release/installation/architecture.rst
index 1ab7c7fc..0bf2d3bc 100644
--- a/docs/release/installation/architecture.rst
+++ b/docs/release/installation/architecture.rst
@@ -16,8 +16,7 @@ deploy the overcloud.
The undercloud is the all-in-one installation of OpenStack that includes
baremetal provisioning capability. The undercloud will be deployed as a
-virtual machine on a Jump Host. This VM is pre-built and distributed as part
-of the Apex RPM.
+virtual machine on a Jump Host.
The overcloud is OPNFV. Configuration will be passed into undercloud and
the undercloud will use OpenStack's orchestration component, named Heat, to
@@ -127,11 +126,11 @@ issues per scenario. The following scenarios correspond to a supported
+-------------------------+-------------+---------------+
| os-nosdn-nofeature-noha | Apex | Yes |
+-------------------------+-------------+---------------+
-| os-nosdn-bar-ha | Barometer | Yes |
+| os-nosdn-bar-ha | Barometer | No |
+-------------------------+-------------+---------------+
-| os-nosdn-bar-noha | Barometer | Yes |
+| os-nosdn-bar-noha | Barometer | No |
+-------------------------+-------------+---------------+
-| os-nosdn-calipso-noha | Calipso | No |
+| os-nosdn-calipso-noha | Calipso | Yes |
+-------------------------+-------------+---------------+
| os-nosdn-ovs_dpdk-ha | Apex | No |
+-------------------------+-------------+---------------+
@@ -168,9 +167,9 @@ issues per scenario. The following scenarios correspond to a supported
+-------------------------+-------------+---------------+
| os-odl-l2gw-noha | Apex | No |
+-------------------------+-------------+---------------+
-| os-odl-sfc-ha | SFC | No |
+| os-odl-sfc-ha | SFC | Yes |
+-------------------------+-------------+---------------+
-| os-odl-sfc-noha | SFC | No |
+| os-odl-sfc-noha | SFC | Yes |
+-------------------------+-------------+---------------+
| os-odl-gluon-noha | Gluon | No |
+-------------------------+-------------+---------------+
@@ -188,5 +187,7 @@ issues per scenario. The following scenarios correspond to a supported
+-------------------------+-------------+---------------+
| os-onos-sfc-ha | ONOSFW | No |
+-------------------------+-------------+---------------+
-| os-ovn-nofeature-noha | Apex | Yes |
+| os-ovn-nofeature-noha | Apex | No |
++-------------------------+-------------+---------------+
+| os-ovn-nofeature-ha | Apex | Yes |
+-------------------------+-------------+---------------+
diff --git a/docs/release/installation/baremetal.rst b/docs/release/installation/baremetal.rst
index ff55bc16..efea0f86 100644
--- a/docs/release/installation/baremetal.rst
+++ b/docs/release/installation/baremetal.rst
@@ -46,11 +46,17 @@ and report the properties of it back to the undercloud node.
After introspection the undercloud will execute a Heat Stack Deployment to
continue node provisioning and configuration. The nodes will reboot and PXE
from the undercloud PXE server again to provision each node using Glance disk
-images provided by the undercloud. These disk images include all the necessary
-packages and configuration for an OPNFV deployment to execute. Once the disk
+images provided by the undercloud. These disk images include all the necessary
+packages and configuration for an OPNFV deployment to execute. Once the disk
images have been written to node's disks the nodes will boot locally and
-execute cloud-init which will execute the final node configuration. This
-configuration is largely completed by executing a puppet apply on each node.
+execute cloud-init which will execute the final node configuration. At this
+point in the deployment, the Heat Stack will complete, and Mistral will
+take over the configuration of the nodes. Mistral handles calling Ansible, which
+will connect to each node and begin configuration. This configuration includes
+launching the desired OPNFV services as containers, and generating their
+configuration files. This configuration is largely completed by executing a
+puppet apply on each container to generate the config files, which are then
+stored on the overcloud host and mounted into the service container at runtime.
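+For example, once a node has been deployed, the rendered configuration for a
+given service can be inspected on that overcloud node with a command such as
+``sudo docker exec -it nova_api cat /etc/nova/nova.conf`` (the container name
+here is only illustrative; actual names vary by service).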
Installation Guide - Bare Metal Deployment
==========================================
@@ -62,11 +68,7 @@ Install Bare Metal Jump Host
----------------------------
1a. If your Jump Host does not have CentOS 7 already on it, or you would like
- to do a fresh install, then download the Apex bootable ISO from the OPNFV
- artifacts site <http://artifacts.opnfv.org/apex.html>. There have been
- isolated reports of problems with the ISO having trouble completing
- installation successfully. In the unexpected event the ISO does not work
- please workaround this by downloading the CentOS 7 DVD and performing a
+ to do a fresh install, then download the CentOS 7 DVD and perform a
"Virtualization Host" install. If you perform a "Minimal Install" or
install type other than "Virtualization Host" simply run
``sudo yum -y groupinstall "Virtualization Host"``
@@ -84,11 +86,11 @@ Install Bare Metal Jump Host
Replace /dev/sdX with the device assigned to your usb drive. Then select
the USB device as the boot media on your Jump Host
-2a. When not using the OPNFV Apex ISO, install these repos:
+2a. Install these repos:
- ``sudo yum install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm``
+ ``sudo yum install https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm``
``sudo yum install epel-release``
- ``sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/fraser/opnfv-apex.repo``
+ ``sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/gambia/opnfv-apex.repo``
The RDO Project release repository is needed to install OpenVSwitch, which
is a dependency of opnfv-apex. If you do not have external connectivity to
@@ -97,14 +99,12 @@ Install Bare Metal Jump Host
opnfv-apex repo hosts all of the Apex dependencies which will automatically
be installed when installing RPMs, but will be pre-installed with the ISO.
-2b. If you chose not to use the Apex ISO, then you must download and install
- the Apex RPMs to the Jump Host. Download the first 3 Apex RPMs from the
- OPNFV downloads page, under the TripleO RPMs
- ``https://www.opnfv.org/software/downloads``.
+2b. Download the Apex RPM from the OPNFV downloads page, under the
+ TripleO RPMs ``https://www.opnfv.org/software/downloads``. The dependent
+ RPMs will be automatically installed from the opnfv-apex repo in the
+ previous step.
The following RPMs are available for installation:
- - opnfv-apex - OpenDaylight, OVN, and nosdn support
- - opnfv-apex-undercloud - (reqed) Undercloud Image
- python34-opnfv-apex - (reqed) OPNFV Apex Python package
- python34-markupsafe - (reqed) Dependency of python34-opnfv-apex **
- python34-jinja2 - (reqed) Dependency of python34-opnfv-apex **
@@ -123,9 +123,9 @@ Install Bare Metal Jump Host
automatically installed by installing python34-opnfv-apex when the
opnfv-apex.repo has been previously downloaded to ``/etc/yum.repos.d/``.
- Install the three required RPMs (replace <rpm> with the actual downloaded
+ Install the required RPM (replace <rpm> with the actual downloaded
artifact):
- ``yum -y install <opnfv-apex.rpm> <opnfv-apex-undercloud> <python34-opnfv-apex>``
+ ``yum -y install <python34-opnfv-apex>``
3. After the operating system and the opnfv-apex RPMs are installed, login to
your Jump Host as root.
@@ -151,7 +151,7 @@ IPMI configuration information gathered in section
2. The nodes dictionary contains a definition block for each baremetal host
that will be deployed. 0 or more compute nodes and 1 or 3 controller nodes
- are required. (The example file contains blocks for each of these already).
+ are required (the example file contains blocks for each of these already).
It is optional at this point to add more compute nodes into the node list.
By specifying 0 compute nodes in the inventory file, the deployment will
automatically deploy "all-in-one" nodes which means the compute will run
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
index 82b9d25c..443aef6a 100644
--- a/docs/release/installation/index.rst
+++ b/docs/release/installation/index.rst
@@ -23,7 +23,7 @@ Contents:
:Authors: Tim Rozet (trozet@redhat.com)
:Authors: Dan Radez (dradez@redhat.com)
-:Version: 6.0
+:Version: 7.1
Indices and tables
==================
diff --git a/docs/release/installation/introduction.rst b/docs/release/installation/introduction.rst
index 76ed0acb..706e2265 100644
--- a/docs/release/installation/introduction.rst
+++ b/docs/release/installation/introduction.rst
@@ -1,7 +1,7 @@
Introduction
============
-This document describes the steps to install an OPNFV Fraser reference
+This document describes the steps to install an OPNFV Gambia reference
platform using the Apex installer.
The audience is assumed to have a good background in networking
@@ -18,22 +18,14 @@ deployment tool chain.
The Apex deployment artifacts contain the necessary tools to deploy and
configure an OPNFV target system using the Apex deployment toolchain.
-These artifacts offer the choice of using the Apex bootable ISO
-(``opnfv-apex-fraser.iso``) to both install CentOS 7 and the
-necessary materials to deploy or the Apex RPMs (``opnfv-apex*.rpm``),
-and their associated dependencies, which expects installation to a
-CentOS 7 libvirt enabled host. The RPM contains a collection of
-configuration files, prebuilt disk images, and the automatic deployment
-script (``opnfv-deploy``).
+The Apex artifact is a python package capable of automating the installation of
+TripleO and other OPNFV components.
-An OPNFV install requires a "Jump Host" in order to operate. The bootable
-ISO will allow you to install a customized CentOS 7 release to the Jump Host,
+An OPNFV install requires a "Jump Host" in order to operate. For a traditional
+deployment, it is required to install a CentOS 7 release on the Jump Host,
which includes the required packages needed to run ``opnfv-deploy``.
If you already have a Jump Host with CentOS 7 installed, you may choose to
-skip the ISO step and simply install the (``opnfv-apex*.rpm``) RPMs. The RPMs
-are the same RPMs included in the ISO and include all the necessary disk
-images and configuration files to execute an OPNFV deployment. Either method
-will prepare a host to the same ready state for OPNFV deployment.
+simply install the (``python34-opnfv-apex*.rpm``) RPM.
``opnfv-deploy`` instantiates a Triple-O Undercloud VM server using libvirt
as its provider. This VM is then configured and used to provision the
diff --git a/docs/release/installation/references.rst b/docs/release/installation/references.rst
index 935be038..b8b177d6 100644
--- a/docs/release/installation/references.rst
+++ b/docs/release/installation/references.rst
@@ -21,7 +21,7 @@ OPNFV
OpenStack
---------
-`OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
+`OpenStack Queens Release artifacts <http://www.openstack.org/software/queens>`_
`OpenStack documentation <http://docs.openstack.org>`_
diff --git a/docs/release/installation/requirements.rst b/docs/release/installation/requirements.rst
index 9aefa21d..239db197 100644
--- a/docs/release/installation/requirements.rst
+++ b/docs/release/installation/requirements.rst
@@ -6,7 +6,7 @@ Jump Host Requirements
The Jump Host requirements are outlined below:
-1. CentOS 7 (from ISO or self-installed).
+1. CentOS 7 (self-installed) or Fedora (with Snapshot deployment).
2. Root access.
@@ -15,7 +15,7 @@ The Jump Host requirements are outlined below:
4. a minimum of 1 network and maximum of 5 networks; multiple NIC and/or VLAN
combinations are supported. This is virtualized for a VM deployment.
-5. The Fraser Apex RPMs and their dependencies.
+5. The Gambia Apex RPM and its dependencies.
6. 16 GB of RAM for a bare metal deployment, 64 GB of RAM for a Virtual
Deployment.
diff --git a/docs/release/installation/upstream.rst b/docs/release/installation/upstream.rst
index b98b0c19..f18c4b11 100644
--- a/docs/release/installation/upstream.rst
+++ b/docs/release/installation/upstream.rst
@@ -1,14 +1,11 @@
-Deploying Directly from Upstream - (Beta)
-=========================================
+Deploying Directly from Upstream
+================================
-In addition to deploying with OPNFV tested artifacts included in the
-opnfv-apex-undercloud and opnfv-apex RPMs, it is now possible to deploy
+When installing the Undercloud and Overcloud, the disk images are now downloaded
directly from upstream artifacts. Essentially this deployment pulls the latest
RDO overcloud and undercloud artifacts at deploy time. This option is useful
-for being able to deploy newer versions of OpenStack that are not included
-with this release, and offers some significant advantages for some users.
-Please note this feature is currently in beta for the Fraser release and will
-be fully supported in the next OPNFV release.
+for being able to pull the latest Queens artifacts and other OPNFV components
+which have been promoted via a TripleO pipeline and deemed to be stable.
Upstream Deployment Key Features
--------------------------------
@@ -39,19 +36,15 @@ in order to download and prepare the cached artifacts.
Scenarios and Deploy Settings for Upstream Deployments
------------------------------------------------------
-Some deploy settings files are already provided which have been tested by the
-Apex team. These include (under /etc/opnfv-apex/):
-
- - os-nosdn-queens_upstream-noha.yaml
- - os-nosdn-master_upstream-noha.yaml
- - os-odl-queens_upstream-noha.yaml
- - os-odl-master_upstream-noha.yaml
-
-Each of these scenarios has been tested by Apex over the Fraser release, but
-none are guaranteed to work as upstream is a moving target and this feature is
-relatively new. Still it is the goal of the Apex team to provide support
-and move to an upstream based deployments in the future, so please file a bug
-when encountering any issues.
+The deploy settings and scenarios included with the Gambia release of Apex will
+be supported as described in the `OPNFV Scenarios in Apex`_ section of this guide.
+Each of these scenarios has been tested by Apex over the Gambia release, and
+the deploy settings used will control which upstream artifacts are pulled
+at deploy time. By specifying different versions of OpenStack, ODL, or other
+components in the deploy settings, different upstream artifacts may be
+downloaded, but such combinations are not considered to be supported. For
+deploying newer versions of components, it is advised to use the master branch
+of OPNFV Apex as part of our continuous integration effort to support those
+components.
Including Upstream Patches with Deployment
------------------------------------------------------
@@ -83,8 +76,7 @@ Running ``opnfv-deploy``
Deploying is similar to the typical method used for baremetal and virtual
deployments with the addition of a few new arguments to the ``opnfv-deploy``
-command. In order to use an upstream deployment, please use the ``--upstream``
-argument. Also, the artifacts for each upstream deployment are only
+command. The artifacts for each upstream deployment are only
downloaded when a newer version is detected upstream. In order to explicitly
disable downloading new artifacts from upstream if previous artifacts are
already cached, please use the ``--no-fetch`` argument.
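+As an illustrative example (the deploy settings and network settings files named
+below are those shipped under ``/etc/opnfv-apex/``; adjust the options for your
+environment):
+``sudo opnfv-deploy -v -d /etc/opnfv-apex/os-odl-queens-noha.yaml -n /etc/opnfv-apex/network_settings.yaml --no-fetch``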
@@ -105,3 +97,5 @@ container with a bash shell. Note the containers do not use systemd, unlike
the traditional deployment model and are instead started as the first process
in the container. To restart a service, use the ``docker restart <container>``
command.
+
+.. _`OPNFV Scenarios in Apex`: architecture.html#opnfv-scenarios-in-apex
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 8a8593a0..159165de 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -1,11 +1,11 @@
========================================================================
-OPNFV Release Notes for the Fraser release of OPNFV Apex deployment tool
+OPNFV Release Notes for the Gambia release of OPNFV Apex deployment tool
========================================================================
Abstract
========
-This document provides the release notes for Fraser release with the Apex
+This document provides the release notes for Gambia release with the Apex
deployment toolchain.
License
@@ -17,7 +17,7 @@ All Apex and "common" entities are protected by the Apache 2.0 License
Important Notes
===============
-This is the OPNFV Fraser release that implements the deploy stage of the
+This is the OPNFV Gambia release that implements the deploy stage of the
OPNFV CI pipeline via Apex.
Apex is based on RDO's Triple-O installation tool chain.
@@ -29,14 +29,14 @@ deploy OPNFV using Apex installer.
Summary
=======
-Fraser release with the Apex deployment toolchain will establish an OPNFV
+Gambia release with the Apex deployment toolchain will establish an OPNFV
target system on a Pharos compliant lab infrastructure. The current definition
of an OPNFV target system is OpenStack Pike combined with an SDN
controller, such as OpenDaylight. The system is deployed with OpenStack High
Availability (HA) for most OpenStack services. SDN controllers are deployed
on every controller unless deploying with one of the HA FD.IO scenarios. Ceph
storage is used as Cinder backend, and is the only supported storage for
-Fraser. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
+Gambia. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
node in an HA setup. Apex also supports non-HA deployments, which deploys a
single controller and n number of compute nodes. Furthermore, Apex is
capable of deploying scenarios in a bare metal or virtual fashion. Virtual
simulate a bare metal deployment.
- Documentation is built by Jenkins
- .iso image is built by Jenkins
- .rpm packages are built by Jenkins
-- Jenkins deploys a Fraser release with the Apex deployment toolchain
+- Jenkins deploys a Gambia release with the Apex deployment toolchain
bare metal, which includes 3 control+network nodes, and 2 compute nodes.
Release Data
@@ -56,16 +56,16 @@ Release Data
| **Project** | apex |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | opnfv-6.0.0 |
+| **Repo/tag** | opnfv-7.1.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | 6.0.0 |
+| **Release designation** | 7.1.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2018-04-30 |
+| **Release date** | 2018-12-14 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Fraser release |
+| **Purpose of the delivery** | OPNFV Gambia release |
| | |
+--------------------------------------+--------------------------------------+
@@ -74,25 +74,25 @@ Version change
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the Fraser release with the Apex
-deployment toolchain. It is based on following upstream versions:
+This is the first tracked version of the Gambia release with the Apex
+deployment toolchain. It is based on following upstream versions:
-- OpenStack (Pike release)
+- OpenStack (Queens release)
-- OpenDaylight (Nitrogen/Oxygen releases)
+- OpenDaylight (Oxygen release)
- CentOS 7
Document Version Changes
~~~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of Fraser release with the Apex
+This is the first tracked version of Gambia release with the Apex
deployment toolchain.
The following documentation is provided with this release:
-- OPNFV Installation instructions for the Fraser release with the Apex
+- OPNFV Installation instructions for the Gambia release with the Apex
deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Fraser release with the Apex deployment
+- OPNFV Release Notes for the Gambia release with the Apex deployment
toolchain - ver. 1.0.0 (this document)
Deliverables
@@ -108,10 +108,10 @@ Software Deliverables
Documentation Deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Fraser release with the Apex
- deployment toolchain - ver. 6.0
-- OPNFV Release Notes for the Fraser release with the Apex deployment
- toolchain - ver. 6.0 (this document)
+- OPNFV Installation instructions for the Gambia release with the Apex
+ deployment toolchain - ver. 7.1
+- OPNFV Release Notes for the Gambia release with the Apex deployment
+ toolchain - ver. 7.1 (this document)
Known Limitations, Issues and Workarounds
=========================================
@@ -158,9 +158,6 @@ Known Issues
+--------------------------------------+--------------------------------------+
| JIRA: APEX-412 | Install failures with UEFI |
+--------------------------------------+--------------------------------------+
-| JIRA: APEX-425 | Need to tweak performance settings |
-| | virtual DPDK scenarios |
-+--------------------------------------+--------------------------------------+
Workarounds
@@ -178,10 +175,10 @@ Apex installer.
References
==========
-For more information on the OPNFV Fraser release, please see:
+For more information on the OPNFV Gambia release, please see:
-http://wiki.opnfv.org/releases/Fraser
+http://wiki.opnfv.org/releases/Gambia
:Authors: Tim Rozet (trozet@redhat.com)
:Authors: Dan Radez (dradez@redhat.com)
-:Version: 6.0
+:Version: 7.1
diff --git a/docs/release/scenarios/k8s-nosdn-nofeature-noha/k8s-nosdn-nofeature-noha.rst b/docs/release/scenarios/k8s-nosdn-nofeature-noha/k8s-nosdn-nofeature-noha.rst
index 69b9c2a6..7ff21b73 100644
--- a/docs/release/scenarios/k8s-nosdn-nofeature-noha/k8s-nosdn-nofeature-noha.rst
+++ b/docs/release/scenarios/k8s-nosdn-nofeature-noha/k8s-nosdn-nofeature-noha.rst
@@ -2,9 +2,11 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Gambia 1.0 of
+This document provides scenario level details for Gambia 1.1 of
Kubernetes deployment with no SDN controller, no extra features
-and no High Availability enabled.
+and no High Availability enabled. Note this scenario is *not* supported
+in the initial Gambia release and will be supported in a later service
+release of Gambia.
============
Introduction
diff --git a/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst b/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
index bc6d39b8..5f2839c1 100644
--- a/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
+++ b/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
@@ -2,14 +2,14 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Fraser 1.0 of
+This document provides scenario level details for Gambia 1.1 of
deployment with no SDN controller and no extra features enabled.
============
Introduction
============
-This scenario is used primarily to validate and deploy a Pike OpenStack
+This scenario is used primarily to validate and deploy a Queens OpenStack
deployment without any NFV features or SDN controller enabled.
Scenario components and composition
@@ -38,6 +38,6 @@ None
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/fraser
+For more information on the OPNFV Gambia release, please visit
+http://www.opnfv.org/gambia
diff --git a/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst b/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
index 8edd29bd..e5d4c989 100644
--- a/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
+++ b/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
@@ -2,14 +2,14 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Fraser 1.0 of
+This document provides scenario level details for Gambia 1.1 of
deployment with no SDN controller and no extra features enabled.
============
Introduction
============
-This scenario is used primarily to validate and deploy a Pike OpenStack
+This scenario is used primarily to validate and deploy a Queens OpenStack
deployment without any NFV features or SDN controller enabled.
Scenario components and composition
@@ -35,6 +35,6 @@ None
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/fraser
+For more information on the OPNFV Gambia release, please visit
+http://www.opnfv.org/gambia
diff --git a/docs/release/scenarios/os-nosdn-performance-ha/index.rst b/docs/release/scenarios/os-nosdn-performance-ha/index.rst
deleted file mode 100644
index e0dbca7f..00000000
--- a/docs/release/scenarios/os-nosdn-performance-ha/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. _os-nosdn-performance-ha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-================================================
-os-nosdn-performance-ha overview and description
-================================================
-
-.. toctree::
- :numbered:
- :maxdepth: 4
-
- os-nosdn-performance-ha.rst
diff --git a/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst b/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst
deleted file mode 100644
index beed894e..00000000
--- a/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-This document provides scenario level details for Fraser 1.0 of
-deployment with no SDN controller and performance options enabled.
-
-============
-Introduction
-============
-
-This scenario is used primarily to demonstrate the performance settings and
-capabilities in Apex. This scenario will deploy a Pike OpenStack
-deployment without any NFV features or SDN controller enabled.
-
-Scenario components and composition
-===================================
-
-This scenario is composed of common OpenStack services enabled by default,
-including Nova, Neutron, Glance, Cinder, Keystone, Horizon. Optionally and
-by default, Tacker and Congress services are also enabled. Ceph is used as
-the backend storage to Cinder on all deployed nodes.
-
-All services are in HA, meaning that there are multiple cloned instances of
-each service, and they are balanced by HA Proxy using a Virtual IP Address
-per service.
-
-The main purpose of this scenario is to serve as an example to show how to
-set optional performance settings in an Apex deploy settings file.
-
-Scenario usage overview
-=======================
-
-The performance options listed in os-nosdn-performance-ha.yaml give an example
-of the different options a user can set in any deploy settings file. Some
-of these performance options are actually required for other scenarios which
-rely on DPDK. Options under the nova section like 'libvirtpin' allow a
-user to choose which core to pin nova instances to on the overcloud compute
-node. Options under 'kernel' allow a user to set kernel specific arguments
-at boot, which include options like hugepages, isolcpus, enabling iommu, etc.
-
-
-Limitations, Issues and Workarounds
-===================================
-
-* `APEX-389 <https://jira.opnfv.org/browse/APEX-389>`_:
- Compute kernel parameters are applied to all nodes
-
-References
-==========
-
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/fraser
-
diff --git a/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst b/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
index 52530cdd..111ba6f7 100644
--- a/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
+++ b/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
@@ -2,14 +2,14 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Fraser 1.0 of
+This document provides scenario level details for Gambia 1.1 of
deployment with the OpenDaylight SDN controller and no extra features enabled.
============
Introduction
============
-This scenario is used primarily to validate and deploy a Pike OpenStack
+This scenario is used primarily to validate and deploy a Queens OpenStack
deployment with OpenDaylight, and without any NFV features enabled.
Scenario components and composition
@@ -38,18 +38,12 @@ settings file.
Limitations, Issues and Workarounds
===================================
-* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
- ODL routes local subnet traffic to GW
-* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
- OpenFlow rules are populated very slowly
* `APEX-268 <https://jira.opnfv.org/browse/APEX-268>`_:
VMs with multiple floating IPs can only access via first NIC
-* `APEX-422 <https://jira.opnfv.org/browse/APEX-422>`_:
- First nova instance DHCP request fails
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/fraser
+For more information on the OPNFV Gambia release, please visit
+http://www.opnfv.org/gambia
diff --git a/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst b/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
index 932ccc85..3e26d672 100644
--- a/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
+++ b/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
@@ -2,14 +2,14 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Fraser 1.0 of
+This document provides scenario level details for Gambia 1.1 of
deployment with the OpenDaylight SDN controller and no extra features enabled.
============
Introduction
============
-This scenario is used primarily to validate and deploy a Pike OpenStack
+This scenario is used primarily to validate and deploy a Queens OpenStack
deployment with OpenDaylight, and without any NFV features enabled.
Scenario components and composition
@@ -32,18 +32,12 @@ settings file.
Limitations, Issues and Workarounds
===================================
-* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
- ODL routes local subnet traffic to GW
-* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
- OpenFlow rules are populated very slowly
* `APEX-268 <https://jira.opnfv.org/browse/APEX-268>`_:
VMs with multiple floating IPs can only access via first NIC
-* `APEX-422 <https://jira.opnfv.org/browse/APEX-422>`_:
- First nova instance DHCP request fails
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/fraser
+For more information on the OPNFV Gambia release, please visit
+http://www.opnfv.org/gambia
diff --git a/docs/release/scenarios/os-ovn-nofeature-noha/index.rst b/docs/release/scenarios/os-ovn-nofeature-ha/index.rst
index 74545044..b7e62e6c 100644
--- a/docs/release/scenarios/os-ovn-nofeature-noha/index.rst
+++ b/docs/release/scenarios/os-ovn-nofeature-ha/index.rst
@@ -1,4 +1,4 @@
-.. _os-ovn-nofeature-noha:
+.. _os-ovn-nofeature-ha:
.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
.. http://creativecommons.org/licenses/by/4.0
@@ -12,4 +12,4 @@ os-ovn-nofeature-noha overview and description
:numbered:
:maxdepth: 4
- os-ovn-nofeature-noha.rst
+ os-ovn-nofeature-ha.rst
diff --git a/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst b/docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst
index 1b2da194..165b5f71 100644
--- a/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst
+++ b/docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Fraser 1.0 of
+This document provides scenario level details for Gambia 1.1 of
deployment with the OVN SDN controller and no extra features enabled.
============
@@ -23,18 +23,17 @@ the backend storage to Cinder on all deployed nodes.
Scenario usage overview
=======================
-Simply deploy this scenario by using the os-ovn-nofeature-noha.yaml deploy
+Simply deploy this scenario by using the os-ovn-nofeature-ha.yaml deploy
settings file.
Limitations, Issues and Workarounds
===================================
-* `APEX-430 <https://jira.opnfv.org/browse/APEX-430>`_:
- OVN HA functionality is not available.
+None
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/fraser
+For more information on the OPNFV Gambia release, please visit
+http://www.opnfv.org/gambia
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..9fde2df2
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
index 96080e5f..07b82c8e 100644
--- a/lib/ansible/playbooks/configure_undercloud.yml
+++ b/lib/ansible/playbooks/configure_undercloud.yml
@@ -27,6 +27,13 @@
with_items:
- controller
- compute
+ - name: Copy container prep env file to undercloud
+ copy:
+ src: "{{ apex_temp_dir }}/containers-prepare-parameter.yaml"
+ dest: "/home/stack/containers-prepare-parameter.yaml"
+ owner: stack
+ group: stack
+ mode: 0644
- lineinfile:
path: /etc/sudoers
regexp: 'Defaults\s*requiretty'
@@ -66,51 +73,43 @@
src: /home/stack/apex-undercloud-install.log
dest: "{{ apex_temp_dir }}/"
flat: yes
+ - name: Install ceph-ansible
+ yum:
+ name: ceph-ansible
+ become: yes
- name: openstack-configs nova
shell: openstack-config --set /var/lib/config-data/nova/etc/nova/nova.conf DEFAULT {{ item }}
become: yes
with_items: "{{ nova_config }}"
- name: restart nova services
- docker_container:
- name: "{{ item }}"
- state: started
- restart: yes
+ shell: "{{ container_client }} restart {{ item }}"
with_items:
- nova_conductor
- nova_compute
- nova_api
- nova_scheduler
+ become: yes
- name: openstack-configs neutron
shell: openstack-config --set /var/lib/config-data/neutron/etc/neutron/neutron.conf DEFAULT {{ item }}
become: yes
with_items: "{{ neutron_config }}"
- name: restart neutron services
- docker_container:
- name: "{{ item }}"
- state: started
- restart: yes
+ shell: "{{ container_client }} restart {{ item }}"
with_items:
- neutron_api
- neutron_dhcp
+ become: yes
- name: openstack-configs ironic
shell: openstack-config --set /var/lib/config-data/ironic/etc/ironic/ironic.conf {{ item }}
become: yes
with_items: "{{ ironic_config }}"
- name: restart ironic services
- docker_container:
- name: "{{ item }}"
- state: started
- restart: yes
+ shell: "{{ container_client }} restart {{ item }}"
with_items:
- ironic_api
- ironic_conductor
- ironic_inspector
- # will need to modify the below to patch the container
- - lineinfile:
- path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
- regexp: '_link_ip_address_pxe_configs'
- line: ' _link_mac_pxe_configs(task)'
- when: aarch64
+ become: yes
- name: configure external network vlan ifcfg
template:
src: external_vlan_ifcfg.yml.j2
@@ -134,21 +133,12 @@
when:
- external_network.vlan == "native"
- external_network.enabled
- - not aarch64
- name: bring up eth2
shell: ip link set up dev eth2
when:
- external_network.vlan == "native"
- external_network.enabled
- - not aarch64
become: yes
- - name: assign IP to native eth0 if aarch64
- shell: ip a a {{ external_network.ip }}/{{ external_network.prefix }} dev eth0
- become: yes
- when:
- - external_network.vlan == "native"
- - external_network.enabled
- - aarch64
- name: bring up eth0 if aarch64
shell: ip link set up dev eth0
when:
@@ -182,12 +172,22 @@
jump: ACCEPT
source: "{{ nat_cidr }}"
ctstate: ESTABLISHED,RELATED
- - name: Undercloud NAT - Save iptables
- shell: service iptables save
become: yes
when:
- not nat_network_ipv6
- nat
+ - name: Allow SSH in iptables
+ iptables:
+ action: insert
+ chain: INPUT
+ rule_num: 1
+ protocol: tcp
+ destination_port: 22
+ jump: ACCEPT
+ become: yes
+ - name: Undercloud NAT - Save iptables
+ shell: service iptables save
+ become: yes
- name: fetch storage environment file
fetch:
src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
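The restart tasks above drop the docker_container module in favour of a plain "{{ container_client }} restart <name>" shell, so the same playbook works whether the undercloud runs docker or podman. A minimal Python sketch of that loop, assuming the client binary is supplied the same way the playbook's container_client variable is:

#!/usr/bin/env python3
"""Minimal sketch of the container restart loop above; not Apex code."""
import subprocess


def restart_containers(container_client, names):
    # Equivalent of "{{ container_client }} restart {{ item }}" with become: yes
    for name in names:
        subprocess.run(['sudo', container_client, 'restart', name], check=True)


if __name__ == '__main__':
    # Container names copied from the "restart nova services" task; the
    # client name ("podman") is an assumption for the example.
    restart_containers('podman', ['nova_conductor', 'nova_compute',
                                  'nova_api', 'nova_scheduler'])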
diff --git a/lib/ansible/playbooks/deploy_overcloud.yml b/lib/ansible/playbooks/deploy_overcloud.yml
index 90c3ad1d..9a405814 100644
--- a/lib/ansible/playbooks/deploy_overcloud.yml
+++ b/lib/ansible/playbooks/deploy_overcloud.yml
@@ -12,11 +12,23 @@
- network-environment.yaml
- instackenv.json
- overcloud-full.qcow2
+ - overcloud-full.vmlinuz
+ - overcloud-full.initrd
+ - ironic-python-agent.initramfs
+ - ironic-python-agent.kernel
- deploy_command
- virtual-environment.yaml
- baremetal-environment.yaml
- kubernetes-environment.yaml
- "{{ apex_env_file }}"
+ - name: Copy ansible.cfg data to undercloud in aarch64
+ copy:
+ src: "{{ apex_temp_dir }}/ansible.cfg"
+ dest: "/home/stack/ansible.cfg"
+ owner: stack
+ group: stack
+ mode: 0644
+ when: aarch64
- name: Copy network data to undercloud
copy:
src: "{{ apex_temp_dir }}/network_data.yaml"
@@ -61,6 +73,22 @@
owner: root
group: root
become: yes
+ - name: Insert External network into Compute role
+ shell: |
+ ruby -e '
+ require "yaml"
+ data = YAML.load(File.read("/usr/share/openstack-tripleo-heat-templates/roles_data.yaml"))
+ if data[1]["networks"].is_a?(Array)
+ data[1]["networks"].push("External")
+ elsif data[1]["networks"].is_a?(Hash)
+ data[1]["networks"].merge!("External"=> { "subnet" => "external_subnet" })
+ else
+ raise "Unable to determine data to modify in roles_data.yaml"
+ end
+ data[1]["default_route_networks"] = Array.new(["External"])
+ File.open("/usr/share/openstack-tripleo-heat-templates/roles_data.yaml", "w") { |f| f.write(data.to_yaml) }
+ '
+ become: yes
- name: Upload glance images
shell: "{{ stackrc }} && openstack overcloud image upload"
become: yes
@@ -80,8 +108,25 @@
- baremetal
- control
- compute
+ - name: Re-enable ceph config for aarch64
+ replace:
+ path: "/usr/share/ceph-ansible/roles/ceph-client/tasks/create_users_keys.yml"
+ regexp: "x86_64"
+ replace: "aarch64"
+ backup: yes
+ when: aarch64
- name: Configure DNS server for ctlplane network
shell: "{{ stackrc }} && openstack subnet set ctlplane-subnet {{ dns_server_args }}"
+ - name: Update NIC templates before deployment
+ shell: >
+ /usr/share/openstack-tripleo-heat-templates/tools/merge-new-params-nic-config-script.py
+ -n /home/stack/network_data.yaml -t /home/stack/nics/{{ item }}.yaml --discard-comments True
+ --role-name Controller
+ become: yes
+ become_user: stack
+ with_items:
+ - controller
+ - compute
- block:
- name: Execute Overcloud Deployment
shell: "{{ stackrc }} && bash deploy_command"
diff --git a/lib/ansible/playbooks/patch_containers.yml b/lib/ansible/playbooks/patch_containers.yml
new file mode 100644
index 00000000..1ef05810
--- /dev/null
+++ b/lib/ansible/playbooks/patch_containers.yml
@@ -0,0 +1,13 @@
+---
+ - name: "Pull docker image to ensure it exists locally: {{ item }}"
+ shell: "{{ container_client }} pull {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:current-tripleo"
+ - name: "Find docker image user {{ item }}"
+ shell: >
+ {{ container_client }} inspect --format='{{ '{{' }}.ContainerConfig.User{{ '}}' }}'
+ {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:current-tripleo
+ register: user_result
+ - name: "Patch docker image {{ item }}"
+ shell: >
+ cd /home/stack/containers/{{ item }} && {{ container_client }} build
+ --build-arg REAL_USER={{ user_result.stdout }}
+ -t {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex .
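patch_containers.yml pulls the current-tripleo image, reads the user it was built with, and rebuilds it from /home/stack/containers/<service> with that user passed as the REAL_USER build-arg, tagging the result :apex. A hedged Python sketch of the same three steps (the helper and its 'podman' default are illustrative; the registry layout is taken from the playbook):

"""Sketch of the per-image pull/inspect/build flow in patch_containers.yml."""
import subprocess


def patch_image(undercloud_ip, os_version, service, client='podman'):
    base = '{}:8787/tripleo{}/centos-binary-{}'.format(
        undercloud_ip, os_version, service)
    # Pull the image so it exists locally.
    subprocess.run([client, 'pull', base + ':current-tripleo'], check=True)
    # Find the user the image was built with.
    user = subprocess.run(
        [client, 'inspect', '--format={{.ContainerConfig.User}}',
         base + ':current-tripleo'],
        check=True, capture_output=True, text=True).stdout.strip()
    # Rebuild from the patch directory, preserving that user.
    subprocess.run(
        [client, 'build', '--build-arg', 'REAL_USER=' + user,
         '-t', base + ':apex', '.'],
        cwd='/home/stack/containers/' + service, check=True)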
diff --git a/lib/ansible/playbooks/post_deploy_overcloud.yml b/lib/ansible/playbooks/post_deploy_overcloud.yml
index ff9895b0..2b90ab1f 100644
--- a/lib/ansible/playbooks/post_deploy_overcloud.yml
+++ b/lib/ansible/playbooks/post_deploy_overcloud.yml
@@ -54,14 +54,12 @@
- openstack-nova-api
- openstack-nova-scheduler
- openstack-nova-conductor
- - name: Restart Compute Nova Compute (Pike Workaround)
- shell: "systemctl restart openstack-nova-compute"
+ - name: Restart Compute Nova Compute (workaround for NFS)
+ shell: "{{ container_client }} restart nova_compute"
become: yes
- when:
- - "'compute' in ansible_hostname"
- - os_version == 'pike'
+ when: "'compute' in ansible_hostname or all_in_one"
- name: Update ODL container restart policy to always
- shell: "docker update --restart=always opendaylight_api"
+ shell: "{{ container_client }} update --restart=always opendaylight_api"
become: yes
when:
- sdn == 'opendaylight'
@@ -86,9 +84,12 @@
crudini --set /var/lib/config-data/puppet-generated/neutron/etc/neutron/metadata_agent.ini
DEFAULT nova_metadata_host $(hiera -c /etc/puppet/hiera.yaml nova_metadata_vip)
become: yes
- when: "'controller' in ansible_hostname"
+ when:
+ - "'controller' in ansible_hostname"
+ - sdn != 'ovn'
- name: Restart metadata service
- shell: "docker restart neutron_metadata_agent"
+ shell: "{{ container_client }} restart neutron_metadata_agent"
become: yes
when:
- "'controller' in ansible_hostname"
+ - sdn != 'ovn'
diff --git a/lib/ansible/playbooks/post_deploy_undercloud.yml b/lib/ansible/playbooks/post_deploy_undercloud.yml
index d03f1a1e..8cdfedfe 100644
--- a/lib/ansible/playbooks/post_deploy_undercloud.yml
+++ b/lib/ansible/playbooks/post_deploy_undercloud.yml
@@ -74,6 +74,17 @@
become: yes
become_user: stack
with_items: "{{ overcloudrc_files }}"
+ - name: Write SDN WEB and REST PORT to overcloudrc
+ lineinfile:
+ line: "export {{ item[0] }}=8081"
+ regexp: "{{ item[0] }}"
+ path: "/home/stack/{{ item[1] }}"
+ when: sdn != false
+ become: yes
+ become_user: stack
+ with_nested:
+ - [ 'SDN_CONTROLLER_WEBPORT', 'SDN_CONTROLLER_RESTCONFPORT' ]
+ - "{{ overcloudrc_files }}"
- name: Grab Heat Environment variables
shell: "{{ stackrc }} && openstack stack environment show overcloud -f json"
register: heat_env
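The new "Write SDN WEB and REST PORT to overcloudrc" task uses lineinfile to make sure each overcloudrc file exports SDN_CONTROLLER_WEBPORT and SDN_CONTROLLER_RESTCONFPORT as 8081. A toy Python illustration of that replace-or-append behaviour; the single file path is an assumption, since Apex iterates over overcloudrc_files:

"""Toy replace-or-append illustration of the lineinfile task above."""
import re


def ensure_export(path, name, value='8081'):
    wanted = 'export {}={}'.format(name, value)
    with open(path) as f:
        lines = f.read().splitlines()
    if any(re.search(name, line) for line in lines):
        lines = [wanted if re.search(name, line) else line for line in lines]
    else:
        lines.append(wanted)
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')


for var in ('SDN_CONTROLLER_WEBPORT', 'SDN_CONTROLLER_RESTCONFPORT'):
    ensure_export('/home/stack/overcloudrc', var)  # path assumed for the example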
diff --git a/lib/ansible/playbooks/prepare_overcloud_containers.yml b/lib/ansible/playbooks/prepare_overcloud_containers.yml
index 54dbe098..ebf081dc 100644
--- a/lib/ansible/playbooks/prepare_overcloud_containers.yml
+++ b/lib/ansible/playbooks/prepare_overcloud_containers.yml
@@ -20,73 +20,31 @@
when: patched_docker_services|length > 0
- name: Prepare generic docker registry image file
shell: >
- {{ stackrc }} && openstack overcloud container image prepare
- --namespace docker.io/tripleo{{ os_version }}
- --tag {{ container_tag }}
- --push-destination {{ undercloud_ip }}:8787
- -e /usr/share/openstack-tripleo-heat-templates/environments/docker.yaml
- -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
- --output-images-file overcloud_containers.yml
+ sudo openstack tripleo container image prepare
+ -e /home/stack/containers-prepare-parameter.yaml
--output-env-file docker-images.yaml
- become: yes
- become_user: stack
- - name: Prepare SDN docker registry image file
- shell: >
- {{ stackrc }} && openstack overcloud container image prepare
- --namespace docker.io/tripleo{{ os_version }}
- --tag {{ container_tag }}
- --push-destination {{ undercloud_ip }}:8787
- {{ sdn_env_file }}
- --output-images-file sdn_containers.yml
- --output-env-file sdn-images.yaml
- become: yes
- become_user: stack
- when: sdn != false
- - name: Update Ceph tag for aarch64 in container env file
- lineinfile:
- path: /home/stack/overcloud_containers.yml
- regexp: '.*ceph.*'
- line: '- imagename: docker.io/ceph/daemon:master-fafda7d-luminous-centos-7-aarch64'
- when: aarch64
- - name: Update Ceph tag for aarch64 in container image file
- lineinfile:
- path: /home/stack/docker-images.yaml
- regexp: '^DockerCephDaemonImage'
- line: 'DockerCephDaemonImage: {{ undercloud_ip }}:8787/ceph/daemon/master-fafda7d-luminous-centos-7-aarch64'
- when: aarch64
- - name: Upload docker images to local registry
- shell: >
- {{ stackrc }} && openstack overcloud container image upload
- --config-file /home/stack/overcloud_containers.yml
- - name: Upload SDN docker images to local registry
- shell: >
- {{ stackrc }} && openstack overcloud container image upload
- --config-file /home/stack/sdn_containers.yml
- when: sdn != false
- name: Collect docker images in registry
uri:
url: http://{{ undercloud_ip }}:8787/v2/_catalog
body_format: json
register: response
- - name: Patch Docker images
- shell: >
- cd /home/stack/containers/{{ item }} && docker build
- -t {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex .
+ - include_tasks: patch_containers.yml
+ with_items: "{{ patched_docker_services }}"
+ loop_control:
+ loop_var: item
when:
- patched_docker_services|length > 0
- item in (response.json)['repositories']|join(" ")
- with_items: "{{ patched_docker_services }}"
- name: Push patched docker images to local registry
- shell: docker push {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex
+ shell: "{{ container_client }} push {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex"
when:
- patched_docker_services|length > 0
- item in (response.json)['repositories']|join(" ")
with_items: "{{ patched_docker_services }}"
- name: Modify Images with Apex tag
replace:
- path: "{{ item[0] }}"
- regexp: "(\\s*Docker.*?:.*?centos-binary-{{ item[1] }}):.*"
+ path: "/home/stack/docker-images.yaml"
+ regexp: "(\\s*Docker.*?:.*?centos-binary-{{ item }}):.*"
replace: '\1:apex'
- with_nested:
- - [ '/home/stack/sdn-images.yaml', '/home/stack/docker-images.yaml']
- - "{{ patched_docker_services }}" \ No newline at end of file
+ with_items: "{{ patched_docker_services }}"
+ become: yes
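With the single prepare command, only docker-images.yaml needs the Apex retag, so the "Modify Images with Apex tag" task now runs one replace per patched service. A small, illustrative Python version of that regexp rewrite (the example service name is an assumption; Apex supplies patched_docker_services):

"""Illustration of the :apex retag performed by the replace task above."""
import re

PATH = '/home/stack/docker-images.yaml'
patched_services = ['neutron-server']  # assumption for the example

with open(PATH) as f:
    content = f.read()

for svc in patched_services:
    pattern = r'(\s*Docker.*?:.*?centos-binary-{}):.*'.format(re.escape(svc))
    content = re.sub(pattern, r'\1:apex', content)

with open(PATH, 'w') as f:
    f.write(content)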
diff --git a/lib/ansible/playbooks/undercloud_aarch64.yml b/lib/ansible/playbooks/undercloud_aarch64.yml
index ddaf1b04..efcbdabd 100644
--- a/lib/ansible/playbooks/undercloud_aarch64.yml
+++ b/lib/ansible/playbooks/undercloud_aarch64.yml
@@ -3,13 +3,12 @@
tasks:
- name: aarch64 configuration
block:
- - shell: yum -y reinstall grub2-efi shim
- copy:
src: /boot/efi/EFI/centos/grubaa64.efi
- dest: /tftpboot/grubaa64.efi
+ dest: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/grubaa64.efi
remote_src: yes
- file:
- path: /tftpboot/EFI/centos
+ path: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/EFI/centos
state: directory
mode: 0755
- copy:
@@ -18,32 +17,25 @@
set timeout=5
set hidden_timeout_quiet=false
menuentry "local" {
- configfile (hd0,gpt3)/boot/grub2/grub.cfg
+ configfile /var/lib/ironic/tftpboot/$net_default_mac.conf
}
- dest: /tftpboot/EFI/centos/grub.cfg
+ dest: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/EFI/centos/grub.cfg
mode: 0644
- - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi'
- - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template \$pybasedir/drivers/modules/pxe_grub_config.template'
-
- - systemd:
- name: openstack-ironic-conductor
- state: restarted
- enabled: yes
- - replace:
- path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
- regexp: 'linuxefi'
- replace: 'linux'
- - replace:
- path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
- regexp: 'initrdefi'
- replace: 'initrd'
+ - shell: 'sudo crudini --set /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf pxe pxe_bootfile_name_by_arch aarch64:grubaa64.efi'
+ - shell: 'sudo crudini --set /var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf pxe pxe_config_template_by_arch aarch64:\$pybasedir/drivers/modules/pxe_grub_config.template'
+ - shell: 'docker exec -u root ironic_conductor sed -i "s/initrdefi/initrd/g" /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template'
+ - shell: 'docker exec -u root ironic_conductor sed -i "s/linuxefi/linux/g" /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template'
- lineinfile:
- path: /tftpboot/map-file
+ path: /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/map-file
insertafter: EOF
state: present
line: ''
- - shell: "echo 'r ^/EFI/centos/grub.cfg-(.*) /tftpboot/pxelinux.cfg/\\1' | sudo tee --append /tftpboot/map-file"
- - shell: "echo 'r ^/EFI/centos/grub.cfg /tftpboot/EFI/centos/grub.cfg' | sudo tee --append /tftpboot/map-file"
+ - shell: "echo 'r ^/EFI/centos/grub.cfg-(.*) /var/lib/ironic/tftpboot/pxelinux.cfg/\\1' | sudo tee --append /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/map-file"
+ - shell: "echo 'r ^/EFI/centos/grub.cfg /var/lib/ironic/tftpboot/EFI/centos/grub.cfg' | sudo tee --append /var/lib/config-data/puppet-generated/ironic/var/lib/ironic/tftpboot/map-file"
+ - shell: "docker restart {{ item }}"
+ with_items:
+ - ironic_conductor
+ - ironic_pxe_tftp
- systemd:
name: xinetd
state: restarted
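On aarch64 the PXE settings are now written into the ironic container's puppet-generated ironic.conf via crudini, and the grub template is patched inside the running ironic_conductor container. A hedged configparser sketch of just the two crudini calls (path and option names copied from the tasks above; the real playbook shells out to crudini):

"""Illustration of the two crudini calls above; not the code Apex runs."""
import configparser

CONF = '/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf'

cfg = configparser.ConfigParser(interpolation=None)
cfg.read(CONF)
if not cfg.has_section('pxe'):
    cfg.add_section('pxe')
cfg.set('pxe', 'pxe_bootfile_name_by_arch', 'aarch64:grubaa64.efi')
cfg.set('pxe', 'pxe_config_template_by_arch',
        'aarch64:$pybasedir/drivers/modules/pxe_grub_config.template')
with open(CONF, 'w') as f:
    cfg.write(f)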
diff --git a/setup.cfg b/setup.cfg
index 5b394fb8..4bb9312e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -32,6 +32,7 @@ packages =
apex
data_files =
share/opnfv-apex/ =
+ build/containers-prepare-parameter.yaml
build/network-environment.yaml
build/opnfv-environment.yaml
build/upstream-environment.yaml
@@ -39,6 +40,7 @@ data_files =
build/nics-template.yaml.jinja2
build/csit-environment.yaml
build/csit-queens-environment.yaml
+ build/csit-rocky-environment.yaml
build/virtual-environment.yaml
build/baremetal-environment.yaml
build/domain.xml
diff --git a/tox.ini b/tox.ini
index 4d9ed626..f69881ed 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = docs,pep8,pylint,py35
+envlist = docs,docs-linkcheck,pep8,pylint,py35
[testenv]
usedevelop = True
@@ -24,3 +24,13 @@ commands = flake8 --exclude .build,build --ignore=F401
[testenv:py35]
basepython = python3
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck