summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore4
-rw-r--r--apex/build.py1
-rw-r--r--apex/build_utils.py27
-rw-r--r--apex/builders/common_builder.py90
-rw-r--r--apex/builders/exceptions.py12
-rw-r--r--apex/builders/overcloud_builder.py73
-rw-r--r--apex/builders/undercloud_builder.py7
-rw-r--r--apex/common/constants.py18
-rw-r--r--apex/common/exceptions.py4
-rw-r--r--apex/common/utils.py91
-rw-r--r--apex/deploy.py127
-rw-r--r--apex/overcloud/config.py6
-rw-r--r--apex/overcloud/deploy.py302
-rw-r--r--apex/settings/deploy_settings.py10
-rw-r--r--apex/tests/config/98faaca.diff331
-rw-r--r--apex/tests/test_apex_build_utils.py21
-rw-r--r--apex/tests/test_apex_common_builder.py69
-rw-r--r--apex/tests/test_apex_common_utils.py48
-rw-r--r--apex/tests/test_apex_deploy.py70
-rw-r--r--apex/tests/test_apex_overcloud_builder.py65
-rw-r--r--apex/tests/test_apex_overcloud_deploy.py295
-rw-r--r--apex/tests/test_apex_undercloud.py55
-rw-r--r--apex/tests/test_apex_virtual_utils.py20
-rw-r--r--apex/undercloud/undercloud.py77
-rw-r--r--apex/utils.py107
-rwxr-xr-xapex/virtual/configure_vm.py4
-rw-r--r--apex/virtual/exceptions.py12
-rw-r--r--apex/virtual/utils.py36
-rw-r--r--build/Makefile6
-rwxr-xr-xbuild/barometer-install.sh84
-rw-r--r--build/nics-template.yaml.jinja2358
-rw-r--r--build/opnfv-environment.yaml5
-rwxr-xr-xbuild/overcloud-full.sh15
-rw-r--r--build/patches/neutron-patch-NSDriver.patch36
-rw-r--r--build/patches/puppet-ceph.patch76
-rw-r--r--build/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch80
-rw-r--r--build/patches/tacker-client-fix-symmetrical.patch31
-rw-r--r--build/rpm_specs/networking-vpp.spec13
-rw-r--r--build/rpm_specs/opnfv-apex-common.spec28
-rwxr-xr-xbuild/undercloud.sh2
-rw-r--r--build/upstream-environment.yaml1
-rw-r--r--build/variables.sh15
-rw-r--r--ci/PR_revision.log6
-rw-r--r--config/deploy/deploy_settings.yaml14
-rw-r--r--config/deploy/os-nosdn-master_upstream-noha.yaml11
-rw-r--r--config/deploy/os-nosdn-pike_upstream-noha.yaml (renamed from config/deploy/os-nosdn-pike-noha.yaml)0
-rw-r--r--config/deploy/os-nosdn-queens_upstream-noha.yaml11
-rw-r--r--config/deploy/os-odl-l2gw-ha.yaml12
-rw-r--r--config/deploy/os-odl-l2gw-noha.yaml12
-rw-r--r--config/deploy/os-odl-master_upstream-noha.yaml (renamed from config/deploy/os-odl-pike-noha.yaml)7
-rw-r--r--config/deploy/os-odl-pike_upstream-noha.yaml12
-rw-r--r--config/deploy/os-odl-queens_upstream-noha.yaml16
-rw-r--r--config/deploy/os-odl-sriov-ha.yaml21
-rw-r--r--config/deploy/os-odl-sriov-noha.yaml21
-rw-r--r--config/network/network_settings.yaml14
-rw-r--r--config/network/network_settings_v6.yaml4
-rw-r--r--config/network/network_settings_vlans.yaml4
-rw-r--r--config/network/network_settings_vpp.yaml314
-rw-r--r--docs/contributor/upstream-overcloud-container-design.rst126
-rw-r--r--docs/release/installation/architecture.rst8
-rw-r--r--docs/release/installation/baremetal.rst8
-rw-r--r--docs/release/installation/virtual.rst6
-rw-r--r--lib/ansible/playbooks/configure_undercloud.yml52
-rw-r--r--lib/ansible/playbooks/deploy_dependencies.yml7
-rw-r--r--lib/ansible/playbooks/deploy_overcloud.yml14
-rw-r--r--lib/ansible/playbooks/fetch_overcloud_logs.yml25
-rw-r--r--lib/ansible/playbooks/fetch_overcloud_nodes.yml13
-rw-r--r--lib/ansible/playbooks/post_deploy_overcloud.yml17
-rw-r--r--lib/ansible/playbooks/post_deploy_undercloud.yml64
-rw-r--r--lib/ansible/playbooks/prepare_overcloud_containers.yml105
-rw-r--r--lib/ansible/playbooks/undercloud_aarch64.yml2
-rw-r--r--requirements.txt1
-rw-r--r--setup.cfg1
73 files changed, 2805 insertions, 855 deletions
diff --git a/.gitignore b/.gitignore
index f42d4c6..2789a24 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,8 +4,8 @@
/docs_output/
/releng/
apex.egg-info/
-/apex/tests/playbooks/*.retry
coverage.xml
nosetests.xml
-ci/*.log
.*
+*.log
+*.retry
diff --git a/apex/build.py b/apex/build.py
index 08f91ab..dff25ac 100644
--- a/apex/build.py
+++ b/apex/build.py
@@ -225,6 +225,7 @@ def main():
console.setLevel(log_level)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console)
+ utils.install_ansible()
# Since we only support building inside of git repo this should be fine
try:
apex_root = subprocess.check_output(
diff --git a/apex/build_utils.py b/apex/build_utils.py
index c9d8472..1c413df 100644
--- a/apex/build_utils.py
+++ b/apex/build_utils.py
@@ -90,6 +90,31 @@ def clone_fork(args):
logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))
+def strip_patch_sections(patch, sections=['releasenotes']):
+ """
+ Removes patch sections from a diff which contain a file path
+ :param patch: patch to strip
+ :param sections: list of keywords to use to strip out of the patch file
+ :return: stripped patch
+ """
+
+ append_line = True
+ tmp_patch = []
+ for line in patch.split("\n"):
+ if re.match(r'diff\s', line):
+ for section in sections:
+ if re.search(section, line):
+ logging.debug("Stripping {} from patch: {}".format(
+ section, line))
+ append_line = False
+ break
+ else:
+ append_line = True
+ if append_line:
+ tmp_patch.append(line)
+ return '\n'.join(tmp_patch)
+
+
def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
logging.info("Fetching patch for change id {}".format(change_id))
change = get_change(url, repo, branch, change_id)
@@ -100,7 +125,7 @@ def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
change_id)
patch_url = "changes/{}/revisions/{}/patch".format(change_path,
current_revision)
- return rest.get(patch_url)
+ return strip_patch_sections(rest.get(patch_url))
def get_parser():
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index fd3bcc3..05a81ef 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -10,11 +10,16 @@
# Common building utilities for undercloud and overcloud
import git
+import json
import logging
import os
+import re
+import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
+from apex.builders import exceptions as exc
from apex.common import constants as con
+from apex.common import utils
from apex.virtual import utils as virt_utils
@@ -35,9 +40,38 @@ def project_to_path(project):
return "/usr/lib/python2.7/site-packages/{}".format(project)
+def project_to_docker_image(project):
+ """
+ Translates OpenStack project to OOO services that are containerized
+ :param project: name of OpenStack project
+ :return: List of OOO docker service names
+ """
+ # Fetch all docker containers in docker hub with tripleo and filter
+ # based on project
+ hub_output = utils.open_webpage(con.DOCKERHUB_OOO, timeout=10)
+ try:
+ results = json.loads(hub_output.decode())['results']
+ except Exception as e:
+ logging.error("Unable to parse docker hub output for "
+ "tripleoupstream repository")
+ logging.debug("HTTP response from dockerhub:\n{}".format(hub_output))
+ raise exc.ApexCommonBuilderException(
+ "Failed to parse docker image info from Docker Hub: {}".format(e))
+ logging.debug("Docker Hub tripleoupstream entities found: {}".format(
+ results))
+ docker_images = list()
+ for result in results:
+ if result['name'].startswith("centos-binary-{}".format(project)):
+ # add as docker image shortname (just service name)
+ docker_images.append(result['name'].replace('centos-binary-', ''))
+
+ return docker_images
+
+
def add_upstream_patches(patches, image, tmp_dir,
default_branch=os.path.join('stable',
- con.DEFAULT_OS_VERSION)):
+ con.DEFAULT_OS_VERSION),
+ uc_ip=None, docker_tag=None):
"""
Adds patches from upstream OpenStack gerrit to Undercloud for deployment
:param patches: list of patches
@@ -45,10 +79,13 @@ def add_upstream_patches(patches, image, tmp_dir,
:param tmp_dir: to store temporary patch files
:param default_branch: default branch to fetch commit (if not specified
in patch)
- :return: None
+ :param uc_ip: undercloud IP (required only for docker patches)
+ :param docker_tag: Docker Tag (required only for docker patches)
+ :return: Set of docker services patched (if applicable)
"""
virt_ops = [{con.VIRT_INSTALL: 'patch'}]
logging.debug("Evaluating upstream patches:\n{}".format(patches))
+ docker_services = set()
for patch in patches:
assert isinstance(patch, dict)
assert all(i in patch.keys() for i in ['project', 'change-id'])
@@ -60,21 +97,52 @@ def add_upstream_patches(patches, image, tmp_dir,
patch['project'], branch)
if patch_diff:
patch_file = "{}.patch".format(patch['change-id'])
- patch_file_path = os.path.join(tmp_dir, patch_file)
+ project_path = project_to_path(patch['project'])
+ # If docker tag and python we know this patch belongs on docker
+ # container for a docker service. Therefore we build the dockerfile
+ # and move the patch into the containers directory. We also assume
+ # this builder call is for overcloud, because we do not support
+ # undercloud containers
+ if docker_tag and 'python' in project_path:
+ # Projects map to multiple THT services, need to check which
+ # are supported
+ ooo_docker_services = project_to_docker_image(patch['project'])
+ else:
+ ooo_docker_services = []
+ # If we found services, then we treat the patch like it applies to
+ # docker only
+ if ooo_docker_services:
+ os_version = default_branch.replace('stable/', '')
+ for service in ooo_docker_services:
+ docker_services = docker_services.union({service})
+ docker_cmds = [
+ "WORKDIR {}".format(project_path),
+ "ADD {} {}".format(patch_file, project_path),
+ "RUN patch -p1 < {}".format(patch_file)
+ ]
+ src_img_uri = "{}:8787/{}/centos-binary-{}:" \
+ "{}".format(uc_ip, os_version, service,
+ docker_tag)
+ oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
+ src_img_uri)
+ patch_file_path = os.path.join(tmp_dir, 'containers',
+ patch_file)
+ else:
+ patch_file_path = os.path.join(tmp_dir, patch_file)
+ virt_ops.extend([
+ {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+ project_path)},
+ {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+ project_path, patch_file)}])
+ logging.info("Adding patch {} to {}".format(patch_file,
+ image))
with open(patch_file_path, 'w') as fh:
fh.write(patch_diff)
- project_path = project_to_path(patch['project'])
- virt_ops.extend([
- {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
- project_path)},
- {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
- project_path, patch_file)}])
- logging.info("Adding patch {} to {}".format(patch_file,
- image))
else:
logging.info("Ignoring patch:\n{}".format(patch))
if len(virt_ops) > 1:
virt_utils.virt_customize(virt_ops, image)
+ return docker_services
def add_repo(repo_url, repo_name, image, tmp_dir):
diff --git a/apex/builders/exceptions.py b/apex/builders/exceptions.py
new file mode 100644
index 0000000..b88f02b
--- /dev/null
+++ b/apex/builders/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexCommonBuilderException(Exception):
+ pass
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
index e7b0796..a84d100 100644
--- a/apex/builders/overcloud_builder.py
+++ b/apex/builders/overcloud_builder.py
@@ -10,13 +10,17 @@
# Used to modify overcloud qcow2 image
import logging
+import os
+import tarfile
-from apex.builders import common_builder as c_builder
+import apex.builders.common_builder
from apex.common import constants as con
+from apex.common.exceptions import ApexBuildException
from apex.virtual import utils as virt_utils
-def inject_opendaylight(odl_version, image, tmp_dir):
+def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
+ os_version, docker_tag=None):
assert odl_version in con.VALID_ODL_VERSIONS
# add repo
if odl_version == 'master':
@@ -28,18 +32,77 @@ def inject_opendaylight(odl_version, image, tmp_dir):
odl_url = "https://nexus.opendaylight.org/content/repositories" \
"/opendaylight-{}-epel-7-x86_64-devel/".format(odl_pkg_version)
repo_name = "opendaylight-{}".format(odl_pkg_version)
- c_builder.add_repo(odl_url, repo_name, image, tmp_dir)
+ apex.builders.common_builder.add_repo(odl_url, repo_name, image, tmp_dir)
# download puppet-opendaylight
- archive = c_builder.create_git_archive(
+ archive = apex.builders.common_builder.create_git_archive(
repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight',
tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/')
# install ODL, puppet-odl
virt_ops = [
- {con.VIRT_INSTALL: 'opendaylight'},
{con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
{con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
"puppet-opendaylight.tar"}
]
+ if docker_tag:
+ docker_cmds = [
+ "RUN yum remove opendaylight -y",
+ "RUN echo $'[opendaylight]\\n\\",
+ "baseurl={}\\n\\".format(odl_url),
+ "gpgcheck=0\\n\\",
+ "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
+ "RUN yum -y install opendaylight"
+ ]
+ src_img_uri = "{}:8787/{}/centos-binary-{}:" \
+ "{}".format(uc_ip, os_version, 'opendaylight',
+ docker_tag)
+ build_dockerfile('opendaylight', tmp_dir, docker_cmds, src_img_uri)
+ else:
+ virt_ops.append({con.VIRT_INSTALL: 'opendaylight'})
virt_utils.virt_customize(virt_ops, image)
logging.info("OpenDaylight injected into {}".format(image))
+
+
+def build_dockerfile(service, tmp_dir, docker_cmds, src_image_uri):
+ """
+ Builds docker file per service and stores it in a
+ tmp_dir/containers/<service> directory. If the Dockerfile already exists,
+ simply append the docker cmds to it.
+ :param service: name of sub-directory to store Dockerfile in
+ :param tmp_dir: Temporary directory to store the container's dockerfile in
+ :param docker_cmds: List of commands to insert into the dockerfile
+ :param src_image_uri: Docker URI format for where the source image exists
+ :return: None
+ """
+ logging.debug("Building Dockerfile for {} with docker_cmds: {}".format(
+ service, docker_cmds))
+ c_dir = os.path.join(tmp_dir, 'containers')
+ service_dir = os.path.join(c_dir, service)
+ if not os.path.isdir(service_dir):
+ os.makedirs(service_dir, exist_ok=True)
+ from_cmd = "FROM {}\n".format(src_image_uri)
+ service_file = os.path.join(service_dir, 'Dockerfile')
+ assert isinstance(docker_cmds, list)
+ if os.path.isfile(service_file):
+ append_cmds = True
+ else:
+ append_cmds = False
+ with open(service_file, "a+") as fh:
+ if not append_cmds:
+ fh.write(from_cmd)
+ fh.write('\n'.join(docker_cmds))
+
+
+def archive_docker_patches(tmp_dir):
+ """
+ Archives Overcloud docker patches into a tar file for upload to Undercloud
+ :param tmp_dir: temporary directory where containers folder is stored
+ :return: None
+ """
+ container_path = os.path.join(tmp_dir, 'containers')
+ if not os.path.isdir(container_path):
+ raise ApexBuildException("Docker directory for patches not found: "
+ "{}".format(container_path))
+ archive_file = os.path.join(tmp_dir, 'docker_patches.tar.gz')
+ with tarfile.open(archive_file, "w:gz") as tar:
+ tar.add(container_path, arcname=os.path.basename(container_path))
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index baba8a5..268bad7 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -20,6 +20,11 @@ def add_upstream_packages(image):
:return: None
"""
virt_ops = list()
+ # FIXME(trozet): we have to lock to this beta ceph ansible package because
+ # the current RPM versioning is wrong and an older package has a higher
+ # version than this package. We should change to just 'ceph-ansible'
+ # once the package/repo has been fixed. Note: luminous is fine here
+ # because Apex will only support container deployment for Queens and later
pkgs = [
'openstack-utils',
'ceph-common',
@@ -29,6 +34,8 @@ def add_upstream_packages(image):
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
+ 'http://mirror.centos.org/centos/7/storage/x86_64/ceph-luminous' +
+ '/ceph-ansible-3.1.0-0.beta3.1.el7.noarch.rpm'
]
for pkg in pkgs:
diff --git a/apex/common/constants.py b/apex/common/constants.py
index a2b9a63..4f72b08 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -16,7 +16,7 @@ STORAGE_NETWORK = 'storage'
API_NETWORK = 'api'
CONTROLLER = 'controller'
COMPUTE = 'compute'
-
+ANSIBLE_PATH = 'ansible/playbooks'
OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
STORAGE_NETWORK, API_NETWORK]
DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
@@ -39,10 +39,14 @@ VIRT_PW = '--root-password'
THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
+THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services-docker')
DEFAULT_OS_VERSION = 'pike'
DEFAULT_ODL_VERSION = 'nitrogen'
VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+CEPH_VERSION_MAP = {'pike': 'jewel',
+ 'queens': 'luminous',
+ 'master': 'luminous'}
PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
'/puppet-opendaylight'
DEBUG_OVERCLOUD_PW = 'opnfvapex'
@@ -50,3 +54,15 @@ NET_ENV_FILE = 'network-environment.yaml'
DEPLOY_TIMEOUT = 90
UPSTREAM_RDO = 'https://images.rdoproject.org/pike/delorean/current-tripleo/'
OPENSTACK_GERRIT = 'https://review.openstack.org'
+
+DOCKER_TAG = 'current-tripleo-rdo'
+# Maps regular service files to docker versions
+# None value means mapping is same as key
+VALID_DOCKER_SERVICES = {
+ 'neutron-opendaylight.yaml': None,
+ 'neutron-opendaylight-dpdk.yaml': None,
+ 'neutron-opendaylight-sriov.yaml': None,
+ 'neutron-ml2-ovn.yaml': 'neutron-ovn.yaml'
+}
+DOCKERHUB_OOO = ('https://registry.hub.docker.com/v2/repositories'
+ '/tripleoupstream/?page_size=1024')
diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
index 54d9983..a4d390a 100644
--- a/apex/common/exceptions.py
+++ b/apex/common/exceptions.py
@@ -18,3 +18,7 @@ class JumpHostNetworkException(Exception):
class ApexCleanException(Exception):
pass
+
+
+class ApexBuildException(Exception):
+ pass
diff --git a/apex/common/utils.py b/apex/common/utils.py
index 13250a4..cb7cbe1 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -8,10 +8,12 @@
##############################################################################
import datetime
+import distro
import json
import logging
import os
import pprint
+import socket
import subprocess
import tarfile
import time
@@ -20,6 +22,8 @@ import urllib.request
import urllib.parse
import yaml
+from apex.common import exceptions as exc
+
def str2bool(var):
if isinstance(var, bool):
@@ -137,30 +141,45 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
raise Exception(e)
-def fetch_upstream_and_unpack(dest, url, targets):
+def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
"""
Fetches targets from a url destination and downloads them if they are
newer. Also unpacks tar files in dest dir.
:param dest: Directory to download and unpack files to
:param url: URL where target files are located
:param targets: List of target files to download
+ :param fetch: Whether or not to fetch latest from internet (boolean)
:return: None
"""
os.makedirs(dest, exist_ok=True)
assert isinstance(targets, list)
for target in targets:
- download_target = True
target_url = urllib.parse.urljoin(url, target)
target_dest = os.path.join(dest, target)
- logging.debug("Fetching and comparing upstream target: \n{}".format(
- target_url))
- try:
- u = urllib.request.urlopen(target_url)
- except urllib.error.URLError as e:
- logging.error("Failed to fetch target url. Error: {}".format(
- e.reason))
- raise
- if os.path.isfile(target_dest):
+ target_exists = os.path.isfile(target_dest)
+ if fetch:
+ download_target = True
+ elif not target_exists:
+ logging.warning("no-fetch requested but target: {} is not "
+ "cached, will download".format(target_dest))
+ download_target = True
+ else:
+ logging.info("no-fetch requested and previous cache exists for "
+ "target: {}. Will skip download".format(target_dest))
+ download_target = False
+
+ if download_target:
+ logging.debug("Fetching and comparing upstream"
+ " target: \n{}".format(target_url))
+ try:
+ u = urllib.request.urlopen(target_url)
+ except urllib.error.URLError as e:
+ logging.error("Failed to fetch target url. Error: {}".format(
+ e.reason))
+ raise
+ # Check if previous file and fetch we need to compare files to
+ # determine if download is necessary
+ if target_exists and download_target:
logging.debug("Previous file found: {}".format(target_dest))
metadata = u.info()
headers = metadata.items()
@@ -184,6 +203,7 @@ def fetch_upstream_and_unpack(dest, url, targets):
download_target = False
else:
logging.debug('Unable to find last modified url date')
+
if download_target:
urllib.request.urlretrieve(target_url, filename=target_dest)
logging.info("Target downloaded: {}".format(target))
@@ -192,3 +212,52 @@ def fetch_upstream_and_unpack(dest, url, targets):
tar = tarfile.open(target_dest)
tar.extractall(path=dest)
tar.close()
+
+
+def install_ansible():
+ # we only install for CentOS/Fedora for now
+ dist = distro.id()
+ if 'centos' in dist:
+ pkg_mgr = 'yum'
+ elif 'fedora' in dist:
+ pkg_mgr = 'dnf'
+ else:
+ return
+
+ # yum python module only exists for 2.x, so use subprocess
+ try:
+ subprocess.check_call([pkg_mgr, '-y', 'install', 'ansible'])
+ except subprocess.CalledProcessError:
+ logging.warning('Unable to install Ansible')
+
+
+def internet_connectivity():
+ try:
+ urllib.request.urlopen('http://opnfv.org', timeout=3)
+ return True
+ except (urllib.request.URLError, socket.timeout):
+ logging.debug('No internet connectivity detected')
+ return False
+
+
+def open_webpage(url, timeout=5):
+ try:
+ response = urllib.request.urlopen(url, timeout=timeout)
+ return response.read()
+ except (urllib.request.URLError, socket.timeout):
+ logging.error("Unable to open URL: {}".format(url))
+ raise
+
+
+def edit_tht_env(env_file, section, settings):
+ assert isinstance(settings, dict)
+ with open(env_file) as fh:
+ data = yaml.safe_load(fh)
+
+ if section not in data.keys():
+ data[section] = {}
+ for setting, value in settings.items():
+ data[section][setting] = value
+ with open(env_file, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
+ logging.debug("Data written to env file {}:\n{}".format(env_file, data))
diff --git a/apex/deploy.py b/apex/deploy.py
index 5485d15..bc4d078 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -21,13 +21,13 @@ import tempfile
import apex.virtual.configure_vm as vm_lib
import apex.virtual.utils as virt_utils
+import apex.builders.common_builder as c_builder
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.undercloud_builder as uc_builder
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
from apex import NetworkSettings
-from apex.builders import common_builder as c_builder
-from apex.builders import overcloud_builder as oc_builder
-from apex.builders import undercloud_builder as uc_builder
from apex.common import utils
from apex.common import constants
from apex.common import parsers
@@ -39,7 +39,6 @@ from apex.overcloud import config as oc_cfg
from apex.overcloud import deploy as oc_deploy
APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
-ANSIBLE_PATH = 'ansible/playbooks'
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
@@ -181,6 +180,10 @@ def create_deploy_parser():
default=False,
help='Force deployment to use upstream '
'artifacts')
+ deploy_parser.add_argument('--no-fetch', action='store_true',
+ default=False,
+ help='Ignore fetching latest upstream and '
+ 'use what is in cache')
return deploy_parser
@@ -234,6 +237,7 @@ def main():
console.setLevel(log_level)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console)
+ utils.install_ansible()
validate_deploy_args(args)
# Parse all settings
deploy_settings = DeploySettings(args.deploy_settings_file)
@@ -248,7 +252,11 @@ def main():
os_version=os_version)
net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
utils.dump_yaml(dict(net_env), net_env_target)
+
+ # get global deploy params
ha_enabled = deploy_settings['global_params']['ha_enabled']
+ introspect = deploy_settings['global_params'].get('introspect', True)
+
if args.virtual:
if args.virt_compute_ram is None:
compute_ram = args.virt_default_ram
@@ -300,7 +308,7 @@ def main():
'virsh_enabled_networks': net_settings.enabled_network_list
}
utils.run_ansible(ansible_args,
- os.path.join(args.lib_dir, ANSIBLE_PATH,
+ os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'deploy_dependencies.yml'))
uc_external = False
if 'external' in net_settings.enabled_network_list:
@@ -347,15 +355,9 @@ def main():
constants.DEFAULT_OS_VERSION, os_version)
upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
- upstream_targets)
+ upstream_targets,
+ fetch=not args.no_fetch)
sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
- if ds_opts['sdn_controller'] == 'opendaylight':
- logging.info("Preparing upstream image with OpenDaylight")
- oc_builder.inject_opendaylight(
- odl_version=ds_opts['odl_version'],
- image=sdn_image,
- tmp_dir=APEX_TEMP_DIR
- )
# copy undercloud so we don't taint upstream fetch
uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
@@ -367,28 +369,36 @@ def main():
patches = deploy_settings['global_params']['patches']
c_builder.add_upstream_patches(patches['undercloud'], uc_image,
APEX_TEMP_DIR, branch)
- logging.info('Adding patches to overcloud')
- c_builder.add_upstream_patches(patches['overcloud'], sdn_image,
- APEX_TEMP_DIR, branch)
else:
sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
uc_image = 'undercloud.qcow2'
+ # patches are ignored in non-upstream deployments
+ patches = {'overcloud': [], 'undercloud': []}
+ # Create/Start Undercloud VM
undercloud = uc_lib.Undercloud(args.image_dir,
args.deploy_dir,
root_pw=root_pw,
external_network=uc_external,
- image_name=os.path.basename(uc_image))
+ image_name=os.path.basename(uc_image),
+ os_version=os_version)
undercloud.start()
+ undercloud_admin_ip = net_settings['networks'][
+ constants.ADMIN_NETWORK]['installer_vm']['ip']
+
+ if upstream and ds_opts['containers']:
+ tag = constants.DOCKER_TAG
+ else:
+ tag = None
# Generate nic templates
for role in 'compute', 'controller':
oc_cfg.create_nic_template(net_settings, deploy_settings, role,
args.deploy_dir, APEX_TEMP_DIR)
# Install Undercloud
- undercloud.configure(net_settings,
- os.path.join(args.lib_dir, ANSIBLE_PATH,
+ undercloud.configure(net_settings, deploy_settings,
+ os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'configure_undercloud.yml'),
- APEX_TEMP_DIR)
+ APEX_TEMP_DIR, virtual_oc=args.virtual)
# Prepare overcloud-full.qcow2
logging.info("Preparing Overcloud for deployment...")
@@ -404,23 +414,58 @@ def main():
args.env_file = 'upstream-environment.yaml'
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
if not upstream:
+ # TODO(trozet): Invoke with containers after Fraser migration
oc_deploy.prep_env(deploy_settings, net_settings, inventory,
opnfv_env, net_env_target, APEX_TEMP_DIR)
- oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
- root_pw=root_pw)
else:
- shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
- 'overcloud-full.qcow2'))
shutil.copyfile(
opnfv_env,
os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
)
+ patched_containers = oc_deploy.prep_image(
+ deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
+ root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'],
+ upstream=upstream)
oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
APEX_TEMP_DIR, args.virtual,
os.path.basename(opnfv_env),
net_data=net_data)
- deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
+ # Prepare undercloud with containers
+ docker_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'prepare_overcloud_containers.yml')
+ if ds_opts['containers']:
+ ceph_version = constants.CEPH_VERSION_MAP[ds_opts['os_version']]
+ ceph_docker_image = "ceph/daemon:tag-build-master-" \
+ "{}-centos-7".format(ceph_version)
+ logging.info("Preparing Undercloud with Docker containers")
+ if patched_containers:
+ oc_builder.archive_docker_patches(APEX_TEMP_DIR)
+ container_vars = dict()
+ container_vars['apex_temp_dir'] = APEX_TEMP_DIR
+ container_vars['patched_docker_services'] = list(
+ patched_containers)
+ container_vars['container_tag'] = constants.DOCKER_TAG
+ container_vars['stackrc'] = 'source /home/stack/stackrc'
+ container_vars['upstream'] = upstream
+ container_vars['sdn'] = ds_opts['sdn_controller']
+ container_vars['undercloud_ip'] = undercloud_admin_ip
+ container_vars['os_version'] = os_version
+ container_vars['ceph_docker_image'] = ceph_docker_image
+ container_vars['sdn_env_file'] = \
+ oc_deploy.get_docker_sdn_file(ds_opts)
+ try:
+ utils.run_ansible(container_vars, docker_playbook,
+ host=undercloud.ip, user='stack',
+ tmp_dir=APEX_TEMP_DIR)
+ logging.info("Container preparation complete")
+ except Exception:
+ logging.error("Unable to complete container prep on "
+ "Undercloud")
+ os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+ raise
+
+ deploy_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'deploy_overcloud.yml')
virt_env = 'virtual-environment.yaml'
bm_env = 'baremetal-environment.yaml'
@@ -434,6 +479,9 @@ def main():
deploy_vars['virtual'] = args.virtual
deploy_vars['debug'] = args.debug
deploy_vars['aarch64'] = platform.machine() == 'aarch64'
+ deploy_vars['introspect'] = not (args.virtual or
+ deploy_vars['aarch64'] or
+ not introspect)
deploy_vars['dns_server_args'] = ''
deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
@@ -441,6 +489,8 @@ def main():
deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
deploy_vars['upstream'] = upstream
deploy_vars['os_version'] = os_version
+ deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
+ deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
for dns_server in net_settings['dns_servers']:
deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
dns_server)
@@ -465,7 +515,7 @@ def main():
'UserKnownHostsFile=/dev/null -o ' \
'LogLevel=error'
deploy_vars['external_network_cmds'] = \
- oc_deploy.external_network_cmds(net_settings)
+ oc_deploy.external_network_cmds(net_settings, deploy_settings)
# TODO(trozet): just parse all ds_opts as deploy vars one time
deploy_vars['gluon'] = ds_opts['gluon']
deploy_vars['sdn'] = ds_opts['sdn_controller']
@@ -483,20 +533,15 @@ def main():
else:
deploy_vars['congress'] = False
deploy_vars['calipso'] = ds_opts.get('calipso', False)
- deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
- 'installer_vm']['ip']
- # TODO(trozet): this is probably redundant with getting external
- # network info from undercloud.py
- if 'external' in net_settings.enabled_network_list:
- ext_cidr = net_settings['networks']['external'][0]['cidr']
- else:
- ext_cidr = net_settings['networks']['admin']['cidr']
- deploy_vars['external_cidr'] = str(ext_cidr)
- if ext_cidr.version == 6:
- deploy_vars['external_network_ipv6'] = True
+ deploy_vars['calipso_ip'] = undercloud_admin_ip
+ # overcloudrc.v3 removed and set as default in queens and later
+ if os_version == 'pike':
+ deploy_vars['overcloudrc_files'] = ['overcloudrc',
+ 'overcloudrc.v3']
else:
- deploy_vars['external_network_ipv6'] = False
- post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+ deploy_vars['overcloudrc_files'] = ['overcloudrc']
+
+ post_undercloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'post_deploy_undercloud.yml')
logging.info("Executing post deploy configuration undercloud playbook")
try:
@@ -511,9 +556,11 @@ def main():
# TODO(trozet): just parse all ds_opts as deploy vars one time
deploy_vars['sfc'] = ds_opts['sfc']
deploy_vars['vpn'] = ds_opts['vpn']
+ deploy_vars['l2gw'] = ds_opts.get('l2gw')
+ deploy_vars['sriov'] = ds_opts.get('sriov')
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
- post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
+ post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'post_deploy_overcloud.yml')
# Run per overcloud node
for node, ip in deploy_vars['overcloud_nodes'].items():
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
index a7f7d84..e8d8fbb 100644
--- a/apex/overcloud/config.py
+++ b/apex/overcloud/config.py
@@ -52,6 +52,9 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
if ds.get('dvr') is True:
nets['admin']['nic_mapping'][role]['phys_type'] = \
'linux_bridge'
+ else:
+ nets['external'][0]['nic_mapping'][role]['phys_type'] = \
+ 'linux_bridge'
elif ds['dataplane'] == 'ovs_dpdk':
ovs_dpdk_br = 'br-phy'
if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
@@ -66,9 +69,6 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
nets['tenant']['nic_mapping'][role]['interface-options'] =\
ds['performance'][role.title()]['vpp']['interface-options']
- if role == 'controller' and ds.get('sfc', None):
- ext_net = 'interface'
-
template_output = template.render(
nets=nets,
role=role,
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 809afc1..03e5652 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -11,14 +11,18 @@ import base64
import fileinput
import logging
import os
+import platform
import shutil
import uuid
import struct
import time
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
+from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
crypto_serialization
@@ -37,6 +41,8 @@ SDN_FILE_MAP = {
'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
},
+ 'l2gw': 'neutron-l2gw-opendaylight.yaml',
+ 'sriov': 'neutron-opendaylight-sriov.yaml',
'default': 'neutron-opendaylight.yaml',
},
'onos': {
@@ -69,19 +75,57 @@ OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
".noarch.rpm"
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
def build_sdn_env_list(ds, sdn_map, env_list=None):
+ """
+ Builds a list of SDN environment files to be used in the deploy cmd.
+
+ This function recursively searches an sdn_map. First the sdn controller is
+ matched and then the function looks for enabled features for that
+ controller to determine which environment files should be used. By
+ default the feature will be checked if set to true in deploy settings to be
+ added to the list. If a feature does not have a boolean value, then the
+ key and value pair to compare with are checked as a tuple (k,v).
+
+ :param ds: deploy settings
+ :param sdn_map: SDN map to recursively search
+ :param env_list: recursive var to hold previously found env_list
+ :return: A list of env files
+ """
if env_list is None:
env_list = list()
for k, v in sdn_map.items():
- if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+ if ds['sdn_controller'] == k or (k in ds and ds[k]):
if isinstance(v, dict):
+ # Append default SDN env file first
+ # The assumption is that feature-enabled SDN env files
+ # override and do not conflict with previously set default
+ # settings
+ if ds['sdn_controller'] == k and 'default' in v:
+ env_list.append(os.path.join(con.THT_ENV_DIR,
+ v['default']))
env_list.extend(build_sdn_env_list(ds, v))
+ # check if the value is not a boolean
+ elif isinstance(v, tuple):
+ if ds[k] == v[0]:
+ env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
else:
env_list.append(os.path.join(con.THT_ENV_DIR, v))
- elif isinstance(v, tuple):
- if ds[k] == v[0]:
- env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
if len(env_list) == 0:
try:
env_list.append(os.path.join(
@@ -92,6 +136,25 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
return env_list
+def get_docker_sdn_file(ds_opts):
+ """
+ Returns docker env file for detected SDN
+ :param ds_opts: deploy options
+ :return: docker THT env file for an SDN
+ """
+ # FIXME(trozet): We assume right now there is only one docker SDN file
+ docker_services = con.VALID_DOCKER_SERVICES
+ sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+ for sdn_file in sdn_env_list:
+ sdn_base = os.path.basename(sdn_file)
+ if sdn_base in docker_services:
+ if docker_services[sdn_base] is not None:
+ return os.path.join(con.THT_DOCKER_ENV_DIR,
+ docker_services[sdn_base])
+ else:
+ return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)
+
+
def create_deploy_cmd(ds, ns, inv, tmp_dir,
virtual, env_file='opnfv-environment.yaml',
net_data=False):
@@ -99,22 +162,46 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
logging.info("Creating deployment command")
deploy_options = ['network-environment.yaml']
+ ds_opts = ds['deploy_options']
+
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'docker.yaml'))
+
+ if ds['global_params']['ha_enabled']:
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'docker-ha.yaml'))
+ else:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'puppet-pacemaker.yaml'))
+
if env_file:
deploy_options.append(env_file)
- ds_opts = ds['deploy_options']
- deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+ if ds_opts['containers']:
+ deploy_options.append('docker-images.yaml')
+ sdn_docker_file = get_docker_sdn_file(ds_opts)
+ if sdn_docker_file:
+ deploy_options.append(sdn_docker_file)
+ deploy_options.append('sdn-images.yaml')
+ else:
+ deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
for k, v in OTHER_FILE_MAP.items():
if k in ds_opts and ds_opts[k]:
- deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
+ "{}.yaml".format(k)))
+ else:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
if ds_opts['ceph']:
- prep_storage_env(ds, tmp_dir)
+ prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
'storage-environment.yaml'))
- if ds['global_params']['ha_enabled']:
- deploy_options.append(os.path.join(con.THT_ENV_DIR,
- 'puppet-pacemaker.yaml'))
+ if ds_opts['sriov']:
+ prep_sriov_env(ds, tmp_dir)
if virtual:
deploy_options.append('virtual-environment.yaml')
@@ -128,6 +215,14 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
raise ApexDeployException("Invalid number of control or computes")
elif num_control > 1 and not ds['global_params']['ha_enabled']:
num_control = 1
+ if platform.machine() == 'aarch64':
+ # aarch64 deploys were not completing in the default 90 mins.
+ # Not sure if this is related to the hardware the OOO support
+ # was developed on or the virtualization support in CentOS
+ # Either way it will probably get better over time as the aarch
+ # support matures in CentOS and deploy time should be tested in
+ # the future so this multiplier can be removed.
+ con.DEPLOY_TIMEOUT *= 2
cmd = "openstack overcloud deploy --templates --timeout {} " \
.format(con.DEPLOY_TIMEOUT)
# build cmd env args
@@ -153,13 +248,18 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
return cmd
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+ patches=None, upstream=False):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
+ :param ns: network settings
:param img: sdn image
:param tmp_dir: dir to store modified sdn image
:param root_pw: password to configure for overcloud image
+ :param docker_tag: Docker image tag for RDO version (default None)
+ :param patches: List of patches to apply to overcloud image
+ :param upstream: (boolean) Indicates if upstream deployment or not
:return: None
"""
# TODO(trozet): Come up with a better way to organize this logic in this
@@ -172,6 +272,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
ds_opts = ds['deploy_options']
virt_cmds = list()
sdn = ds_opts['sdn_controller']
+ patched_containers = set()
# we need this due to rhbz #1436021
# fixed in systemd-219-37.el7
if sdn is not False:
@@ -186,6 +287,18 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
".service"
}])
+ if ns.get('http_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'http_proxy={}' >> /etc/environment".format(
+ ns['http_proxy'])})
+
+ if ns.get('https_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'https_proxy={}' >> /etc/environment".format(
+ ns['https_proxy'])})
+
if ds_opts['vpn']:
virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
virt_cmds.append({
@@ -248,7 +361,13 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
"/root/nosdn_vpp_rpms/*.rpm"}
])
- if sdn == 'opendaylight':
+ tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+ shutil.copyfile(img, tmp_oc_image)
+ logging.debug("Temporary overcloud image stored as: {}".format(
+ tmp_oc_image))
+
+ # TODO (trozet): remove this if block after Fraser
+ if sdn == 'opendaylight' and not upstream:
if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
virt_cmds.extend([
{con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
@@ -275,21 +394,49 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
{con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
ODL_NETVIRT_VPP_RPM)}
])
-
- if sdn == 'ovn':
+ elif sdn == 'opendaylight':
+ undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+ 'installer_vm']['ip']
+ oc_builder.inject_opendaylight(
+ odl_version=ds_opts['odl_version'],
+ image=tmp_oc_image,
+ tmp_dir=tmp_dir,
+ uc_ip=undercloud_admin_ip,
+ os_version=ds_opts['os_version'],
+ docker_tag=docker_tag,
+ )
+ if docker_tag:
+ patched_containers = patched_containers.union({'opendaylight'})
+
+ if patches:
+ if ds_opts['os_version'] == 'master':
+ branch = ds_opts['os_version']
+ else:
+ branch = "stable/{}".format(ds_opts['os_version'])
+ logging.info('Adding patches to overcloud')
+ patched_containers = patched_containers.union(
+ c_builder.add_upstream_patches(patches,
+ tmp_oc_image, tmp_dir,
+ branch,
+ uc_ip=undercloud_admin_ip,
+ docker_tag=docker_tag))
+ # if containers with ceph, and no ceph device we need to use a
+ # persistent loop device for Ceph OSDs
+ if docker_tag and not ds_opts.get('ceph_device', None):
+ tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+ with open(tmp_losetup, 'w') as fh:
+ fh.write(LOSETUP_SERVICE)
virt_cmds.extend([
- {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
- "*openvswitch*"},
- {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
- "*openvswitch*"}
+ {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
+ },
+ {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
+ {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
+ {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+ {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
-
- tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
- shutil.copyfile(img, tmp_oc_image)
- logging.debug("Temporary overcloud image stored as: {}".format(
- tmp_oc_image))
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
+ return patched_containers
def make_ssh_key():
@@ -400,8 +547,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
ns['domain_name']))
elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
if 'NeutronVPPAgentPhysnets' in line:
- output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
- format(tenant_nic['Controller']))
+ # VPP interface tap0 will be used for external network
+ # connectivity.
+ output_line = (" NeutronVPPAgentPhysnets: "
+ "'datacentre:{},external:tap0'"
+ .format(tenant_nic['Controller']))
elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
'dvr') is True:
if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
@@ -413,6 +563,13 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
elif 'ComputeServices' in line:
output_line = (" ComputeServices:\n"
" - OS::TripleO::Services::NeutronDhcpAgent")
+ # SRIOV networks are VLAN based provider networks. In order to simplify
+ # the deployment, nfv_sriov will be the default physnet. VLANs are not
+ # needed in advance, and the user will have to create the network
+ # specifying the segmentation-id.
+ if ds_opts['sriov']:
+ if 'NeutronNetworkVLANRanges' in line:
+ output_line = ("{},nfv_sriov'".format(line[:-1]))
if perf:
for role in 'NovaCompute', 'Controller':
@@ -484,11 +641,13 @@ def generate_ceph_key():
return base64.b64encode(header + key)
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
"""
Creates storage environment file for deployment. Source file is copied by
undercloud playbook to host.
:param ds:
+ :param ns:
+ :param virtual:
:param tmp_dir:
:return:
"""
@@ -510,9 +669,45 @@ def prep_storage_env(ds, tmp_dir):
elif 'CephAdminKey' in line:
print(" CephAdminKey: {}".format(generate_ceph_key().decode(
'utf-8')))
+ elif 'CephClientKey' in line:
+ print(" CephClientKey: {}".format(generate_ceph_key().decode(
+ 'utf-8')))
else:
print(line)
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+ if ds_opts['containers']:
+ undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+ 'installer_vm']['ip']
+ ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
+ docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
+ "{}-centos-7".format(undercloud_admin_ip,
+ ceph_version)
+ ceph_params = {
+ 'DockerCephDaemonImage': docker_image,
+ }
+ if not ds['global_params']['ha_enabled']:
+ ceph_params['CephPoolDefaultSize'] = 1
+
+ if virtual:
+ ceph_params['CephAnsibleExtraConfig'] = {
+ 'centos_package_dependencies': [],
+ 'ceph_osd_docker_memory_limit': '1g',
+ 'ceph_mds_docker_memory_limit': '1g',
+ }
+ ceph_params['CephPoolDefaultPgNum'] = 32
+ if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+ ceph_device = ds_opts['ceph_device']
+ else:
+ # TODO(trozet): make this DS default after Fraser
+ ceph_device = '/dev/loop3'
+
+ ceph_params['CephAnsibleDisksConfig'] = {
+ 'devices': [ceph_device],
+ 'journal_size': 512,
+ 'osd_scenario': 'collocated'
+ }
+ utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')
fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
@@ -520,12 +715,58 @@ def prep_storage_env(ds, tmp_dir):
))
-def external_network_cmds(ns):
+def prep_sriov_env(ds, tmp_dir):
+ """
+ Creates SRIOV environment file for deployment. Source file is copied by
+ undercloud playbook to host.
+ :param ds:
+ :param tmp_dir:
+ :return:
+ """
+ ds_opts = ds['deploy_options']
+ sriov_iface = ds_opts['sriov']
+ sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
+ if not os.path.isfile(sriov_file):
+ logging.error("sriov-environment file is not in tmp directory: {}. "
+ "Check if file was copied from "
+ "undercloud".format(tmp_dir))
+ raise ApexDeployException("sriov-environment file not copied from "
+ "undercloud")
+ # TODO(rnoriega): Instead of line editing, refactor this code to load
+ # yaml file into a dict, edit it and write the file back.
+ for line in fileinput.input(sriov_file, inplace=True):
+ line = line.strip('\n')
+ if 'NovaSchedulerDefaultFilters' in line:
+ print(" {}".format(line[3:]))
+ elif 'NovaSchedulerAvailableFilters' in line:
+ print(" {}".format(line[3:]))
+ elif 'NeutronPhysicalDevMappings' in line:
+ print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
+ .format(sriov_iface))
+ elif 'NeutronSriovNumVFs' in line:
+ print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
+ elif 'NovaPCIPassthrough' in line:
+ print(" NovaPCIPassthrough:")
+ elif 'devname' in line:
+ print(" - devname: \"{}\"".format(sriov_iface))
+ elif 'physical_network' in line:
+ print(" physical_network: \"nfv_sriov\"")
+ else:
+ print(line)
+
+
+def external_network_cmds(ns, ds):
"""
Generates external network openstack commands
:param ns: network settings
+ :param ds: deploy settings
:return: list of commands to configure external network
"""
+ ds_opts = ds['deploy_options']
+ external_physnet = 'datacentre'
+ if ds_opts['dataplane'] == 'fdio' and \
+ ds_opts['sdn_controller'] != 'opendaylight':
+ external_physnet = 'external'
if 'external' in ns.enabled_network_list:
net_config = ns['networks']['external'][0]
external = True
@@ -546,7 +787,8 @@ def external_network_cmds(ns):
'compute']['vlan'])
cmds.append("openstack network create external --project service "
"--external --provider-network-type {} "
- "--provider-physical-network datacentre".format(ext_type))
+ "--provider-physical-network {}"
+ .format(ext_type, external_physnet))
# create subnet command
cidr = net_config['cidr']
subnet_cmd = "openstack subnet create external-subnet --project " \
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index f2012b2..4f887ed 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -23,7 +23,10 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
'ceph',
'gluon',
'rt_kvm',
- 'os_version']
+ 'os_version',
+ 'l2gw',
+ 'sriov',
+ 'containers']
OPT_DEPLOY_SETTINGS = ['performance',
'vsperf',
@@ -115,6 +118,11 @@ class DeploySettings(dict):
raise DeploySettingsException(
"Invalid ODL version: {}".format(self[deploy_options][
'odl_version']))
+ elif req_set == 'sriov':
+ if self['deploy_options'][req_set] is True:
+ raise DeploySettingsException(
+ "Invalid SRIOV interface name: {}".format(
+ self['deploy_options']['sriov']))
if self['deploy_options']['odl_version'] == 'oxygen':
self['deploy_options']['odl_version'] = 'master'
diff --git a/apex/tests/config/98faaca.diff b/apex/tests/config/98faaca.diff
new file mode 100644
index 0000000..68a66fb
--- /dev/null
+++ b/apex/tests/config/98faaca.diff
@@ -0,0 +1,331 @@
+From 98faacad44e39a456d9fe1a1d21f5a65e8de4fc1 Mon Sep 17 00:00:00 2001
+From: Janki Chhatbar <jchhatba@redhat.com>
+Date: Tue, 23 Jan 2018 22:43:49 +0530
+Subject: [PATCH] Minor update steps for ODL
+
+Updating OpenStack (within release) means updating ODL from v1 to v1.1.
+This is done by "openstack overcloud update" which collects
+update_tasks. ODL needs 2 different steps to achieve this
+minor update. These are called Level1 and Level2. L1 is
+simple - stop ODL, update, start. This is taken care by paunch
+and no separate implementation is needed. L2 has extra steps
+which are implemented in update_tasks and post_update_tasks.
+
+Updating ODL within the same major release (1->1.1) consists of either
+L1 or L2 steps. These steps are decided from ODLUpdateLevel parameter
+specified in environments/services-docker/update-odl.yaml.
+
+Upgrading ODL to the next major release (1.1->2) requires
+only the L2 steps. These are implemented as upgrade_tasks and
+post_upgrade_tasks in https://review.openstack.org/489201.
+
+Steps involved in level 2 update are
+ 1. Block OVS instances to connect to ODL
+ 2. Set ODL upgrade flag to True
+ 3. Start ODL
+ 4. Start Neutron re-sync and wait for it to finish
+ 5. Delete OVS groups and ports
+ 6. Stop OVS
+ 7. Unblock OVS ports
+ 8. Start OVS
+ 9. Unset ODL upgrade flag
+
+These steps are exactly same as upgrade_tasks.
+The logic implemented is:
+follow upgrade_tasks; when update_level == 2
+
+Change-Id: Ie532800663dd24313a7350b5583a5080ddb796e7
+---
+
+diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2
+index 595e16c..c4fb05f 100644
+--- a/common/deploy-steps.j2
++++ b/common/deploy-steps.j2
+@@ -23,6 +23,7 @@
+ {% set post_upgrade_steps_max = 4 -%}
+ {% set fast_forward_upgrade_steps_max = 9 -%}
+ {% set fast_forward_upgrade_prep_steps_max = 3 -%}
++{% set post_update_steps_max = 4 -%}
+
+ heat_template_version: queens
+
+@@ -590,3 +591,15 @@
+ - include_tasks: {{role.name}}/fast_forward_upgrade_tasks.yaml
+ when: role_name == '{{role.name}}' and ansible_hostname == {{role.name}}[0]
+ {%- endfor %}
++ post_update_steps_tasks: |
++{%- for role in roles %}
++ - include: {{role.name}}/post_update_tasks.yaml
++ when: role_name == '{{role.name}}'
++{%- endfor %}
++ post_update_steps_playbook: |
++ - hosts: overcloud
++ tasks:
++ - include: post_update_steps_tasks.yaml
++ with_sequence: start=0 end={{post_update_steps_max-1}}
++ loop_control:
++ loop_var: step
+diff --git a/common/services.yaml b/common/services.yaml
+index 2a62c1b..c197b05 100644
+--- a/common/services.yaml
++++ b/common/services.yaml
+@@ -283,6 +283,16 @@
+ expression: coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+
++ PostUpdateTasks:
++ type: OS::Heat::Value
++ properties:
++ type: comma_delimited_list
++ value:
++ yaql:
++ # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
++ expression: coalesce($.data, []).where($ != null).select($.get('post_update_tasks')).where($ != null).flatten().distinct()
++ data: {get_attr: [ServiceChain, role_data]}
++
+ UpgradeBatchTasks:
+ type: OS::Heat::Value
+ properties:
+@@ -349,6 +359,7 @@
+ upgrade_tasks: {get_attr: [UpgradeTasks, value]}
+ post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]}
+ update_tasks: {get_attr: [UpdateTasks, value]}
++ post_update_tasks: {get_attr: [PostUpdateTasks, value]}
+ upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]}
+ service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+
+diff --git a/docker/services/opendaylight-api.yaml b/docker/services/opendaylight-api.yaml
+index 6175db9..3cafe53 100644
+--- a/docker/services/opendaylight-api.yaml
++++ b/docker/services/opendaylight-api.yaml
+@@ -44,6 +44,14 @@
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
++ ODLUpdateLevel:
++ default: 1
++ description: Specify the level of update
++ type: number
++ constraints:
++ - allowed_values:
++ - 1
++ - 2
+
+ conditions:
+
+@@ -167,23 +175,25 @@
+ - opendaylight_enabled.rc == 0
+ service: name=opendaylight state=stopped enabled=no
+ # Containarised deployment upgrade steps
+- - name: remove journal and snapshots
+- when: step|int == 0
+- file:
+- path: /var/lib/opendaylight/{{item}}
+- state: absent
+- with_items:
+- - snapshots
+- - journal
+- - name: Set ODL upgrade flag to True
+- copy:
+- dest: /var/lib/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
+- content: |
+- <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
+- <upgradeInProgress>true</upgradeInProgress>
+- </config>
+- when: step|int == 1
+- post_upgrade_tasks:
++ - name: ODL container L2 update and upgrade tasks
++ block: &odl_container_upgrade_tasks
++ - name: remove journal and snapshots
++ when: step|int == 0
++ file:
++ path: /var/lib/opendaylight/{{item}}
++ state: absent
++ with_items:
++ - snapshots
++ - journal
++ - name: Set ODL upgrade flag to True
++ copy:
++ dest: /var/lib/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
++ content: |
++ <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
++ <upgradeInProgress>true</upgradeInProgress>
++ </config>
++ when: step|int == 1
++ post_upgrade_tasks: &odl_container_post_upgrade_tasks
+ - name: Unset upgrade flag in ODL
+ shell:
+ str_replace:
+@@ -192,7 +202,20 @@
+ -H "Content-Type: application/json" \
+ $ODL_URI/restconf/config/genius-mdsalutil:config'
+ params:
+- $ODL_USERNAME: {get_param: [OpenDaylightBase, OpenDaylightUsername]}
+- $ODL_PASSWORD: {get_param: [OpenDaylightBase, OpenDaylightPassword]}
++ $ODL_USERNAME: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::username']}
++ $ODL_PASSWORD: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::password']}
+ $ODL_URI: {get_param: [EndpointMap, OpenDaylightInternal, uri]}
+ when: step|int == 0
++ update_tasks:
++ - name: Get ODL update level
++ block: &get_odl_update_level
++ - name: store update level to update_level variable
++ set_fact:
++ odl_update_level: {get_param: ODLUpdateLevel}
++ - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
++ block: *odl_container_upgrade_tasks
++ when: odl_update_level == 2
++ post_update_tasks:
++ - block: *get_odl_update_level
++ - block: *odl_container_post_upgrade_tasks
++ when: odl_update_level == 2
+\ No newline at end of file
+diff --git a/environments/services-docker/update-odl.yaml b/environments/services-docker/update-odl.yaml
+new file mode 100644
+index 0000000..87d74ef
+--- /dev/null
++++ b/environments/services-docker/update-odl.yaml
+@@ -0,0 +1,11 @@
++# This file describes parameters needed for ODL update.
++# This file is to be used along with other env files during
++# level 2 minor update.
++# Level 2 update involves yang changes in ODL within same ODL release and
++# hence needs DB wipe and resync.
++# Level 1 is simple update - stop ODL, pull new image, start ODL
++# This file is not to be used during level1 update or major upgrade.
++# In case of doubt, please reach out to ODL developers on #tripleo IRC channel
++
++parameter_defaults:
++ ODLUpdateLevel: 2
+\ No newline at end of file
+diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
+index 3390645..958e1bb 100644
+--- a/puppet/services/opendaylight-ovs.yaml
++++ b/puppet/services/opendaylight-ovs.yaml
+@@ -104,6 +104,14 @@
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
++ ODLUpdateLevel:
++ default: 1
++ description: Specify the level of update
++ type: number
++ constraints:
++ - allowed_values:
++ - 1
++ - 2
+
+ parameter_groups:
+ - label: deprecated
+@@ -230,14 +238,16 @@
+ - openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
+ # Container upgrade steps.
+- - name: Block connections to ODL. #This rule will be inserted at the top.
+- iptables: chain=OUTPUT action=insert protocol=tcp destination_port={{ item }} jump=DROP
+- when: step|int == 0
+- with_items:
+- - 6640
+- - 6653
+- - 6633
+- post_upgrade_tasks:
++ - name: ODL container L2 update and upgrade tasks
++ block: &odl_container_upgrade_tasks
++ - name: Block connections to ODL. #This rule will be inserted at the top.
++ iptables: chain=OUTPUT action=insert protocol=tcp destination_port={{ item }} jump=DROP
++ when: step|int == 0
++ with_items:
++ - 6640
++ - 6653
++ - 6633
++ post_upgrade_tasks: &odl_container_post_upgrade_tasks
+ - name: Check service openvswitch is running
+ command: systemctl is-active --quiet openvswitch
+ tags: common
+@@ -260,6 +270,20 @@
+ - name: start openvswitch service
+ when: step|int == 3
+ service : name=openvswitch state=started
++ update_tasks:
++ - name: Get ODL update level
++ block: &get_odl_update_level
++ - name: store update level to update_level variable
++ set_fact:
++ odl_update_level: {get_param: ODLUpdateLevel}
++ - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
++ block: *odl_container_upgrade_tasks
++ when: odl_update_level == 2
++ post_update_tasks:
++ - block: *get_odl_update_level
++ - block: *odl_container_post_upgrade_tasks
++ when: odl_update_level == 2
++
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+@@ -267,4 +291,4 @@
+ - service: ovs
+ network: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+ type: node
+- - null
++ - null
+\ No newline at end of file
+diff --git a/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml b/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
+index 45703d0..e2943de 100644
+--- a/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
++++ b/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
+@@ -1,6 +1,6 @@
+ ---
+
+-features:
++upgrade:
+ - Add ODL upgradability
+ Steps of upgrade are as follows
+ 1. Block OVS instances to connect to ODL done in upgrade_tasks
+diff --git a/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml b/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml
+new file mode 100644
+index 0000000..1bcf8ed
+--- /dev/null
++++ b/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml
+@@ -0,0 +1,19 @@
++---
++features:
++ - Minor update ODL steps are added. ODL minor update (within same ODL
++ release) can have 2 different workflows. These are called level 1 and
++ level 2. Level 1 is simple - stop, update and start ODL. Level 2 is
++ complex and involves yang model changes. This requires wiping of
++ DB and resync to repopulate the data.
++ Steps involved in level 2 update are
++ 1. Block OVS instances to connect to ODL
++ 2. Set ODL upgrade flag to True
++ 3. Start ODL
++ 4. Start Neutron re-sync and wait for it to finish
++ 5. Delete OVS groups and ports
++ 6. Stop OVS
++ 7. Unblock OVS ports
++ 8. Start OVS
++ 9. Unset ODL upgrade flag
++ To achieve L2 update, use "-e environments/services-docker/
++ update-odl.yaml" along with other env files to the update command.
+\ No newline at end of file
+diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
+index 59473f5..9ab6a87 100755
+--- a/tools/yaml-validate.py
++++ b/tools/yaml-validate.py
+@@ -46,11 +46,11 @@
+ OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+ 'fast_forward_upgrade_tasks',
+ 'post_upgrade_tasks', 'update_tasks',
+- 'service_config_settings', 'host_prep_tasks',
+- 'metadata_settings', 'kolla_config',
+- 'global_config_settings', 'logging_source',
+- 'logging_groups', 'external_deploy_tasks',
+- 'external_post_deploy_tasks',
++ 'post_update_tasks', 'service_config_settings',
++ 'host_prep_tasks', 'metadata_settings',
++ 'kolla_config', 'global_config_settings',
++ 'logging_source', 'logging_groups',
++ 'external_deploy_tasks', 'external_post_deploy_tasks',
+ 'docker_config_scripts', 'step_config']
+ REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
+ 'config_image']
diff --git a/apex/tests/test_apex_build_utils.py b/apex/tests/test_apex_build_utils.py
index d9d542d..f18103c 100644
--- a/apex/tests/test_apex_build_utils.py
+++ b/apex/tests/test_apex_build_utils.py
@@ -9,17 +9,20 @@
import argparse
import git
+import os
+import unittest
from mock import patch
from apex import build_utils
+from apex.tests import constants as con
from nose.tools import (
assert_is_instance,
assert_raises)
-class TestBuildUtils(object):
+class TestBuildUtils(unittest.TestCase):
@classmethod
def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@@ -165,3 +168,19 @@ class TestBuildUtils(object):
def test_main_debug(self, mock_get_parser):
with patch.object(build_utils.sys, 'argv', self.sys_argv_debug):
build_utils.main()
+
+ def test_strip_patch_sections(self):
+ with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+ dummy_patch = fh.read()
+ tmp_patch = build_utils.strip_patch_sections(dummy_patch)
+ self.assertNotRegex(tmp_patch, 'releasenotes')
+ self.assertNotRegex(tmp_patch, 'Minor update ODL steps')
+ self.assertNotRegex(tmp_patch, 'Steps of upgrade are as follows')
+ self.assertNotRegex(tmp_patch, 'Steps involved in level 2 update')
+
+ def test_strip_no_patch_sections(self):
+ with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+ dummy_patch = fh.read()
+ tmp_patch = build_utils.strip_patch_sections(dummy_patch,
+ sections=[])
+ self.assertEqual(dummy_patch, tmp_patch)
diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py
index c32f72c..d501746 100644
--- a/apex/tests/test_apex_common_builder.py
+++ b/apex/tests/test_apex_common_builder.py
@@ -10,11 +10,20 @@
import unittest
from apex.builders import common_builder as c_builder
+from apex.builders import exceptions
from apex.common import constants as con
from mock import patch
from mock import mock_open
from mock import MagicMock
+DOCKER_YAML = {
+ 'resource_registry': {
+ 'OS::TripleO::Services::NovaApi': '../docker/services/nova-api.yaml',
+ 'OS::TripleO::Services::NovaConductor':
+ '../docker/services/nova-conductor.yaml'
+ }
+}
+
class TestCommonBuilder(unittest.TestCase):
@classmethod
@@ -68,6 +77,54 @@ class TestCommonBuilder(unittest.TestCase):
mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
@patch('builtins.open', mock_open())
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_add_upstream_patches_docker_puppet(
+ self, mock_customize, mock_get_patch):
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/puppet-tripleo'
+ }]
+ project_path = '/etc/puppet/modules/tripleo'
+ patch_file = "{}.patch".format(change_id)
+ patch_file_path = "/dummytmp/{}".format(patch_file)
+ test_virt_ops = [
+ {con.VIRT_INSTALL: 'patch'},
+ {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+ project_path)},
+ {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+ project_path, patch_file)}]
+ mock_get_patch.return_value = 'some random diff'
+ c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+ @patch('builtins.open', mock_open())
+ @patch('apex.builders.common_builder.project_to_docker_image')
+ @patch('apex.builders.overcloud_builder.build_dockerfile')
+ @patch('apex.build_utils.get_patch')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_add_upstream_patches_docker_python(
+ self, mock_customize, mock_get_patch, mock_build_docker_file,
+ mock_project2docker):
+ mock_project2docker.return_value = ['NovaApi']
+ change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+ patches = [{
+ 'change-id': change_id,
+ 'project': 'openstack/nova'
+ }]
+ mock_get_patch.return_value = 'some random diff'
+ services = c_builder.add_upstream_patches(patches, 'dummy.qcow2',
+ '/dummytmp/',
+ uc_ip='192.0.2.1',
+ docker_tag='latest')
+ assert not mock_customize.called
+ assert mock_build_docker_file.called
+ self.assertSetEqual(services, {'NovaApi'})
+
+ @patch('builtins.open', mock_open())
@patch('apex.virtual.utils.virt_customize')
def test_add_repo(self, mock_customize):
c_builder.add_repo('fake/url', 'dummyrepo', 'dummy.qcow2',
@@ -85,3 +142,15 @@ class TestCommonBuilder(unittest.TestCase):
self.assertEqual(c_builder.create_git_archive('fake/url', 'dummyrepo',
'/dummytmp/'),
'/dummytmp/dummyrepo.tar')
+
+ def test_project_to_docker_image(self):
+ found_services = c_builder.project_to_docker_image(project='nova')
+ assert 'nova-api' in found_services
+
+ @patch('apex.common.utils.open_webpage')
+ def test_project_to_docker_image_bad_web_content(
+ self, mock_open_web):
+ mock_open_web.return_value = b'{"blah": "blah"}'
+ self.assertRaises(exceptions.ApexCommonBuilderException,
+ c_builder.project_to_docker_image,
+ 'nova')
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index 6f2a947..0e4041c 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -12,12 +12,14 @@ import os
import shutil
import urllib.error
+from apex.common import exceptions
from apex.common import utils
from apex.settings.network_settings import NetworkSettings
from apex.tests.constants import (
TEST_CONFIG_DIR,
TEST_PLAYBOOK_DIR)
+from mock import patch, mock_open
from nose.tools import (
assert_equal,
assert_is_instance,
@@ -25,6 +27,7 @@ from nose.tools import (
assert_raises)
NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
+a_mock_open = mock_open(read_data=None)
class TestCommonUtils:
@@ -100,3 +103,48 @@ class TestCommonUtils:
url, ['dummy_test.tar'])
assert os.path.isfile('/tmp/fetch_test/test.txt')
shutil.rmtree('/tmp/fetch_test')
+
+ def test_nofetch_upstream_and_unpack(self):
+ test_file = 'overcloud-full.tar.md5'
+ url = 'https://images.rdoproject.org/master/delorean/' \
+ 'current-tripleo/stable/'
+ os.makedirs('/tmp/fetch_test', exist_ok=True)
+ target = "/tmp/fetch_test/{}".format(test_file)
+ open(target, 'w').close()
+ target_mtime = os.path.getmtime(target)
+ utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+ url, [test_file], fetch=False)
+ post_target_mtime = os.path.getmtime(target)
+ shutil.rmtree('/tmp/fetch_test')
+ assert_equal(target_mtime, post_target_mtime)
+
+ def test_nofetch_upstream_and_unpack_no_target(self):
+ test_file = 'overcloud-full.tar.md5'
+ url = 'https://images.rdoproject.org/master/delorean/' \
+ 'current-tripleo/stable/'
+ utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+ url, [test_file])
+ assert os.path.isfile("/tmp/fetch_test/{}".format(test_file))
+ shutil.rmtree('/tmp/fetch_test')
+
+ def test_open_webpage(self):
+ output = utils.open_webpage('http://opnfv.org')
+ assert output is not None
+
+ def test_open_invalid_webpage(self):
+ assert_raises(urllib.request.URLError, utils.open_webpage,
+ 'http://inv4lIdweb-page.com')
+
+ @patch('builtins.open', a_mock_open)
+ @patch('yaml.safe_dump')
+ @patch('yaml.safe_load')
+ def test_edit_tht_env(self, mock_yaml_load, mock_yaml_dump):
+ settings = {'SomeParameter': 'some_value'}
+ mock_yaml_load.return_value = {
+ 'parameter_defaults': {'SomeParameter': 'dummy'}
+ }
+ utils.edit_tht_env('/dummy-environment.yaml', 'parameter_defaults',
+ settings)
+ new_data = {'parameter_defaults': settings}
+ mock_yaml_dump.assert_called_once_with(new_data, a_mock_open(),
+ default_flow_style=False)
diff --git a/apex/tests/test_apex_deploy.py b/apex/tests/test_apex_deploy.py
index 403b709..6c2a185 100644
--- a/apex/tests/test_apex_deploy.py
+++ b/apex/tests/test_apex_deploy.py
@@ -143,7 +143,8 @@ class TestDeploy(unittest.TestCase):
'sfc': False,
'vpn': False,
'yardstick': 'test',
- 'os_version': DEFAULT_OS_VERSION}}
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': False}}
args = mock_parser.return_value.parse_args.return_value
args.virtual = False
args.quickstart = False
@@ -216,7 +217,8 @@ class TestDeploy(unittest.TestCase):
'sfc': False,
'vpn': False,
'yardstick': 'test',
- 'os_version': DEFAULT_OS_VERSION}}
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': False}}
args = mock_parser.return_value.parse_args.return_value
args.virtual = True
args.quickstart = False
@@ -236,3 +238,67 @@ class TestDeploy(unittest.TestCase):
args.virt_compute_ram = 16
args.virt_default_ram = 10
main()
+
+ @patch('apex.deploy.c_builder')
+ @patch('apex.deploy.uc_builder')
+ @patch('apex.deploy.oc_builder')
+ @patch('apex.deploy.network_data.create_network_data')
+ @patch('apex.deploy.shutil')
+ @patch('apex.deploy.oc_deploy')
+ @patch('apex.deploy.uc_lib')
+ @patch('apex.deploy.build_vms')
+ @patch('apex.deploy.Inventory')
+ @patch('apex.deploy.virt_utils')
+ @patch('apex.deploy.oc_cfg')
+ @patch('apex.deploy.parsers')
+ @patch('apex.deploy.utils')
+ @patch('apex.deploy.NetworkEnvironment')
+ @patch('apex.deploy.NetworkSettings')
+ @patch('apex.deploy.DeploySettings')
+ @patch('apex.deploy.os')
+ @patch('apex.deploy.json')
+ @patch('apex.deploy.jumphost')
+ @patch('apex.deploy.validate_cross_settings')
+ @patch('apex.deploy.validate_deploy_args')
+ @patch('apex.deploy.create_deploy_parser')
+ @patch('builtins.open', a_mock_open, create=True)
+ def test_main_virt_containers_upstream(
+ self, mock_parser, mock_val_args, mock_cross_sets, mock_jumphost,
+ mock_json, mock_os, mock_deploy_sets, mock_net_sets, mock_net_env,
+ mock_utils, mock_parsers, mock_oc_cfg, mock_virt_utils,
+ mock_inv, mock_build_vms, mock_uc_lib, mock_oc_deploy,
+ mock_shutil, mock_network_data, mock_oc_builder,
+ mock_uc_builder, mock_c_builder):
+
+ ds_opts_dict = {'global_params': MagicMock(),
+ 'deploy_options': {'gluon': False,
+ 'congress': False,
+ 'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs',
+ 'sfc': False,
+ 'vpn': False,
+ 'yardstick': 'test',
+ 'os_version': DEFAULT_OS_VERSION,
+ 'containers': True}}
+ args = mock_parser.return_value.parse_args.return_value
+ args.virtual = True
+ args.quickstart = False
+ args.debug = True
+ args.virt_default_ram = 10
+ args.ha_enabled = True
+ args.virt_compute_nodes = 1
+ args.virt_compute_ram = None
+ args.virt_default_ram = 12
+ args.upstream = True
+ net_sets = mock_net_sets.return_value
+ net_sets.enabled_network_list = ['admin']
+ deploy_sets = mock_deploy_sets.return_value
+ deploy_sets.__getitem__.side_effect = ds_opts_dict.__getitem__
+ deploy_sets.__contains__.side_effect = ds_opts_dict.__contains__
+ main()
+ args.virt_compute_ram = 16
+ args.virt_default_ram = 10
+ main()
+ assert mock_oc_deploy.prep_image.called
+ # TODO(trozet) add assertions here with arguments for functions in
+ # deploy main
diff --git a/apex/tests/test_apex_overcloud_builder.py b/apex/tests/test_apex_overcloud_builder.py
index e9a6e6c..46b5f87 100644
--- a/apex/tests/test_apex_overcloud_builder.py
+++ b/apex/tests/test_apex_overcloud_builder.py
@@ -11,7 +11,9 @@ import unittest
from apex.builders import overcloud_builder as oc_builder
from apex.common import constants as con
-from mock import patch
+from mock import patch, mock_open
+
+a_mock_open = mock_open(read_data=None)
class TestOvercloudBuilder(unittest.TestCase):
@@ -37,14 +39,69 @@ class TestOvercloudBuilder(unittest.TestCase):
mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
archive = '/dummytmp/puppet-opendaylight.tar'
test_virt_ops = [
- {con.VIRT_INSTALL: 'opendaylight'},
{con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
{con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
- "puppet-opendaylight.tar"}
+ "puppet-opendaylight.tar"},
+ {con.VIRT_INSTALL: 'opendaylight'}
]
oc_builder.inject_opendaylight(con.DEFAULT_ODL_VERSION, 'dummy.qcow2',
- '/dummytmp/')
+ '/dummytmp/', uc_ip='192.0.2.2',
+ os_version=con.DEFAULT_OS_VERSION)
+ assert mock_git_archive.called
+ assert mock_add_repo.called
+ mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+ @patch('apex.builders.overcloud_builder.build_dockerfile')
+ @patch('apex.builders.common_builder.create_git_archive')
+ @patch('apex.builders.common_builder.add_repo')
+ @patch('apex.virtual.utils.virt_customize')
+ def test_inject_opendaylight_docker(self, mock_customize, mock_add_repo,
+ mock_git_archive, mock_build_docker):
+ mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
+ archive = '/dummytmp/puppet-opendaylight.tar'
+ test_virt_ops = [
+ {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
+ {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
+ {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
+ "puppet-opendaylight.tar"},
+ ]
+ oc_builder.inject_opendaylight('oxygen', 'dummy.qcow2',
+ '/dummytmp/', uc_ip='192.0.2.2',
+ os_version=con.DEFAULT_OS_VERSION,
+ docker_tag='latest')
+ odl_url = "https://nexus.opendaylight.org/content/repositories" \
+ "/opendaylight-oxygen-epel-7-x86_64-devel/"
+ docker_cmds = [
+ "RUN yum remove opendaylight -y",
+ "RUN echo $'[opendaylight]\\n\\",
+ "baseurl={}\\n\\".format(odl_url),
+ "gpgcheck=0\\n\\",
+ "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
+ "RUN yum -y install opendaylight"
+ ]
+ src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
assert mock_git_archive.called
assert mock_add_repo.called
+ assert mock_build_docker.called_once_with(
+ 'opendaylight', '/dummytmp', docker_cmds, src_img_uri
+ )
mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+ @patch('builtins.open', a_mock_open)
+ @patch('os.makedirs')
+ @patch('os.path.isfile')
+ @patch('os.path.isdir')
+ def test_build_dockerfile(self, mock_isdir, mock_isfile, mock_makedirs):
+ src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
+ oc_builder.build_dockerfile('nova-api', '/tmpdummy/', ['RUN dummy'],
+ src_img_uri)
+ a_mock_open.assert_called_with(
+ '/tmpdummy/containers/nova-api/Dockerfile', 'a+')
+ a_mock_open().write.assert_called_once_with('RUN dummy')
+
+ @patch('tarfile.open')
+ @patch('os.path.isdir')
+ def test_archive_docker_patches(self, mock_isdir, mock_tarfile):
+ oc_builder.archive_docker_patches('/tmpdummy/')
+ assert mock_tarfile.called
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index 59e9048..ae2e8f0 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -7,6 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import os
import sys
import unittest
@@ -24,8 +25,11 @@ from apex.overcloud.deploy import make_ssh_key
from apex.overcloud.deploy import prep_env
from apex.overcloud.deploy import generate_ceph_key
from apex.overcloud.deploy import prep_storage_env
+from apex.overcloud.deploy import prep_sriov_env
from apex.overcloud.deploy import external_network_cmds
from apex.overcloud.deploy import create_congress_cmds
+from apex.overcloud.deploy import SDN_FILE_MAP
+from apex.overcloud.deploy import get_docker_sdn_file
from nose.tools import (
assert_regexp_matches,
@@ -70,19 +74,40 @@ class TestOvercloudDeploy(unittest.TestCase):
res = '/usr/share/openstack-tripleo-heat-templates/environments/test'
assert_equal(build_sdn_env_list(ds, sdn_map), [res])
+ def test_build_sdn_env_list_with_string(self):
+ ds = {'sdn_controller': 'opendaylight',
+ 'sriov': 'xxx'}
+ prefix = '/usr/share/openstack-tripleo-heat-templates/environments'
+ res = [os.path.join(prefix, 'neutron-opendaylight.yaml'),
+ os.path.join(prefix, 'neutron-opendaylight-sriov.yaml')]
+ assert_equal(build_sdn_env_list(ds, SDN_FILE_MAP), res)
+
+ def test_build_sdn_env_list_with_default(self):
+ ds = {'sdn_controller': 'opendaylight',
+ 'vpn': True}
+ prefix = '/usr/share/openstack-tripleo-heat-templates/environments'
+ res = [os.path.join(prefix, 'neutron-opendaylight.yaml'),
+ os.path.join(prefix, 'neutron-bgpvpn-opendaylight.yaml')]
+ assert_equal(build_sdn_env_list(ds, SDN_FILE_MAP), res)
+
+ @patch('apex.overcloud.deploy.prep_sriov_env')
@patch('apex.overcloud.deploy.prep_storage_env')
@patch('apex.overcloud.deploy.build_sdn_env_list')
@patch('builtins.open', mock_open())
- def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage):
+ def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage,
+ mock_prep_sriov):
mock_sdn_list.return_value = []
- ds = {'deploy_options': MagicMock(),
+ ds = {'deploy_options':
+ {'ha_enabled': True,
+ 'congress': True,
+ 'tacker': True,
+ 'containers': False,
+ 'barometer': True,
+ 'ceph': False,
+ 'sriov': False
+ },
'global_params': MagicMock()}
- ds['global_params'].__getitem__.side_effect = \
- lambda i: True if i == 'ha_enabled' else MagicMock()
- ds['deploy_options'].__getitem__.side_effect = \
- lambda i: True if i == 'congress' else MagicMock()
- ds['deploy_options'].__contains__.side_effect = \
- lambda i: True if i == 'congress' else MagicMock()
+
ns = {'ntp': ['ntp']}
inv = MagicMock()
inv.get_node_counts.return_value = (3, 2)
@@ -96,11 +121,50 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_in('--control-scale 3', result_cmd)
assert_in('--compute-scale 2', result_cmd)
+ @patch('apex.overcloud.deploy.prep_sriov_env')
+ @patch('apex.overcloud.deploy.prep_storage_env')
+ @patch('builtins.open', mock_open())
+ def test_create_deploy_cmd_containers_sdn(self, mock_prep_storage,
+ mock_prep_sriov):
+ ds = {'deploy_options':
+ {'ha_enabled': True,
+ 'congress': False,
+ 'tacker': False,
+ 'containers': True,
+ 'barometer': False,
+ 'ceph': True,
+ 'sdn_controller': 'opendaylight',
+ 'sriov': False
+ },
+ 'global_params': MagicMock()}
+
+ ns = {'ntp': ['ntp']}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
+ virt = True
+ result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
+ assert_in('--ntp-server ntp', result_cmd)
+ assert_not_in('enable_tacker.yaml', result_cmd)
+ assert_not_in('enable_congress.yaml', result_cmd)
+ assert_not_in('enable_barometer.yaml', result_cmd)
+ assert_in('virtual-environment.yaml', result_cmd)
+ assert_in('--control-scale 3', result_cmd)
+ assert_in('--compute-scale 2', result_cmd)
+ assert_in('docker-images.yaml', result_cmd)
+ assert_in('sdn-images.yaml', result_cmd)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+ '/docker.yaml', result_cmd)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments/'
+ 'storage-environment.yaml', result_cmd)
+ assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+ '/services-docker/neutron-opendaylight.yaml', result_cmd)
+
+ @patch('apex.overcloud.deploy.prep_sriov_env')
@patch('apex.overcloud.deploy.prep_storage_env')
@patch('apex.overcloud.deploy.build_sdn_env_list')
@patch('builtins.open', mock_open())
def test_create_deploy_cmd_no_ha_bm(self, mock_sdn_list,
- mock_prep_storage):
+ mock_prep_storage, mock_prep_sriov):
mock_sdn_list.return_value = []
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
@@ -119,9 +183,11 @@ class TestOvercloudDeploy(unittest.TestCase):
assert_not_in('enable_congress.yaml', result_cmd)
assert_not_in('enable_barometer.yaml', result_cmd)
+ @patch('apex.overcloud.deploy.prep_sriov_env')
@patch('apex.overcloud.deploy.prep_storage_env')
@patch('apex.overcloud.deploy.build_sdn_env_list')
- def test_create_deploy_cmd_raises(self, mock_sdn_list, mock_prep_storage):
+ def test_create_deploy_cmd_raises(self, mock_sdn_list, mock_prep_storage,
+ mock_prep_sriov):
mock_sdn_list.return_value = []
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
@@ -139,12 +205,14 @@ class TestOvercloudDeploy(unittest.TestCase):
def test_prep_image(self, mock_os_path, mock_shutil, mock_virt_utils):
ds_opts = {'dataplane': 'fdio',
'sdn_controller': 'opendaylight',
- 'odl_version': 'master'}
+ 'odl_version': 'master',
+ 'sriov': False}
ds = {'deploy_options': MagicMock(),
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
@patch('apex.overcloud.deploy.virt_utils')
@@ -159,7 +227,8 @@ class TestOvercloudDeploy(unittest.TestCase):
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
@patch('apex.overcloud.deploy.virt_utils')
@@ -178,8 +247,38 @@ class TestOvercloudDeploy(unittest.TestCase):
lambda i: ds_opts.get(i, MagicMock())
ds['deploy_options'].__contains__.side_effect = \
lambda i: True if i in ds_opts else MagicMock()
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
+ mock_virt_utils.virt_customize.assert_called()
+
+ @patch('apex.overcloud.deploy.c_builder')
+ @patch('apex.overcloud.deploy.oc_builder')
+ @patch('apex.overcloud.deploy.virt_utils')
+ @patch('apex.overcloud.deploy.shutil')
+ @patch('apex.overcloud.deploy.os.path')
+ @patch('builtins.open', mock_open())
+ def test_prep_image_sdn_odl_upstream_containers_patches(
+ self, mock_os_path, mock_shutil, mock_virt_utils,
+ mock_oc_builder, mock_c_builder):
+ ds_opts = {'dataplane': 'ovs',
+ 'sdn_controller': 'opendaylight',
+ 'odl_version': con.DEFAULT_ODL_VERSION,
+ 'odl_vpp_netvirt': True}
+ ds = {'deploy_options': MagicMock(),
+ 'global_params': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ ds['deploy_options'].__contains__.side_effect = \
+ lambda i: True if i in ds_opts else MagicMock()
+ ns = MagicMock()
+ mock_c_builder.add_upstream_patches.return_value = ['nova-api']
+ patches = ['dummy_nova_patch']
+ rv = prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test',
+ docker_tag='latest', patches=patches, upstream=True)
+ assert mock_oc_builder.inject_opendaylight.called
mock_virt_utils.virt_customize.assert_called()
+ assert mock_c_builder.add_upstream_patches.called
+ self.assertListEqual(sorted(rv), ['nova-api', 'opendaylight'])
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@@ -194,7 +293,8 @@ class TestOvercloudDeploy(unittest.TestCase):
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
@patch('apex.overcloud.deploy.virt_utils')
@@ -209,14 +309,15 @@ class TestOvercloudDeploy(unittest.TestCase):
'global_params': MagicMock()}
ds['deploy_options'].__getitem__.side_effect = \
lambda i: ds_opts.get(i, MagicMock())
- prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+ ns = MagicMock()
+ prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
@patch('apex.overcloud.deploy.os.path.isfile')
def test_prep_image_no_image(self, mock_isfile):
mock_isfile.return_value = False
assert_raises(ApexDeployException, prep_image,
- {}, 'undercloud.qcow2', '/tmp')
+ {}, {}, 'undercloud.qcow2', '/tmp')
def test_make_ssh_key(self):
priv, pub = make_ssh_key()
@@ -236,6 +337,7 @@ class TestOvercloudDeploy(unittest.TestCase):
{'sdn_controller': 'opendaylight',
'odl_vpp_routing_node': 'test',
'dataplane': 'ovs_dpdk',
+ 'sriov': 'xxx',
'performance': {'Compute': {'vpp': {'main-core': 'test',
'corelist-workers': 'test'},
'ovs': {'dpdk_cores': 'test'},
@@ -278,6 +380,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ds = {'deploy_options':
{'sdn_controller': False,
'dataplane': 'fdio',
+ 'sriov': 'xxx',
'performance': {'Compute': {},
'Controller': {}}}}
ns = {'domain_name': 'test.domain',
@@ -301,7 +404,8 @@ class TestOvercloudDeploy(unittest.TestCase):
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
- assert_in('NeutronVPPAgentPhysnets: \'datacentre:tenant_nic\'',
+ assert_in('NeutronVPPAgentPhysnets: '
+ '\'datacentre:tenant_nic,external:tap0\'',
output)
assert_in('NeutronVPPAgentPhysnets', output)
finally:
@@ -317,6 +421,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ds = {'deploy_options':
{'sdn_controller': 'opendaylight',
'dataplane': 'fdio',
+ 'sriov': 'xxx',
'dvr': True}}
ns = {'domain_name': 'test.domain',
'networks':
@@ -356,21 +461,135 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_ceph_key):
mock_fileinput.input.return_value = \
['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
- ds = {'deploy_options': MagicMock()}
- ds['deploy_options'].__getitem__.side_effect = \
- lambda i: '/dev/sdx' if i == 'ceph_device' else MagicMock()
- ds['deploy_options'].__contains__.side_effect = \
- lambda i: True if i == 'ceph_device' else MagicMock()
- prep_storage_env(ds, '/tmp')
+ ds = {'deploy_options': {
+ 'ceph_device': '/dev/sdx',
+ 'containers': False
+ }}
+ ns = {}
+ prep_storage_env(ds, ns, virtual=False, tmp_dir='/tmp')
+
+ @patch('apex.overcloud.deploy.utils.edit_tht_env')
+ @patch('apex.overcloud.deploy.generate_ceph_key')
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_storage_env_containers(self, mock_isfile, mock_fileinput,
+ mock_ceph_key, mock_edit_tht):
+ mock_fileinput.input.return_value = \
+ ['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
+ ds = {'deploy_options': {
+ 'ceph_device': '/dev/sdx',
+ 'containers': True,
+ 'os_version': 'master'
+ }, 'global_params': {'ha_enabled': False}}
+ ns = {'networks': {con.ADMIN_NETWORK: {'installer_vm':
+ {'ip': '192.0.2.1'}}}
+ }
+ prep_storage_env(ds, ns, virtual=True, tmp_dir='/tmp')
+ ceph_params = {
+ 'DockerCephDaemonImage':
+ '192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos'
+ '-7',
+ 'CephPoolDefaultSize': 1,
+ 'CephAnsibleExtraConfig': {
+ 'centos_package_dependencies': [],
+ 'ceph_osd_docker_memory_limit': '1g',
+ 'ceph_mds_docker_memory_limit': '1g'
+ },
+ 'CephPoolDefaultPgNum': 32,
+ 'CephAnsibleDisksConfig': {
+ 'devices': ['/dev/sdx'],
+ 'journal_size': 512,
+ 'osd_scenario': 'collocated'
+ }
+ }
+ mock_edit_tht.assert_called_with('/tmp/storage-environment.yaml',
+ 'parameter_defaults',
+ ceph_params)
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_storage_env_raises(self, mock_isfile):
mock_isfile.return_value = False
ds = {'deploy_options': MagicMock()}
- assert_raises(ApexDeployException, prep_storage_env, ds, '/tmp')
+ ns = {}
+ assert_raises(ApexDeployException, prep_storage_env, ds,
+ ns, virtual=False, tmp_dir='/tmp')
+
+ @patch('apex.overcloud.deploy.generate_ceph_key')
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_sriov_env(self, mock_isfile, mock_fileinput, mock_ceph_key):
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'sriov': 'xxx'}}
+ try:
+ # Swap stdout
+ saved_stdout = sys.stdout
+ out = StringIO()
+ sys.stdout = out
+ # Run tests
+ mock_fileinput.input.return_value = \
+ ['# NovaSchedulerDefaultFilters',
+ '# NovaSchedulerAvailableFilters',
+ '#NeutronPhysicalDevMappings: "datacentre:ens20f2"',
+ '#NeutronSriovNumVFs: \"ens20f2:5\"',
+ '#NovaPCIPassthrough:',
+ '# - devname: \"ens20f2\"',
+ '# physical_network: \"datacentre\"']
+ prep_sriov_env(ds, '/tmp')
+ output = out.getvalue().strip()
+ assert_in('NovaSchedulerDefaultFilters', output)
+ assert_in('NovaSchedulerAvailableFilters', output)
+ assert_in('NeutronPhysicalDevMappings: \"nfv_sriov:xxx\"', output)
+ assert_in('NeutronSriovNumVFs: \"xxx:8\"', output)
+ assert_in('NovaPCIPassthrough:', output)
+ assert_in('- devname: \"xxx\"', output)
+ assert_in('physical_network: \"nfv_sriov\"', output)
+ finally:
+ # put stdout back
+ sys.stdout = saved_stdout
+
+ @patch('apex.overcloud.deploy.os.path.isfile')
+ @patch('builtins.open', mock_open())
+ def test_prep_sriov_env_raises(self, mock_isfile):
+ ds_opts = {'sriov': True}
+ ds = {'deploy_options': MagicMock()}
+ ds['deploy_options'].__getitem__.side_effect = \
+ lambda i: ds_opts.get(i, MagicMock())
+ mock_isfile.return_value = False
+ ds = {'deploy_options': MagicMock()}
+ assert_raises(ApexDeployException, prep_sriov_env, ds, '/tmp')
def test_external_network_cmds(self):
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs'}}
+
+ cidr = MagicMock()
+ cidr.version = 6
+ ns_dict = {'networks':
+ {'external': [{'floating_ip_range': (0, 1),
+ 'nic_mapping':
+ {'compute': {'vlan': 'native'}},
+ 'gateway': 'gw',
+ 'cidr': cidr}]}}
+ ns = MagicMock()
+ ns.enabled_network_list = ['external']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
+ cmds = ' '.join(external_network_cmds(ns, ds))
+ assert_in('--external', cmds)
+ assert_in('--allocation-pool start=0,end=1', cmds)
+ assert_in('--gateway gw', cmds)
+ assert_in('--network external', cmds)
+ assert_in('--provider-physical-network datacentre', cmds)
+
+ def test_external_network_cmds_nosdn_fdio(self):
+ ds = {'deploy_options':
+ {'sdn_controller': False,
+ 'dataplane': 'fdio'}}
+
cidr = MagicMock()
cidr.version = 6
ns_dict = {'networks':
@@ -382,13 +601,18 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
ns.enabled_network_list = ['external']
ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
- cmds = ' '.join(external_network_cmds(ns))
+ cmds = ' '.join(external_network_cmds(ns, ds))
assert_in('--external', cmds)
assert_in('--allocation-pool start=0,end=1', cmds)
assert_in('--gateway gw', cmds)
assert_in('--network external', cmds)
+ assert_in('--provider-physical-network external', cmds)
def test_external_network_cmds_no_ext(self):
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs'}}
+
cidr = MagicMock()
cidr.version = 6
ns_dict = {'apex':
@@ -402,8 +626,7 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
ns.enabled_network_list = ['admin']
ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
- external_network_cmds(ns)
- cmds = ' '.join(external_network_cmds(ns))
+ cmds = ' '.join(external_network_cmds(ns, ds))
assert_in('--external', cmds)
assert_in('--allocation-pool start=0,end=1', cmds)
assert_in('--network external', cmds)
@@ -417,3 +640,19 @@ class TestOvercloudDeploy(unittest.TestCase):
def test_create_congress_cmds_raises(self, mock_parsers):
mock_parsers.return_value.__getitem__.side_effect = KeyError()
assert_raises(KeyError, create_congress_cmds, 'overcloud_file')
+
+ def test_get_docker_sdn_file(self):
+ ds_opts = {'ha_enabled': True,
+ 'congress': True,
+ 'tacker': True,
+ 'containers': False,
+ 'barometer': True,
+ 'ceph': False,
+ 'sdn_controller': 'opendaylight'
+ }
+ output = get_docker_sdn_file(ds_opts)
+ self.assertEqual(output,
+ ('/usr/share/openstack-tripleo-heat-templates'
+ '/environments/services-docker/neutron-opendaylight'
+ '.yaml')
+ )
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index 9458bf9..fce7a55 100644
--- a/apex/tests/test_apex_undercloud.py
+++ b/apex/tests/test_apex_undercloud.py
@@ -7,6 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import ipaddress
import libvirt
import os
import subprocess
@@ -121,7 +122,8 @@ class TestUndercloud(unittest.TestCase):
mock_generate_config, mock_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
ns = MagicMock()
- uc.configure(ns, 'playbook', '/tmp/dir')
+ ds = MagicMock()
+ uc.configure(ns, ds, 'playbook', '/tmp/dir')
@patch('apex.undercloud.undercloud.utils')
@patch.object(Undercloud, 'generate_config', return_value={})
@@ -131,10 +133,11 @@ class TestUndercloud(unittest.TestCase):
mock_generate_config, mock_utils):
uc = Undercloud('img_path', 'tplt_path', external_network=True)
ns = MagicMock()
+ ds = MagicMock()
subps_err = subprocess.CalledProcessError(1, 'cmd')
mock_utils.run_ansible.side_effect = subps_err
assert_raises(ApexUndercloudException,
- uc.configure, ns, 'playbook', '/tmp/dir')
+ uc.configure, ns, ds, 'playbook', '/tmp/dir')
@patch('apex.undercloud.undercloud.os.remove')
@patch('apex.undercloud.undercloud.os.path')
@@ -185,12 +188,42 @@ class TestUndercloud(unittest.TestCase):
@patch.object(Undercloud, '_get_vm', return_value=None)
@patch.object(Undercloud, 'create')
def test_generate_config(self, mock_get_vm, mock_create):
- ns_net = MagicMock()
- ns_net.__getitem__.side_effect = \
- lambda i: '1234/24' if i is 'cidr' else MagicMock()
- ns = {'apex': MagicMock(),
- 'dns-domain': 'dns',
- 'networks': {'admin': ns_net,
- 'external': [ns_net]}}
-
- Undercloud('img_path', 'tplt_path').generate_config(ns)
+ ns = MagicMock()
+ ns.enabled_network_list = ['admin', 'external']
+ ns_dict = {
+ 'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin':
+ {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+ 'installer_vm': {'ip': '192.0.2.1',
+ 'vlan': 'native'},
+ 'dhcp_range': ['192.0.2.15', '192.0.2.30']
+ },
+ 'external':
+ [{'enabled': True,
+ 'cidr': ipaddress.ip_network('192.168.0.0/24'),
+ 'installer_vm': {'ip': '192.168.0.1',
+ 'vlan': 'native'}
+ }]
+ }
+ }
+ ns.__getitem__.side_effect = ns_dict.__getitem__
+ ns.__contains__.side_effect = ns_dict.__contains__
+ ds = {'global_params': {}}
+
+ Undercloud('img_path', 'tplt_path').generate_config(ns, ds)
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ @patch('apex.undercloud.undercloud.virt_utils')
+ def test_update_delorean(self, mock_vutils, mock_uc_create, mock_get_vm):
+ uc = Undercloud('img_path', 'tmplt_path', external_network=True)
+ uc._update_delorean_repo()
+ download_cmd = (
+ "curl -L -f -o "
+ "/etc/yum.repos.d/deloran.repo "
+ "https://trunk.rdoproject.org/centos7-{}"
+ "/current-tripleo/delorean.repo".format(
+ constants.DEFAULT_OS_VERSION))
+ test_ops = [{'--run-command': download_cmd}]
+ mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
diff --git a/apex/tests/test_apex_virtual_utils.py b/apex/tests/test_apex_virtual_utils.py
index 643069f..a9eb78d 100644
--- a/apex/tests/test_apex_virtual_utils.py
+++ b/apex/tests/test_apex_virtual_utils.py
@@ -12,6 +12,7 @@ import unittest
from mock import patch
+from apex.virtual.exceptions import ApexVirtualException
from apex.virtual.utils import DEFAULT_VIRT_IP
from apex.virtual.utils import get_virt_ip
from apex.virtual.utils import generate_inventory
@@ -66,13 +67,30 @@ class TestVirtualUtils(unittest.TestCase):
assert_is_instance(generate_inventory('target_file', ha_enabled=True),
dict)
+ @patch('apex.virtual.utils.get_virt_ip')
+ @patch('apex.virtual.utils.subprocess.check_output')
@patch('apex.virtual.utils.iptc')
@patch('apex.virtual.utils.subprocess.check_call')
@patch('apex.virtual.utils.vbmc_lib')
- def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc):
+ def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc,
+ mock_check_output, mock_get_virt_ip):
+ mock_get_virt_ip.return_value = '192.168.122.1'
+ mock_check_output.return_value = b'blah |dummy \nstatus | running'
host_setup({'test': 2468})
mock_subprocess.assert_called_with(['vbmc', 'start', 'test'])
+ @patch('apex.virtual.utils.get_virt_ip')
+ @patch('apex.virtual.utils.subprocess.check_output')
+ @patch('apex.virtual.utils.iptc')
+ @patch('apex.virtual.utils.subprocess.check_call')
+ @patch('apex.virtual.utils.vbmc_lib')
+ def test_host_setup_vbmc_fails(self, mock_vbmc_lib, mock_subprocess,
+ mock_iptc, mock_check_output,
+ mock_get_virt_ip):
+ mock_get_virt_ip.return_value = '192.168.122.1'
+ mock_check_output.return_value = b'blah |dummy \nstatus | stopped'
+ assert_raises(ApexVirtualException, host_setup, {'test': 2468})
+
@patch('apex.virtual.utils.iptc')
@patch('apex.virtual.utils.subprocess.check_call')
@patch('apex.virtual.utils.vbmc_lib')
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 013570d..d76174b 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -31,8 +31,10 @@ class Undercloud:
"""
def __init__(self, image_path, template_path,
root_pw=None, external_network=False,
- image_name='undercloud.qcow2'):
+ image_name='undercloud.qcow2',
+ os_version=constants.DEFAULT_OS_VERSION):
self.ip = None
+ self.os_version = os_version
self.root_pw = root_pw
self.external_net = external_network
self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
@@ -61,27 +63,40 @@ class Undercloud:
if self.external_net:
networks.append('external')
console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
+ root = 'vda' if platform.machine() == 'aarch64' else 'sda'
self.vm = vm_lib.create_vm(name='undercloud',
image=self.volume,
baremetal_interfaces=networks,
direct_boot='overcloud-full',
kernel_args=['console={}'.format(console),
- 'root=/dev/sda'],
+ 'root=/dev/{}'.format(root)],
default_network=True,
template_dir=self.template_path)
self.setup_volumes()
self.inject_auth()
+ self._update_delorean_repo()
- def _set_ip(self):
- ip_out = self.vm.interfaceAddresses(
+ @staticmethod
+ def _get_ip(vm):
+ ip_out = vm.interfaceAddresses(
libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0)
if ip_out:
for (name, val) in ip_out.items():
for ipaddr in val['addrs']:
if ipaddr['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
- self.ip = ipaddr['addr']
- return True
+ return ipaddr['addr']
+
+ def _set_ip(self):
+ ip = self._get_ip(self.vm)
+ if ip:
+ self.ip = ip
+ return True
+
+ @staticmethod
+ def get_ip():
+ vm = Undercloud._get_vm()
+ return Undercloud._get_ip(vm)
def start(self):
"""
@@ -110,19 +125,24 @@ class Undercloud:
"Unable to find IP for undercloud. Check if VM booted "
"correctly")
- def configure(self, net_settings, playbook, apex_temp_dir):
+ def configure(self, net_settings, deploy_settings,
+ playbook, apex_temp_dir, virtual_oc=False):
"""
Configures undercloud VM
- :param net_setings: Network settings for deployment
+ :param net_settings: Network settings for deployment
+ :param deploy_settings: Deployment settings for deployment
:param playbook: playbook to use to configure undercloud
:param apex_temp_dir: temporary apex directory to hold configs/logs
+ :param virtual_oc: Boolean to determine if overcloud is virt
:return: None
"""
logging.info("Configuring Undercloud...")
# run ansible
- ansible_vars = Undercloud.generate_config(net_settings)
+ ansible_vars = Undercloud.generate_config(net_settings,
+ deploy_settings)
ansible_vars['apex_temp_dir'] = apex_temp_dir
+ ansible_vars['virtual_overcloud'] = virtual_oc
try:
utils.run_ansible(ansible_vars, playbook, host=self.ip,
user='stack')
@@ -179,21 +199,28 @@ class Undercloud:
virt_utils.virt_customize(virt_ops, self.volume)
@staticmethod
- def generate_config(ns):
+ def generate_config(ns, ds):
"""
Generates a dictionary of settings for configuring undercloud
:param ns: network settings to derive undercloud settings
+ :param ds: deploy settings to derive undercloud settings
:return: dictionary of settings
"""
ns_admin = ns['networks']['admin']
intro_range = ns['apex']['networks']['admin']['introspection_range']
config = dict()
+ # Check if this is an ARM deployment
+ config['aarch64'] = platform.machine() == 'aarch64'
+ # Configuration for undercloud.conf
config['undercloud_config'] = [
"enable_ui false",
"undercloud_update_packages false",
"undercloud_debug false",
"inspection_extras false",
+ "ipxe_enabled {}".format(
+ str(ds['global_params'].get('ipxe', True) and
+ not config['aarch64'])),
"undercloud_hostname undercloud.{}".format(ns['dns-domain']),
"local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
str(ns_admin['cidr']).split('/')[1]),
@@ -225,8 +252,32 @@ class Undercloud:
"prefix": str(ns_external['cidr']).split('/')[1],
"enabled": ns_external['enabled']
}
-
- # Check if this is an ARM deployment
- config['aarch64'] = platform.machine() == 'aarch64'
+ # TODO(trozet): clean this logic up and merge with above
+ if 'external' in ns.enabled_network_list:
+ nat_cidr = ns_external['cidr']
+ else:
+ nat_cidr = ns['networks']['admin']['cidr']
+ config['nat_cidr'] = str(nat_cidr)
+ if nat_cidr.version == 6:
+ config['nat_network_ipv6'] = True
+ else:
+ config['nat_network_ipv6'] = False
+ config['http_proxy'] = ns.get('http_proxy', '')
+ config['https_proxy'] = ns.get('https_proxy', '')
return config
+
+ def _update_delorean_repo(self):
+ if utils.internet_connectivity():
+ logging.info('Updating delorean repo on Undercloud')
+ delorean_repo = (
+ "https://trunk.rdoproject.org/centos7-{}"
+ "/current-tripleo/delorean.repo".format(self.os_version))
+ cmd = ("curl -L -f -o "
+ "/etc/yum.repos.d/deloran.repo {}".format(delorean_repo))
+ try:
+ virt_utils.virt_customize([{constants.VIRT_RUN_CMD: cmd}],
+ self.volume)
+ except Exception:
+ logging.warning("Failed to download and update delorean repo "
+ "for Undercloud")
diff --git a/apex/utils.py b/apex/utils.py
new file mode 100644
index 0000000..f791461
--- /dev/null
+++ b/apex/utils.py
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# TODO(trozet) migrate rest of utils.sh here
+
+import argparse
+import datetime
+import logging
+import os
+import sys
+import tempfile
+
+from apex.common import constants
+from apex.common import parsers
+from apex.undercloud import undercloud as uc_lib
+from apex.common import utils
+
+VALID_UTILS = ['fetch_logs']
+START_TIME = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M")
+APEX_TEMP_DIR = tempfile.mkdtemp(prefix="apex-logs-{}-".format(START_TIME))
+
+
+def fetch_logs(args):
+ uc_ip = uc_lib.Undercloud.get_ip()
+ if not uc_ip:
+ raise Exception('No Undercloud IP found')
+ logging.info("Undercloud IP is: {}".format(uc_ip))
+ fetch_vars = dict()
+ fetch_vars['stackrc'] = 'source /home/stack/stackrc'
+ fetch_vars['apex_temp_dir'] = APEX_TEMP_DIR
+ fetch_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'fetch_overcloud_nodes.yml')
+ try:
+ utils.run_ansible(fetch_vars, fetch_playbook, host=uc_ip,
+ user='stack', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Retrieved overcloud nodes info")
+ except Exception:
+ logging.error("Failed to retrieve overcloud nodes. Please check log")
+ raise
+ nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
+ fetch_vars['overcloud_nodes'] = parsers.parse_nova_output(nova_output)
+ fetch_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
+ 'GlobalKnownHostsFile=/dev/null -o ' \
+ 'UserKnownHostsFile=/dev/null -o ' \
+ 'LogLevel=error'
+ fetch_playbook = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
+ 'fetch_overcloud_logs.yml')
+ # Run per overcloud node
+ for node, ip in fetch_vars['overcloud_nodes'].items():
+ logging.info("Executing fetch logs overcloud playbook on "
+ "node {}".format(node))
+ try:
+ utils.run_ansible(fetch_vars, fetch_playbook, host=ip,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Logs retrieved for node {}".format(node))
+ except Exception:
+ logging.error("Log retrieval failed "
+ "for node {}. Please check log".format(node))
+ raise
+ logging.info("Log retrieval complete and stored in {}".format(
+ APEX_TEMP_DIR))
+
+
+def execute_actions(args):
+ for action in VALID_UTILS:
+ if hasattr(args, action) and getattr(args, action):
+ util_module = __import__('apex').utils
+ func = getattr(util_module, action)
+ logging.info("Executing action: {}".format(action))
+ func(args)
+
+
+def main():
+ util_parser = argparse.ArgumentParser()
+ util_parser.add_argument('-f', '--fetch-logs',
+ dest='fetch_logs',
+ required=False,
+ default=False,
+ action='store_true',
+ help='Fetch all overcloud logs')
+ util_parser.add_argument('--lib-dir',
+ default='/usr/share/opnfv-apex',
+ help='Directory path for apex ansible '
+ 'and third party libs')
+ args = util_parser.parse_args(sys.argv[1:])
+ os.makedirs(os.path.dirname('./apex_util.log'), exist_ok=True)
+ formatter = '%(asctime)s %(levelname)s: %(message)s'
+ logging.basicConfig(filename='./apex_util.log',
+ format=formatter,
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=logging.DEBUG)
+ console = logging.StreamHandler()
+ console.setLevel(logging.DEBUG)
+ console.setFormatter(logging.Formatter(formatter))
+ logging.getLogger('').addHandler(console)
+
+ execute_actions(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
index 3b2c446..ba0398b 100755
--- a/apex/virtual/configure_vm.py
+++ b/apex/virtual/configure_vm.py
@@ -118,9 +118,9 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
'user_interface': '',
}
- # assign scsi as default for aarch64
+ # assign virtio as default for aarch64
if arch == 'aarch64' and diskbus == 'sata':
- diskbus = 'scsi'
+ diskbus = 'virtio'
# Configure the bus type for the target disk device
params['diskbus'] = diskbus
nicparams = {
diff --git a/apex/virtual/exceptions.py b/apex/virtual/exceptions.py
new file mode 100644
index 0000000..e3dff51
--- /dev/null
+++ b/apex/virtual/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexVirtualException(Exception):
+ pass
diff --git a/apex/virtual/utils.py b/apex/virtual/utils.py
index 226af1b..8b24bc4 100644
--- a/apex/virtual/utils.py
+++ b/apex/virtual/utils.py
@@ -18,6 +18,8 @@ import xml.etree.ElementTree as ET
from apex.common import utils as common_utils
from apex.virtual import configure_vm as vm_lib
+from apex.virtual import exceptions as exc
+from time import sleep
from virtualbmc import manager as vbmc_lib
DEFAULT_RAM = 8192
@@ -131,11 +133,39 @@ def host_setup(node):
chain.insert_rule(rule)
try:
subprocess.check_call(['vbmc', 'start', name])
- logging.debug("Started vbmc for domain {}".format(name))
+ logging.debug("Started VBMC for domain {}".format(name))
except subprocess.CalledProcessError:
- logging.error("Failed to start vbmc for {}".format(name))
+ logging.error("Failed to start VBMC for {}".format(name))
raise
- logging.debug('vmbcs setup: {}'.format(vbmc_manager.list()))
+
+ logging.info("Checking VBMC {} is up".format(name))
+ is_running = False
+ for x in range(0, 4):
+ logging.debug("Polling to see if VBMC is up, attempt {}".format(x))
+ try:
+ output = subprocess.check_output(['vbmc', 'show', name],
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ logging.warning('Unable to issue "vbmc show" cmd')
+ continue
+ for line in output.decode('utf-8').split('\n'):
+ if 'status' in line:
+ if 'running' in line:
+ is_running = True
+ break
+ else:
+ logging.debug('VBMC status is not "running"')
+ break
+ if is_running:
+ break
+ sleep(1)
+ if is_running:
+ logging.info("VBMC {} is up and running".format(name))
+ else:
+ logging.error("Failed to verify VBMC is running")
+ raise exc.ApexVirtualException("Failed to bring up vbmc "
+ "{}".format(name))
+ logging.debug('VBMCs setup: {}'.format(vbmc_manager.list()))
def virt_customize(ops, target):
diff --git a/build/Makefile b/build/Makefile
index 805cf22..fb6734b 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -276,6 +276,7 @@ iso: iso-clean images rpms $(CENTISO)
cd $(BUILD_DIR)/centos/Packages && yumdownloader ipxe-roms-qemu python34-idna python34-pycparser python-crypto python-httplib2
cd $(BUILD_DIR)/centos/Packages && yumdownloader python-jinja2 python-keyczar python-paramiko sshpass python-ecdsa python34-ply
cd $(BUILD_DIR)/centos/Packages && yumdownloader libvirt-python python-lxml python-passlib python2-jmespath
+ cd $(BUILD_DIR)/centos/Packages && yumdownloader python34-urllib3 python34-pysocks python34-requests python34-chardet
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-asn1crypto-0.22.0-1.el7.centos.noarch.rpm
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-cryptography-2.0.3-1.el7.centos.x86_64.rpm
@@ -286,6 +287,11 @@ iso: iso-clean images rpms $(CENTISO)
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-pycrypto-2.6.1-1.el7.centos.x86_64.rpm
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-pyghmi-1.0.22-1.el7.centos.noarch.rpm
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-virtualbmc-1.2.0-1.el7.centos.noarch.rpm
+ cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-smmap2-2.0.3-1.el7.centos.noarch.rpm
+ cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-pygerrit2-2.0.3-1.el7.centos.noarch.rpm
+ cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-gitdb2-2.0.3-1.el7.centos.noarch.rpm
+ cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-GitPython-2.1.7-1.el7.centos.noarch.rpm
+ cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-distro-1.2.0-1.el7.centos.noarch.rpm
# regenerate yum repo data
@echo "Generating new yum metadata"
createrepo --update -g $(BUILD_ROOT)/c7-opnfv-x86_64-comps.xml $(BUILD_DIR)/centos
diff --git a/build/barometer-install.sh b/build/barometer-install.sh
index 15753a4..0ea401d 100755
--- a/build/barometer-install.sh
+++ b/build/barometer-install.sh
@@ -22,14 +22,15 @@ source ./variables.sh
# Versions/branches
COLLECTD_OPENSTACK_PLUGINS_BRANCH="stable/pike"
-ARCH="6.el7.centos.x86_64.rpm"
+ARCH="8.el7.centos.x86_64.rpm"
+
# don't fail because of missing certificate
GETFLAG="--no-check-certificate"
# Locations of repos
ARTIFACTS_BAROM="artifacts.opnfv.org/barometer"
COLLECTD_OPENSTACK_REPO="https://github.com/openstack/collectd-ceilometer-plugin"
-PUPPET_BAROMETER_REPO="https://github.com/johnhinman/puppet-barometer"
+PUPPET_BAROMETER_REPO="https://github.com/opnfv/barometer.git"
# upload barometer packages tar, extract, and install
@@ -58,22 +59,38 @@ function barometer_pkgs {
| cut -d'-' -f9)
RDT_SUFFIX=$INTEL_RDT_VER-1.el7.centos.x86_64.rpm
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-devel-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-utils-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_events-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_stats-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-virt-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-$RDT_SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-devel-$RDT_SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-python-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp_agent-$SUFFIX
- wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-intel_rdt-$SUFFIX
- curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
-
- tar cfz collectd.tar.gz *.rpm get-pip.py
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-devel-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-utils-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-python-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_events-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_stats-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-${RDT_SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-devel-${RDT_SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-intel_rdt-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp_agent-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-virt-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-sensors-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ceph-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl_json-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-apache-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-write_http-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-mysql-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ping-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-smart-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl_xml-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-disk-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-rrdcached-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-iptables-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ipmi-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-netlink-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-rrdtool-${SUFFIX}
+ wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-lvm-${SUFFIX}
+
+ tar cfz collectd.tar.gz *.rpm
cp collectd.tar.gz ${BUILD_DIR}
popd > /dev/null
@@ -87,8 +104,8 @@ function barometer_pkgs {
# get the barometer puppet module and tar it
rm -rf puppet-barometer
- git clone $PUPPET_BAROMETER_REPO
- pushd puppet-barometer/ > /dev/null
+ git clone $PUPPET_BAROMETER_REPO puppet-barometer
+ pushd puppet-barometer/puppet-barometer/ > /dev/null
git archive --format=tar.gz HEAD > ${BUILD_DIR}/puppet-barometer.tar.gz
popd > /dev/null
@@ -108,16 +125,19 @@ function barometer_pkgs {
--upload ${BUILD_DIR}/puppet-barometer.tar.gz:/etc/puppet/modules/ \
--run-command 'tar xfz /opt/collectd.tar.gz -C /opt' \
--install libstatgrab,log4cplus,rrdtool,rrdtool-devel \
- --install mcelog,python34,python34-libs,python34-devel \
+ --install mcelog,python34,python34-libs,python34-devel,python34-pip \
--install libvirt,libvirt-devel,gcc \
-a $OVERCLOUD_IMAGE
LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
- --run-command 'python3.4 /opt/get-pip.py' \
--run-command 'pip3 install requests libvirt-python pbr babel future six' \
-a $OVERCLOUD_IMAGE
LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
+ --run-command 'yum remove -y collectd-write_sensu-5.8.0-3.el7.x86_64' \
+ -a $OVERCLOUD_IMAGE
+
+ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
--run-command "yum install -y \
/opt/libcollectdclient-${SUFFIX} \
/opt/libcollectdclient-devel-${SUFFIX} \
@@ -131,7 +151,24 @@ function barometer_pkgs {
/opt/collectd-intel_rdt-${SUFFIX} \
/opt/collectd-snmp-${SUFFIX} \
/opt/collectd-snmp_agent-${SUFFIX} \
- /opt/collectd-virt-${SUFFIX}" \
+ /opt/collectd-virt-${SUFFIX} \
+ /opt/collectd-sensors-${SUFFIX} \
+ /opt/collectd-ceph-${SUFFIX} \
+ /opt/collectd-curl_json-${SUFFIX} \
+ /opt/collectd-apache-${SUFFIX} \
+ /opt/collectd-write_http-${SUFFIX} \
+ /opt/collectd-mysql-${SUFFIX} \
+ /opt/collectd-ping-${SUFFIX} \
+ /opt/collectd-smart-${SUFFIX} \
+ /opt/collectd-curl_xml-${SUFFIX} \
+ /opt/collectd-disk-${SUFFIX} \
+ /opt/collectd-rrdcached-${SUFFIX} \
+ /opt/collectd-iptables-${SUFFIX} \
+ /opt/collectd-curl-${SUFFIX} \
+ /opt/collectd-ipmi-${SUFFIX} \
+ /opt/collectd-netlink-${SUFFIX} \
+ /opt/collectd-rrdtool-${SUFFIX} \
+ /opt/collectd-lvm-${SUFFIX}" \
-a $OVERCLOUD_IMAGE
# install collectd-openstack-plugins
@@ -149,4 +186,3 @@ function barometer_pkgs {
--run-command 'mkdir -p /etc/collectd/collectd.conf.d' \
-a $OVERCLOUD_IMAGE
}
-
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 93d3dc1..959a392 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2016-10-14
description: >
Software Config to drive os-net-config to configure multiple interfaces
@@ -78,192 +78,200 @@ parameters:
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- {%- if not nets['external'][0]['enabled'] or nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
- type: ovs_bridge
- name: {get_input: bridge_name}
- members:
- -
- type: interface
- name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
- # force the MAC address of the bridge to this interface
- primary: true
- {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
- -
- type: vlan
- vlan_id: {get_param: ExternalNetworkVlanID}
- addresses:
+ str_replace:
+ template:
+ get_file: /usr/share/openstack-tripleo-heat-templates/network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ -
+ {%- if not nets['external'][0]['enabled'] or nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
+ type: ovs_bridge
+ {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
+ name: br-isolated
+ {%- else %}
+ name: br-ex
+ {%- endif %}
+ members:
-
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- {%- endif %}
- {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] is number %}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
- {%- endif %}
- {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] is number %}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- {%- endif %}
- {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] is number %}
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
- addresses:
+ type: interface
+ name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
+ # force the MAC address of the bridge to this interface
+ primary: true
+ {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
+ -
+ type: vlan
+ vlan_id: {get_param: ExternalNetworkVlanID}
+ addresses:
-
- ip_netmask: {get_param: InternalApiIpSubnet}
- {%- endif %}
- {%- else %}
- type: {{ nets['admin']['nic_mapping'][role]['phys_type'] }}
- {%- if nets['admin']['nic_mapping'][role]['phys_type'] == 'linux_bridge' %}
- name: br-ctlplane
- members:
- -
- type: interface
- name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
- primary: true
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ default: true
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ {%- endif %}
+ {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] is number %}
+ -
+ type: vlan
+ vlan_id: {get_param: TenantNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ {%- endif %}
+ {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] is number %}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ {%- endif %}
+ {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] is number %}
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ {%- endif %}
{%- else %}
- name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
- {%- endif %}
- {%- endif %}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
- list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- {%- if external_net_af == 6 or role == 'compute' or not nets['external'][0]['enabled'] %}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
+ type: {{ nets['admin']['nic_mapping'][role]['phys_type'] }}
+ {%- if nets['admin']['nic_mapping'][role]['phys_type'] == 'linux_bridge' %}
+ name: br-ctlplane
+ members:
+ -
+ type: interface
+ name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
+ primary: true
+ {%- else %}
+ name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
{%- endif %}
-
- {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
- {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
- -
- type: ovs_user_bridge
- name: {{ ovs_dpdk_bridge }}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
- members:
- -
- type: ovs_dpdk_port
- name: dpdk0
- driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }}
- members:
- -
- type: interface
- name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
- # force the MAC address of the bridge to this interface
- primary: true
- {%- else %}
- -
- type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
- name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
- {%- if 'uio-driver' in nets['tenant']['nic_mapping'][role] %}
- uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio-driver'] }}
- {%- endif %}
- {%- if 'interface-options' in nets['tenant']['nic_mapping'][role] %}
- options: '{{ nets['tenant']['nic_mapping'][role]['interface-options'] }}'
- {%- endif %}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
- {%- endif %}
- {%- endif %}
- {%- if nets['external'][0]['enabled'] and external_net_type != 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
- -
- type: {{ nets['external'][0]['nic_mapping'][role]['phys_type'] }}
- name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
- {%- if 'uio-driver' in nets['external'][0]['nic_mapping'][role] %}
- uio_driver: {{ nets['external'][0]['nic_mapping'][role]['uio-driver'] }}
- {%- endif %}
- {%- if role == 'controller' %}
- dns_servers: {get_param: DnsServers}
{%- endif %}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- -
- {%- if role == 'controller' %}
- default: true
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ {%- if external_net_af == 6 or role == 'compute' or not nets['external'][0]['enabled'] %}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
{%- endif %}
- ip_netmask: 0.0.0.0/0
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- {%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
- -
+
+ {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
{%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
- type: ovs_user_bridge
+ -
+ type: ovs_user_bridge
+ name: {{ ovs_dpdk_bridge }}
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ members:
+ -
+ type: ovs_dpdk_port
+ name: dpdk0
+ driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }}
+ members:
+ -
+ type: interface
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+ # force the MAC address of the bridge to this interface
+ primary: true
{%- else %}
- type: ovs_bridge
+ -
+ type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+ {%- if 'uio-driver' in nets['tenant']['nic_mapping'][role] %}
+ uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio-driver'] }}
+ {%- endif %}
+ {%- if 'interface-options' in nets['tenant']['nic_mapping'][role] %}
+ options: '{{ nets['tenant']['nic_mapping'][role]['interface-options'] }}'
+ {%- endif %}
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ {%- endif %}
{%- endif %}
- name: {get_input: bridge_name}
- use_dhcp: false
- members:
- -
- type: interface
- name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
- # force the MAC address of the bridge to this interface
- primary: true
- {%- if role == 'controller' %}
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- -
- default: true
- ip_netmask: 0.0.0.0/0
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ {%- if nets['external'][0]['enabled'] and external_net_type != 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
+ -
+ type: {{ nets['external'][0]['nic_mapping'][role]['phys_type'] }}
+ name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
+ {%- if 'uio-driver' in nets['external'][0]['nic_mapping'][role] %}
+ uio_driver: {{ nets['external'][0]['nic_mapping'][role]['uio-driver'] }}
+ {%- endif %}
+ {%- if role == 'controller' %}
+ dns_servers: {get_param: DnsServers}
+ {%- endif %}
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ {%- if role == 'controller' %}
+ default: true
+ {%- endif %}
+ ip_netmask: 0.0.0.0/0
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ {%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
+ -
+ {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
+ type: ovs_user_bridge
+ {%- else %}
+ type: {{ nets['external'][0]['nic_mapping'][role]['phys_type'] }}
+ {%- endif %}
+ name: br-ex
+ use_dhcp: false
+ members:
+ -
+ type: interface
+ name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
+ # force the MAC address of the bridge to this interface
+ primary: true
+ {%- if role == 'controller' %}
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ default: true
+ ip_netmask: 0.0.0.0/0
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ {%- endif %}
+ {%- endif %}
+ {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] == 'native' %}
+ -
+ type: interface
+ name: {{ nets['storage']['nic_mapping'][role]['members'][0] }}
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ {%- endif %}
+ {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] == 'native' %}
+ -
+ type: interface
+ name: {{ nets['api']['nic_mapping'][role]['members'][0] }}
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
{%- endif %}
- {%- endif %}
- {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] == 'native' %}
- -
- type: interface
- name: {{ nets['storage']['nic_mapping'][role]['members'][0] }}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- {%- endif %}
- {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] == 'native' %}
- -
- type: interface
- name: {{ nets['api']['nic_mapping'][role]['members'][0] }}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- {%- endif %}
outputs:
OS::stack_id:
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 4ef6ef8..84bd2f7 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -160,7 +160,7 @@ parameter_defaults:
ComputeServices:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Sshd
- #- OS::TripleO::Services::Barometer
+ - OS::TripleO::Services::Barometer
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephOSD
@@ -172,6 +172,7 @@ parameter_defaults:
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::ComputeNeutronCorePlugin
- OS::TripleO::Services::ComputeNeutronOvsAgent
@@ -196,6 +197,8 @@ resource_registry:
OS::TripleO::Services::SwiftStorage: OS::Heat::None
#OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::BarbicanApi: "/usr/share/openstack-tripleo-heat-\
+ templates/puppet/services/barbican-api.yaml"
# Extra Config
OS::TripleO::ComputeExtraConfigPre: OS::Heat::None
OS::TripleO::ControllerExtraConfigPre: OS::Heat::None
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 098ab02..f7d55e5 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -48,6 +48,7 @@ qemu-img resize overcloud-full_build.qcow2 +1500M
# installing forked apex-puppet-tripleo
# upload neutron port data plane status
LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
+ --run-command "curl -f https://trunk.rdoproject.org/centos7-pike/delorean-deps.repo > /etc/yum.repos.d/delorean-deps.repo" \
--run-command "xfs_growfs /dev/sda" \
--upload ${BUILD_DIR}/apex-puppet-tripleo.tar.gz:/etc/puppet/modules \
--run-command "cd /etc/puppet/modules && rm -rf tripleo && tar xzf apex-puppet-tripleo.tar.gz" \
@@ -66,9 +67,10 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
--upload ${BUILD_ROOT}/patches/neutron_openstackclient_dps.patch:/usr/lib/python2.7/site-packages/ \
--upload ${BUILD_ROOT}/patches/puppet-neutron-add-sfc.patch:/usr/share/openstack-puppet/modules/neutron/ \
--upload ${BUILD_ROOT}/patches/congress-parallel-execution.patch:/usr/lib/python2.7/site-packages/ \
+ --upload ${BUILD_ROOT}/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch:/usr/share/openstack-puppet/modules/neutron/ \
+ --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-vpp-ml2-type_drivers-setting.patch" \
+ --install openstack-utils \
-a overcloud-full_build.qcow2
-# --upload ${BUILD_ROOT}/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch:/usr/share/openstack-puppet/modules/neutron/ \
-# --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-vpp-ml2-type_drivers-setting.patch" \
# --upload ${BUILD_ROOT}/patches/puppet-neutron-add-external_network_bridge-option.patch:/usr/share/openstack-puppet/modules/neutron/ \
# --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-add-external_network_bridge-option.patch" \
@@ -109,8 +111,8 @@ cat > ${BUILD_DIR}/kubernetes.repo << EOF
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
-gpgcheck=1
-repo_gpgcheck=1
+gpgcheck=0
+repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
@@ -143,11 +145,12 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
--install python-etcd,puppet-etcd \
--install patch \
--install docker,kubelet,kubeadm,kubectl,kubernetes-cni \
+ --upload ${BUILD_ROOT}/patches/puppet-ceph.patch:/etc/puppet/modules/ceph/ \
+ --run-command "cd /etc/puppet/modules/ceph && patch -p1 < puppet-ceph.patch" \
-a overcloud-full_build.qcow2
# upload and install barometer packages
- # FIXME collectd pkgs conflict during upgrade to Pike
- # barometer_pkgs overcloud-full_build.qcow2
+ barometer_pkgs overcloud-full_build.qcow2
fi # end x86_64 specific items
diff --git a/build/patches/neutron-patch-NSDriver.patch b/build/patches/neutron-patch-NSDriver.patch
index f01d031..84b4fb0 100644
--- a/build/patches/neutron-patch-NSDriver.patch
+++ b/build/patches/neutron-patch-NSDriver.patch
@@ -1,6 +1,6 @@
-From d51e6ba77c3f40c7c04c97b1de06bf9344c95929 Mon Sep 17 00:00:00 2001
+From ea53f407637d7ed8b5447fc261b1577d4795744a Mon Sep 17 00:00:00 2001
From: Feng Pan <fpan@redhat.com>
-Date: Thu, 20 Jul 2017 16:12:45 -0400
+Date: Sun, 5 Feb 2017 21:34:19 -0500
Subject: [PATCH] Add NSDriver
---
@@ -10,7 +10,7 @@ Subject: [PATCH] Add NSDriver
3 files changed, 86 insertions(+), 8 deletions(-)
diff --git a/neutron/agent/l3/namespaces.py b/neutron/agent/l3/namespaces.py
-index f65c706..e9fc4b7 100644
+index 71e8cbcf35..7152cd94ff 100644
--- a/neutron/agent/l3/namespaces.py
+++ b/neutron/agent/l3/namespaces.py
@@ -18,6 +18,7 @@ import functools
@@ -18,10 +18,10 @@ index f65c706..e9fc4b7 100644
from oslo_utils import excutils
+from neutron.agent.linux.interface import OVSInterfaceDriver
- from neutron._i18n import _LE, _LW
from neutron.agent.linux import ip_lib
-@@ -110,8 +111,9 @@ class Namespace(object):
+ LOG = logging.getLogger(__name__)
+@@ -119,8 +120,9 @@ class Namespace(object):
class RouterNamespace(Namespace):
@@ -32,7 +32,7 @@ index f65c706..e9fc4b7 100644
name = self._get_ns_name(router_id)
super(RouterNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
-@@ -132,7 +134,7 @@ class RouterNamespace(Namespace):
+@@ -140,7 +142,7 @@ class RouterNamespace(Namespace):
elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
ns_ip.del_veth(d.name)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
@@ -42,10 +42,10 @@ index f65c706..e9fc4b7 100644
bridge=self.agent_conf.external_network_bridge,
namespace=self.name,
diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py
-index 46db6a5..6775882 100644
+index f578a9e5e2..cadc0371d7 100644
--- a/neutron/agent/l3/router_info.py
+++ b/neutron/agent/l3/router_info.py
-@@ -30,6 +30,7 @@ from neutron.common import exceptions as n_exc
+@@ -29,6 +29,7 @@ from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.ipam import utils as ipam_utils
@@ -53,7 +53,7 @@ index 46db6a5..6775882 100644
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
-@@ -52,6 +53,7 @@ class RouterInfo(object):
+@@ -51,6 +52,7 @@ class RouterInfo(object):
interface_driver,
use_ipv6=False):
self.agent = agent
@@ -61,7 +61,7 @@ index 46db6a5..6775882 100644
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
-@@ -63,7 +65,7 @@ class RouterInfo(object):
+@@ -62,7 +64,7 @@ class RouterInfo(object):
self.router = router
self.use_ipv6 = use_ipv6
ns = self.create_router_namespace_object(
@@ -82,7 +82,7 @@ index 46db6a5..6775882 100644
@property
def router(self):
-@@ -609,7 +611,7 @@ class RouterInfo(object):
+@@ -630,7 +632,7 @@ class RouterInfo(object):
for ip in floating_ips]
def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
@@ -91,7 +91,7 @@ index 46db6a5..6775882 100644
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
-@@ -679,7 +681,7 @@ class RouterInfo(object):
+@@ -700,7 +702,7 @@ class RouterInfo(object):
self._add_route_to_gw(ex_gw_port, device_name=interface_name,
namespace=ns_name, preserve_ips=preserve_ips)
@@ -101,7 +101,7 @@ index 46db6a5..6775882 100644
ip_cidrs,
namespace=ns_name,
diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py
-index 1f1ed39..b7781e2 100644
+index 88d6e67f31..c0fab604d1 100644
--- a/neutron/agent/linux/interface.py
+++ b/neutron/agent/linux/interface.py
@@ -15,7 +15,7 @@
@@ -113,7 +113,7 @@ index 1f1ed39..b7781e2 100644
import netaddr
from neutron_lib import constants
from oslo_config import cfg
-@@ -317,6 +317,80 @@ class NullDriver(LinuxInterfaceDriver):
+@@ -308,6 +308,80 @@ class NullDriver(LinuxInterfaceDriver):
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
pass
@@ -132,7 +132,7 @@ index 1f1ed39..b7781e2 100644
+ return True
+ attempt += 1
+ eventlet.sleep(1)
-+ LOG.error(_LE("Device %(dev)s was not created in %(time)d seconds"),
++ LOG.error("Device %(dev)s was not created in %(time)d seconds",
+ {'dev': device_name,
+ 'time': NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE})
+ return False
@@ -144,8 +144,8 @@ index 1f1ed39..b7781e2 100644
+ # Note: network_device_mtu will be deprecated in future
+ mtu_override = self.conf.network_device_mtu
+ except cfg.NoSuchOptError:
-+ LOG.warning(_LW("Config setting for MTU deprecated - any "
-+ "override will be ignored."))
++ LOG.warning("Config setting for MTU deprecated - any "
++ "override will be ignored.")
+ mtu_override = None
+ if mtu_override:
+ mtu = mtu_override
@@ -195,5 +195,5 @@ index 1f1ed39..b7781e2 100644
class OVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an OVS bridge."""
--
-2.9.3
+2.14.3
diff --git a/build/patches/puppet-ceph.patch b/build/patches/puppet-ceph.patch
new file mode 100644
index 0000000..18bf9ee
--- /dev/null
+++ b/build/patches/puppet-ceph.patch
@@ -0,0 +1,76 @@
+From 99a0bcc818ed801f6cb9e07a9904ee40e624bdab Mon Sep 17 00:00:00 2001
+From: Tim Rozet <trozet@redhat.com>
+Date: Mon, 5 Mar 2018 17:03:00 -0500
+Subject: [PATCH] Fixes ceph key import failures by adding multiple attempts
+
+Signed-off-by: Tim Rozet <trozet@redhat.com>
+---
+ manifests/key.pp | 42 +++++++++++++++++-------------------------
+ 1 file changed, 17 insertions(+), 25 deletions(-)
+
+diff --git a/manifests/key.pp b/manifests/key.pp
+index 911df1a..d47a4c3 100644
+--- a/manifests/key.pp
++++ b/manifests/key.pp
+@@ -123,22 +123,6 @@ define ceph::key (
+ }
+ }
+
+- # ceph-authtool --add-key is idempotent, will just update pre-existing keys
+- exec { "ceph-key-${name}":
+- command => "/bin/true # comment to satisfy puppet syntax requirements
+-set -ex
+-ceph-authtool ${keyring_path} --name '${name}' --add-key '${secret}' ${caps}",
+- unless => "/bin/true # comment to satisfy puppet syntax requirements
+-set -x
+-NEW_KEYRING=\$(mktemp)
+-ceph-authtool \$NEW_KEYRING --name '${name}' --add-key '${secret}' ${caps}
+-diff -N \$NEW_KEYRING ${keyring_path}
+-rv=\$?
+-rm \$NEW_KEYRING
+-exit \$rv",
+- require => [ File[$keyring_path], ],
+- logoutput => true,
+- }
+
+ if $inject {
+
+@@ -162,18 +146,26 @@ exit \$rv",
+ exec { "ceph-injectkey-${name}":
+ command => "/bin/true # comment to satisfy puppet syntax requirements
+ set -ex
++cat ${keyring_path}
++ceph-authtool ${keyring_path} --name '${name}' --add-key '${secret}' ${caps}
++cat ${keyring_path}
+ ceph ${cluster_option} ${inject_id_option} ${inject_keyring_option} auth import -i ${keyring_path}",
+- unless => "/bin/true # comment to satisfy puppet syntax requirements
+-set -x
+-OLD_KEYRING=\$(mktemp)
+-ceph ${cluster_option} ${inject_id_option} ${inject_keyring_option} auth get ${name} -o \$OLD_KEYRING || true
+-diff -N \$OLD_KEYRING ${keyring_path}
+-rv=$?
+-rm \$OLD_KEYRING
+-exit \$rv",
+- require => [ Class['ceph'], Exec["ceph-key-${name}"], ],
++ require => [ File[$keyring_path], Class['ceph'] ],
+ logoutput => true,
++ tries => 6,
++ try_sleep => 10
+ }
+
++ } else {
++
++ # ceph-authtool --add-key is idempotent, will just update pre-existing keys
++ exec { "ceph-key-${name}":
++ command => "/bin/true # comment to satisfy puppet syntax requirements
++set -ex
++ceph-authtool ${keyring_path} --name '${name}' --add-key '${secret}' ${caps}
++cat ${keyring_path}",
++ require => [ File[$keyring_path], ],
++ logoutput => true,
++ }
+ }
+ }
+--
+2.14.3
+
diff --git a/build/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch b/build/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch
index a84ab83..00e7183 100644
--- a/build/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch
+++ b/build/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch
@@ -1,18 +1,20 @@
-From 8e37e0fae6195ec177828a9e9d36c6ee009cd372 Mon Sep 17 00:00:00 2001
+From 8676df91883d52e53f2762107267e106ce8c1c64 Mon Sep 17 00:00:00 2001
From: Feng Pan <fpan@redhat.com>
-Date: Thu, 18 May 2017 17:39:42 -0400
-Subject: [PATCH] Add ml2 type_drivers setting
+Date: Fri, 16 Mar 2018 08:47:30 -0400
+Subject: [PATCH] Add vpp ml2 type_driver config
-Change-Id: Ie47a1ace6302d7eccd3ead676c4e1cde7e82c5d2
+Change-Id: I60fb724f2a61377f65df7608c4d70f534c5539f5
+Signed-off-by: Feng Pan <fpan@redhat.com>
---
- manifests/agents/ml2/vpp.pp | 10 ++++++++--
- 1 file changed, 8 insertions(+), 2 deletions(-)
+ manifests/agents/ml2/vpp.pp | 14 +++++++++++++-
+ manifests/plugins/ml2/vpp.pp | 6 ++++++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/manifests/agents/ml2/vpp.pp b/manifests/agents/ml2/vpp.pp
-index f17d67e8..08427833 100644
+index f8204024..ca3a333b 100644
--- a/manifests/agents/ml2/vpp.pp
+++ b/manifests/agents/ml2/vpp.pp
-@@ -42,6 +42,11 @@
+@@ -42,6 +42,15 @@
# in the vpp config.
# Defaults to false.
#
@@ -21,32 +23,60 @@ index f17d67e8..08427833 100644
+# Could be an array that can contain flat, vlan or vxlan
+# Defaults to $::os_service_default.
+#
++# [*service_plugins*]
++# (optional) service plugins config
++# Defaults to $::os_service_default.
++#
class neutron::agents::ml2::vpp (
- $package_ensure = 'present',
- $enabled = true,
-@@ -51,6 +56,7 @@ class neutron::agents::ml2::vpp (
- $etcd_port = $::os_service_default,
- $etcd_user = $::os_service_default,
- $etcd_pass = $::os_service_default,
-+ $type_drivers = $::os_service_default,
- $purge_config = false,
+ $package_ensure = 'present',
+ $enabled = true,
+@@ -51,6 +60,8 @@ class neutron::agents::ml2::vpp (
+ $etcd_port = $::os_service_default,
+ $etcd_user = $::os_service_default,
+ $etcd_pass = $::os_service_default,
++ $type_drivers = $::os_service_default,
++ $service_plugins = $::os_service_default,
+ $purge_config = false,
) {
include ::neutron::deps
-@@ -61,12 +67,12 @@ class neutron::agents::ml2::vpp (
- }
-
- neutron_agent_vpp {
-- 'ml2_vpp/physnets': value => $physnets;
-+ 'ml2_vpp/physnets': value => $physnets;
- 'ml2_vpp/etcd_host': value => $etcd_host;
+@@ -66,7 +77,8 @@ class neutron::agents::ml2::vpp (
'ml2_vpp/etcd_port': value => $etcd_port;
'ml2_vpp/etcd_user': value => $etcd_user;
'ml2_vpp/etcd_pass': value => $etcd_pass;
-- 'DEFAULT/host': value => $::hostname;
+- 'DEFAULT/host': value => $::hostname;
+ 'ml2/type_drivers': value => join(any2array($type_drivers), ',');
++ 'DEFAULT/service_plugins': value => $service_plugins;
}
package { 'neutron-vpp-agent':
+diff --git a/manifests/plugins/ml2/vpp.pp b/manifests/plugins/ml2/vpp.pp
+index 0410a43e..b0c3c4d8 100644
+--- a/manifests/plugins/ml2/vpp.pp
++++ b/manifests/plugins/ml2/vpp.pp
+@@ -20,11 +20,16 @@
+ # (optional) Password for etcd authentication
+ # Defaults to $::os_service_default.
+ #
++# [*l3_hosts*]
++# (optional) L3 vpp-routing hosts
++# Defaults to $::os_service_default.
++#
+ class neutron::plugins::ml2::vpp (
+ $etcd_host = $::os_service_default,
+ $etcd_port = $::os_service_default,
+ $etcd_user = $::os_service_default,
+ $etcd_pass = $::os_service_default,
++ $l3_hosts = $::os_service_default,
+ ) {
+ include ::neutron::deps
+ require ::neutron::plugins::ml2
+@@ -34,5 +39,6 @@ class neutron::plugins::ml2::vpp (
+ 'ml2_vpp/etcd_port': value => $etcd_port;
+ 'ml2_vpp/etcd_user': value => $etcd_user;
+ 'ml2_vpp/etcd_pass': value => $etcd_pass, secret => true;
++ 'ml2_vpp/l3_hosts': value => $l3_hosts;
+ }
+ }
--
-2.13.3
+2.14.3
diff --git a/build/patches/tacker-client-fix-symmetrical.patch b/build/patches/tacker-client-fix-symmetrical.patch
deleted file mode 100644
index eab01a6..0000000
--- a/build/patches/tacker-client-fix-symmetrical.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 9630f711a88a69480c44d6ac21244d9a8b0d92c7 Mon Sep 17 00:00:00 2001
-From: Tim Rozet <trozet@redhat.com>
-Date: Fri, 18 Aug 2017 16:22:23 -0400
-Subject: [PATCH] Fixes passing boolean as string for symmetrical
-
-Bug where 'True'/'False' strings were being passed in REST to Tacker
-service which would end up throwing an exception because the DB type for
-symmetrical is boolean/small int. This converts it to boolean in the
-client.
-
-Closes-Bug: 1711550
-
-Change-Id: Ide2aeab73b1dd88beb6e491e6b07cdee9fb7e48a
-Signed-off-by: Tim Rozet <trozet@redhat.com>
----
-
-diff --git a/tackerclient/tacker/v1_0/nfvo/vnffg.py b/tackerclient/tacker/v1_0/nfvo/vnffg.py
-index 729cd19..92b98ed 100644
---- a/tackerclient/tacker/v1_0/nfvo/vnffg.py
-+++ b/tackerclient/tacker/v1_0/nfvo/vnffg.py
-@@ -97,7 +97,9 @@
- help=_('List of logical VNFD name to VNF instance name mapping. '
- 'Example: VNF1:my_vnf1,VNF2:my_vnf2'))
- parser.add_argument(
-- '--symmetrical', metavar='{True,False}',
-+ '--symmetrical',
-+ action='store_true',
-+ default=False,
- help=_('Should a reverse path be created for the NFP'))
- parser.add_argument(
- '--param-file',
diff --git a/build/rpm_specs/networking-vpp.spec b/build/rpm_specs/networking-vpp.spec
index 8068783..4c84f20 100644
--- a/build/rpm_specs/networking-vpp.spec
+++ b/build/rpm_specs/networking-vpp.spec
@@ -2,7 +2,7 @@
Summary: OpenStack Networking for VPP
Name: python-networking-vpp
-Version: 17.07
+Version: 18.04
Release: %{release}%{?git}%{?dist}
License: Apache 2.0
@@ -12,7 +12,7 @@ Url: https://github.com/openstack/networking-vpp/
BuildArch: noarch
AutoReq: no
-Requires: vpp
+Requires: vpp python-jwt
Vendor: OpenStack <openstack-dev@lists.openstack.org>
Packager: Feng Pan <fpan@redhat.com>
@@ -27,7 +27,7 @@ Description=Networking VPP ML2 Agent
[Service]
ExecStartPre=/usr/bin/systemctl is-active vpp
-ExecStart=/usr/bin/vpp-agent --config-file /etc/neutron/plugins/ml2/vpp_agent.ini
+ExecStart=/usr/bin/vpp-agent --config-file /etc/neutron/plugins/ml2/vpp_agent.ini --log-file /var/log/neutron/vpp-agent.log
Type=simple
Restart=on-failure
RestartSec=5s
@@ -37,6 +37,13 @@ WantedBy=multi-user.target
EOF
+%preun
+%systemd_preun neutron-vpp-agent.service
+
+%postun
+%systemd_postun
+rm -rf %{python2_sitelib}/networking_vpp*
+
%install
python setup.py install -O1 --root=%{buildroot} --record=INSTALLED_FILES
mkdir -p %{buildroot}%{_libdir}/systemd/system
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index dde13a7..0d12e8d 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -11,13 +11,13 @@ URL: https://gerrit.opnfv.org/gerrit/apex.git
Source0: opnfv-apex-common.tar.gz
BuildArch: noarch
-BuildRequires: python-docutils python34-devel
+BuildRequires: python34-docutils python34-devel
Requires: opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools python34-libvirt
Requires: initscripts net-tools iputils iproute iptables python34 python34-yaml python34-jinja2 python3-ipmi python34-virtualbmc
Requires: ipxe-roms-qemu >= 20160127-1
Requires: libvirt-devel ansible
Requires: python34-iptables python34-cryptography python34-pbr
-Requires: python34-GitPython python34-pygerrit2
+Requires: python34-GitPython python34-pygerrit2 python34-distro
%description
Scripts for OPNFV deployment using Apex
@@ -55,7 +55,6 @@ install docs/release/release-notes/release-notes.html %{buildroot}%{_docdir}/opn
install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example
install config/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example
install config/network/network_settings_v6.yaml %{buildroot}%{_docdir}/opnfv/network_settings_v6.yaml.example
-install config/network/network_settings_vpp.yaml %{buildroot}%{_docdir}/opnfv/network_settings_vpp.yaml.example
install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/inventory.yaml.example
%files
@@ -65,6 +64,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%attr(755,root,root) %{_bindir}/opnfv-deploy
%attr(755,root,root) %{_bindir}/opnfv-clean
%attr(755,root,root) %{_bindir}/opnfv-util
+%attr(755,root,root) %{_bindir}/opnfv-pyutil
%{_datadir}/opnfv-apex/
%{_sysconfdir}/bash_completion.d/apex
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
@@ -76,8 +76,12 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-pike-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-pike-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-pike_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-queens_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-master_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-pike_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-queens_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-master_upstream-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
@@ -92,10 +96,14 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-fdio_dvr-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-fdio_dvr-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-l2gw-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-l2gw-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-nofeature-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-sriov-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-sriov-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-gluon-noha.yaml
%{_sysconfdir}/opnfv-apex/os-ovn-nofeature-noha.yaml
%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml
@@ -104,7 +112,6 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/network_settings.yaml
%{_sysconfdir}/opnfv-apex/network_settings_vlans.yaml
%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
-%{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml
%doc %{_docdir}/opnfv/LICENSE.rst
%doc %{_docdir}/opnfv/installation-instructions.html
%doc %{_docdir}/opnfv/release-notes.rst
@@ -112,10 +119,17 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/deploy_settings.yaml.example
%doc %{_docdir}/opnfv/network_settings.yaml.example
%doc %{_docdir}/opnfv/network_settings_v6.yaml.example
-%doc %{_docdir}/opnfv/network_settings_vpp.yaml.example
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Tue Apr 17 2018 Feng Pan <fpan@redhat.com> - 6.0-4
+ Removes network_settings_vpp.yaml
+* Tue Apr 03 2018 Tim Rozet <trozet@redhat.com> - 6.0-3
+ Adds fetch logs
+* Fri Mar 09 2018 Tim Rozet <trozet@redhat.com> - 6.0-2
+ Add upstream deploy files with containers
+* Wed Feb 14 2018 Tim Rozet <trozet@redhat.com> - 6.0-1
+ Fix docutils requirement and add python34-distro
* Wed Nov 29 2017 Tim Rozet <trozet@redhat.com> - 6.0-0
Bump version for Fraser
* Wed Oct 25 2017 Tim Rozet <trozet@redhat.com> - 5.0-9
diff --git a/build/undercloud.sh b/build/undercloud.sh
index 0cfb673..6bb8ac9 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -59,8 +59,6 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
--upload ${CACHE_DIR}/${calipso_script}:/root/ \
--install "libguestfs-tools" \
--install "python-tackerclient" \
- --upload ${BUILD_ROOT}/patches/tacker-client-fix-symmetrical.patch:/usr/lib/python2.7/site-packages/ \
- --run-command "cd usr/lib/python2.7/site-packages/ && patch -p1 < tacker-client-fix-symmetrical.patch" \
--run-command "yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo" \
--install yum-utils,lvm2,device-mapper-persistent-data \
-a undercloud_build.qcow2
diff --git a/build/upstream-environment.yaml b/build/upstream-environment.yaml
index ef6cdb6..debe6f3 100644
--- a/build/upstream-environment.yaml
+++ b/build/upstream-environment.yaml
@@ -6,6 +6,7 @@ parameters:
CloudDomain: opnfvlf.org
parameter_defaults:
+ DockerPuppetProcessCount: 10
NeutronNetworkVLANRanges: 'datacentre:500:525'
SshServerOptions:
HostKey:
diff --git a/build/variables.sh b/build/variables.sh
index e966e33..f944c59 100644
--- a/build/variables.sh
+++ b/build/variables.sh
@@ -44,16 +44,17 @@ kvmfornfv_kernel_rpm="kvmfornfv-4bfeded9-apex-kernel-4.4.50_rt62_centos.x86_64.r
calipso_uri_base="https://git.opnfv.org/calipso/plain/app/install"
calipso_script="calipso-installer.py"
-netvpp_repo="https://github.com/openstack/networking-vpp"
-netvpp_branch="17.07"
+#netvpp_repo="https://github.com/openstack/networking-vpp"
+netvpp_repo="https://github.com/fepan/networking-vpp"
+netvpp_branch="test-fdio-fix"
netvpp_commit=$(git ls-remote ${netvpp_repo} ${netvpp_branch} | awk '{print substr($1,1,7)}')
-netvpp_pkg=python-networking-vpp-17.07-1.git${NETVPP_COMMIT}$(rpm -E %dist).noarch.rpm
+netvpp_pkg=python-networking-vpp-18.04-1.git${NETVPP_COMMIT}$(rpm -E %dist).noarch.rpm
gluon_rpm=gluon-0.0.1-1_20170302.noarch.rpm
nosdn_vpp_rpms=(
-'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp/17.07.01-release.x86_64/vpp-17.07.01-release.x86_64.rpm'
-'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-api-python/17.07.01-release.x86_64/vpp-api-python-17.07.01-release.x86_64.rpm'
-'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-lib/17.07.01-release.x86_64/vpp-lib-17.07.01-release.x86_64.rpm'
-'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-plugins/17.07.01-release.x86_64/vpp-plugins-17.07.01-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp/18.01.1-release.x86_64/vpp-18.01.1-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-plugins/18.01.1-release.x86_64/vpp-plugins-18.01.1-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-lib/18.01.1-release.x86_64/vpp-lib-18.01.1-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-api-python/18.01.1-release.x86_64/vpp-api-python-18.01.1-release.x86_64.rpm'
)
diff --git a/ci/PR_revision.log b/ci/PR_revision.log
index b83ca40..9aecb90 100644
--- a/ci/PR_revision.log
+++ b/ci/PR_revision.log
@@ -10,4 +10,8 @@ apex-puppet-tripleo, Ie336c22b366bd478963ca14e25d645fec0cded7a
apex-tripleo-heat-templates, I0749ed6e0d27bd4c9a5bb19657579d400501d09e
apex-puppet-tripleo, I486c4045e29c7032526be6e19d11e7979070c2d9
-apex-os-net-config, Idaf03d78d2ce657ac484c1285a795d98760d0112 \ No newline at end of file
+apex-os-net-config, Idaf03d78d2ce657ac484c1285a795d98760d0112
+apex-tripleo-heat-templates, Ib94b0182fd5fbc3a254cae5862a82982bf3131be
+apex-tripleo-heat-templates, I1b4d3eea61e29e1ede0e06d78fde842ef13b7b8e
+
+apex-tripleo-heat-templates, I37039207bc7cf9965d26e6dfa034e84bf9b7224d
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index 47bf083..b8f0100 100644
--- a/config/deploy/deploy_settings.yaml
+++ b/config/deploy/deploy_settings.yaml
@@ -7,8 +7,17 @@
# If ha_enabled is false, there will only be one controller.
global_params:
ha_enabled: true
+ # introspect defaults to True,
+ # Enables/disables the introspection process at deploy time.
+ introspect: true
+ # ipxe defaults to True
+ # Enables/disables the use of ipxe for provisioning
+ ipxe: true
deploy_options:
+ # Whether or not to use containers for the overcloud services
+ containers: true
+
# Which SDN controller to use. Valid options are 'opendaylight', 'onos',
# 'opendaylight-external', 'opencontrail' or false. A value of false will
# use Neutron's OVS ML2 controller.
@@ -46,6 +55,11 @@ deploy_options:
# The dataplane should be specified as fdio if this is set to true
vpp: false
+ # Whether to install and configure SRIOV service in the compute node(s) to
+ # allow VMs to use VFs/PFs. The user must know in advance the name of the
+ # SRIOV capable NIC that will be configured.
+ sriov: em2
+
# Whether to run vsperf after the install has completed
# vsperf: false
diff --git a/config/deploy/os-nosdn-master_upstream-noha.yaml b/config/deploy/os-nosdn-master_upstream-noha.yaml
new file mode 100644
index 0000000..e775811
--- /dev/null
+++ b/config/deploy/os-nosdn-master_upstream-noha.yaml
@@ -0,0 +1,11 @@
+---
+global_params:
+ ha_enabled: false
+deploy_options:
+ containers: true
+ os_version: master
+ sdn_controller: false
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-nosdn-pike-noha.yaml b/config/deploy/os-nosdn-pike_upstream-noha.yaml
index 1141784..1141784 100644
--- a/config/deploy/os-nosdn-pike-noha.yaml
+++ b/config/deploy/os-nosdn-pike_upstream-noha.yaml
diff --git a/config/deploy/os-nosdn-queens_upstream-noha.yaml b/config/deploy/os-nosdn-queens_upstream-noha.yaml
new file mode 100644
index 0000000..efadc31
--- /dev/null
+++ b/config/deploy/os-nosdn-queens_upstream-noha.yaml
@@ -0,0 +1,11 @@
+---
+global_params:
+ ha_enabled: false
+deploy_options:
+ containers: true
+ os_version: queens
+ sdn_controller: false
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-odl-l2gw-ha.yaml b/config/deploy/os-odl-l2gw-ha.yaml
new file mode 100644
index 0000000..a22da3b
--- /dev/null
+++ b/config/deploy/os-odl-l2gw-ha.yaml
@@ -0,0 +1,12 @@
+---
+global_params:
+ ha_enabled: true
+
+deploy_options:
+ sdn_controller: opendaylight
+ odl_version: nitrogen
+ tacker: false
+ congress: true
+ sfc: false
+ vpn: false
+ l2gw: true
diff --git a/config/deploy/os-odl-l2gw-noha.yaml b/config/deploy/os-odl-l2gw-noha.yaml
new file mode 100644
index 0000000..ae5218a
--- /dev/null
+++ b/config/deploy/os-odl-l2gw-noha.yaml
@@ -0,0 +1,12 @@
+---
+global_params:
+ ha_enabled: false
+
+deploy_options:
+ sdn_controller: opendaylight
+ odl_version: nitrogen
+ tacker: false
+ congress: true
+ sfc: false
+ vpn: false
+ l2gw: true
diff --git a/config/deploy/os-odl-pike-noha.yaml b/config/deploy/os-odl-master_upstream-noha.yaml
index 44eff66..39ced49 100644
--- a/config/deploy/os-odl-pike-noha.yaml
+++ b/config/deploy/os-odl-master_upstream-noha.yaml
@@ -3,12 +3,13 @@ global_params:
ha_enabled: false
patches:
undercloud:
- - change-id: I301370fbf47a71291614dd60e4c64adc7b5ebb42
+ - change-id: Ie380cc41ca50a294a2647d673f339d02111bf6b3
project: openstack/tripleo-heat-templates
deploy_options:
- os_version: pike
+ containers: true
+ os_version: master
sdn_controller: opendaylight
- odl_version: carbon
+ odl_version: master
tacker: false
congress: false
sfc: false
diff --git a/config/deploy/os-odl-pike_upstream-noha.yaml b/config/deploy/os-odl-pike_upstream-noha.yaml
new file mode 100644
index 0000000..3fe1c73
--- /dev/null
+++ b/config/deploy/os-odl-pike_upstream-noha.yaml
@@ -0,0 +1,12 @@
+---
+global_params:
+ ha_enabled: false
+deploy_options:
+ containers: false
+ os_version: pike
+ sdn_controller: opendaylight
+ odl_version: carbon
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-odl-queens_upstream-noha.yaml b/config/deploy/os-odl-queens_upstream-noha.yaml
new file mode 100644
index 0000000..75a7346
--- /dev/null
+++ b/config/deploy/os-odl-queens_upstream-noha.yaml
@@ -0,0 +1,16 @@
+---
+global_params:
+ ha_enabled: false
+ patches:
+ undercloud:
+ - change-id: Ie380cc41ca50a294a2647d673f339d02111bf6b3
+ project: openstack/tripleo-heat-templates
+deploy_options:
+ containers: true
+ os_version: queens
+ sdn_controller: opendaylight
+ odl_version: oxygen
+ tacker: false
+ congress: false
+ sfc: false
+ vpn: false
diff --git a/config/deploy/os-odl-sriov-ha.yaml b/config/deploy/os-odl-sriov-ha.yaml
new file mode 100644
index 0000000..03e34a2
--- /dev/null
+++ b/config/deploy/os-odl-sriov-ha.yaml
@@ -0,0 +1,21 @@
+---
+global_params:
+ ha_enabled: true
+
+deploy_options:
+ sdn_controller: opendaylight
+ odl_version: nitrogen
+ tacker: true
+ congress: true
+ sfc: false
+ vpn: false
+ sriov: em2
+ performance:
+ Controller:
+ kernel:
+ Compute:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 2048
+ intel_iommu: 'on'
+ iommu: pt
diff --git a/config/deploy/os-odl-sriov-noha.yaml b/config/deploy/os-odl-sriov-noha.yaml
new file mode 100644
index 0000000..52b5aa1
--- /dev/null
+++ b/config/deploy/os-odl-sriov-noha.yaml
@@ -0,0 +1,21 @@
+---
+global_params:
+ ha_enabled: false
+
+deploy_options:
+ sdn_controller: opendaylight
+ odl_version: nitrogen
+ tacker: true
+ congress: true
+ sfc: false
+ vpn: false
+ sriov: em2
+ performance:
+ Controller:
+ kernel:
+ Compute:
+ kernel:
+ hugepagesz: 2M
+ hugepages: 2048
+ intel_iommu: 'on'
+ iommu: pt
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index fe11a9b..a8ddca1 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -57,6 +57,10 @@ syslog:
server: 10.128.1.24
transport: 'tcp'
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
# Common network settings
networks:
# Admin configuration (pxe and jumpstart)
@@ -177,7 +181,10 @@ networks:
# Mapping for compute profile (nodes assigned as Compute nodes)
compute:
# Physical interface type (interface or bond)
- phys_type: interface
+ # Note that this phys_type for external network will be changed
+ # to vpp_interface for odl_fdio scenarios and linux_bridge for
+ # nosdn_fdio scenarios.
+ phys_type: ovs_bridge
# VLAN tag to use with this NIC
vlan: native
# Physical NIC members of this mapping
@@ -186,7 +193,10 @@ networks:
- eth2
# Mapping for controller profile (nodes assigned as Controller nodes)
controller:
- phys_type: interface
+ # Note that this phys_type for external network will be changed
+ # to vpp_interface for odl_fdio scenarios and linux_bridge for
+ # nosdn_fdio scenarios.
+ phys_type: ovs_bridge
vlan: native
members:
- eth2
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 7dddf34..176bc7c 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -57,6 +57,10 @@ syslog:
server: 10.128.1.24
transport: 'tcp'
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
# Common network settings
networks:
# Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index 345dbbd..29cd193 100644
--- a/config/network/network_settings_vlans.yaml
+++ b/config/network/network_settings_vlans.yaml
@@ -57,6 +57,10 @@ syslog:
server: 10.128.1.24
transport: 'tcp'
+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
# Common network settings
networks:
# Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_vpp.yaml b/config/network/network_settings_vpp.yaml
deleted file mode 100644
index 2f6bba5..0000000
--- a/config/network/network_settings_vpp.yaml
+++ /dev/null
@@ -1,314 +0,0 @@
----
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 5 following networks:
-#
-# - admin
-# - tenant*
-# - external*
-# - storage*
-# - api*
-# *) optional networks
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitly configured.
-#
-# See short description of the networks in the comments below.
-#
-# "admin" is the short name for Control Plane Network.
-# This network should be IPv4 even it is an IPv6 deployment
-# IPv6 does not have PXE boot support.
-# During OPNFV deployment it is used for node provisioning which will require
-# PXE booting as well as running a DHCP server on this network. Be sure to
-# disable any other DHCP/TFTP server on this network.
-#
-# "tenant" is the network used for tenant traffic.
-#
-# "external" is the network which should have internet or external
-# connectivity. External OpenStack networks will be configured to egress this
-# network. There can be multiple external networks, but only one assigned as
-# "public" which OpenStack public API's will register.
-#
-# "storage" is the network for storage I/O.
-#
-# "api" is an optional network for splitting out OpenStack service API
-# communication. This should be used for IPv6 deployments.
-
-
-# Meta data for the network configuration
-network-config-metadata:
- title: LF-POD-1 Network config
- version: 0.1
- created: Mon Dec 28 2015
- comment: None
-
-# DNS Settings
-dns-domain: opnfvlf.org
-dns-search: opnfvlf.org
-dns_nameservers:
- - 8.8.8.8
- - 8.8.4.4
-# NTP servers
-ntp:
- - 0.se.pool.ntp.org
- - 1.se.pool.ntp.org
-# Syslog server
-syslog:
- server: 10.128.1.24
- transport: 'tcp'
-
-# Common network settings
-networks:
- # Admin configuration (pxe and jumpstart)
- admin:
- enabled: true
- # Network settings for the Installer VM on admin network
- installer_vm:
- # Indicates if this VM will be bridged to an interface, or to a bond
- nic_type: interface
- # Interfaces to bridge for installer VM (use multiple values for bond)
- members:
- - em1
- # VLAN tag to use for this network on Installer VM, native means none
- vlan: native
- # IP to assign to Installer VM on this network
- ip: 192.0.2.1
- # Usable ip range for the overcloud node IPs (including VIPs)
- # Last IP is used for host bridge (i.e. br-admin).
- # If empty entire range is usable.
- # Cannot overlap with dhcp_range or introspection_range.
- overcloud_ip_range:
- - 192.0.2.51
- - 192.0.2.99
- # Gateway (only needed when public_network is disabled)
- gateway: 192.0.2.1
- # Subnet in CIDR format 192.168.1.0/24
- cidr: 192.0.2.0/24
- # DHCP range for the admin network, automatically provisioned if empty
- dhcp_range:
- - 192.0.2.2
- - 192.0.2.50
- # Mapping of network configuration for Overcloud Nodes
- nic_mapping:
- # Mapping for compute profile (nodes assigned as Compute nodes)
- compute:
- # Physical interface type (interface or bond)
- phys_type: interface
- # Physical NIC members (Single value allowed for phys_type: interface)
- members:
- - eth0
- # Mapping for controller profile (nodes assigned as Controller nodes)
- controller:
- phys_type: interface
- members:
- - eth0
-
- # Tenant network configuration
- tenant:
- enabled: true
- # Subnet in CIDR format 192.168.1.0/24
- cidr: 11.0.0.0/24
- # Tenant network MTU
- mtu: 1500
- # Tenant network Overlay segmentation ID range:
- # VNI, VLAN-ID, etc.
- overlay_id_range: 2,65535
-
- # Tenant network segmentation type:
- # vlan, vxlan, gre
- segmentation_type: vxlan
- # Mapping of network configuration for Overcloud Nodes
- nic_mapping:
- # Mapping for compute profile (nodes assigned as Compute nodes)
- compute:
- # Physical interface type (interface/bond)
- phys_type: interface
- # VLAN tag to use with this NIC
- vlan: native
- # Physical NIC members of this mapping
- # Single value allowed for phys_type: interface
- members:
- # Note logical name like nic1 not valid for fdio deployment yet.
- - eth1
- # Mapping for controller profile (nodes assigned as Controller nodes)
- controller:
- # Physical interface type (interface/bond)
- phys_type: interface
- vlan: native
- # Note: logicial names like nic1 are not valid for fdio deployment yet.
- members:
- - eth1
-
- # Can contain 1 or more external networks
- external:
- - public:
- enabled: true
- # Public network MTU
- mtu: 1500
- # Network settings for the Installer VM on external network
- # (note only valid on 'public' external network)
- installer_vm:
- # Indicates if this VM will be bridged to an interface, or to a bond
- nic_type: interface
- vlan: native
- # Interfaces to bridge for installer VM (use multiple values for bond)
- members:
- - em1
- # IP to assign to Installer VM on this network
- ip: 192.168.37.1
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- # Range to allocate to floating IPs for the public network with Neutron
- floating_ip_range:
- - 192.168.37.200
- - 192.168.37.220
- # Usable ip range for the overcloud node IPs (including VIPs)
- # Last IP will be used for host bridge (i.e. br-public).
- # If empty entire range is usable.
- # Cannot overlap with dhcp_range or introspection_range.
- overcloud_ip_range:
- - 192.168.37.10
- - 192.168.37.199
- # Mapping of network configuration for Overcloud Nodes
- nic_mapping:
- # Mapping for compute profile (nodes assigned as Compute nodes)
- compute:
- # Physical interface type (interface or bond)
- phys_type: interface
- # VLAN tag to use with this NIC
- vlan: native
- # Physical NIC members of this mapping
- # Single value allowed for phys_type: interface
- members:
- - eth2
- # Mapping for controller profile (nodes assigned as Controller nodes)
- controller:
- phys_type: interface
- vlan: native
- members:
- - eth2
- # External network to be created in OpenStack by Services tenant
- external_overlay:
- name: Public_internet
- type: flat
- gateway: 192.168.37.1
- # another external network
- # This is an example and not yet supported
- - private_cloud:
- enabled: false
- mtu: 1500
- # Network settings for the Installer VM on external network
- # note only valid on 'public' external network
- installer_vm:
- # Indicates if this VM will be bridged to an interface, or to a bond
- nic_type: interface
- vlan: 101
- # Interfaces to bridge for installer VM (use multiple values for bond)
- members:
- - em1
- # IP to assign to Installer VM on this network
- ip: 192.168.38.1
- cidr: 192.168.38.0/24
- gateway: 192.168.38.1
- # Range to allocate to floating IPs for the public network with Neutron
- floating_ip_range:
- - 192.168.38.200
- - 192.168.38.220
- # Usable IP range for overcloud nodes (including VIPs)i
- # usually this is a shared subnet.
- # Cannot overlap with dhcp_range or introspection_range.
- overcloud_ip_range:
- - 192.168.38.10
- - 192.168.38.199
- # Mapping of network configuration for Overcloud Nodes
- nic_mapping:
- # Mapping for compute profile (nodes assigned as Compute nodes)
- compute:
- # Physical interface type (interface or bond)
- phys_type: interface
- # VLAN tag to use with this NIC
- vlan: 101
- # Physical NIC members of this mappingi
- # Single value allowed for phys_type: interface
- # Note: logical names like nic1 are not valid for fdio deployment yet.
- members:
- - eth3
- # Mapping for controller profile (nodes assigned as Controller nodes)
- controller:
- phys_type: interface
- vlan: 101
- members:
- - eth3
- # External network to be created in OpenStack by Services tenant
- external_overlay:
- name: private_cloud
- type: vlan
- segmentation_id: 101
- gateway: 192.168.38.1
-
- # Storage network configuration
- storage:
- enabled: true
- # Subnet in CIDR format
- cidr: 12.0.0.0/24
- # Storage network MTU
- mtu: 1500
- # Mapping of network configuration for Overcloud Nodes
- nic_mapping:
- # Mapping for compute profile (nodes assigned as Compute nodes)
- compute:
- # Physical interface type (interface or bond)
- phys_type: interface
- # VLAN tag to use with this NIC
- vlan: native
- # Physical NIC members of this mapping
- # Single value allowed for phys_type: interface
- members:
- # Note logical names like nic1 not valid for fdio deployment yet.
- - eth3
- # Mapping for controller profile (nodes assigned as Controller nodes)
- controller:
- phys_type: interface
- vlan: native
- members:
- - eth3
-
- api:
- # API network configuration
- enabled: false
- # Subnet in CIDR format
- cidr: fd00:fd00:fd00:4000::/64
- # VLAN tag to use for Overcloud hosts on this network
- vlan: 13
- # Api network MTU
- mtu: 1500
- # Mapping of network configuration for Overcloud Nodes
- nic_mapping:
- # Mapping for compute profile (nodes assigned as Compute nodes)
- compute:
- # Physical interface type (interface or bond)
- phys_type: interface
- # VLAN tag to use with this NIC
- vlan: native
- # Physical NIC members of this mapping
- # Single value allowed for phys_type: interface
- # Note logical names like nic1 not valid for fdio deployment yet.
- members:
- - eth4
- # Mapping for controller profile (nodes assigned as Controller nodes)
- controller:
- phys_type: interface
- vlan: native
- members:
- - eth4
-
-# Apex specific settings
-apex:
- networks:
- admin:
- # Range used for introspection phase (examining nodes).
- # This cannot overlap with dhcp_range or overcloud_ip_range.
- # for the overcloud default external network
- introspection_range:
- - 192.0.2.100
- - 192.0.2.120
diff --git a/docs/contributor/upstream-overcloud-container-design.rst b/docs/contributor/upstream-overcloud-container-design.rst
new file mode 100644
index 0000000..4b368c2
--- /dev/null
+++ b/docs/contributor/upstream-overcloud-container-design.rst
@@ -0,0 +1,126 @@
+=======================================
+Overcloud Container Design/Architecture
+=======================================
+
+This document describes the changes done to implement container deployments in
+Apex.
+
+ * OOO container architecture
+ * Upstream vs Downstream deployment
+ * Apex container deployment overview
+
+OOO container architecture
+--------------------------
+
+Typically in OOO each OpenStack service is represented by a TripleO Heat
+Template stored under the puppet/services directory in the THT code base. For
+containers, there are new templates created in the docker/services directory
+which include templates for most of the previously defined puppet services.
+These docker templates in almost all cases inherit their puppet template
+counterpart and then build off of that to provide OOO docker specific
+configuration.
+
+The containers configuration in OOO is still done via puppet, and config files
+are then copied into a host directory to be later mounted in the service
+container during deployment. The docker template contains docker specific
+settings to the service, including what files to mount into the container,
+along with which puppet resources to execute, etc. Note, the puppet code is
+still stored locally on the host, while the service python code is stored in
+the container image.
+
+RDO has its own registry which stores the Docker images per service to use in
+deployments. The container image is usually just a CentOS 7 container with the
+relevant service RPM installed.
+
+In addition, Ceph no longer uses puppet to deploy. puppet-ceph was previously
+used to configure Ceph on the overcloud, but has been replaced with
+Ceph-Ansible. During container deployment, the undercloud calls a mistral
+workflow to initiate a Ceph-Ansible playbook that will download the Ceph Daemon
+container image to the overcloud and configure it.
+
+Upstream vs. Downstream deployment
+----------------------------------
+
+In Apex we typically build artifacts and then deploy from them. This works in
+the past as we usually modify disk images (qcow2s) with files or patches and
+distribute them as RPMs. However, with containers space becomes an issue. The
+size of each container image ranges from 800 MB to over 2GB. This makes it
+unfeasible to download all of the possible images and store them into a disk
+image for distribution.
+
+Therefore for container deployments the only option is to deploy using
+upstream. This means that only upstream undercloud/overcloud images are pulled
+at deploy time, and the required containers are docker pulled during deployment
+into the undercloud. For upstream deployments the modified time of the
+RDO images are checked and cached locally, to refrain from unnecessary
+downloading of artifacts. Also, the optional '--no-fetch' argument may be
+provided at deploy time, to ignore pulling any new images, as long as previous
+artifacts are cached locally.
+
+Apex container deployment
+-------------------------
+
+For deploying containers with Apex, a new deploy setting is available,
+'containers'. When this flag is used, along with '--upstream' the following
+workflow occurs:
+
+ 1. The upstream RDO images for undercloud/overcloud are checked and
+ downloaded if necessary.
+ 2. The undercloud VM is installed and configured as a normal deployment.
+ 3. The overcloud prep image method is called which is modified now for
+ patches and containers. The method will now return a set of container
+ images which are going to be patched. These can be either due to a change
+ in OpenDaylight version for example, or patches included in the deploy
+ settings for the overcloud that include a python path.
+ 4. During the overcloud image prep, a new directory in the Apex tmp dir is
+ created called 'containers' which then includes sub-directories for each
+ docker image which is being patched (for example, 'containers/nova-api').
+ 5. A Dockerfile is created inside of the directory created in step 4, which
+ holds Dockerfile operations to rebuild the container with patches or any
+ required changes. Several container images could be used for different
+ services inside of an OS project. For example, there are different images
+ for each nova service (nova-api, nova-conductor, nova-compute). Therefore
+ a lookup is done to figure out all of the container images that a
+ hypothetically provided nova patch would apply to. Then a directory and
+ Dockerfile is created for each image. All of this is tar'ed and
+ compressed into an archive which will be copied to the undercloud.
+ 6. Next, the deployment is checked to see if a Ceph device was provided in
+ Apex settings. If it is not, then a persistent loop device is created
+ in the overcloud image to serve as storage backend for Ceph OSDs. Apex
+ previously used a directory '/srv/data' to serve as the backend to the
+ OSDs, but that is no longer supported with Ceph-Ansible.
+ 7. The deployment command is then created, as usual, but with minor changes
+ to add docker.yaml and docker-ha.yaml files which are required to deploy
+ containers with OOO.
+ 8. Next a new playbook is executed, 'prepare_overcloud_containers.yaml',
+ which includes several steps:
+
+ a. The previously archived docker image patches are copied and unpacked
+ into /home/stack.
+ b. 'overcloud_containers' and 'sdn_containers' image files are then
+ prepared which are basically just yaml files which indicate which
+ docker images to pull and where to store them. Which in our case is a
+ local docker registry.
+ c. The docker images are then pulled and stored into the local registry.
+ The reason for using a local registry is to then have a static source
+ of images that do not change every time a user deploys. This allows
+ for more control and predictability in deployments.
+ d. Next, the images in the local registry are cross-checked against
+ the images that were previously collected as requiring patches. Any
+ image which then exists in the local registry and also requires changes
+ is then rebuilt by the docker build command, tagged with 'apex' and
+ then pushed into the local registry. This helps the user distinguish
+ which containers have been modified by Apex, in case any debugging is
+ needed in comparing upstream docker images with Apex modifications.
+ e. Then new OOO image files are created, to indicate to OOO that the
+ docker images to use for deployment are the ones in the local registry.
+ Also, the ones modified by Apex are modified with the 'apex' tag.
+ f. The relevant Ceph Daemon Docker image is pulled and pushed into the
+ local registry for deployment.
+ 9. At this point the OOO deployment command is initiated as in regular
+ Apex deployments. Each container will be started on the overcloud and
+ puppet executed in it to gather the configuration files in Step 1. This
+ leads to Step 1 taking longer than it used to in non-containerized
+ deployments. Following this step, the containers are then brought up in
+ their regular step order, while mounting the previously generated
+ configuration files.
diff --git a/docs/release/installation/architecture.rst b/docs/release/installation/architecture.rst
index 079c26d..70067ed 100644
--- a/docs/release/installation/architecture.rst
+++ b/docs/release/installation/architecture.rst
@@ -160,6 +160,14 @@ issues per scenario. The following scenarios correspond to a supported
+-------------------------+-------------+---------------+
| os-odl-bgpvpn-noha | SDNVPN | Yes |
+-------------------------+-------------+---------------+
+| os-odl-sriov-ha | Apex | No |
++-------------------------+-------------+---------------+
+| os-odl-sriov-noha | Apex | No |
++-------------------------+-------------+---------------+
+| os-odl-l2gw-ha | Apex | No |
++-------------------------+-------------+---------------+
+| os-odl-l2gw-noha | Apex | No |
++-------------------------+-------------+---------------+
| os-odl-sfc-ha | SFC | No |
+-------------------------+-------------+---------------+
| os-odl-sfc-noha | SFC | Yes |
diff --git a/docs/release/installation/baremetal.rst b/docs/release/installation/baremetal.rst
index 49997f8..703d169 100644
--- a/docs/release/installation/baremetal.rst
+++ b/docs/release/installation/baremetal.rst
@@ -88,7 +88,7 @@ Install Bare Metal Jump Host
``sudo yum install https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm``
``sudo yum install epel-release``
- ``sudo curl -o /etc/yum/repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/euphrates/opnfv-apex.repo``
+ ``sudo curl -o /etc/yum.repos.d/opnfv-apex.repo http://artifacts.opnfv.org/apex/euphrates/opnfv-apex.repo``
The RDO Project release repository is needed to install OpenVSwitch, which
is a dependency of opnfv-apex. If you do not have external connectivity to
@@ -224,7 +224,7 @@ Follow the steps below to execute:
network_settings.yaml allows you to customize your networking topology.
Note it can also be useful to run the command with the ``--debug``
argument which will enable a root login on the overcloud nodes with
- password: 'opnfv-apex'. It is also useful in some cases to surround the
+ password: 'opnfvapex'. It is also useful in some cases to surround the
deploy command with ``nohup``. For example:
``nohup <deploy command> &``, will allow a deployment to continue even if
ssh access to the Jump Host is lost during deployment.
@@ -238,5 +238,5 @@ Follow the steps below to execute:
3. When the deployment is complete the undercloud IP and overcloud dashboard
url will be printed. OPNFV has now been deployed using Apex.
-.. _`Execution Requirements (Bare Metal Only)`: index.html#execution-requirements-bare-metal-only
-.. _`Network Requirements`: index.html#network-requirements
+.. _`Execution Requirements (Bare Metal Only)`: requirements.html#execution-requirements-bare-metal-only
+.. _`Network Requirements`: requirements.html#network-requirements
diff --git a/docs/release/installation/virtual.rst b/docs/release/installation/virtual.rst
index 9336b8e..af8aece 100644
--- a/docs/release/installation/virtual.rst
+++ b/docs/release/installation/virtual.rst
@@ -80,7 +80,7 @@ Follow the steps below to execute:
-n network_settings.yaml -d deploy_settings.yaml``
Note it can also be useful to run the command with the ``--debug``
argument which will enable a root login on the overcloud nodes with
- password: 'opnfv-apex'. It is also useful in some cases to surround the
+ password: 'opnfvapex'. It is also useful in some cases to surround the
deploy command with ``nohup``. For example:
``nohup <deploy command> &``, will allow a deployment to continue even if
ssh access to the Jump Host is lost during deployment.
@@ -98,5 +98,5 @@ Verifying the Setup - VMs
To verify the set you can follow the instructions in the `Verifying the Setup`_
section.
-.. _`Install Bare Metal Jump Host`: index.html#install-bare-metal-jump-host
-.. _`Verifying the Setup`: index.html#verifying-the-setup
+.. _`Install Bare Metal Jump Host`: baremetal.html#install-bare-metal-jump-host
+.. _`Verifying the Setup`: verification.html#verifying-the-setup
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
index c0e1cd3..fbac6ee 100644
--- a/lib/ansible/playbooks/configure_undercloud.yml
+++ b/lib/ansible/playbooks/configure_undercloud.yml
@@ -32,6 +32,18 @@
regexp: 'Defaults\s*requiretty'
state: absent
become: yes
+ - lineinfile:
+ path: /etc/environment
+ regexp: '^http_proxy'
+ line: "http_proxy={{ http_proxy }}"
+ become: yes
+ when: http_proxy
+ - lineinfile:
+ path: /etc/environment
+ regexp: '^https_proxy'
+ line: "https_proxy={{ https_proxy }}"
+ become: yes
+ when: https_proxy
- name: openstack-configs undercloud
shell: openstack-config --set undercloud.conf DEFAULT {{ item }}
with_items: "{{ undercloud_config }}"
@@ -39,9 +51,6 @@
shell: openstack-config --set /etc/ironic/ironic.conf {{ item }}
become: yes
with_items: "{{ ironic_config }}"
- - name: openstack-configs undercloud aarch64
- shell: openstack-config --set undercloud.conf DEFAULT ipxe_enabled false
- when: aarch64
- lineinfile:
path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
regexp: '_link_ip_address_pxe_configs'
@@ -134,11 +143,48 @@
- external_network.enabled
- aarch64
become: yes
+ - block:
+ - name: Undercloud NAT - MASQUERADE interface
+ iptables:
+ table: nat
+ chain: POSTROUTING
+ out_interface: eth0
+ jump: MASQUERADE
+ - name: Undercloud NAT - MASQUERADE interface with subnet
+ iptables:
+ table: nat
+ chain: POSTROUTING
+ out_interface: eth0
+ jump: MASQUERADE
+ source: "{{ nat_cidr }}"
+ - name: Undercloud NAT - Allow Forwarding
+ iptables:
+ chain: FORWARD
+ in_interface: eth2
+ jump: ACCEPT
+ - name: Undercloud NAT - Allow Stateful Forwarding
+ iptables:
+ chain: FORWARD
+ in_interface: eth2
+ jump: ACCEPT
+ source: "{{ nat_cidr }}"
+ ctstate: ESTABLISHED,RELATED
+ - name: Undercloud NAT - Save iptables
+ shell: service iptables save
+ become: yes
+ when:
+ - not nat_network_ipv6
+ - virtual_overcloud
- name: fetch storage environment file
fetch:
src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
dest: "{{ apex_temp_dir }}/"
flat: yes
+ - name: fetch sriov environment file
+ fetch:
+ src: /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-sriov.yaml
+ dest: "{{ apex_temp_dir }}/"
+ flat: yes
- include: undercloud_aarch64.yml
when: aarch64
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
index 545ee33..fb1da46 100644
--- a/lib/ansible/playbooks/deploy_dependencies.yml
+++ b/lib/ansible/playbooks/deploy_dependencies.yml
@@ -7,6 +7,7 @@
with_items:
- python-lxml
- libvirt-python
+ - libguestfs-tools
- sysctl:
name: net.ipv4.ip_forward
state: present
@@ -72,6 +73,12 @@
when:
- ansible_architecture == "x86_64"
- "'Y' not in nested_result.stdout"
+ - modprobe:
+ name: ip6_tables
+ state: present
+ - modprobe:
+ name: ip_tables
+ state: present
- name: Generate SSH key for root if missing
shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
- name: Check that /u/l/python3.4/site-packages/virtualbmc/vbmc.py exists
diff --git a/lib/ansible/playbooks/deploy_overcloud.yml b/lib/ansible/playbooks/deploy_overcloud.yml
index b2d9234..268a517 100644
--- a/lib/ansible/playbooks/deploy_overcloud.yml
+++ b/lib/ansible/playbooks/deploy_overcloud.yml
@@ -30,6 +30,12 @@
owner: root
group: root
mode: 0664
+ - copy:
+ src: "{{ apex_temp_dir }}/neutron-opendaylight-sriov.yaml"
+ dest: /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-sriov.yaml
+ owner: root
+ group: root
+ mode: 0664
- systemd:
name: openstack-swift-proxy
state: restarted
@@ -57,15 +63,13 @@
become_user: stack
- name: Import inventory (baremetal)
shell: "{{ stackrc }} && openstack overcloud node import instackenv.json"
- when: not virtual
+ when: introspect
- name: Introspect inventory (baremetal)
shell: "{{ stackrc }} && openstack overcloud node introspect --all-manageable --provide"
- when:
- - not virtual
- - not aarch64
+ when: introspect
- name: Import inventory (virtual)
shell: "{{ stackrc }} && openstack overcloud node import --provide instackenv.json"
- when: virtual
+ when: not introspect
- name: Set flavors
shell: '{{ stackrc }} && openstack flavor set --property "cpu_arch"="{{ ansible_architecture }}" {{ item }}'
with_items:
diff --git a/lib/ansible/playbooks/fetch_overcloud_logs.yml b/lib/ansible/playbooks/fetch_overcloud_logs.yml
new file mode 100644
index 0000000..1ab5247
--- /dev/null
+++ b/lib/ansible/playbooks/fetch_overcloud_logs.yml
@@ -0,0 +1,25 @@
+---
+- hosts: all
+ tasks:
+ - name: Archive logs
+ archive:
+ path:
+ - /var/log
+ - /etc/puppet
+ - /etc/nova
+ - /etc/neutron
+ - /etc/heat
+ - /etc/haproxy
+ - /etc/glance
+ - /etc/puppet
+ - /etc/vpp
+ - /etc/os-net-config
+ - /opt/opendaylight/data/log
+ - /opt/opendaylight/etc
+ dest: /root/logging.tar.gz
+ become: yes
+ - name: Fetch /var/log/
+ fetch:
+ src: /root/logging.tar.gz
+ dest: "{{ apex_temp_dir }}/"
+ become: yes
diff --git a/lib/ansible/playbooks/fetch_overcloud_nodes.yml b/lib/ansible/playbooks/fetch_overcloud_nodes.yml
new file mode 100644
index 0000000..bcb5f0f
--- /dev/null
+++ b/lib/ansible/playbooks/fetch_overcloud_nodes.yml
@@ -0,0 +1,13 @@
+---
+- hosts: all
+ tasks:
+ - name: Get overcloud nodes and IPs
+ shell: "{{ stackrc }} && openstack server list -f json"
+ register: nova_list
+ - name: Write nova list output to file
+ local_action: copy content="{{ nova_list.stdout }}" dest="{{ apex_temp_dir }}/nova_output"
+ - name: Get ironic node information
+ shell: "{{ stackrc }} && openstack baremetal node list -f json"
+ register: ironic_list
+ - name: Write ironic list output to file
+ local_action: copy content="{{ ironic_list.stdout }}" dest="{{ apex_temp_dir }}/ironic_output"
diff --git a/lib/ansible/playbooks/post_deploy_overcloud.yml b/lib/ansible/playbooks/post_deploy_overcloud.yml
index af1c648..13623f2 100644
--- a/lib/ansible/playbooks/post_deploy_overcloud.yml
+++ b/lib/ansible/playbooks/post_deploy_overcloud.yml
@@ -51,3 +51,20 @@
owner: root
group: tacker
become: yes
+ - name: Restart Controller Neutron/Nova Services (Pike Workaround)
+ shell: "systemctl restart {{ item }}"
+ become: yes
+ when:
+ - "'controller' in ansible_hostname"
+ - os_version == 'pike'
+ with_items:
+ - neutron-server
+ - openstack-nova-api
+ - openstack-nova-scheduler
+ - openstack-nova-conductor
+ - name: Restart Compute Nova Compute (Pike Workaround)
+ shell: "systemctl restart openstack-nova-compute"
+ become: yes
+ when:
+ - "'compute' in ansible_hostname"
+ - os_version == 'pike'
diff --git a/lib/ansible/playbooks/post_deploy_undercloud.yml b/lib/ansible/playbooks/post_deploy_undercloud.yml
index 2e1bf0f..d0206f8 100644
--- a/lib/ansible/playbooks/post_deploy_undercloud.yml
+++ b/lib/ansible/playbooks/post_deploy_undercloud.yml
@@ -26,9 +26,7 @@
group: stack
mode: 0644
become: yes
- with_items:
- - overcloudrc
- - overcloudrc.v3
+ with_items: "{{ overcloudrc_files }}"
- name: Inject OS_PROJECT_ID and OS_TENANT_NAME into overcloudrc
lineinfile:
line: "{{ item }}"
@@ -74,56 +72,20 @@
when: sdn != false
become: yes
become_user: stack
- with_items:
- - overcloudrc
- - overcloudrc.v3
- - name: Undercloud NAT - MASQUERADE interface
- iptables:
- table: nat
- chain: POSTROUTING
- out_interface: eth0
- jump: MASQUERADE
- when:
- - virtual
- - not external_network_ipv6
- become: yes
- - name: Undercloud NAT - MASQUERADE interface with subnet
- iptables:
- table: nat
- chain: POSTROUTING
- out_interface: eth0
- jump: MASQUERADE
- source: "{{ external_cidr }}"
- when:
- - virtual
- - not external_network_ipv6
- become: yes
- - name: Undercloud NAT - Allow Forwarding
- iptables:
- chain: FORWARD
- in_interface: eth2
- jump: ACCEPT
- when:
- - virtual
- - not external_network_ipv6
- become: yes
- - name: Undercloud NAT - Allow Stateful Forwarding
- iptables:
- chain: FORWARD
- in_interface: eth2
- jump: ACCEPT
- source: "{{ external_cidr }}"
- ctstate: ESTABLISHED,RELATED
- when:
- - virtual
- - not external_network_ipv6
+ with_items: "{{ overcloudrc_files }}"
+ - name: Register OS Region
+ shell: "{{ overcloudrc }} && openstack endpoint list -c Region -f json"
+ register: region
become: yes
- - name: Undercloud NAT - Save iptables
- shell: service iptables save
+ become_user: stack
+ - name: Write Region into overcloudrc
+ lineinfile:
+ line: "export OS_REGION_NAME={{(region.stdout|from_json)[1]['Region']}}"
+ regexp: 'OS_REGION_NAME'
+ path: "/home/stack/{{ item }}"
become: yes
- when:
- - virtual
- - not external_network_ipv6
+ become_user: stack
+ with_items: "{{ overcloudrc_files }}"
- name: Create congress datasources
shell: "{{ overcloudrc }} && openstack congress datasource create {{ item }}"
become: yes
diff --git a/lib/ansible/playbooks/prepare_overcloud_containers.yml b/lib/ansible/playbooks/prepare_overcloud_containers.yml
new file mode 100644
index 0000000..88a8df1
--- /dev/null
+++ b/lib/ansible/playbooks/prepare_overcloud_containers.yml
@@ -0,0 +1,105 @@
+---
+- hosts: all
+ tasks:
+ - name: Upload container patches archive
+ copy:
+ src: "{{ apex_temp_dir }}/docker_patches.tar.gz"
+ dest: "/home/stack/docker_patches.tar.gz"
+ owner: stack
+ group: stack
+ mode: 0644
+ when: patched_docker_services|length > 0
+ - name: Unpack container patches archive
+ unarchive:
+ src: /home/stack/docker_patches.tar.gz
+ remote_src: yes
+ list_files: yes
+ group: stack
+ owner: stack
+ dest: /home/stack/
+ when: patched_docker_services|length > 0
+ - name: Prepare generic docker registry image file
+ shell: >
+ {{ stackrc }} && openstack overcloud container image prepare
+ --namespace trunk.registry.rdoproject.org/{{ os_version }}
+ --tag {{ container_tag }}
+ --push-destination {{ undercloud_ip }}:8787
+ -e /usr/share/openstack-tripleo-heat-templates/environments/docker.yaml
+ --output-images-file overcloud_containers.yml
+ become: yes
+ become_user: stack
+ - name: Prepare SDN docker registry image file
+ shell: >
+ {{ stackrc }} && openstack overcloud container image prepare
+ --namespace trunk.registry.rdoproject.org/{{ os_version }}
+ --tag {{ container_tag }}
+ --push-destination {{ undercloud_ip }}:8787
+ -e {{ sdn_env_file }}
+ --output-images-file sdn_containers.yml
+ become: yes
+ become_user: stack
+ when: sdn != false
+ - name: Upload docker images to local registry
+ shell: >
+ {{ stackrc }} && openstack overcloud container image upload
+ --config-file /home/stack/overcloud_containers.yml
+ - name: Upload SDN docker images to local registry
+ shell: >
+ {{ stackrc }} && openstack overcloud container image upload
+ --config-file /home/stack/sdn_containers.yml
+ when: sdn != false
+ - name: Collect docker images in registry
+ uri:
+ url: http://{{ undercloud_ip }}:8787/v2/_catalog
+ body_format: json
+ register: response
+ - name: Patch Docker images
+ shell: >
+ cd /home/stack/containers/{{ item }} && docker build
+ -t {{ undercloud_ip }}:8787/{{ os_version }}/centos-binary-{{ item }}:apex .
+ when:
+ - patched_docker_services|length > 0
+ - item in (response.json)['repositories']|join(" ")
+ with_items: "{{ patched_docker_services }}"
+ - name: Push patched docker images to local registry
+ shell: docker push {{ undercloud_ip }}:8787/{{ os_version }}/centos-binary-{{ item }}:apex
+ when:
+ - patched_docker_services|length > 0
+ - item in (response.json)['repositories']|join(" ")
+ with_items: "{{ patched_docker_services }}"
+ - name: Prepare deployment generic docker image file
+ shell: >
+ {{ stackrc }} && openstack overcloud container image prepare
+ --namespace {{ undercloud_ip }}:8787/{{ os_version }}
+ --tag {{ container_tag }}
+ -e /usr/share/openstack-tripleo-heat-templates/environments/docker.yaml
+ --output-env-file docker-images.yaml
+ become: yes
+ become_user: stack
+ - name: Prepare deployment SDN docker image file
+ shell: >
+ {{ stackrc }} && openstack overcloud container image prepare
+ --namespace {{ undercloud_ip }}:8787/{{ os_version }}
+ --tag {{ container_tag }}
+ -e {{ sdn_env_file }}
+ --output-env-file sdn-images.yaml
+ when: sdn != false
+ become: yes
+ become_user: stack
+ - name: Modify Images with Apex tag
+ replace:
+ path: "{{ item[0] }}"
+ regexp: "(\\s*Docker.*?:.*?centos-binary-{{ item[1] }}):.*"
+ replace: '\1:apex'
+ with_nested:
+ - [ '/home/stack/sdn-images.yaml', '/home/stack/docker-images.yaml']
+ - "{{ patched_docker_services }}"
+ - name: Pull Ceph docker image
+ shell: docker pull {{ ceph_docker_image }}
+ become: yes
+ - name: Tag Ceph image for local registry
+ shell: docker tag {{ ceph_docker_image }} {{ undercloud_ip }}:8787/{{ ceph_docker_image }}
+ become: yes
+ - name: Push Ceph docker image to local registry
+ shell: docker push {{ undercloud_ip }}:8787/{{ ceph_docker_image }}
+ become: yes
diff --git a/lib/ansible/playbooks/undercloud_aarch64.yml b/lib/ansible/playbooks/undercloud_aarch64.yml
index 040831c..ddaf1b0 100644
--- a/lib/ansible/playbooks/undercloud_aarch64.yml
+++ b/lib/ansible/playbooks/undercloud_aarch64.yml
@@ -23,6 +23,8 @@
dest: /tftpboot/EFI/centos/grub.cfg
mode: 0644
- shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi'
+ - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template \$pybasedir/drivers/modules/pxe_grub_config.template'
+
- systemd:
name: openstack-ironic-conductor
state: restarted
diff --git a/requirements.txt b/requirements.txt
index 0326a8c..18bd020 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,3 +11,4 @@ PyYAML
Jinja2>=2.8
GitPython
pygerrit2
+distro
diff --git a/setup.cfg b/setup.cfg
index 52ad12f..c9e4298 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -25,6 +25,7 @@ setup-hooks =
console_scripts =
opnfv-deploy = apex.deploy:main
opnfv-clean = apex.clean:main
+ opnfv-pyutil = apex.utils:main
[files]
packages =