31 files changed, 341 insertions, 90 deletions
diff --git a/apex/build.py b/apex/build.py
index 08f91abe..dff25ac8 100644
--- a/apex/build.py
+++ b/apex/build.py
@@ -225,6 +225,7 @@ def main():
     console.setLevel(log_level)
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
+    utils.install_ansible()
     # Since we only support building inside of git repo this should be fine
     try:
         apex_root = subprocess.check_output(
diff --git a/apex/common/utils.py b/apex/common/utils.py
index 13250a45..b727b11a 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -8,10 +8,12 @@
 ##############################################################################
 import datetime
+import distro
 import json
 import logging
 import os
 import pprint
+import socket
 import subprocess
 import tarfile
 import time
@@ -192,3 +194,29 @@ def fetch_upstream_and_unpack(dest, url, targets):
         tar = tarfile.open(target_dest)
         tar.extractall(path=dest)
         tar.close()
+
+
+def install_ansible():
+    # we only install for CentOS/Fedora for now
+    dist = distro.id()
+    if 'centos' in dist:
+        pkg_mgr = 'yum'
+    elif 'fedora' in dist:
+        pkg_mgr = 'dnf'
+    else:
+        return
+
+    # yum python module only exists for 2.x, so use subprocess
+    try:
+        subprocess.check_call([pkg_mgr, '-y', 'install', 'ansible'])
+    except subprocess.CalledProcessError:
+        logging.warning('Unable to install Ansible')
+
+
+def internet_connectivity():
+    try:
+        urllib.request.urlopen('http://opnfv.org', timeout=3)
+        return True
+    except (urllib.request.URLError, socket.timeout):
+        logging.debug('No internet connectivity detected')
+        return False
diff --git a/apex/deploy.py b/apex/deploy.py
index d2f1c936..5703e081 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -234,6 +234,7 @@ def main():
     console.setLevel(log_level)
     console.setFormatter(logging.Formatter(formatter))
     logging.getLogger('').addHandler(console)
+    utils.install_ansible()
     validate_deploy_args(args)
     # Parse all settings
     deploy_settings = DeploySettings(args.deploy_settings_file)
@@ -381,7 +382,8 @@ def main():
                                 args.deploy_dir,
                                 root_pw=root_pw,
                                 external_network=uc_external,
-                                image_name=os.path.basename(uc_image))
+                                image_name=os.path.basename(uc_image),
+                                os_version=os_version)
         undercloud.start()
         # Generate nic templates
@@ -389,7 +391,7 @@ def main():
             oc_cfg.create_nic_template(net_settings, deploy_settings, role,
                                        args.deploy_dir, APEX_TEMP_DIR)
         # Install Undercloud
-        undercloud.configure(net_settings,
+        undercloud.configure(net_settings, deploy_settings,
                              os.path.join(args.lib_dir, ANSIBLE_PATH,
                                           'configure_undercloud.yml'),
                              APEX_TEMP_DIR)
@@ -410,8 +412,8 @@ def main():
         if not upstream:
             oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                                opnfv_env, net_env_target, APEX_TEMP_DIR)
-            oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
-                                 root_pw=root_pw)
+            oc_deploy.prep_image(deploy_settings, net_settings, sdn_image,
+                                 APEX_TEMP_DIR, root_pw=root_pw)
         else:
             shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
                                                     'overcloud-full.qcow2'))
@@ -448,6 +450,8 @@ def main():
         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
         deploy_vars['upstream'] = upstream
         deploy_vars['os_version'] = os_version
+        deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
+        deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
         for dns_server in net_settings['dns_servers']:
             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                 dns_server)
@@ -518,6 +522,7 @@ def main():
             # TODO(trozet): just parse all ds_opts as deploy vars one time
             deploy_vars['sfc'] = ds_opts['sfc']
             deploy_vars['vpn'] = ds_opts['vpn']
+            deploy_vars['l2gw'] = ds_opts.get('l2gw')
             # TODO(trozet): pull all logs and store in tmp dir in overcloud
             # playbook
             post_overcloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
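For context, the helpers added to apex/common/utils.py above follow a simple pattern: pick a package manager from the distro id, shell out for the install, and probe a URL to decide whether the host is online. A minimal standalone sketch of that pattern (function names here are illustrative, not Apex API; note the standard library exposes the URL error class as urllib.error.URLError):

import logging
import socket
import subprocess
import urllib.error
import urllib.request

import distro  # same third-party module the change adds to requirements.txt


def pick_pkg_mgr():
    # yum on CentOS, dnf on Fedora, nothing otherwise
    dist = distro.id()
    if 'centos' in dist:
        return 'yum'
    elif 'fedora' in dist:
        return 'dnf'
    return None


def has_internet(url='http://opnfv.org', timeout=3):
    # True if the probe URL answers within the timeout
    try:
        urllib.request.urlopen(url, timeout=timeout)
        return True
    except (urllib.error.URLError, socket.timeout):
        logging.debug('No internet connectivity detected')
        return False


def install_ansible_like():
    pkg_mgr = pick_pkg_mgr()
    if pkg_mgr is None:
        return
    try:
        subprocess.check_call([pkg_mgr, '-y', 'install', 'ansible'])
    except subprocess.CalledProcessError:
        logging.warning('Unable to install Ansible')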
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 5c957965..5bbcaede 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -11,6 +11,7 @@ import base64
 import fileinput
 import logging
 import os
+import platform
 import shutil
 import uuid
 import struct
@@ -37,6 +38,7 @@ SDN_FILE_MAP = {
             'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
             'default': 'neutron-opendaylight-honeycomb.yaml'
         },
+        'l2gw': 'neutron-l2gw-opendaylight.yaml',
         'default': 'neutron-opendaylight.yaml',
     },
     'onos': {
@@ -151,6 +153,14 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
         raise ApexDeployException("Invalid number of control or computes")
     elif num_control > 1 and not ds['global_params']['ha_enabled']:
         num_control = 1
+    if platform.machine() == 'aarch64':
+        # aarch64 deploys were not completing in the default 90 mins.
+        # Not sure if this is related to the hardware the OOO support
+        # was developed on or the virtualization support in CentOS
+        # Either way it will probably get better over time as the aarch
+        # support matures in CentOS and deploy time should be tested in
+        # the future so this multiplier can be removed.
+        con.DEPLOY_TIMEOUT *= 2
     cmd = "openstack overcloud deploy --templates --timeout {} " \
           .format(con.DEPLOY_TIMEOUT)
     # build cmd env args
@@ -176,10 +186,11 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     return cmd


-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
+    :param ns: network settings
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
@@ -209,6 +220,18 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
                 ".service"
         }])

+    if ns.get('http_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+            "echo 'http_proxy={}' >> /etc/environment".format(
+                ns['http_proxy'])})
+
+    if ns.get('https_proxy', ''):
+        virt_cmds.append({
+            con.VIRT_RUN_CMD:
+            "echo 'https_proxy={}' >> /etc/environment".format(
+                ns['https_proxy'])})
+
     if ds_opts['vpn']:
         virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
         virt_cmds.append({
@@ -533,6 +556,9 @@ def prep_storage_env(ds, tmp_dir):
             elif 'CephAdminKey' in line:
                 print("  CephAdminKey: {}".format(generate_ceph_key().decode(
                     'utf-8')))
+            elif 'CephClientKey' in line:
+                print("  CephClientKey: {}".format(generate_ceph_key().decode(
+                    'utf-8')))
             else:
                 print(line)
     if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index f2012b24..eec98225 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -23,7 +23,8 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'ceph',
                        'gluon',
                        'rt_kvm',
-                       'os_version']
+                       'os_version',
+                       'l2gw']

 OPT_DEPLOY_SETTINGS = ['performance',
                        'vsperf',
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index 8ff98a8d..420a70d6 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -154,7 +154,8 @@ class TestOvercloudDeploy(unittest.TestCase):
                'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()

     @patch('apex.overcloud.deploy.virt_utils')
@@ -169,7 +170,8 @@ class TestOvercloudDeploy(unittest.TestCase):
                'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()

     @patch('apex.overcloud.deploy.virt_utils')
@@ -188,7 +190,8 @@ class TestOvercloudDeploy(unittest.TestCase):
             lambda i: ds_opts.get(i, MagicMock())
         ds['deploy_options'].__contains__.side_effect = \
             lambda i: True if i in ds_opts else MagicMock()
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()

     @patch('apex.overcloud.deploy.virt_utils')
@@ -204,7 +207,8 @@ class TestOvercloudDeploy(unittest.TestCase):
                'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()

     @patch('apex.overcloud.deploy.virt_utils')
@@ -219,14 +223,15 @@ class TestOvercloudDeploy(unittest.TestCase):
                'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
             lambda i: ds_opts.get(i, MagicMock())
-        prep_image(ds, 'undercloud.qcow2', '/tmp', root_pw='test')
+        ns = MagicMock()
+        prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()

     @patch('apex.overcloud.deploy.os.path.isfile')
     def test_prep_image_no_image(self, mock_isfile):
         mock_isfile.return_value = False
         assert_raises(ApexDeployException, prep_image,
-                      {}, 'undercloud.qcow2', '/tmp')
+                      {}, {}, 'undercloud.qcow2', '/tmp')

     def test_make_ssh_key(self):
         priv, pub = make_ssh_key()
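The proxy handling added to prep_image above boils down to appending extra virt-customize run commands only when the corresponding network setting is non-empty. A hedged standalone sketch of that shape (VIRT_RUN_CMD is a placeholder key standing in for the Apex constant):

VIRT_RUN_CMD = '--run-command'  # placeholder for the Apex constant


def proxy_virt_cmds(net_settings):
    # Build virt-customize ops that persist proxy settings in the image
    virt_cmds = []
    for var in ('http_proxy', 'https_proxy'):
        value = net_settings.get(var, '')
        if value:
            virt_cmds.append({
                VIRT_RUN_CMD:
                    "echo '{}={}' >> /etc/environment".format(var, value)})
    return virt_cmds


print(proxy_virt_cmds({'http_proxy': 'http://proxy.server:8080'}))
# -> [{'--run-command': "echo 'http_proxy=http://proxy.server:8080' >> /etc/environment"}]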
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
index 9458bf9f..0df785f9 100644
--- a/apex/tests/test_apex_undercloud.py
+++ b/apex/tests/test_apex_undercloud.py
@@ -121,7 +121,8 @@ class TestUndercloud(unittest.TestCase):
                        mock_generate_config, mock_utils):
         uc = Undercloud('img_path', 'tplt_path', external_network=True)
         ns = MagicMock()
-        uc.configure(ns, 'playbook', '/tmp/dir')
+        ds = MagicMock()
+        uc.configure(ns, ds, 'playbook', '/tmp/dir')

     @patch('apex.undercloud.undercloud.utils')
     @patch.object(Undercloud, 'generate_config', return_value={})
@@ -131,10 +132,11 @@ class TestUndercloud(unittest.TestCase):
                                  mock_generate_config, mock_utils):
         uc = Undercloud('img_path', 'tplt_path', external_network=True)
         ns = MagicMock()
+        ds = MagicMock()
         subps_err = subprocess.CalledProcessError(1, 'cmd')
         mock_utils.run_ansible.side_effect = subps_err
         assert_raises(ApexUndercloudException,
-                      uc.configure, ns, 'playbook', '/tmp/dir')
+                      uc.configure, ns, ds, 'playbook', '/tmp/dir')

     @patch('apex.undercloud.undercloud.os.remove')
     @patch('apex.undercloud.undercloud.os.path')
@@ -192,5 +194,21 @@ class TestUndercloud(unittest.TestCase):
                'dns-domain': 'dns',
                'networks': {'admin': ns_net,
                             'external': [ns_net]}}
+        ds = {'global_params': {}}

-        Undercloud('img_path', 'tplt_path').generate_config(ns)
+        Undercloud('img_path', 'tplt_path').generate_config(ns, ds)
+
+    @patch.object(Undercloud, '_get_vm', return_value=None)
+    @patch.object(Undercloud, 'create')
+    @patch('apex.undercloud.undercloud.virt_utils')
+    def test_update_delorean(self, mock_vutils, mock_uc_create, mock_get_vm):
+        uc = Undercloud('img_path', 'tmplt_path', external_network=True)
+        uc._update_delorean_repo()
+        download_cmd = (
+            "curl -L -f -o "
+            "/etc/yum.repos.d/deloran.repo "
+            "https://trunk.rdoproject.org/centos7-{}"
+            "/current-tripleo/delorean.repo".format(
+                constants.DEFAULT_OS_VERSION))
+        test_ops = {'--run-command': download_cmd}
+        mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
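A side note on the stacked @patch decorators used throughout these tests: decorators are applied bottom-up, so the mock created by the lowest decorator arrives as the first extra argument of the test method. A tiny self-contained illustration of that ordering:

from unittest import mock


@mock.patch('os.remove')        # outermost patch -> last extra argument
@mock.patch('os.path.isfile')   # innermost patch -> first extra argument
def check(mock_isfile, mock_remove):
    return mock_isfile, mock_remove


isfile_mock, remove_mock = check()
assert isfile_mock is not remove_mock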
diff --git a/apex/tests/test_apex_virtual_utils.py b/apex/tests/test_apex_virtual_utils.py
index 643069f3..a9eb78dd 100644
--- a/apex/tests/test_apex_virtual_utils.py
+++ b/apex/tests/test_apex_virtual_utils.py
@@ -12,6 +12,7 @@ import unittest

 from mock import patch

+from apex.virtual.exceptions import ApexVirtualException
 from apex.virtual.utils import DEFAULT_VIRT_IP
 from apex.virtual.utils import get_virt_ip
 from apex.virtual.utils import generate_inventory
@@ -66,13 +67,30 @@ class TestVirtualUtils(unittest.TestCase):
         assert_is_instance(generate_inventory('target_file', ha_enabled=True),
                            dict)

+    @patch('apex.virtual.utils.get_virt_ip')
+    @patch('apex.virtual.utils.subprocess.check_output')
     @patch('apex.virtual.utils.iptc')
     @patch('apex.virtual.utils.subprocess.check_call')
     @patch('apex.virtual.utils.vbmc_lib')
-    def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc):
+    def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc,
+                        mock_check_output, mock_get_virt_ip):
+        mock_get_virt_ip.return_value = '192.168.122.1'
+        mock_check_output.return_value = b'blah |dummy \nstatus | running'
         host_setup({'test': 2468})
         mock_subprocess.assert_called_with(['vbmc', 'start', 'test'])

+    @patch('apex.virtual.utils.get_virt_ip')
+    @patch('apex.virtual.utils.subprocess.check_output')
+    @patch('apex.virtual.utils.iptc')
+    @patch('apex.virtual.utils.subprocess.check_call')
+    @patch('apex.virtual.utils.vbmc_lib')
+    def test_host_setup_vbmc_fails(self, mock_vbmc_lib, mock_subprocess,
+                                   mock_iptc, mock_check_output,
+                                   mock_get_virt_ip):
+        mock_get_virt_ip.return_value = '192.168.122.1'
+        mock_check_output.return_value = b'blah |dummy \nstatus | stopped'
+        assert_raises(ApexVirtualException, host_setup, {'test': 2468})
+
     @patch('apex.virtual.utils.iptc')
     @patch('apex.virtual.utils.subprocess.check_call')
     @patch('apex.virtual.utils.vbmc_lib')
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 013570d3..915c85f3 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -31,8 +31,10 @@ class Undercloud:
     """
     def __init__(self, image_path, template_path,
                  root_pw=None, external_network=False,
-                 image_name='undercloud.qcow2'):
+                 image_name='undercloud.qcow2',
+                 os_version=constants.DEFAULT_OS_VERSION):
         self.ip = None
+        self.os_version = os_version
         self.root_pw = root_pw
         self.external_net = external_network
         self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
@@ -61,17 +63,19 @@ class Undercloud:
         if self.external_net:
             networks.append('external')
         console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
+        root = 'vda' if platform.machine() == 'aarch64' else 'sda'

         self.vm = vm_lib.create_vm(name='undercloud',
                                    image=self.volume,
                                    baremetal_interfaces=networks,
                                    direct_boot='overcloud-full',
                                    kernel_args=['console={}'.format(console),
-                                                'root=/dev/sda'],
+                                                'root=/dev/{}'.format(root)],
                                    default_network=True,
                                    template_dir=self.template_path)

         self.setup_volumes()
         self.inject_auth()
+        self._update_delorean_repo()

     def _set_ip(self):
         ip_out = self.vm.interfaceAddresses(
@@ -110,10 +114,12 @@ class Undercloud:
                 "Unable to find IP for undercloud. Check if VM booted "
                 "correctly")

-    def configure(self, net_settings, playbook, apex_temp_dir):
+    def configure(self, net_settings, deploy_settings,
+                  playbook, apex_temp_dir):
         """
         Configures undercloud VM
-        :param net_setings: Network settings for deployment
+        :param net_settings: Network settings for deployment
+        :param deploy_settings: Deployment settings for deployment
         :param playbook: playbook to use to configure undercloud
         :param apex_temp_dir: temporary apex directory to hold configs/logs
         :return: None
@@ -121,7 +127,8 @@ class Undercloud:

         logging.info("Configuring Undercloud...")
         # run ansible
-        ansible_vars = Undercloud.generate_config(net_settings)
+        ansible_vars = Undercloud.generate_config(net_settings,
+                                                  deploy_settings)
         ansible_vars['apex_temp_dir'] = apex_temp_dir
         try:
             utils.run_ansible(ansible_vars, playbook, host=self.ip,
@@ -179,21 +186,28 @@ class Undercloud:
         virt_utils.virt_customize(virt_ops, self.volume)

     @staticmethod
-    def generate_config(ns):
+    def generate_config(ns, ds):
         """
         Generates a dictionary of settings for configuring undercloud
         :param ns: network settings to derive undercloud settings
+        :param ds: deploy settings to derive undercloud settings
         :return: dictionary of settings
         """
         ns_admin = ns['networks']['admin']
         intro_range = ns['apex']['networks']['admin']['introspection_range']
         config = dict()
+        # Check if this is an ARM deployment
+        config['aarch64'] = platform.machine() == 'aarch64'
+        # Configuration for undercloud.conf
         config['undercloud_config'] = [
             "enable_ui false",
             "undercloud_update_packages false",
             "undercloud_debug false",
             "inspection_extras false",
+            "ipxe_enabled {}".format(
+                str(ds['global_params'].get('ipxe', True) and
+                    not config['aarch64'])),
             "undercloud_hostname undercloud.{}".format(ns['dns-domain']),
             "local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
                                     str(ns_admin['cidr']).split('/')[1]),
@@ -226,7 +240,22 @@ class Undercloud:
             "enabled": ns_external['enabled']
         }

-        # Check if this is an ARM deployment
-        config['aarch64'] = platform.machine() == 'aarch64'
+        config['http_proxy'] = ns.get('http_proxy', '')
+        config['https_proxy'] = ns.get('https_proxy', '')

         return config
+
+    def _update_delorean_repo(self):
+        if utils.internet_connectivity():
+            logging.info('Updating delorean repo on Undercloud')
+            delorean_repo = (
+                "https://trunk.rdoproject.org/centos7-{}"
+                "/current-tripleo/delorean.repo".format(self.os_version))
+            cmd = ("curl -L -f -o "
+                   "/etc/yum.repos.d/deloran.repo {}".format(delorean_repo))
+            try:
+                virt_utils.virt_customize({constants.VIRT_RUN_CMD: cmd},
+                                          self.volume)
+            except Exception:
+                logging.warning("Failed to download and update delorean repo "
+                                "for Undercloud")
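The ipxe_enabled entry added to generate_config above is just a boolean derived from the deploy settings and the detected architecture. A small sketch of that logic in isolation:

import platform


def ipxe_enabled(deploy_settings):
    # iPXE stays on unless it is disabled explicitly or the host is aarch64
    aarch64 = platform.machine() == 'aarch64'
    return deploy_settings['global_params'].get('ipxe', True) and not aarch64


print("ipxe_enabled {}".format(ipxe_enabled({'global_params': {}})))
print("ipxe_enabled {}".format(ipxe_enabled({'global_params': {'ipxe': False}})))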
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
index 3b2c4462..ba0398bb 100755
--- a/apex/virtual/configure_vm.py
+++ b/apex/virtual/configure_vm.py
@@ -118,9 +118,9 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
         'user_interface': '',
     }

-    # assign scsi as default for aarch64
+    # assign virtio as default for aarch64
     if arch == 'aarch64' and diskbus == 'sata':
-        diskbus = 'scsi'
+        diskbus = 'virtio'
     # Configure the bus type for the target disk device
     params['diskbus'] = diskbus
     nicparams = {
diff --git a/apex/virtual/exceptions.py b/apex/virtual/exceptions.py
new file mode 100644
index 00000000..e3dff51a
--- /dev/null
+++ b/apex/virtual/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexVirtualException(Exception):
+    pass
diff --git a/apex/virtual/utils.py b/apex/virtual/utils.py
index 226af1b5..8b24bc40 100644
--- a/apex/virtual/utils.py
+++ b/apex/virtual/utils.py
@@ -18,6 +18,8 @@ import xml.etree.ElementTree as ET

 from apex.common import utils as common_utils
 from apex.virtual import configure_vm as vm_lib
+from apex.virtual import exceptions as exc
+from time import sleep
 from virtualbmc import manager as vbmc_lib

 DEFAULT_RAM = 8192
@@ -131,11 +133,39 @@ def host_setup(node):
         chain.insert_rule(rule)
         try:
             subprocess.check_call(['vbmc', 'start', name])
-            logging.debug("Started vbmc for domain {}".format(name))
+            logging.debug("Started VBMC for domain {}".format(name))
         except subprocess.CalledProcessError:
-            logging.error("Failed to start vbmc for {}".format(name))
+            logging.error("Failed to start VBMC for {}".format(name))
             raise
-    logging.debug('vmbcs setup: {}'.format(vbmc_manager.list()))
+
+        logging.info("Checking VBMC {} is up".format(name))
+        is_running = False
+        for x in range(0, 4):
+            logging.debug("Polling to see if VBMC is up, attempt {}".format(x))
+            try:
+                output = subprocess.check_output(['vbmc', 'show', name],
+                                                 stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError:
+                logging.warning('Unable to issue "vbmc show" cmd')
+                continue
+            for line in output.decode('utf-8').split('\n'):
+                if 'status' in line:
+                    if 'running' in line:
+                        is_running = True
+                        break
+                    else:
+                        logging.debug('VBMC status is not "running"')
+                    break
+            if is_running:
+                break
+            sleep(1)
+        if is_running:
+            logging.info("VBMC {} is up and running".format(name))
+        else:
+            logging.error("Failed to verify VBMC is running")
+            raise exc.ApexVirtualException("Failed to bring up vbmc "
+                                           "{}".format(name))
+    logging.debug('VBMCs setup: {}'.format(vbmc_manager.list()))


 def virt_customize(ops, target):
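The host_setup change above is a bounded poll: run "vbmc show", look for a "status | running" row, retry a few times, and give up with a dedicated exception. A generic standalone sketch of the same retry shape (the command and exception here are illustrative, not the Apex implementation):

import logging
import subprocess
from time import sleep


class BringUpError(Exception):
    # Stand-in for a domain-specific exception such as ApexVirtualException
    pass


def wait_for_running(cmd, attempts=4, delay=1):
    # Poll a status command until a 'status ... running' line appears
    for attempt in range(attempts):
        logging.debug("Polling for status, attempt %s", attempt)
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            sleep(delay)
            continue
        for line in output.decode('utf-8').split('\n'):
            if 'status' in line and 'running' in line:
                return
        sleep(delay)
    raise BringUpError("Command {} never reported running".format(cmd))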
-ARCH="6.el7.centos.x86_64.rpm" +ARCH="8.el7.centos.x86_64.rpm" + # don't fail because of missing certificate GETFLAG="--no-check-certificate" @@ -58,19 +59,36 @@ function barometer_pkgs { | cut -d'-' -f9) RDT_SUFFIX=$INTEL_RDT_VER-1.el7.centos.x86_64.rpm - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-devel-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-utils-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_events-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_stats-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-virt-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-$RDT_SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-devel-$RDT_SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-python-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp_agent-$SUFFIX - wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-intel_rdt-$SUFFIX + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/libcollectdclient-devel-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-utils-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-python-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_events-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ovs_stats-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-${RDT_SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/intel-cmt-cat-devel-${RDT_SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-intel_rdt-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-snmp_agent-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-virt-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-sensors-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ceph-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl_json-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-apache-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-write_http-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-mysql-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ping-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-smart-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl_xml-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-disk-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-rrdcached-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-iptables-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-curl-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-ipmi-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-netlink-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-rrdtool-${SUFFIX} + wget $GETFLAG $ARTIFACTS_BAROM/$BAROMETER_VER/collectd-lvm-${SUFFIX} curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py" tar cfz collectd.tar.gz *.rpm 
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 4ef6ef85..3df18e97 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -160,7 +160,7 @@ parameter_defaults:
   ComputeServices:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Sshd
-    #- OS::TripleO::Services::Barometer
+    - OS::TripleO::Services::Barometer
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephOSD
@@ -196,6 +196,8 @@ resource_registry:
   OS::TripleO::Services::SwiftStorage: OS::Heat::None
   #OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
   OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  OS::TripleO::Services::BarbicanApi: "/usr/share/openstack-tripleo-heat-\
+    templates/puppet/services/barbican-api.yaml"
   # Extra Config
   OS::TripleO::ComputeExtraConfigPre: OS::Heat::None
   OS::TripleO::ControllerExtraConfigPre: OS::Heat::None
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 1b7843a0..7ed57d00 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -48,6 +48,7 @@ qemu-img resize overcloud-full_build.qcow2 +1500M
 # installing forked apex-puppet-tripleo
 # upload neutron port data plane status
 LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
+    --run-command "curl -f https://trunk.rdoproject.org/centos7-pike/delorean-deps.repo > /etc/yum.repos.d/delorean-deps.repo" \
    --run-command "xfs_growfs /dev/sda" \
    --upload ${BUILD_DIR}/apex-puppet-tripleo.tar.gz:/etc/puppet/modules \
    --run-command "cd /etc/puppet/modules && rm -rf tripleo && tar xzf apex-puppet-tripleo.tar.gz" \
@@ -66,6 +67,7 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
    --upload ${BUILD_ROOT}/patches/neutron_openstackclient_dps.patch:/usr/lib/python2.7/site-packages/ \
    --upload ${BUILD_ROOT}/patches/puppet-neutron-add-sfc.patch:/usr/share/openstack-puppet/modules/neutron/ \
    --upload ${BUILD_ROOT}/patches/congress-parallel-execution.patch:/usr/lib/python2.7/site-packages/ \
+    --install openstack-utils \
    -a overcloud-full_build.qcow2
 #    --upload ${BUILD_ROOT}/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch:/usr/share/openstack-puppet/modules/neutron/ \
 #    --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-vpp-ml2-type_drivers-setting.patch" \
@@ -146,8 +148,7 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
    -a overcloud-full_build.qcow2

  # upload and install barometer packages
-  # FIXME collectd pkgs conflict during upgrade to Pike
-  # barometer_pkgs overcloud-full_build.qcow2
+  barometer_pkgs overcloud-full_build.qcow2

 fi # end x86_64 specific items
diff --git a/build/patches/tacker-client-fix-symmetrical.patch b/build/patches/tacker-client-fix-symmetrical.patch
deleted file mode 100644
index eab01a62..00000000
--- a/build/patches/tacker-client-fix-symmetrical.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 9630f711a88a69480c44d6ac21244d9a8b0d92c7 Mon Sep 17 00:00:00 2001
-From: Tim Rozet <trozet@redhat.com>
-Date: Fri, 18 Aug 2017 16:22:23 -0400
-Subject: [PATCH] Fixes passing boolean as string for symmetrical
-
-Bug where 'True'/'False' strings were being passed in REST to Tacker
-service which would end up throwing an exception because the DB type for
-symmetrical is boolean/small int. This converts it to boolean in the
-client.
-
-Closes-Bug: 1711550
-
-Change-Id: Ide2aeab73b1dd88beb6e491e6b07cdee9fb7e48a
-Signed-off-by: Tim Rozet <trozet@redhat.com>
----
-
-diff --git a/tackerclient/tacker/v1_0/nfvo/vnffg.py b/tackerclient/tacker/v1_0/nfvo/vnffg.py
-index 729cd19..92b98ed 100644
---- a/tackerclient/tacker/v1_0/nfvo/vnffg.py
-+++ b/tackerclient/tacker/v1_0/nfvo/vnffg.py
-@@ -97,7 +97,9 @@
-             help=_('List of logical VNFD name to VNF instance name mapping. '
-                    'Example: VNF1:my_vnf1,VNF2:my_vnf2'))
-         parser.add_argument(
--            '--symmetrical', metavar='{True,False}',
-+            '--symmetrical',
-+            action='store_true',
-+            default=False,
-             help=_('Should a reverse path be created for the NFP'))
-         parser.add_argument(
-             '--param-file',
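The patch being deleted here had switched --symmetrical from a free-form string to a real boolean flag. For reference, the general argparse pattern it applied, as a runnable sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--symmetrical',
    action='store_true',   # presence of the flag means True
    default=False,
    help='Should a reverse path be created for the NFP')

print(parser.parse_args([]).symmetrical)                 # False
print(parser.parse_args(['--symmetrical']).symmetrical)  # True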
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index dde13a78..f8226e43 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -11,13 +11,13 @@ URL: https://gerrit.opnfv.org/gerrit/apex.git
 Source0: opnfv-apex-common.tar.gz

 BuildArch: noarch
-BuildRequires: python-docutils python34-devel
+BuildRequires: python34-docutils python34-devel
 Requires: opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools python34-libvirt
 Requires: initscripts net-tools iputils iproute iptables python34 python34-yaml python34-jinja2 python3-ipmi python34-virtualbmc
 Requires: ipxe-roms-qemu >= 20160127-1
 Requires: libvirt-devel ansible
 Requires: python34-iptables python34-cryptography python34-pbr
-Requires: python34-GitPython python34-pygerrit2
+Requires: python34-GitPython python34-pygerrit2 python34-distro

 %description
 Scripts for OPNFV deployment using Apex
@@ -92,6 +92,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-fdio_dvr-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-fdio_dvr-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-l2gw-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-l2gw-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
@@ -116,6 +118,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %doc %{_docdir}/opnfv/inventory.yaml.example

 %changelog
+* Wed Feb 14 2018 Tim Rozet <trozet@redhat.com> - 6.0-1
+  Fix docutils requirement and add python34-distro
 * Wed Nov 29 2017 Tim Rozet <trozet@redhat.com> - 6.0-0
   Bump version for Fraser
 * Wed Oct 25 2017 Tim Rozet <trozet@redhat.com> - 5.0-9
diff --git a/build/undercloud.sh b/build/undercloud.sh
index 0cfb6737..6bb8ac90 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -59,8 +59,6 @@ LIBGUESTFS_BACKEND=direct $VIRT_CUSTOMIZE \
    --upload ${CACHE_DIR}/${calipso_script}:/root/ \
    --install "libguestfs-tools" \
    --install "python-tackerclient" \
-    --upload ${BUILD_ROOT}/patches/tacker-client-fix-symmetrical.patch:/usr/lib/python2.7/site-packages/ \
-    --run-command "cd usr/lib/python2.7/site-packages/ && patch -p1 < tacker-client-fix-symmetrical.patch" \
    --run-command "yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo" \
    --install yum-utils,lvm2,device-mapper-persistent-data \
    -a undercloud_build.qcow2
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index a5e9e960..ab3b0a37 100644
--- a/config/deploy/deploy_settings.yaml
+++ b/config/deploy/deploy_settings.yaml
@@ -7,9 +7,12 @@
 # If ha_enabled is false, there will only be one controller.
 global_params:
   ha_enabled: true
-  # introspect defaults to True, if set false the introspection process will
-  # be skipped at deploy time.
+  # introspect defaults to True,
+  # Enables/disables the introspection process at deploy time.
   introspect: true
+  # ipxe defaults to True
+  # Enables/disables the use of ipxe for provisioning
+  ipxe: true

 deploy_options:
   # Which SDN controller to use. Valid options are 'opendaylight', 'onos',
diff --git a/config/deploy/os-odl-l2gw-ha.yaml b/config/deploy/os-odl-l2gw-ha.yaml
new file mode 100644
index 00000000..a22da3bb
--- /dev/null
+++ b/config/deploy/os-odl-l2gw-ha.yaml
@@ -0,0 +1,12 @@
+---
+global_params:
+  ha_enabled: true
+
+deploy_options:
+  sdn_controller: opendaylight
+  odl_version: nitrogen
+  tacker: false
+  congress: true
+  sfc: false
+  vpn: false
+  l2gw: true
diff --git a/config/deploy/os-odl-l2gw-noha.yaml b/config/deploy/os-odl-l2gw-noha.yaml
new file mode 100644
index 00000000..ae5218aa
--- /dev/null
+++ b/config/deploy/os-odl-l2gw-noha.yaml
@@ -0,0 +1,12 @@
+---
+global_params:
+  ha_enabled: false
+
+deploy_options:
+  sdn_controller: opendaylight
+  odl_version: nitrogen
+  tacker: false
+  congress: true
+  sfc: false
+  vpn: false
+  l2gw: true
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index fe11a9b5..ffe3a18e 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'

+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 7dddf343..176bc7ca 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'

+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index 345dbbde..29cd193d 100644
--- a/config/network/network_settings_vlans.yaml
+++ b/config/network/network_settings_vlans.yaml
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'

+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
diff --git a/config/network/network_settings_vpp.yaml b/config/network/network_settings_vpp.yaml
index 2f6bba5e..a40158ea 100644
--- a/config/network/network_settings_vpp.yaml
+++ b/config/network/network_settings_vpp.yaml
@@ -57,6 +57,10 @@ syslog:
   server: 10.128.1.24
   transport: 'tcp'

+# http(s) proxy settings added to /etc/environment of uc and oc nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
 # Common network settings
 networks:
   # Admin configuration (pxe and jumpstart)
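The new scenario files above are plain YAML consumed by DeploySettings. A quick sketch of reading one and checking the new l2gw knob with PyYAML (the path is an example relative to a checked-out repo):

import yaml

with open('config/deploy/os-odl-l2gw-noha.yaml') as f:
    settings = yaml.safe_load(f)

print(settings['global_params']['ha_enabled'])   # False
print(settings['deploy_options'].get('l2gw'))    # True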
diff --git a/docs/release/installation/architecture.rst b/docs/release/installation/architecture.rst
index 079c26d5..b8db7c86 100644
--- a/docs/release/installation/architecture.rst
+++ b/docs/release/installation/architecture.rst
@@ -159,7 +159,11 @@ issues per scenario. The following scenarios correspond to a supported
 | os-odl-bgpvpn-ha        | SDNVPN      | Yes           |
 +-------------------------+-------------+---------------+
 | os-odl-bgpvpn-noha      | SDNVPN      | Yes           |
 +-------------------------+-------------+---------------+
+| os-odl-l2gw-ha          | Apex        | No            |
++-------------------------+-------------+---------------+
+| os-odl-l2gw-noha        | Apex        | No            |
++-------------------------+-------------+---------------+
 | os-odl-sfc-ha           | SFC         | No            |
 +-------------------------+-------------+---------------+
 | os-odl-sfc-noha         | SFC         | Yes           |
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
index c0e1cd35..e9ce8754 100644
--- a/lib/ansible/playbooks/configure_undercloud.yml
+++ b/lib/ansible/playbooks/configure_undercloud.yml
@@ -32,6 +32,18 @@
         regexp: 'Defaults\s*requiretty'
         state: absent
       become: yes
+    - lineinfile:
+        path: /etc/environment
+        regexp: '^http_proxy'
+        line: "http_proxy={{ http_proxy }}"
+      become: yes
+      when: http_proxy
+    - lineinfile:
+        path: /etc/environment
+        regexp: '^https_proxy'
+        line: "https_proxy={{ https_proxy }}"
+      become: yes
+      when: https_proxy
    - name: openstack-configs undercloud
      shell: openstack-config --set undercloud.conf DEFAULT {{ item }}
      with_items: "{{ undercloud_config }}"
@@ -39,9 +51,6 @@
      shell: openstack-config --set /etc/ironic/ironic.conf {{ item }}
      become: yes
      with_items: "{{ ironic_config }}"
-    - name: openstack-configs undercloud aarch64
-      shell: openstack-config --set undercloud.conf DEFAULT ipxe_enabled false
-      when: aarch64
    - lineinfile:
        path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
        regexp: '_link_ip_address_pxe_configs'
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
index 545ee33d..fb1da46f 100644
--- a/lib/ansible/playbooks/deploy_dependencies.yml
+++ b/lib/ansible/playbooks/deploy_dependencies.yml
@@ -7,6 +7,7 @@
      with_items:
        - python-lxml
        - libvirt-python
+        - libguestfs-tools
    - sysctl:
        name: net.ipv4.ip_forward
        state: present
@@ -72,6 +73,12 @@
      when:
        - ansible_architecture == "x86_64"
        - "'Y' not in nested_result.stdout"
+    - modprobe:
+        name: ip6_tables
+        state: present
+    - modprobe:
+        name: ip_tables
+        state: present
    - name: Generate SSH key for root if missing
      shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
    - name: Check that /u/l/python3.4/site-packages/virtualbmc/vbmc.py exists
diff --git a/lib/ansible/playbooks/undercloud_aarch64.yml b/lib/ansible/playbooks/undercloud_aarch64.yml
index 040831c5..ddaf1b04 100644
--- a/lib/ansible/playbooks/undercloud_aarch64.yml
+++ b/lib/ansible/playbooks/undercloud_aarch64.yml
@@ -23,6 +23,8 @@
        dest: /tftpboot/EFI/centos/grub.cfg
        mode: 0644
    - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi'
+    - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template \$pybasedir/drivers/modules/pxe_grub_config.template'
+
    - systemd:
        name: openstack-ironic-conductor
        state: restarted
diff --git a/requirements.txt b/requirements.txt
index 0326a8cb..18bd020f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,3 +11,4 @@ PyYAML
 Jinja2>=2.8
 GitPython
 pygerrit2
+distro
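The two lineinfile tasks added to configure_undercloud.yml above are idempotent "replace or append" edits of /etc/environment. For reference, the same behaviour sketched in Python under that assumption (use a scratch copy of the file when experimenting, not the real /etc/environment):

import re


def set_env_var(path, name, value):
    # Replace an existing 'name=...' line or append one, like lineinfile
    with open(path) as f:
        lines = f.read().splitlines()
    pattern = re.compile(r'^{}='.format(re.escape(name)))
    new_line = '{}={}'.format(name, value)
    for i, line in enumerate(lines):
        if pattern.match(line):
            lines[i] = new_line
            break
    else:
        lines.append(new_line)
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')


# Example, assuming 'environment.copy' already exists:
# set_env_var('environment.copy', 'http_proxy', 'http://proxy.server:8080')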