-rw-r--r--  apex/deploy.py | 17
-rw-r--r--  apex/overcloud/config.py | 12
-rw-r--r--  apex/overcloud/overcloud_deploy.py | 132
-rw-r--r--  apex/settings/deploy_settings.py | 9
-rw-r--r--  apex/tests/test_apex_undercloud.py | 196
-rw-r--r--  apex/tests/test_apex_virtual_configure_vm.py | 102
-rw-r--r--  apex/tests/test_apex_virtual_utils.py | 101
-rw-r--r--  apex/undercloud/undercloud.py | 2
-rw-r--r--  apex/virtual/utils.py (renamed from apex/virtual/virtual_utils.py) | 8
-rw-r--r--  build/Makefile | 2
-rwxr-xr-x  build/build_quagga.sh | 2
-rw-r--r--  build/enable_rt_kvm.yaml | 4
-rw-r--r--  build/first-boot.yaml | 63
-rw-r--r--  build/opnfv-environment.yaml | 37
-rwxr-xr-x  build/overcloud-full.sh | 35
-rwxr-xr-x  build/overcloud-opendaylight.sh | 16
-rw-r--r--  build/patches/puppet-neutron-add-external_network_bridge-option.patch | 106
-rw-r--r--  build/rpm_specs/networking-vpp.spec | 2
-rw-r--r--  build/rpm_specs/opnfv-apex-common.spec | 7
-rwxr-xr-x  build/undercloud.sh | 13
-rw-r--r--  build/variables.sh | 21
-rw-r--r--  config/deploy/os-nosdn-calipso-noha.yaml | 9
-rw-r--r--  config/deploy/os-odl-fdio-ha.yaml | 2
-rw-r--r--  config/deploy/os-odl-fdio-noha.yaml | 2
-rw-r--r--  config/deploy/os-odl-fdio_dvr-ha.yaml (renamed from config/deploy/os-odl-fdio-dvr-ha.yaml) | 4
-rw-r--r--  config/deploy/os-odl-fdio_dvr-noha.yaml (renamed from config/deploy/os-odl-fdio-dvr-noha.yaml) | 4
-rw-r--r--  lib/ansible/playbooks/post_deploy_undercloud.yml | 38
-rw-r--r--  tox.ini | 2
28 files changed, 816 insertions, 132 deletions
diff --git a/apex/deploy.py b/apex/deploy.py
index a0561384..5ec0f7fa 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -20,7 +20,7 @@ import sys
import tempfile
import apex.virtual.configure_vm as vm_lib
-import apex.virtual.virtual_utils as virt_utils
+import apex.virtual.utils as virt_utils
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
@@ -58,6 +58,14 @@ def validate_cross_settings(deploy_settings, net_settings, inventory):
raise ApexDeployException("Setting a DPDK based dataplane requires"
"a dedicated NIC for tenant network")
+ if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
+ if deploy_settings['deploy_options']['dataplane'] != 'fdio':
+ raise ApexDeployException("odl_vpp_routing_node should only be set"
+ "when dataplane is set to fdio")
+ if deploy_settings['deploy_options'].get('dvr') is True:
+ raise ApexDeployException("odl_vpp_routing_node should only be set"
+ "when dvr is not enabled")
+
# TODO(trozet): add more checks here like RAM for ODL, etc
# check if odl_vpp_netvirt is true and vpp is set
# Check if fdio and nosdn:
@@ -336,8 +344,8 @@ def main():
overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
root_pw=root_pw)
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
- overcloud_deploy.prep_env(deploy_settings, net_settings, opnfv_env,
- net_env_target, APEX_TEMP_DIR)
+ overcloud_deploy.prep_env(deploy_settings, net_settings, inventory,
+ opnfv_env, net_env_target, APEX_TEMP_DIR)
overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
inventory, APEX_TEMP_DIR,
args.virtual, args.env_file)
@@ -401,6 +409,9 @@ def main():
deploy_vars['congress'] = True
else:
deploy_vars['congress'] = False
+ deploy_vars['calipso'] = ds_opts.get('calipso', False)
+ deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
+ 'installer_vm']['ip']
# TODO(trozet): this is probably redundant with getting external
# network info from undercloud.py
if 'external' in net_settings.enabled_network_list:
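
Note: the validate_cross_settings() hunk above adds a new rule: odl_vpp_routing_node is only meaningful with the fdio dataplane and cannot be combined with dvr. A minimal standalone sketch of that rule, assuming the same deploy_settings structure (the exception class and helper name here are illustrative stand-ins for the real code imported in deploy.py):

class ApexDeployException(Exception):
    """Stand-in for apex.common.exceptions.ApexDeployException."""


def check_vpp_routing_node(deploy_options):
    # hypothetical helper; mirrors the checks added above
    if 'odl_vpp_routing_node' in deploy_options:
        if deploy_options.get('dataplane') != 'fdio':
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dataplane is set to fdio")
        if deploy_options.get('dvr') is True:
            raise ApexDeployException("odl_vpp_routing_node should only be "
                                      "set when dvr is not enabled")


# rejected: routing node pinning together with dvr
# check_vpp_routing_node({'dataplane': 'fdio', 'dvr': True,
#                         'odl_vpp_routing_node': 'overcloud-novacompute-0'})

This is why the os-odl-fdio_dvr-*.yaml scenarios later in this patch drop odl_vpp_routing_node in favour of dvr: true.
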
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
index e48b254f..a7f7d848 100644
--- a/apex/overcloud/config.py
+++ b/apex/overcloud/config.py
@@ -44,10 +44,14 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
ovs_dpdk_br = ''
if ds['dataplane'] == 'fdio':
nets['tenant']['nic_mapping'][role]['phys_type'] = 'vpp_interface'
- if ds['sdn_controller'] == 'opendaylight' and role == 'compute':
- nets['external'][0]['nic_mapping'][role]['phys_type'] = \
- 'vpp_interface'
- ext_net = 'vpp_interface'
+ if ds['sdn_controller'] == 'opendaylight':
+ if role == 'compute':
+ nets['external'][0]['nic_mapping'][role]['phys_type'] = \
+ 'vpp_interface'
+ ext_net = 'vpp_interface'
+ if ds.get('dvr') is True:
+ nets['admin']['nic_mapping'][role]['phys_type'] = \
+ 'linux_bridge'
elif ds['dataplane'] == 'ovs_dpdk':
ovs_dpdk_br = 'br-phy'
if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
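
For clarity, the nic template hunk above selects phys_type per network roughly as follows when dataplane is fdio and the SDN controller is opendaylight (illustrative sketch only; the real code mutates the network settings' nic_mapping in place):

def fdio_odl_phys_types(role, dvr_enabled=False):
    # hypothetical helper summarising the branch added above
    phys = {'tenant': 'vpp_interface'}
    if role == 'compute':
        phys['external'] = 'vpp_interface'
    if dvr_enabled:
        # new with this patch: dvr moves the admin network onto a linux bridge
        phys['admin'] = 'linux_bridge'
    return phys


assert fdio_odl_phys_types('compute', dvr_enabled=True) == {
    'tenant': 'vpp_interface',
    'external': 'vpp_interface',
    'admin': 'linux_bridge',
}
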
diff --git a/apex/overcloud/overcloud_deploy.py b/apex/overcloud/overcloud_deploy.py
index f7a8b954..e3248536 100644
--- a/apex/overcloud/overcloud_deploy.py
+++ b/apex/overcloud/overcloud_deploy.py
@@ -20,7 +20,7 @@ import time
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
-from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
@@ -35,6 +35,7 @@ SDN_FILE_MAP = {
'gluon': 'gluon.yaml',
'vpp': {
'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
+ 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
},
'default': 'neutron-opendaylight.yaml',
@@ -92,6 +93,34 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
return env_list
+def _get_node_counts(inventory):
+ """
+ Return numbers of controller and compute nodes in inventory
+
+ :param inventory: node inventory data structure
+ :return: number of controller and compute nodes in inventory
+ """
+ if not inventory:
+ raise ApexDeployException("Empty inventory")
+
+ nodes = inventory['nodes']
+ num_control = 0
+ num_compute = 0
+ for node in nodes:
+ if node['capabilities'] == 'profile:control':
+ num_control += 1
+ elif node['capabilities'] == 'profile:compute':
+ num_compute += 1
+ else:
+ # TODO(trozet) do we want to allow capabilities to not exist?
+ logging.error("Every node must include a 'capabilities' key "
+ "tagged with either 'profile:control' or "
+ "'profile:compute'")
+ raise ApexDeployException("Node missing capabilities "
+ "key: {}".format(node))
+ return num_control, num_compute
+
+
def create_deploy_cmd(ds, ns, inv, tmp_dir,
virtual, env_file='opnfv-environment.yaml'):
@@ -100,7 +129,6 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
ds_opts = ds['deploy_options']
deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
- # TODO(trozet): make sure rt kvm file is in tht dir
for k, v in OTHER_FILE_MAP.items():
if k in ds_opts and ds_opts[k]:
deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
@@ -118,21 +146,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
else:
deploy_options.append('baremetal-environment.yaml')
- nodes = inv['nodes']
- num_control = 0
- num_compute = 0
- for node in nodes:
- if 'profile:control' in node['capabilities']:
- num_control += 1
- elif 'profile:compute' in node['capabilities']:
- num_compute += 1
- else:
- # TODO(trozet) do we want to allow capabilities to not exist?
- logging.error("Every node must include a 'capabilities' key "
- "tagged with either 'profile:control' or "
- "'profile:compute'")
- raise ApexDeployException("Node missing capabilities "
- "key: {}".format(node))
+ num_control, num_compute = _get_node_counts(inv)
if num_control == 0 or num_compute == 0:
logging.error("Detected 0 control or compute nodes. Control nodes: "
"{}, compute nodes{}".format(num_control, num_compute))
@@ -230,18 +244,33 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
virt_cmds.append(
{con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
"-p1 < neutron-patch-NSDriver.patch"})
+ if sdn is False:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
+ {con.VIRT_RUN_CMD: "yum install -y "
+ "/root/nosdn_vpp_rpms/*.rpm"}
+ ])
if sdn == 'opendaylight':
if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
virt_cmds.extend([
{con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])},
{con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
"/root/puppet-opendaylight-"
"{}.tar.gz".format(ds_opts['odl_version'])}
])
+ if ds_opts['odl_version'] == 'master':
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
+ ds_opts['odl_version'])}
+ ])
+ else:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+ ds_opts['odl_version'])}
+ ])
+
elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
and ds_opts['odl_vpp_netvirt']:
virt_cmds.extend([
@@ -285,15 +314,15 @@ def make_ssh_key():
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
- pub_key = re.sub('ssh-rsa\s*', '', public_key.decode('utf-8'))
- return private_key.decode('utf-8'), pub_key
+ return private_key.decode('utf-8'), public_key.decode('utf-8')
-def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
+def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
"""
Creates modified opnfv/network environments for deployment
:param ds: deploy settings
:param ns: network settings
+ :param inv: node inventory
:param opnfv_env: file path for opnfv-environment file
:param net_env: file path for network-environment file
:param tmp_dir: Apex tmp dir
@@ -346,15 +375,18 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
if 'CloudDomain' in line:
output_line = " CloudDomain: {}".format(ns['domain_name'])
elif 'replace_private_key' in line:
- output_line = " key: '{}'".format(private_key)
+ output_line = " private_key: |\n"
+ key_out = ''
+ for line in private_key.splitlines():
+ key_out += " {}\n".format(line)
+ output_line += key_out
elif 'replace_public_key' in line:
- output_line = " key: '{}'".format(public_key)
+ output_line = " public_key: '{}'".format(public_key)
if ds_opts['sdn_controller'] == 'opendaylight' and \
- 'odl_vpp_routing_node' in ds_opts and ds_opts[
- 'odl_vpp_routing_node'] != 'dvr':
+ 'odl_vpp_routing_node' in ds_opts:
if 'opendaylight::vpp_routing_node' in line:
- output_line = (" opendaylight::vpp_routing_node: ${}.${}"
+ output_line = (" opendaylight::vpp_routing_node: {}.{}"
.format(ds_opts['odl_vpp_routing_node'],
ns['domain_name']))
elif 'ControllerExtraConfig' in line:
@@ -373,6 +405,17 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
if 'NeutronVPPAgentPhysnets' in line:
output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
format(tenant_ctrl_nic))
+ elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
+ 'dvr') is True:
+ if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
+ output_line = ''
+ elif 'NeutronDhcpAgentsPerNetwork' in line:
+ num_control, num_compute = _get_node_counts(inv)
+ output_line = (" NeutronDhcpAgentsPerNetwork: {}"
+ .format(num_compute))
+ elif 'ComputeServices' in line:
+ output_line = (" ComputeServices:\n"
+ " - OS::TripleO::Services::NeutronDhcpAgent")
if perf:
for role in 'NovaCompute', 'Controller':
@@ -402,10 +445,10 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
if kernel_args:
output_line = " ComputeKernelArgs: '{}'".\
format(kernel_args)
- elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
+ if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
for k, v in OVS_PERF_MAP.items():
if k in line and v in perf_ovs_comp:
- output_line = " {}: {}".format(k, perf_ovs_comp[v])
+ output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
print(output_line)
@@ -414,23 +457,20 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
# Modify Network environment
for line in fileinput.input(net_env, inplace=True):
line = line.strip('\n')
- if ds_opts['dataplane'] == 'ovs_dpdk':
- if 'ComputeExtraConfigPre' in line:
- print(' OS::TripleO::ComputeExtraConfigPre: '
- './ovs-dpdk-preconfig.yaml')
- else:
- print(line)
- elif perf and perf_kern_comp:
- if 'resource_registry' in line:
- print("resource_registry:\n"
- " OS::TripleO::NodeUserData: first-boot.yaml")
- elif 'NovaSchedulerDefaultFilters' in line:
- print(" NovaSchedulerDefaultFilters: 'RamFilter,"
- "ComputeFilter,AvailabilityZoneFilter,"
- "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
- "NUMATopologyFilter'")
- else:
- print(line)
+ if 'ComputeExtraConfigPre' in line and \
+ ds_opts['dataplane'] == 'ovs_dpdk':
+ print(' OS::TripleO::ComputeExtraConfigPre: '
+ './ovs-dpdk-preconfig.yaml')
+ elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
+ 'resource_registry' in line:
+ print("resource_registry:\n"
+ " OS::TripleO::NodeUserData: first-boot.yaml")
+ elif perf and perf_kern_comp and \
+ 'NovaSchedulerDefaultFilters' in line:
+ print(" NovaSchedulerDefaultFilters: 'RamFilter,"
+ "ComputeFilter,AvailabilityZoneFilter,"
+ "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
+ "NUMATopologyFilter'")
else:
print(line)
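
Usage sketch for the _get_node_counts() helper factored out above. The inventory layout here is illustrative; in Apex it comes from the inventory yaml, and each node entry carries a 'capabilities' string:

inventory = {'nodes': [
    {'capabilities': 'profile:control'},
    {'capabilities': 'profile:control'},
    {'capabilities': 'profile:compute'},
]}

# num_control, num_compute = _get_node_counts(inventory)   # -> (2, 1)
#
# An empty inventory, or a node whose capabilities value is neither
# 'profile:control' nor 'profile:compute', raises ApexDeployException.
# Both create_deploy_cmd() and the new dvr branch of prep_env() (which uses
# the compute count for NeutronDhcpAgentsPerNetwork) rely on this helper.
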
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index 793e43ac..c0594056 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -31,13 +31,15 @@ OPT_DEPLOY_SETTINGS = ['performance',
'yardstick',
'dovetail',
'odl_vpp_routing_node',
+ 'dvr',
'odl_vpp_netvirt',
- 'barometer']
+ 'barometer',
+ 'calipso']
VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
-VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'master']
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
class DeploySettings(dict):
@@ -110,6 +112,9 @@ class DeploySettings(dict):
"Invalid ODL version: {}".format(self[deploy_options][
'odl_version']))
+ if self['deploy_options']['odl_version'] == 'oxygen':
+ self['deploy_options']['odl_version'] = 'master'
+
if 'performance' in deploy_options:
if not isinstance(deploy_options['performance'], dict):
raise DeploySettingsException("Performance deploy_option"
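
The odl_version handling above now accepts 'oxygen' but immediately rewrites it to 'master', matching the master/oxygen devel repository added to overcloud-opendaylight.sh later in this patch. A minimal sketch of the normalisation (illustrative helper, not the actual DeploySettings code path):

VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']


def normalize_odl_version(version):
    if version not in VALID_ODL_VERSIONS:
        raise ValueError("Invalid ODL version: {}".format(version))
    # oxygen artifacts are currently pulled from the master devel repo
    return 'master' if version == 'oxygen' else version


assert normalize_odl_version('oxygen') == 'master'
assert normalize_odl_version('carbon') == 'carbon'
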
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
new file mode 100644
index 00000000..9458bf9f
--- /dev/null
+++ b/apex/tests/test_apex_undercloud.py
@@ -0,0 +1,196 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import os
+import subprocess
+import unittest
+
+from mock import patch
+from mock import MagicMock
+
+from apex.common import constants
+from apex.undercloud.undercloud import Undercloud
+from apex.undercloud.undercloud import ApexUndercloudException
+
+from nose.tools import (
+ assert_regexp_matches,
+ assert_raises,
+ assert_true,
+ assert_equal)
+
+
+class TestUndercloud(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_init(self, mock_get_vm, mock_create):
+ Undercloud('img_path', 'tplt_path')
+ mock_create.assert_called()
+
+ @patch.object(Undercloud, '_get_vm', return_value=object())
+ @patch.object(Undercloud, 'create')
+ def test_init_uc_exists(self, mock_get_vm, mock_create):
+ assert_raises(ApexUndercloudException,
+ Undercloud, 'img_path', 'tplt_path')
+
+ @patch('apex.undercloud.undercloud.libvirt.open')
+ @patch.object(Undercloud, 'create')
+ def test_get_vm_exists(self, mock_create, mock_libvirt):
+ assert_raises(ApexUndercloudException,
+ Undercloud, 'img_path', 'tplt_path')
+
+ @patch('apex.undercloud.undercloud.libvirt.open')
+ @patch.object(Undercloud, 'create')
+ def test_get_vm_not_exists(self, mock_create, mock_libvirt):
+ conn = mock_libvirt.return_value
+ conn.lookupByName.side_effect = libvirt.libvirtError('defmsg')
+ Undercloud('img_path', 'tplt_path')
+
+ @patch('apex.undercloud.undercloud.vm_lib')
+ @patch.object(Undercloud, 'inject_auth', return_value=None)
+ @patch.object(Undercloud, 'setup_volumes', return_value=None)
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ def test_create(self, mock_get_vm, mock_setup_vols,
+ mock_inject_auth, mock_vm_lib):
+ Undercloud('img_path', 'tplt_path', external_network=True)
+ mock_inject_auth.assert_called()
+ mock_setup_vols.assert_called()
+ mock_inject_auth.assert_called()
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_set_ip(self, mock_get_vm, mock_create):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.vm = MagicMock()
+ if_addrs = {'item1': {'addrs': [{'type': libvirt.VIR_IP_ADDR_TYPE_IPV4,
+ 'addr': 'ipaddress'}]},
+ 'item2': {'addrs': [{'type': libvirt.VIR_IP_ADDR_TYPE_IPV4,
+ 'addr': 'ipaddress'}]}}
+ uc.vm.interfaceAddresses.return_value = if_addrs
+ assert_true(uc._set_ip())
+
+ @patch('apex.undercloud.undercloud.time.sleep')
+ @patch.object(Undercloud, '_set_ip', return_value=False)
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_start(self, mock_create, mock_get_vm,
+ mock_set_ip, mock_time):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.vm = MagicMock()
+ uc.vm.isActive.return_value = False
+ mock_set_ip.return_value = True
+ uc.start()
+
+ @patch('apex.undercloud.undercloud.time.sleep')
+ @patch.object(Undercloud, '_set_ip', return_value=False)
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_start_no_ip(self, mock_create, mock_get_vm,
+ mock_set_ip, mock_time):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.vm = MagicMock()
+ uc.vm.isActive.return_value = True
+ mock_set_ip.return_value = False
+ assert_raises(ApexUndercloudException, uc.start)
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_configure(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ ns = MagicMock()
+ uc.configure(ns, 'playbook', '/tmp/dir')
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_configure_raises(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ ns = MagicMock()
+ subps_err = subprocess.CalledProcessError(1, 'cmd')
+ mock_utils.run_ansible.side_effect = subps_err
+ assert_raises(ApexUndercloudException,
+ uc.configure, ns, 'playbook', '/tmp/dir')
+
+ @patch('apex.undercloud.undercloud.os.remove')
+ @patch('apex.undercloud.undercloud.os.path')
+ @patch('apex.undercloud.undercloud.shutil')
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_setup_vols(self, mock_get_vm, mock_create,
+ mock_shutil, mock_os_path, mock_os_remove):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ mock_os_path.isfile.return_value = True
+ mock_os_path.exists.return_value = True
+ uc.setup_volumes()
+ for img_file in ('overcloud-full.vmlinuz', 'overcloud-full.initrd',
+ 'undercloud.qcow2'):
+ src_img = os.path.join(uc.image_path, img_file)
+ dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
+ mock_shutil.copyfile.assert_called_with(src_img, dest_img)
+
+ @patch('apex.undercloud.undercloud.os.path')
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_setup_vols_raises(self, mock_get_vm, mock_create, mock_os_path):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ mock_os_path.isfile.return_value = False
+ assert_raises(ApexUndercloudException, uc.setup_volumes)
+
+ @patch('apex.undercloud.undercloud.virt_utils')
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_inject_auth(self, mock_get_vm, mock_create, mock_vutils):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.root_pw = 'test'
+ uc.inject_auth()
+ test_ops = [{'--root-password': 'password:test'},
+ {'--run-command': 'mkdir -p /root/.ssh'},
+ {'--upload':
+ '/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'},
+ {'--run-command': 'chmod 600 /root/.ssh/authorized_keys'},
+ {'--run-command': 'restorecon /root/.ssh/authorized_keys'},
+ {'--run-command':
+ 'cp /root/.ssh/authorized_keys /home/stack/.ssh/'},
+ {'--run-command':
+ 'chown stack:stack /home/stack/.ssh/authorized_keys'},
+ {'--run-command':
+ 'chmod 600 /home/stack/.ssh/authorized_keys'}]
+ mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_generate_config(self, mock_get_vm, mock_create):
+ ns_net = MagicMock()
+ ns_net.__getitem__.side_effect = \
+ lambda i: '1234/24' if i is 'cidr' else MagicMock()
+ ns = {'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin': ns_net,
+ 'external': [ns_net]}}
+
+ Undercloud('img_path', 'tplt_path').generate_config(ns)
diff --git a/apex/tests/test_apex_virtual_configure_vm.py b/apex/tests/test_apex_virtual_configure_vm.py
new file mode 100644
index 00000000..228e06d6
--- /dev/null
+++ b/apex/tests/test_apex_virtual_configure_vm.py
@@ -0,0 +1,102 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import unittest
+
+from mock import patch
+
+from apex.virtual.configure_vm import generate_baremetal_macs
+from apex.virtual.configure_vm import create_vm_storage
+from apex.virtual.configure_vm import create_vm
+
+from nose.tools import (
+ assert_regexp_matches,
+ assert_raises,
+ assert_equal)
+
+
+class TestVirtualConfigureVM(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_generate_baremetal_macs(self):
+ assert_regexp_matches(generate_baremetal_macs()[0],
+ '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$')
+
+ def test_generate_baremetal_macs_alot(self):
+ assert_equal(len(generate_baremetal_macs(127)), 127)
+
+ def test_generate_baremetal_macs_too_many(self):
+ assert_raises(ValueError, generate_baremetal_macs, 128)
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ pool = conn.storagePoolLookupByName.return_value
+ pool.isActive.return_value = 0
+ # execute
+ create_vm_storage('test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage_pool_none(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value = None
+ # execute
+ assert_raises(Exception, create_vm_storage, 'test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage_libvirt_error(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ pool = conn.storagePoolLookupByName.return_value
+ pool.storageVolLookupByName.side_effect = libvirt.libvirtError('ermsg')
+ # execute
+ assert_raises(libvirt.libvirtError, create_vm_storage, 'test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage_new_vol_none(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ pool = conn.storagePoolLookupByName.return_value
+ pool.createXML.return_value = None
+ # execute
+ assert_raises(Exception, create_vm_storage, 'test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ @patch('apex.virtual.configure_vm.create_vm_storage')
+ def test_create_vm(self, mock_create_vm_storage,
+ mock_libvirt_open):
+ create_vm('test', 'image', default_network=True,
+ direct_boot=True, kernel_args='test', template_dir='./build')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ @patch('apex.virtual.configure_vm.create_vm_storage')
+ def test_create_vm_x86_64(self, mock_create_vm_storage,
+ mock_libvirt_open):
+ create_vm('test', 'image', arch='x86_64', template_dir='./build')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ @patch('apex.virtual.configure_vm.create_vm_storage')
+ def test_create_vm_aarch64(self, mock_create_vm_storage,
+ mock_libvirt_open):
+ create_vm('test', 'image', arch='aarch64', template_dir='./build')
diff --git a/apex/tests/test_apex_virtual_utils.py b/apex/tests/test_apex_virtual_utils.py
new file mode 100644
index 00000000..643069f3
--- /dev/null
+++ b/apex/tests/test_apex_virtual_utils.py
@@ -0,0 +1,101 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import subprocess
+import unittest
+
+from mock import patch
+
+from apex.virtual.utils import DEFAULT_VIRT_IP
+from apex.virtual.utils import get_virt_ip
+from apex.virtual.utils import generate_inventory
+from apex.virtual.utils import host_setup
+from apex.virtual.utils import virt_customize
+
+from nose.tools import (
+ assert_is_instance,
+ assert_regexp_matches,
+ assert_raises,
+ assert_equal)
+
+
+class TestVirtualUtils(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_get_virt_ip(self, mock_subprocess):
+ mock_subprocess.return_value = '<xml></xml>'
+ assert_equal(get_virt_ip(), DEFAULT_VIRT_IP)
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_get_virt_ip_not_default(self, mock_subprocess):
+ mock_subprocess.return_value = '''<xml>
+<ip address='1.2.3.4' netmask='255.255.255.0'/>
+</xml>'''
+ assert_equal(get_virt_ip(), '1.2.3.4')
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_get_virt_ip_raises(self, mock_subprocess):
+ mock_subprocess.side_effect = subprocess.CalledProcessError(1, 'cmd')
+ assert_equal(get_virt_ip(), DEFAULT_VIRT_IP)
+
+ @patch('apex.virtual.utils.common_utils')
+ def test_generate_inventory(self, mock_common_utils):
+ assert_is_instance(generate_inventory('target_file'), dict)
+
+ @patch('apex.virtual.utils.common_utils')
+ def test_generate_inventory_ha_enabled(self, mock_common_utils):
+ assert_is_instance(generate_inventory('target_file', ha_enabled=True),
+ dict)
+
+ @patch('apex.virtual.utils.iptc')
+ @patch('apex.virtual.utils.subprocess.check_call')
+ @patch('apex.virtual.utils.vbmc_lib')
+ def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc):
+ host_setup({'test': 2468})
+ mock_subprocess.assert_called_with(['vbmc', 'start', 'test'])
+
+ @patch('apex.virtual.utils.iptc')
+ @patch('apex.virtual.utils.subprocess.check_call')
+ @patch('apex.virtual.utils.vbmc_lib')
+ def test_host_setup_raise_called_process_error(self, mock_vbmc_lib,
+ mock_subprocess, mock_iptc):
+ mock_subprocess.side_effect = subprocess.CalledProcessError(1, 'cmd')
+ assert_raises(subprocess.CalledProcessError, host_setup, {'tst': 2468})
+
+ @patch('apex.virtual.utils.os.path')
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_virt_customize(self, mock_subprocess, mock_os_path):
+ virt_customize([{'--operation': 'arg'}], 'target')
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_virt_customize_file_not_found(self, mock_subprocess):
+ assert_raises(FileNotFoundError,
+ virt_customize,
+ [{'--operation': 'arg'}], 'target')
+
+ @patch('apex.virtual.utils.os.path')
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_virt_customize_raises(self, mock_subprocess, mock_os_path):
+ mock_subprocess.side_effect = subprocess.CalledProcessError(1, 'cmd')
+ assert_raises(subprocess.CalledProcessError,
+ virt_customize,
+ [{'--operation': 'arg'}], 'target')
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 7b7c35f0..50035638 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -15,7 +15,7 @@ import shutil
import subprocess
import time
-from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import utils as virt_utils
from apex.virtual import configure_vm as vm_lib
from apex.common import constants
from apex.common import utils
diff --git a/apex/virtual/virtual_utils.py b/apex/virtual/utils.py
index 1fe2c399..226af1b5 100644
--- a/apex/virtual/virtual_utils.py
+++ b/apex/virtual/utils.py
@@ -16,7 +16,7 @@ import pprint
import subprocess
import xml.etree.ElementTree as ET
-from apex.common import utils
+from apex.common import utils as common_utils
from apex.virtual import configure_vm as vm_lib
from virtualbmc import manager as vbmc_lib
@@ -39,7 +39,7 @@ def get_virt_ip():
tree = ET.fromstring(virsh_net_xml)
ip_tag = tree.find('ip')
- if ip_tag:
+ if ip_tag is not None:
virsh_ip = ip_tag.get('address')
if virsh_ip:
logging.debug("Detected virsh default network ip: "
@@ -95,9 +95,9 @@ def generate_inventory(target_file, ha_enabled=False, num_computes=1,
tmp_node['memory'] = compute_ram
inv_output['nodes']['node{}'.format(idx)] = copy.deepcopy(tmp_node)
- utils.dump_yaml(inv_output, target_file)
-
+ common_utils.dump_yaml(inv_output, target_file)
logging.info('Virtual environment file created: {}'.format(target_file))
+ return inv_output
def host_setup(node):
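
A note on the "is not None" change in get_virt_ip() above: xml.etree.ElementTree elements are falsy when they have no child elements, so a bare <ip .../> tag carrying only attributes would be skipped by a plain truth test. A short demonstration of the pitfall:

import xml.etree.ElementTree as ET

tree = ET.fromstring("<network><ip address='192.168.122.1'/></network>")
ip_tag = tree.find('ip')

assert ip_tag is not None      # the element exists
assert len(ip_tag) == 0        # ...but has no children, so bool(ip_tag) is False
assert ip_tag.get('address') == '192.168.122.1'

With the old check the parsed address was never read and the function fell back to the default, which is why the new test_get_virt_ip_not_default case expects '1.2.3.4' rather than DEFAULT_VIRT_IP.
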
diff --git a/build/Makefile b/build/Makefile
index f1fde6a3..23b7f115 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -132,7 +132,7 @@ rpmlint:
$(BUILD_DIR)/python-networking-vpp.tar.gz:
@echo "Preparing the networking-vpp RPM prerequisites"
- git clone $(NETVPP_REPO) $(BUILD_DIR)/python-networking-vpp-$(NETVPP_VERS)
+ git clone $(NETVPP_REPO) -b $(NETVPP_BRANCH) $(BUILD_DIR)/python-networking-vpp-$(NETVPP_VERS)
tar czf $(BUILD_DIR)/python-networking-vpp.tar.gz -C $(BUILD_DIR) python-networking-vpp-$(NETVPP_VERS)
.PHONY: networking-vpp-rpm
diff --git a/build/build_quagga.sh b/build/build_quagga.sh
index a682b54e..e33cd274 100755
--- a/build/build_quagga.sh
+++ b/build/build_quagga.sh
@@ -170,7 +170,7 @@ build_zrpc(){
# ZRPC RPM
./configure --enable-zrpcd \
--enable-user=quagga --enable-group=quagga \
- --enable-vty-group=quagga
+ --enable-vty-group=quagga --with-thrift-version=4
make dist
cat > $rpmbuild/SOURCES/zrpcd.service <<EOF
diff --git a/build/enable_rt_kvm.yaml b/build/enable_rt_kvm.yaml
index 4601fd40..35a00d7d 100644
--- a/build/enable_rt_kvm.yaml
+++ b/build/enable_rt_kvm.yaml
@@ -1,5 +1,3 @@
---
parameter_defaults:
- ComputeKernelArgs: 'kvmfornfv_kernel.rpm'
-resource_registry:
- OS::TripleO::NodeUserData: kvm4nfv-1st-boot.yaml
+ KVMForNFVKernelRPM: 'kvmfornfv_kernel.rpm'
diff --git a/build/first-boot.yaml b/build/first-boot.yaml
index 1e8921ba..582981c0 100644
--- a/build/first-boot.yaml
+++ b/build/first-boot.yaml
@@ -16,6 +16,15 @@ parameters:
"intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
type: string
default: ""
+ KVMForNFVKernelRPM:
+ description: >
+ Name of the kvmfornfv kernel rpm.
+ Example: "kvmfornfv_kernel.rpm"
+ type: string
+ default: ""
+ ComputeHostnameFormat:
+ type: string
+ default: ""
resources:
userdata:
@@ -33,23 +42,51 @@ resources:
template: |
#!/bin/bash
set -x
- sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS"/g' \
- -i /etc/default/grub ;
- grub2-mkconfig -o /etc/grub2.cfg
- hugepage_count=$(echo $KERNEL_ARGS | \
- grep -oP ' ?hugepages=\K[0-9]+')
- if [ -z "$hugepage_count" ]; then
- hugepage_count=1024
+ need_reboot='false'
+
+ if [ -n "$KERNEL_ARGS" ]; then
+ sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS"/g' \
+ -i /etc/default/grub ;
+ grub2-mkconfig -o /etc/grub2.cfg
+ hugepage_count=$(echo $KERNEL_ARGS | \
+ grep -oP ' ?hugepages=\K[0-9]+')
+ if [ -z "$hugepage_count" ]; then
+ hugepage_count=1024
+ fi
+ echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf
+ HPAGE_CT=$(printf "%.0f" $(echo 2.2*$hugepage_count | bc))
+ echo vm.max_map_count=$HPAGE_CT >> \
+ /usr/lib/sysctl.d/00-system.conf
+ HPAGE_CT=$(($hugepage_count * 2 * 1024 * 1024))
+ echo kernel.shmmax=$HPAGE_CT >> /usr/lib/sysctl.d/00-system.conf
+ need_reboot='true'
+ fi
+
+ if [ -n "$KVMFORNFV_KERNEL_RPM" ]; then
+ FORMAT=$COMPUTE_HOSTNAME_FORMAT
+ if [[ -z $FORMAT ]] ; then
+ FORMAT="compute" ;
+ else
+ # Assumption: only %index% and %stackname% are
+ # the variables in Host name format
+ FORMAT=$(echo $FORMAT | sed 's/\%index\%//g');
+ FORMAT=$(echo $FORMAT | sed 's/\%stackname\%//g');
+ fi
+ if [[ $(hostname) == *$FORMAT* ]] ; then
+ yum install -y /root/$KVMFORNFV_KERNEL_RPM
+ grub2-mkconfig -o /etc/grub2.cfg
+ sleep 5
+ need_reboot='true'
+ fi
fi
- echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf
- HPAGE_CT=$(printf "%.0f" $(echo 2.2*$hugepage_count | bc))
- echo vm.max_map_count=$HPAGE_CT >> /usr/lib/sysctl.d/00-system.conf
- HPAGE_CT=$(($hugepage_count * 2 * 1024 * 1024))
- echo kernel.shmmax=$HPAGE_CT >> /usr/lib/sysctl.d/00-system.conf
- reboot
+ if [ "$need_reboot" == "true" ]; then
+ reboot
+ fi
params:
$KERNEL_ARGS: {get_param: ComputeKernelArgs}
+ $KVMFORNFV_KERNEL_RPM: {get_param: KVMForNFVKernelRPM}
+ $COMPUTE_HOSTNAME_FORMAT: {get_param: ComputeHostnameFormat}
outputs:
OS::stack_id:
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 7da252ec..9d049028 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -12,7 +12,8 @@ parameter_defaults:
NeutronEnableDHCPMetadata: true
NeutronEnableIsolatedMetadata: true
# NeutronDhcpAgentsPerNetwork: 3
- NeutronPluginExtensions: 'qos,port_security,data_plane_status'
+ NeutronPluginExtensions: 'qos,port_security,neutron.plugins.ml2.extensions.\
+ data_plane_status:DataPlaneStatusExtensionDriver'
# TODO: VLAN Ranges should be configurable from network settings
NeutronNetworkVLANRanges: 'datacentre:500:525'
# NeutronVPPAgentPhysnets:
@@ -26,14 +27,32 @@ parameter_defaults:
# NeutronDpdkMemoryChannels:
# ControllerExtraConfig:
# NovaComputeExtraConfig:
+ MigrationSshKey:
+ public_key: replace_public_key
+ private_key: replace_private_key
+ SshServerOptions:
+ HostKey:
+ - '/etc/ssh/ssh_host_rsa_key'
+ - '/etc/ssh/ssh_host_ecdsa_key'
+ - '/etc/ssh/ssh_host_ed25519_key'
+ SyslogFacility: 'AUTHPRIV'
+ AuthorizedKeysFile: '.ssh/authorized_keys'
+ PasswordAuthentication: 'no'
+ ChallengeResponseAuthentication: 'no'
+ GSSAPIAuthentication: 'no'
+ GSSAPICleanupCredentials: 'no'
+ UsePAM: 'yes'
+ X11Forwarding: 'yes'
+ UsePrivilegeSeparation: 'sandbox'
+ AcceptEnv:
+ - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+ - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+ - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+ - 'XMODIFIERS'
+ Subsystem: 'sftp /usr/libexec/openssh/sftp-server'
+ UseDNS: 'no'
ExtraConfig:
tripleo::ringbuilder::build_ring: false
- nova::nova_public_key:
- type: 'ssh-rsa'
- replace_public_key:
- nova::nova_private_key:
- type: 'ssh-rsa'
- replace_private_key:
nova::policy::policies:
nova-os_compute_api:servers:show:host_status:
key: 'os_compute_api:servers:show:host_status'
@@ -50,6 +69,7 @@ parameter_defaults:
# value updated via lib/overcloud-deploy-functions.sh
# opendaylight::vpp_routing_node: overcloud-novacompute-0.opnfvlf.org
ControllerServices:
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
@@ -132,8 +152,9 @@ parameter_defaults:
- OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::OVNDBs
- OS::TripleO::Services::Vpp
- - OS::TripleO::Services::NeutronBgpvpnApi
+ - OS::TripleO::Services::NeutronBgpVpnApi
ComputeServices:
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Barometer
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index af582f17..06adbbe6 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -41,8 +41,8 @@ rm -rf vsperf vsperf.tar.gz
git clone https://gerrit.opnfv.org/gerrit/vswitchperf vsperf
tar czf vsperf.tar.gz vsperf
-# Increase disk size by 1200MB to accommodate more packages
-qemu-img resize overcloud-full_build.qcow2 +1200M
+# Increase disk size by 1500MB to accommodate more packages
+qemu-img resize overcloud-full_build.qcow2 +1500M
# expand file system to max disk size
# installing forked apex-puppet-tripleo
@@ -56,9 +56,6 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "cd /usr/lib/python2.7/site-packages/ && rm -rf os_net_config && tar xzf apex-os-net-config.tar.gz" \
--run-command "if ! rpm -qa | grep python-redis; then yum install -y python-redis; fi" \
--install epel-release \
- --run-command "sed -i 's/^#UseDNS.*$/UseDNS no/' /etc/ssh/sshd_config" \
- --run-command "sed -i 's/^GSSAPIAuthentication.*$/GSSAPIAuthentication no/' /etc/ssh/sshd_config" \
- --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
--install unzip \
--upload ${BUILD_DIR}/vsperf.tar.gz:/var/opt \
--run-command "cd /var/opt && tar xzf vsperf.tar.gz" \
@@ -76,6 +73,8 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-vpp-ml2-type_drivers-setting.patch" \
--upload ${BUILD_ROOT}/patches/tacker-vnffg-input-params.patch:/usr/lib/python2.7/site-packages/ \
--run-command "cd usr/lib/python2.7/site-packages/ && patch -p1 < tacker-vnffg-input-params.patch" \
+ --upload ${BUILD_ROOT}/patches/puppet-neutron-add-external_network_bridge-option.patch:/usr/share/openstack-puppet/modules/neutron/ \
+ --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-add-external_network_bridge-option.patch" \
-a overcloud-full_build.qcow2
# apply neutron port data plane status patches
@@ -104,8 +103,6 @@ done
rm -rf puppet-fdio
git clone https://git.fd.io/puppet-fdio
pushd puppet-fdio > /dev/null
-#TODO: Remove this when we update to 17.07
-git revert a6e575c8f0af17e62990653bcf4a12c688c21aad --no-edit
git archive --format=tar.gz --prefix=fdio/ HEAD > ${BUILD_DIR}/puppet-fdio.tar.gz
popd > /dev/null
@@ -118,6 +115,12 @@ enabled=1
gpgcheck=0
EOF
+vpp_nosdn_pkg_str=''
+for package in ${nosdn_vpp_rpms[@]}; do
+ wget $package
+ vpp_nosdn_pkg_str+=" --upload ${BUILD_DIR}/${package##*/}:/root/nosdn_vpp_rpms"
+done
+
# Kubernetes Repo
cat > ${BUILD_DIR}/kubernetes.repo << EOF
[kubernetes]
@@ -133,13 +136,6 @@ EOF
# Get Real Time Kernel from kvm4nfv
populate_cache $kvmfornfv_uri_base/$kvmfornfv_kernel_rpm
-# packages frozen for fdio scenarios
-fdio_pkg_str=''
-for package in ${fdio_pkgs[@]}; do
- wget "$package"
- fdio_pkg_str+=" --upload ${BUILD_DIR}/${package##*/}:/root/fdio/"
-done
-
# upload dpdk rpms but do not install
# install fd.io yum repo and packages
# upload puppet fdio
@@ -151,16 +147,19 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/puppet-fdio.tar.gz:/etc/puppet/modules \
--run-command "cd /etc/puppet/modules && tar xzf puppet-fdio.tar.gz" \
--upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/ \
+ --run-command "mkdir /root/nosdn_vpp_rpms" \
+ $vpp_nosdn_pkg_str \
--upload ${BUILD_DIR}/kubernetes.repo:/etc/yum.repos.d/ \
--run-command "mkdir /root/fdio" \
- --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
- $fdio_pkg_str \
- --run-command "yum install -y /root/fdio/*.rpm" \
+ --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/nosdn_vpp_rpms \
+ --install honeycomb \
+ --install vpp-plugins,vpp,vpp-lib,vpp-api-python \
+ --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
--run-command "curl -f https://copr.fedorainfracloud.org/coprs/leifmadsen/ovs-master/repo/epel-7/leifmadsen-ovs-master-epel-7.repo > /etc/yum.repos.d/leifmadsen-ovs-master-epel-7.repo" \
--run-command "mkdir /root/ovs28" \
--run-command "yumdownloader --destdir=/root/ovs28 openvswitch*2.8* python-openvswitch-2.8*" \
--upload ${CACHE_DIR}/$kvmfornfv_kernel_rpm:/root/ \
- --install python2-networking-sfc \
+ --install "http://mirror.centos.org/centos/7/cloud/x86_64/openstack-ocata/python2-networking-sfc-4.0.0-1.el7.noarch.rpm" \
--install python-etcd,puppet-etcd \
--install patch \
--install docker,kubelet,kubeadm,kubectl,kubernetes-cni \
diff --git a/build/overcloud-opendaylight.sh b/build/overcloud-opendaylight.sh
index c850005e..22a539af 100755
--- a/build/overcloud-opendaylight.sh
+++ b/build/overcloud-opendaylight.sh
@@ -35,6 +35,14 @@ enabled=1
gpgcheck=0
EOF
+cat > ${BUILD_DIR}/opendaylight_master.repo << EOF
+[opendaylight-master]
+name=OpenDaylight master repository
+baseurl=https://nexus.opendaylight.org/content/repositories/opendaylight-oxygen-epel-7-x86_64-devel/
+enabled=1
+gpgcheck=0
+EOF
+
# OpenDaylight Puppet Module
rm -rf puppet-opendaylight
git clone -b stable/carbon https://git.opendaylight.org/gerrit/integration/packaging/puppet-opendaylight
@@ -61,10 +69,12 @@ populate_cache http://artifacts.opnfv.org/apex/danube/fdio_netvirt/opendaylight-
# install ODL packages
# Patch in OPNFV custom puppet-tripleO
-# install Honeycomb
# install quagga/zrpc
# upload neutron patch for generic NS linux interface driver + OVS for external networks
LIBGUESTFS_BACKEND=direct virt-customize \
+ --upload ${BUILD_DIR}/opendaylight_master.repo:/etc/yum.repos.d/opendaylight.repo \
+ --run-command "mkdir -p /root/master" \
+ --run-command "yumdownloader --destdir=/root/master opendaylight" \
--upload ${BUILD_DIR}/opendaylight_nitrogen.repo:/etc/yum.repos.d/opendaylight.repo \
--run-command "mkdir -p /root/nitrogen" \
--run-command "yum install --downloadonly --downloaddir=/root/nitrogen opendaylight" \
@@ -90,12 +100,8 @@ if [ "$(uname -i)" == 'x86_64' ]; then
# Download quagga/zrpc rpms
populate_cache http://artifacts.opnfv.org/apex/danube/quagga/quagga-3.tar.gz
-# Download Honeycomb
-populate_cache $honeycomb_pkg
LIBGUESTFS_BACKEND=direct virt-customize \
- --upload ${CACHE_DIR}/${honeycomb_pkg##*/}:/root/fdio/ \
- --run-command "yum install -y /root/fdio/${honeycomb_pkg##*/}" \
--install zeromq-4.1.4 \
--upload ${CACHE_DIR}/quagga-3.tar.gz:/root/ \
--run-command "cd /root/ && tar xzf quagga-3.tar.gz" \
diff --git a/build/patches/puppet-neutron-add-external_network_bridge-option.patch b/build/patches/puppet-neutron-add-external_network_bridge-option.patch
new file mode 100644
index 00000000..808bcae0
--- /dev/null
+++ b/build/patches/puppet-neutron-add-external_network_bridge-option.patch
@@ -0,0 +1,106 @@
+From 48a73ebfd382158b900c041952689128390d14f3 Mon Sep 17 00:00:00 2001
+From: Feng Pan <fpan@redhat.com>
+Date: Sat, 30 Sep 2017 01:02:27 -0400
+Subject: [PATCH] Add external_network_bridge config back
+
+Change-Id: I67582e2033eb1f849f7e76148f089a7f815d1f78
+---
+ manifests/agents/l3.pp | 13 +++++++++++++
+ manifests/agents/vpnaas.pp | 14 ++++++++++++++
+ ...d_external_network_bridge-options-613a8793ef13d761.yaml | 3 ---
+ 3 files changed, 27 insertions(+), 3 deletions(-)
+ delete mode 100644 releasenotes/notes/remove_deprecated_external_network_bridge-options-613a8793ef13d761.yaml
+
+diff --git a/manifests/agents/l3.pp b/manifests/agents/l3.pp
+index 651c7508..df25e3e3 100644
+--- a/manifests/agents/l3.pp
++++ b/manifests/agents/l3.pp
+@@ -92,6 +92,12 @@
+ # (optional) L3 agent extensions to enable.
+ # Defaults to $::os_service_default
+ #
++# === Deprecated Parameters
++#
++# [*external_network_bridge*]
++# (optional) Deprecated. The name of the external bridge
++# Defaults to $::os_service_default
++#
+ class neutron::agents::l3 (
+ $package_ensure = 'present',
+ $enabled = true,
+@@ -113,11 +119,17 @@ class neutron::agents::l3 (
+ $purge_config = false,
+ $availability_zone = $::os_service_default,
+ $extensions = $::os_service_default,
++ # DEPRECATED PARAMETERS
++ $external_network_bridge = $::os_service_default,
+ ) {
+
+ include ::neutron::deps
+ include ::neutron::params
+
++ if ! is_service_default ($external_network_bridge) {
++ warning('parameter external_network_bridge is deprecated')
++ }
++
+ resources { 'neutron_l3_agent_config':
+ purge => $purge_config,
+ }
+@@ -132,6 +144,7 @@ class neutron::agents::l3 (
+
+ neutron_l3_agent_config {
+ 'DEFAULT/debug': value => $debug;
++ 'DEFAULT/external_network_bridge': value => $external_network_bridge;
+ 'DEFAULT/interface_driver': value => $interface_driver;
+ 'DEFAULT/gateway_external_network_id': value => $gateway_external_network_id;
+ 'DEFAULT/handle_internal_only_routers': value => $handle_internal_only_routers;
+diff --git a/manifests/agents/vpnaas.pp b/manifests/agents/vpnaas.pp
+index 52eb4026..1bab10e7 100644
+--- a/manifests/agents/vpnaas.pp
++++ b/manifests/agents/vpnaas.pp
+@@ -45,12 +45,18 @@
+ # in the vpnaas config.
+ # Defaults to false.
+ #
++# === Deprecated Parameters
++#
++# [*external_network_bridge*]
++# (optional) Deprecated. Defaults to $::os_service_default
++#
+ class neutron::agents::vpnaas (
+ $package_ensure = present,
+ $enabled = true,
+ $manage_service = true,
+ $vpn_device_driver = 'neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver',
+ $interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver',
++ $external_network_bridge = $::os_service_default,
+ $ipsec_status_check_interval = $::os_service_default,
+ $purge_config = false,
+ ) {
+@@ -97,6 +103,14 @@ class neutron::agents::vpnaas (
+ 'DEFAULT/interface_driver': value => $interface_driver;
+ }
+
++ if ! is_service_default ($external_network_bridge) {
++ warning('parameter external_network_bridge is deprecated')
++ }
++
++ neutron_vpnaas_agent_config {
++ 'DEFAULT/external_network_bridge': value => $external_network_bridge;
++ }
++
+ if $::neutron::params::vpnaas_agent_package {
+ ensure_resource( 'package', 'neutron-vpnaas-agent', {
+ 'ensure' => $package_ensure,
+diff --git a/releasenotes/notes/remove_deprecated_external_network_bridge-options-613a8793ef13d761.yaml b/releasenotes/notes/remove_deprecated_external_network_bridge-options-613a8793ef13d761.yaml
+deleted file mode 100644
+index 10464012..00000000
+--- a/releasenotes/notes/remove_deprecated_external_network_bridge-options-613a8793ef13d761.yaml
++++ /dev/null
+@@ -1,3 +0,0 @@
+----
+-deprecations:
+- - Removed deprecated option external_network_bridge.
+--
+2.13.4
+
diff --git a/build/rpm_specs/networking-vpp.spec b/build/rpm_specs/networking-vpp.spec
index 4211f94a..80687836 100644
--- a/build/rpm_specs/networking-vpp.spec
+++ b/build/rpm_specs/networking-vpp.spec
@@ -2,7 +2,7 @@
Summary: OpenStack Networking for VPP
Name: python-networking-vpp
-Version: 0.0.1
+Version: 17.07
Release: %{release}%{?git}%{?dist}
License: Apache 2.0
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index c2e2f14e..51094a93 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -69,6 +69,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-bar-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-bar-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-calipso-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-ha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
@@ -86,8 +87,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl_netvirt-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-fdio_dvr-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-fdio_dvr-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
%{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
@@ -112,6 +113,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Wed Sep 20 2017 Tim Rozet <trozet@redhat.com> - 5.0-7
+- Add calipso
* Fri Sep 08 2017 Tim Rozet <trozet@redhat.com> - 5.0-6
- Updates clean to use python
* Wed Aug 23 2017 Tim Rozet <trozet@redhat.com> - 5.0-5
diff --git a/build/undercloud.sh b/build/undercloud.sh
index bd494fb4..baaf424e 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -26,6 +26,9 @@ popd > /dev/null
# inject rt_kvm kernel rpm name into the enable file
sed "s/kvmfornfv_kernel.rpm/$kvmfornfv_kernel_rpm/" ${BUILD_ROOT}/enable_rt_kvm.yaml | tee ${BUILD_DIR}/enable_rt_kvm.yaml
+# grab latest calipso
+populate_cache $calipso_uri_base/$calipso_script
+
# Turn off GSSAPI Auth in sshd
# installing forked apex-tht
# enabling ceph OSDs to live on the controller
@@ -37,7 +40,9 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/apex-tripleo-heat-templates.tar.gz:/usr/share \
--install "openstack-utils" \
--install "ceph-common" \
- --install "python2-networking-sfc" \
+ --install "http://mirror.centos.org/centos/7/cloud/x86_64/openstack-ocata/python2-networking-sfc-4.0.0-1.el7.noarch.rpm" \
+ --install epel-release \
+ --install python34,python34-pip \
--install openstack-ironic-inspector,subunit-filters,docker-distribution,openstack-tripleo-validations \
--run-command "cd /usr/share && rm -rf openstack-tripleo-heat-templates && tar xzf apex-tripleo-heat-templates.tar.gz" \
--run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
@@ -45,17 +50,19 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
--upload ${BUILD_ROOT}/opnfv-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/first-boot.yaml:/home/stack/ \
- --upload ${BUILD_ROOT}/kvm4nfv-1st-boot.yaml:/home/stack/ \
- --upload ${BUILD_DIR}/enable_rt_kvm.yaml:/home/stack/ \
+ --upload ${BUILD_DIR}/enable_rt_kvm.yaml:/usr/share/openstack-tripleo-heat-templates/environments/ \
--upload ${BUILD_ROOT}/ovs-dpdk-preconfig.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/baremetal-environment.yaml:/home/stack/ \
--uninstall "libvirt-client" \
+ --upload ${CACHE_DIR}/${calipso_script}:/root/ \
--install "libguestfs-tools" \
--install "python-tackerclient" \
--upload ${BUILD_ROOT}/patches/tacker-client-fix-symmetrical.patch:/usr/lib/python2.7/site-packages/ \
--run-command "cd usr/lib/python2.7/site-packages/ && patch -p1 < tacker-client-fix-symmetrical.patch" \
+ --run-command "yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo" \
+ --install yum-utils,lvm2,device-mapper-persistent-data \
-a undercloud_build.qcow2
mv -f undercloud_build.qcow2 undercloud.qcow2
diff --git a/build/variables.sh b/build/variables.sh
index 8d736d30..b0713268 100644
--- a/build/variables.sh
+++ b/build/variables.sh
@@ -38,18 +38,19 @@ dpdk_rpms=(
kvmfornfv_uri_base="http://artifacts.opnfv.org/kvmfornfv/danube"
kvmfornfv_kernel_rpm="kvmfornfv-4bfeded9-apex-kernel-4.4.50_rt62_centos.x86_64.rpm"
+calipso_uri_base="https://git.opnfv.org/calipso/plain/app/install"
+calipso_script="calipso-installer.py"
+
netvpp_repo="https://github.com/openstack/networking-vpp"
-netvpp_branch="master"
+netvpp_branch="17.07"
netvpp_commit=$(git ls-remote ${netvpp_repo} ${netvpp_branch} | awk '{print substr($1,1,7)}')
-netvpp_pkg=python-networking-vpp-0.0.1-1.git${NETVPP_COMMIT}$(rpm -E %dist).noarch.rpm
+netvpp_pkg=python-networking-vpp-17.07-1.git${NETVPP_COMMIT}$(rpm -E %dist).noarch.rpm
gluon_rpm=gluon-0.0.1-1_20170302.noarch.rpm
-fdio_pkgs=(
-'http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/vpp-17.04.1-3~ge3b7ad7~b72.x86_64.rpm'
-'http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/vpp-api-python-17.04.1-3~ge3b7ad7~b72.x86_64.rpm'
-'http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/vpp-lib-17.04.1-3~ge3b7ad7~b72.x86_64.rpm'
-'http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/vpp-plugins-17.04.1-3~ge3b7ad7~b72.x86_64.rpm'
-)
-
-honeycomb_pkg='http://artifacts.opnfv.org/apex/danube/fdio_common_rpms/honeycomb-1.17.04.1-2073.noarch.rpm'
+nosdn_vpp_rpms=(
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp/17.07.01-release.x86_64/vpp-17.07.01-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-api-python/17.07.01-release.x86_64/vpp-api-python-17.07.01-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-lib/17.07.01-release.x86_64/vpp-lib-17.07.01-release.x86_64.rpm'
+'https://nexus.fd.io/content/repositories/fd.io.centos7/io/fd/vpp/vpp-plugins/17.07.01-release.x86_64/vpp-plugins-17.07.01-release.x86_64.rpm'
+)
\ No newline at end of file
diff --git a/config/deploy/os-nosdn-calipso-noha.yaml b/config/deploy/os-nosdn-calipso-noha.yaml
new file mode 100644
index 00000000..ce5c8a5e
--- /dev/null
+++ b/config/deploy/os-nosdn-calipso-noha.yaml
@@ -0,0 +1,9 @@
+---
+global_params:
+ ha_enabled: false
+
+deploy_options:
+ sdn_controller: false
+ sfc: false
+ vpn: false
+ calipso: true
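
How this new scenario flows through the rest of the patch (illustrative values): deploy.py copies the calipso flag and the admin installer_vm IP into the Ansible variables consumed by post_deploy_undercloud.yml, which then runs calipso-installer.py against that host.

ds_opts = {'calipso': True}                      # from this scenario file
net_settings = {'networks': {'admin': {'installer_vm': {'ip': '192.0.2.1'}}}}

deploy_vars = {
    'calipso': ds_opts.get('calipso', False),
    'calipso_ip': net_settings['networks']['admin']['installer_vm']['ip'],
}
assert deploy_vars == {'calipso': True, 'calipso_ip': '192.0.2.1'}
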
diff --git a/config/deploy/os-odl-fdio-ha.yaml b/config/deploy/os-odl-fdio-ha.yaml
index c02b22be..2125265e 100644
--- a/config/deploy/os-odl-fdio-ha.yaml
+++ b/config/deploy/os-odl-fdio-ha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
sdn_controller: opendaylight
- odl_version: carbon
+ odl_version: oxygen
odl_vpp_routing_node: overcloud-novacompute-0
tacker: true
congress: true
diff --git a/config/deploy/os-odl-fdio-noha.yaml b/config/deploy/os-odl-fdio-noha.yaml
index 4aab0520..17eea880 100644
--- a/config/deploy/os-odl-fdio-noha.yaml
+++ b/config/deploy/os-odl-fdio-noha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
sdn_controller: opendaylight
- odl_version: carbon
+ odl_version: oxygen
odl_vpp_routing_node: overcloud-novacompute-0
tacker: true
congress: true
diff --git a/config/deploy/os-odl-fdio-dvr-ha.yaml b/config/deploy/os-odl-fdio_dvr-ha.yaml
index 6fcbec65..0304fa8b 100644
--- a/config/deploy/os-odl-fdio-dvr-ha.yaml
+++ b/config/deploy/os-odl-fdio_dvr-ha.yaml
@@ -4,8 +4,8 @@ global_params:
deploy_options:
sdn_controller: opendaylight
- odl_version: carbon
- odl_vpp_routing_node: dvr
+ odl_version: oxygen
+ dvr: true
tacker: true
congress: true
sfc: false
diff --git a/config/deploy/os-odl-fdio-dvr-noha.yaml b/config/deploy/os-odl-fdio_dvr-noha.yaml
index e8788d71..9424c080 100644
--- a/config/deploy/os-odl-fdio-dvr-noha.yaml
+++ b/config/deploy/os-odl-fdio_dvr-noha.yaml
@@ -4,8 +4,8 @@ global_params:
deploy_options:
sdn_controller: opendaylight
- odl_version: carbon
- odl_vpp_routing_node: dvr
+ odl_version: oxygen
+ dvr: true
tacker: true
congress: true
sfc: false
diff --git a/lib/ansible/playbooks/post_deploy_undercloud.yml b/lib/ansible/playbooks/post_deploy_undercloud.yml
index d6b8805a..bd62f9cf 100644
--- a/lib/ansible/playbooks/post_deploy_undercloud.yml
+++ b/lib/ansible/playbooks/post_deploy_undercloud.yml
@@ -115,3 +115,41 @@
become_user: stack
when: congress
with_items: "{{ congress_datasources }}"
+ - name: Configure Calipso
+ block:
+ - name: Install Calipso dependencies
+ pip:
+ name: "{{ item }}"
+ executable: pip3
+ with_items:
+ - docker
+ - pymongo
+ - name: Create Calipso user
+ user:
+ name: calipso
+ createhome: yes
+ - name: Remove old docker
+ package:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - docker
+ - docker-common
+ - docker-selinux
+ - docker-engine
+ - name: Install Docker CE
+ package:
+ name: docker-ce
+ state: latest
+ - name: Start Docker
+ service:
+ name: docker
+ state: started
+ enabled: yes
+ - name: Install Calipso
+ command: >
+ python3 /root/calipso-installer.py --command start-all
+ --copy q --hostname {{ calipso_ip }} --dbport 37017 --webport 81
+ --apiport 8001 --rabbitmport 15673
+ become: yes
+ when: calipso
diff --git a/tox.ini b/tox.ini
index 87b6c035..cde191cb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -13,7 +13,7 @@ commands =
--cover-tests \
--cover-package=apex \
--cover-xml \
- --cover-min-percentage 90 \
+ --cover-min-percentage 94 \
apex/tests
coverage report