summary refs log tree commit diff stats
path: root/apex
diff options
context:
space:
mode:
Diffstat (limited to 'apex')
-rw-r--r--  apex/deploy.py                                                  17
-rw-r--r--  apex/overcloud/config.py                                        12
-rw-r--r--  apex/overcloud/overcloud_deploy.py                             132
-rw-r--r--  apex/settings/deploy_settings.py                                 9
-rw-r--r--  apex/tests/test_apex_undercloud.py                             196
-rw-r--r--  apex/tests/test_apex_virtual_configure_vm.py                   102
-rw-r--r--  apex/tests/test_apex_virtual_utils.py                          101
-rw-r--r--  apex/undercloud/undercloud.py                                    2
-rw-r--r--  apex/virtual/utils.py (renamed from apex/virtual/virtual_utils.py)  8
9 files changed, 519 insertions, 60 deletions
diff --git a/apex/deploy.py b/apex/deploy.py
index a0561384..5ec0f7fa 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -20,7 +20,7 @@ import sys
import tempfile
import apex.virtual.configure_vm as vm_lib
-import apex.virtual.virtual_utils as virt_utils
+import apex.virtual.utils as virt_utils
from apex import DeploySettings
from apex import Inventory
from apex import NetworkEnvironment
@@ -58,6 +58,14 @@ def validate_cross_settings(deploy_settings, net_settings, inventory):
raise ApexDeployException("Setting a DPDK based dataplane requires"
"a dedicated NIC for tenant network")
+ if 'odl_vpp_routing_node' in deploy_settings['deploy_options']:
+ if deploy_settings['deploy_options']['dataplane'] != 'fdio':
+ raise ApexDeployException("odl_vpp_routing_node should only be set"
+ "when dataplane is set to fdio")
+ if deploy_settings['deploy_options'].get('dvr') is True:
+ raise ApexDeployException("odl_vpp_routing_node should only be set"
+ "when dvr is not enabled")
+
# TODO(trozet): add more checks here like RAM for ODL, etc
# check if odl_vpp_netvirt is true and vpp is set
# Check if fdio and nosdn:
@@ -336,8 +344,8 @@ def main():
overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
root_pw=root_pw)
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
- overcloud_deploy.prep_env(deploy_settings, net_settings, opnfv_env,
- net_env_target, APEX_TEMP_DIR)
+ overcloud_deploy.prep_env(deploy_settings, net_settings, inventory,
+ opnfv_env, net_env_target, APEX_TEMP_DIR)
overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
inventory, APEX_TEMP_DIR,
args.virtual, args.env_file)
@@ -401,6 +409,9 @@ def main():
deploy_vars['congress'] = True
else:
deploy_vars['congress'] = False
+ deploy_vars['calipso'] = ds_opts.get('calipso', False)
+ deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
+ 'installer_vm']['ip']
# TODO(trozet): this is probably redundant with getting external
# network info from undercloud.py
if 'external' in net_settings.enabled_network_list:
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
index e48b254f..a7f7d848 100644
--- a/apex/overcloud/config.py
+++ b/apex/overcloud/config.py
@@ -44,10 +44,14 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
ovs_dpdk_br = ''
if ds['dataplane'] == 'fdio':
nets['tenant']['nic_mapping'][role]['phys_type'] = 'vpp_interface'
- if ds['sdn_controller'] == 'opendaylight' and role == 'compute':
- nets['external'][0]['nic_mapping'][role]['phys_type'] = \
- 'vpp_interface'
- ext_net = 'vpp_interface'
+ if ds['sdn_controller'] == 'opendaylight':
+ if role == 'compute':
+ nets['external'][0]['nic_mapping'][role]['phys_type'] = \
+ 'vpp_interface'
+ ext_net = 'vpp_interface'
+ if ds.get('dvr') is True:
+ nets['admin']['nic_mapping'][role]['phys_type'] = \
+ 'linux_bridge'
elif ds['dataplane'] == 'ovs_dpdk':
ovs_dpdk_br = 'br-phy'
if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
diff --git a/apex/overcloud/overcloud_deploy.py b/apex/overcloud/overcloud_deploy.py
index f7a8b954..e3248536 100644
--- a/apex/overcloud/overcloud_deploy.py
+++ b/apex/overcloud/overcloud_deploy.py
@@ -20,7 +20,7 @@ import time
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
-from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
@@ -35,6 +35,7 @@ SDN_FILE_MAP = {
'gluon': 'gluon.yaml',
'vpp': {
'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
+ 'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
},
'default': 'neutron-opendaylight.yaml',
@@ -92,6 +93,34 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
return env_list
+def _get_node_counts(inventory):
+ """
+ Return numbers of controller and compute nodes in inventory
+
+ :param inventory: node inventory data structure
+ :return: number of controller and compute nodes in inventory
+ """
+ if not inventory:
+ raise ApexDeployException("Empty inventory")
+
+ nodes = inventory['nodes']
+ num_control = 0
+ num_compute = 0
+ for node in nodes:
+ if node['capabilities'] == 'profile:control':
+ num_control += 1
+ elif node['capabilities'] == 'profile:compute':
+ num_compute += 1
+ else:
+ # TODO(trozet) do we want to allow capabilities to not exist?
+ logging.error("Every node must include a 'capabilities' key "
+ "tagged with either 'profile:control' or "
+ "'profile:compute'")
+ raise ApexDeployException("Node missing capabilities "
+ "key: {}".format(node))
+ return num_control, num_compute
+
+
def create_deploy_cmd(ds, ns, inv, tmp_dir,
virtual, env_file='opnfv-environment.yaml'):
@@ -100,7 +129,6 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
ds_opts = ds['deploy_options']
deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
- # TODO(trozet): make sure rt kvm file is in tht dir
for k, v in OTHER_FILE_MAP.items():
if k in ds_opts and ds_opts[k]:
deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
@@ -118,21 +146,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
else:
deploy_options.append('baremetal-environment.yaml')
- nodes = inv['nodes']
- num_control = 0
- num_compute = 0
- for node in nodes:
- if 'profile:control' in node['capabilities']:
- num_control += 1
- elif 'profile:compute' in node['capabilities']:
- num_compute += 1
- else:
- # TODO(trozet) do we want to allow capabilities to not exist?
- logging.error("Every node must include a 'capabilities' key "
- "tagged with either 'profile:control' or "
- "'profile:compute'")
- raise ApexDeployException("Node missing capabilities "
- "key: {}".format(node))
+ num_control, num_compute = _get_node_counts(inv)
if num_control == 0 or num_compute == 0:
logging.error("Detected 0 control or compute nodes. Control nodes: "
"{}, compute nodes{}".format(num_control, num_compute))
@@ -230,18 +244,33 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
virt_cmds.append(
{con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
"-p1 < neutron-patch-NSDriver.patch"})
+ if sdn is False:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"},
+ {con.VIRT_RUN_CMD: "yum install -y "
+ "/root/nosdn_vpp_rpms/*.rpm"}
+ ])
if sdn == 'opendaylight':
if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
virt_cmds.extend([
{con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])},
{con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
"/root/puppet-opendaylight-"
"{}.tar.gz".format(ds_opts['odl_version'])}
])
+ if ds_opts['odl_version'] == 'master':
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
+ ds_opts['odl_version'])}
+ ])
+ else:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+ ds_opts['odl_version'])}
+ ])
+
elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
and ds_opts['odl_vpp_netvirt']:
virt_cmds.extend([
@@ -285,15 +314,15 @@ def make_ssh_key():
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
- pub_key = re.sub('ssh-rsa\s*', '', public_key.decode('utf-8'))
- return private_key.decode('utf-8'), pub_key
+ return private_key.decode('utf-8'), public_key.decode('utf-8')
-def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
+def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
"""
Creates modified opnfv/network environments for deployment
:param ds: deploy settings
:param ns: network settings
+ :param inv: node inventory
:param opnfv_env: file path for opnfv-environment file
:param net_env: file path for network-environment file
:param tmp_dir: Apex tmp dir
@@ -346,15 +375,18 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
if 'CloudDomain' in line:
output_line = " CloudDomain: {}".format(ns['domain_name'])
elif 'replace_private_key' in line:
- output_line = " key: '{}'".format(private_key)
+ output_line = " private_key: |\n"
+ key_out = ''
+ for line in private_key.splitlines():
+ key_out += " {}\n".format(line)
+ output_line += key_out
elif 'replace_public_key' in line:
- output_line = " key: '{}'".format(public_key)
+ output_line = " public_key: '{}'".format(public_key)
if ds_opts['sdn_controller'] == 'opendaylight' and \
- 'odl_vpp_routing_node' in ds_opts and ds_opts[
- 'odl_vpp_routing_node'] != 'dvr':
+ 'odl_vpp_routing_node' in ds_opts:
if 'opendaylight::vpp_routing_node' in line:
- output_line = (" opendaylight::vpp_routing_node: ${}.${}"
+ output_line = (" opendaylight::vpp_routing_node: {}.{}"
.format(ds_opts['odl_vpp_routing_node'],
ns['domain_name']))
elif 'ControllerExtraConfig' in line:
@@ -373,6 +405,17 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
if 'NeutronVPPAgentPhysnets' in line:
output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
format(tenant_ctrl_nic))
+ elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
+ 'dvr') is True:
+ if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
+ output_line = ''
+ elif 'NeutronDhcpAgentsPerNetwork' in line:
+ num_control, num_compute = _get_node_counts(inv)
+ output_line = (" NeutronDhcpAgentsPerNetwork: {}"
+ .format(num_compute))
+ elif 'ComputeServices' in line:
+ output_line = (" ComputeServices:\n"
+ " - OS::TripleO::Services::NeutronDhcpAgent")
if perf:
for role in 'NovaCompute', 'Controller':
@@ -402,10 +445,10 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
if kernel_args:
output_line = " ComputeKernelArgs: '{}'".\
format(kernel_args)
- elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
+ if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
for k, v in OVS_PERF_MAP.items():
if k in line and v in perf_ovs_comp:
- output_line = " {}: {}".format(k, perf_ovs_comp[v])
+ output_line = " {}: '{}'".format(k, perf_ovs_comp[v])
print(output_line)
@@ -414,23 +457,20 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
# Modify Network environment
for line in fileinput.input(net_env, inplace=True):
line = line.strip('\n')
- if ds_opts['dataplane'] == 'ovs_dpdk':
- if 'ComputeExtraConfigPre' in line:
- print(' OS::TripleO::ComputeExtraConfigPre: '
- './ovs-dpdk-preconfig.yaml')
- else:
- print(line)
- elif perf and perf_kern_comp:
- if 'resource_registry' in line:
- print("resource_registry:\n"
- " OS::TripleO::NodeUserData: first-boot.yaml")
- elif 'NovaSchedulerDefaultFilters' in line:
- print(" NovaSchedulerDefaultFilters: 'RamFilter,"
- "ComputeFilter,AvailabilityZoneFilter,"
- "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
- "NUMATopologyFilter'")
- else:
- print(line)
+ if 'ComputeExtraConfigPre' in line and \
+ ds_opts['dataplane'] == 'ovs_dpdk':
+ print(' OS::TripleO::ComputeExtraConfigPre: '
+ './ovs-dpdk-preconfig.yaml')
+ elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
+ 'resource_registry' in line:
+ print("resource_registry:\n"
+ " OS::TripleO::NodeUserData: first-boot.yaml")
+ elif perf and perf_kern_comp and \
+ 'NovaSchedulerDefaultFilters' in line:
+ print(" NovaSchedulerDefaultFilters: 'RamFilter,"
+ "ComputeFilter,AvailabilityZoneFilter,"
+ "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
+ "NUMATopologyFilter'")
else:
print(line)
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index 793e43ac..c0594056 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -31,13 +31,15 @@ OPT_DEPLOY_SETTINGS = ['performance',
'yardstick',
'dovetail',
'odl_vpp_routing_node',
+ 'dvr',
'odl_vpp_netvirt',
- 'barometer']
+ 'barometer',
+ 'calipso']
VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
-VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'master']
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
class DeploySettings(dict):
@@ -110,6 +112,9 @@ class DeploySettings(dict):
"Invalid ODL version: {}".format(self[deploy_options][
'odl_version']))
+ if self['deploy_options']['odl_version'] == 'oxygen':
+ self['deploy_options']['odl_version'] = 'master'
+
if 'performance' in deploy_options:
if not isinstance(deploy_options['performance'], dict):
raise DeploySettingsException("Performance deploy_option"
diff --git a/apex/tests/test_apex_undercloud.py b/apex/tests/test_apex_undercloud.py
new file mode 100644
index 00000000..9458bf9f
--- /dev/null
+++ b/apex/tests/test_apex_undercloud.py
@@ -0,0 +1,196 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import os
+import subprocess
+import unittest
+
+from mock import patch
+from mock import MagicMock
+
+from apex.common import constants
+from apex.undercloud.undercloud import Undercloud
+from apex.undercloud.undercloud import ApexUndercloudException
+
+from nose.tools import (
+ assert_regexp_matches,
+ assert_raises,
+ assert_true,
+ assert_equal)
+
+
+class TestUndercloud(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_init(self, mock_get_vm, mock_create):
+ Undercloud('img_path', 'tplt_path')
+ mock_create.assert_called()
+
+ @patch.object(Undercloud, '_get_vm', return_value=object())
+ @patch.object(Undercloud, 'create')
+ def test_init_uc_exists(self, mock_get_vm, mock_create):
+ assert_raises(ApexUndercloudException,
+ Undercloud, 'img_path', 'tplt_path')
+
+ @patch('apex.undercloud.undercloud.libvirt.open')
+ @patch.object(Undercloud, 'create')
+ def test_get_vm_exists(self, mock_create, mock_libvirt):
+ assert_raises(ApexUndercloudException,
+ Undercloud, 'img_path', 'tplt_path')
+
+ @patch('apex.undercloud.undercloud.libvirt.open')
+ @patch.object(Undercloud, 'create')
+ def test_get_vm_not_exists(self, mock_create, mock_libvirt):
+ conn = mock_libvirt.return_value
+ conn.lookupByName.side_effect = libvirt.libvirtError('defmsg')
+ Undercloud('img_path', 'tplt_path')
+
+ @patch('apex.undercloud.undercloud.vm_lib')
+ @patch.object(Undercloud, 'inject_auth', return_value=None)
+ @patch.object(Undercloud, 'setup_volumes', return_value=None)
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ def test_create(self, mock_get_vm, mock_setup_vols,
+ mock_inject_auth, mock_vm_lib):
+ Undercloud('img_path', 'tplt_path', external_network=True)
+ mock_inject_auth.assert_called()
+ mock_setup_vols.assert_called()
+ mock_inject_auth.assert_called()
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_set_ip(self, mock_get_vm, mock_create):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.vm = MagicMock()
+ if_addrs = {'item1': {'addrs': [{'type': libvirt.VIR_IP_ADDR_TYPE_IPV4,
+ 'addr': 'ipaddress'}]},
+ 'item2': {'addrs': [{'type': libvirt.VIR_IP_ADDR_TYPE_IPV4,
+ 'addr': 'ipaddress'}]}}
+ uc.vm.interfaceAddresses.return_value = if_addrs
+ assert_true(uc._set_ip())
+
+ @patch('apex.undercloud.undercloud.time.sleep')
+ @patch.object(Undercloud, '_set_ip', return_value=False)
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_start(self, mock_create, mock_get_vm,
+ mock_set_ip, mock_time):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.vm = MagicMock()
+ uc.vm.isActive.return_value = False
+ mock_set_ip.return_value = True
+ uc.start()
+
+ @patch('apex.undercloud.undercloud.time.sleep')
+ @patch.object(Undercloud, '_set_ip', return_value=False)
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_start_no_ip(self, mock_create, mock_get_vm,
+ mock_set_ip, mock_time):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.vm = MagicMock()
+ uc.vm.isActive.return_value = True
+ mock_set_ip.return_value = False
+ assert_raises(ApexUndercloudException, uc.start)
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_configure(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ ns = MagicMock()
+ uc.configure(ns, 'playbook', '/tmp/dir')
+
+ @patch('apex.undercloud.undercloud.utils')
+ @patch.object(Undercloud, 'generate_config', return_value={})
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_configure_raises(self, mock_create, mock_get_vm,
+ mock_generate_config, mock_utils):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ ns = MagicMock()
+ subps_err = subprocess.CalledProcessError(1, 'cmd')
+ mock_utils.run_ansible.side_effect = subps_err
+ assert_raises(ApexUndercloudException,
+ uc.configure, ns, 'playbook', '/tmp/dir')
+
+ @patch('apex.undercloud.undercloud.os.remove')
+ @patch('apex.undercloud.undercloud.os.path')
+ @patch('apex.undercloud.undercloud.shutil')
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_setup_vols(self, mock_get_vm, mock_create,
+ mock_shutil, mock_os_path, mock_os_remove):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ mock_os_path.isfile.return_value = True
+ mock_os_path.exists.return_value = True
+ uc.setup_volumes()
+ for img_file in ('overcloud-full.vmlinuz', 'overcloud-full.initrd',
+ 'undercloud.qcow2'):
+ src_img = os.path.join(uc.image_path, img_file)
+ dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
+ mock_shutil.copyfile.assert_called_with(src_img, dest_img)
+
+ @patch('apex.undercloud.undercloud.os.path')
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_setup_vols_raises(self, mock_get_vm, mock_create, mock_os_path):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ mock_os_path.isfile.return_value = False
+ assert_raises(ApexUndercloudException, uc.setup_volumes)
+
+ @patch('apex.undercloud.undercloud.virt_utils')
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_inject_auth(self, mock_get_vm, mock_create, mock_vutils):
+ uc = Undercloud('img_path', 'tplt_path', external_network=True)
+ uc.root_pw = 'test'
+ uc.inject_auth()
+ test_ops = [{'--root-password': 'password:test'},
+ {'--run-command': 'mkdir -p /root/.ssh'},
+ {'--upload':
+ '/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'},
+ {'--run-command': 'chmod 600 /root/.ssh/authorized_keys'},
+ {'--run-command': 'restorecon /root/.ssh/authorized_keys'},
+ {'--run-command':
+ 'cp /root/.ssh/authorized_keys /home/stack/.ssh/'},
+ {'--run-command':
+ 'chown stack:stack /home/stack/.ssh/authorized_keys'},
+ {'--run-command':
+ 'chmod 600 /home/stack/.ssh/authorized_keys'}]
+ mock_vutils.virt_customize.assert_called_with(test_ops, uc.volume)
+
+ @patch.object(Undercloud, '_get_vm', return_value=None)
+ @patch.object(Undercloud, 'create')
+ def test_generate_config(self, mock_get_vm, mock_create):
+ ns_net = MagicMock()
+ ns_net.__getitem__.side_effect = \
+ lambda i: '1234/24' if i is 'cidr' else MagicMock()
+ ns = {'apex': MagicMock(),
+ 'dns-domain': 'dns',
+ 'networks': {'admin': ns_net,
+ 'external': [ns_net]}}
+
+ Undercloud('img_path', 'tplt_path').generate_config(ns)
diff --git a/apex/tests/test_apex_virtual_configure_vm.py b/apex/tests/test_apex_virtual_configure_vm.py
new file mode 100644
index 00000000..228e06d6
--- /dev/null
+++ b/apex/tests/test_apex_virtual_configure_vm.py
@@ -0,0 +1,102 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import unittest
+
+from mock import patch
+
+from apex.virtual.configure_vm import generate_baremetal_macs
+from apex.virtual.configure_vm import create_vm_storage
+from apex.virtual.configure_vm import create_vm
+
+from nose.tools import (
+ assert_regexp_matches,
+ assert_raises,
+ assert_equal)
+
+
+class TestVirtualConfigureVM(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_generate_baremetal_macs(self):
+ assert_regexp_matches(generate_baremetal_macs()[0],
+ '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$')
+
+ def test_generate_baremetal_macs_alot(self):
+ assert_equal(len(generate_baremetal_macs(127)), 127)
+
+ def test_generate_baremetal_macs_too_many(self):
+ assert_raises(ValueError, generate_baremetal_macs, 128)
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ pool = conn.storagePoolLookupByName.return_value
+ pool.isActive.return_value = 0
+ # execute
+ create_vm_storage('test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage_pool_none(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ conn.storagePoolLookupByName.return_value = None
+ # execute
+ assert_raises(Exception, create_vm_storage, 'test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage_libvirt_error(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ pool = conn.storagePoolLookupByName.return_value
+ pool.storageVolLookupByName.side_effect = libvirt.libvirtError('ermsg')
+ # execute
+ assert_raises(libvirt.libvirtError, create_vm_storage, 'test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ def test_create_vm_storage_new_vol_none(self, mock_libvirt_open):
+ # setup mock
+ conn = mock_libvirt_open.return_value
+ pool = conn.storagePoolLookupByName.return_value
+ pool.createXML.return_value = None
+ # execute
+ assert_raises(Exception, create_vm_storage, 'test')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ @patch('apex.virtual.configure_vm.create_vm_storage')
+ def test_create_vm(self, mock_create_vm_storage,
+ mock_libvirt_open):
+ create_vm('test', 'image', default_network=True,
+ direct_boot=True, kernel_args='test', template_dir='./build')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ @patch('apex.virtual.configure_vm.create_vm_storage')
+ def test_create_vm_x86_64(self, mock_create_vm_storage,
+ mock_libvirt_open):
+ create_vm('test', 'image', arch='x86_64', template_dir='./build')
+
+ @patch('apex.virtual.configure_vm.libvirt.open')
+ @patch('apex.virtual.configure_vm.create_vm_storage')
+ def test_create_vm_aarch64(self, mock_create_vm_storage,
+ mock_libvirt_open):
+ create_vm('test', 'image', arch='aarch64', template_dir='./build')
diff --git a/apex/tests/test_apex_virtual_utils.py b/apex/tests/test_apex_virtual_utils.py
new file mode 100644
index 00000000..643069f3
--- /dev/null
+++ b/apex/tests/test_apex_virtual_utils.py
@@ -0,0 +1,101 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import subprocess
+import unittest
+
+from mock import patch
+
+from apex.virtual.utils import DEFAULT_VIRT_IP
+from apex.virtual.utils import get_virt_ip
+from apex.virtual.utils import generate_inventory
+from apex.virtual.utils import host_setup
+from apex.virtual.utils import virt_customize
+
+from nose.tools import (
+ assert_is_instance,
+ assert_regexp_matches,
+ assert_raises,
+ assert_equal)
+
+
+class TestVirtualUtils(unittest.TestCase):
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_get_virt_ip(self, mock_subprocess):
+ mock_subprocess.return_value = '<xml></xml>'
+ assert_equal(get_virt_ip(), DEFAULT_VIRT_IP)
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_get_virt_ip_not_default(self, mock_subprocess):
+ mock_subprocess.return_value = '''<xml>
+<ip address='1.2.3.4' netmask='255.255.255.0'/>
+</xml>'''
+ assert_equal(get_virt_ip(), '1.2.3.4')
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_get_virt_ip_raises(self, mock_subprocess):
+ mock_subprocess.side_effect = subprocess.CalledProcessError(1, 'cmd')
+ assert_equal(get_virt_ip(), DEFAULT_VIRT_IP)
+
+ @patch('apex.virtual.utils.common_utils')
+ def test_generate_inventory(self, mock_common_utils):
+ assert_is_instance(generate_inventory('target_file'), dict)
+
+ @patch('apex.virtual.utils.common_utils')
+ def test_generate_inventory_ha_enabled(self, mock_common_utils):
+ assert_is_instance(generate_inventory('target_file', ha_enabled=True),
+ dict)
+
+ @patch('apex.virtual.utils.iptc')
+ @patch('apex.virtual.utils.subprocess.check_call')
+ @patch('apex.virtual.utils.vbmc_lib')
+ def test_host_setup(self, mock_vbmc_lib, mock_subprocess, mock_iptc):
+ host_setup({'test': 2468})
+ mock_subprocess.assert_called_with(['vbmc', 'start', 'test'])
+
+ @patch('apex.virtual.utils.iptc')
+ @patch('apex.virtual.utils.subprocess.check_call')
+ @patch('apex.virtual.utils.vbmc_lib')
+ def test_host_setup_raise_called_process_error(self, mock_vbmc_lib,
+ mock_subprocess, mock_iptc):
+ mock_subprocess.side_effect = subprocess.CalledProcessError(1, 'cmd')
+ assert_raises(subprocess.CalledProcessError, host_setup, {'tst': 2468})
+
+ @patch('apex.virtual.utils.os.path')
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_virt_customize(self, mock_subprocess, mock_os_path):
+ virt_customize([{'--operation': 'arg'}], 'target')
+
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_virt_customize_file_not_found(self, mock_subprocess):
+ assert_raises(FileNotFoundError,
+ virt_customize,
+ [{'--operation': 'arg'}], 'target')
+
+ @patch('apex.virtual.utils.os.path')
+ @patch('apex.virtual.utils.subprocess.check_output')
+ def test_virt_customize_raises(self, mock_subprocess, mock_os_path):
+ mock_subprocess.side_effect = subprocess.CalledProcessError(1, 'cmd')
+ assert_raises(subprocess.CalledProcessError,
+ virt_customize,
+ [{'--operation': 'arg'}], 'target')
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index 7b7c35f0..50035638 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -15,7 +15,7 @@ import shutil
import subprocess
import time
-from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import utils as virt_utils
from apex.virtual import configure_vm as vm_lib
from apex.common import constants
from apex.common import utils
diff --git a/apex/virtual/virtual_utils.py b/apex/virtual/utils.py
index 1fe2c399..226af1b5 100644
--- a/apex/virtual/virtual_utils.py
+++ b/apex/virtual/utils.py
@@ -16,7 +16,7 @@ import pprint
import subprocess
import xml.etree.ElementTree as ET
-from apex.common import utils
+from apex.common import utils as common_utils
from apex.virtual import configure_vm as vm_lib
from virtualbmc import manager as vbmc_lib
@@ -39,7 +39,7 @@ def get_virt_ip():
tree = ET.fromstring(virsh_net_xml)
ip_tag = tree.find('ip')
- if ip_tag:
+ if ip_tag is not None:
virsh_ip = ip_tag.get('address')
if virsh_ip:
logging.debug("Detected virsh default network ip: "
@@ -95,9 +95,9 @@ def generate_inventory(target_file, ha_enabled=False, num_computes=1,
tmp_node['memory'] = compute_ram
inv_output['nodes']['node{}'.format(idx)] = copy.deepcopy(tmp_node)
- utils.dump_yaml(inv_output, target_file)
-
+ common_utils.dump_yaml(inv_output, target_file)
logging.info('Virtual environment file created: {}'.format(target_file))
+ return inv_output
def host_setup(node):