summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--apex/clean.py30
-rw-r--r--apex/deploy.py2
-rw-r--r--apex/inventory/inventory.py20
-rw-r--r--apex/tests/config/inventory-virt-1-node.yaml13
-rw-r--r--apex/tests/config/inventory-virt.yaml25
-rw-r--r--apex/tests/test_apex_clean.py12
-rw-r--r--apex/tests/test_apex_inventory.py33
-rw-r--r--apex/virtual/virtual_utils.py27
-rwxr-xr-xbuild/overcloud-full.sh14
9 files changed, 142 insertions, 34 deletions
diff --git a/apex/clean.py b/apex/clean.py
index 9d0e648e..0b1be860 100644
--- a/apex/clean.py
+++ b/apex/clean.py
@@ -87,12 +87,36 @@ def clean_vms():
def clean_ssh_keys(key_file='/root/.ssh/authorized_keys'):
logging.info('Removing any stack pub keys from root authorized keys')
+ if not os.path.isfile(key_file):
+        logging.warning("Key file does not exist: {}".format(key_file))
+ return
for line in fileinput.input(key_file, inplace=True):
line = line.strip('\n')
if 'stack@undercloud' not in line:
print(line)
+def clean_networks():
+ logging.debug('Cleaning all network config')
+ for network in constants.OPNFV_NETWORK_TYPES:
+ logging.info("Cleaning Jump Host Network config for network "
+ "{}".format(network))
+ jumphost.detach_interface_from_ovs(network)
+ jumphost.remove_ovs_bridge(network)
+
+ conn = libvirt.open('qemu:///system')
+ if not conn:
+ raise ApexCleanException('Unable to open libvirt connection')
+ logging.debug('Destroying all virsh networks')
+ for network in conn.listNetworks():
+ if network in constants.OPNFV_NETWORK_TYPES:
+ virsh_net = conn.networkLookupByName(network)
+ logging.debug("Destroying virsh network: {}".format(network))
+ if virsh_net.isActive():
+ virsh_net.destroy()
+ virsh_net.undefine()
+
+
def main():
clean_parser = argparse.ArgumentParser()
clean_parser.add_argument('-i',
@@ -123,11 +147,7 @@ def main():
# Delete vbmc
clean_vbmcs()
# Clean network config
- for network in constants.ADMIN_NETWORK, constants.EXTERNAL_NETWORK:
- logging.info("Cleaning Jump Host Network config for network "
- "{}".format(network))
- jumphost.detach_interface_from_ovs(network)
- jumphost.remove_ovs_bridge(network)
+ clean_networks()
# clean pub keys from root's auth keys
clean_ssh_keys()
diff --git a/apex/deploy.py b/apex/deploy.py
index 22faec92..a0561384 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -83,7 +83,7 @@ def build_vms(inventory, network_settings,
name, volume_path,
baremetal_interfaces=network_settings.enabled_network_list,
memory=node['memory'], cpus=node['cpu'],
- macs=[node['mac_address']],
+ macs=node['mac'],
template_dir=template_dir)
virt_utils.host_setup({name: node['pm_port']})
diff --git a/apex/inventory/inventory.py b/apex/inventory/inventory.py
index 71f8e528..3483e577 100644
--- a/apex/inventory/inventory.py
+++ b/apex/inventory/inventory.py
@@ -40,7 +40,7 @@ class Inventory(dict):
# move ipmi_* to pm_*
# make mac a list
- def munge_nodes(node):
+ def munge_node(node):
node['pm_addr'] = node['ipmi_ip']
node['pm_password'] = node['ipmi_pass']
node['pm_user'] = node['ipmi_user']
@@ -54,23 +54,21 @@ class Inventory(dict):
for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
'disk_device'):
- if i == 'disk_device' and 'disk_device' in node.keys():
- self.root_device = node[i]
- else:
- continue
- del node[i]
+ if i in node.keys():
+ if i == 'disk_device':
+ self.root_device = node[i]
+ del node[i]
return node
-
- super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
+ super().__init__({'nodes': list(map(munge_node, init_dict['nodes']))})
# verify number of nodes
- if ha and len(self['nodes']) < 5 and not virtual:
+ if ha and len(self['nodes']) < 5:
raise InventoryException('You must provide at least 5 '
- 'nodes for HA baremetal deployment')
+ 'nodes for HA deployment')
elif len(self['nodes']) < 2:
raise InventoryException('You must provide at least 2 nodes '
- 'for non-HA baremetal deployment')
+ 'for non-HA deployment')
if virtual:
self['host-ip'] = '192.168.122.1'
diff --git a/apex/tests/config/inventory-virt-1-node.yaml b/apex/tests/config/inventory-virt-1-node.yaml
new file mode 100644
index 00000000..3e4b8dc4
--- /dev/null
+++ b/apex/tests/config/inventory-virt-1-node.yaml
@@ -0,0 +1,13 @@
+nodes:
+ node0:
+ arch: x86_64
+ capabilities: profile:control
+ cpu: 4
+ disk: 41
+ ipmi_ip: 192.168.122.1
+ ipmi_pass: password
+ ipmi_user: admin
+ mac_address: 00:a8:58:29:f9:99
+ memory: 10240
+ pm_port: 6230
+ pm_type: pxe_ipmitool
diff --git a/apex/tests/config/inventory-virt.yaml b/apex/tests/config/inventory-virt.yaml
new file mode 100644
index 00000000..36184ea5
--- /dev/null
+++ b/apex/tests/config/inventory-virt.yaml
@@ -0,0 +1,25 @@
+nodes:
+ node0:
+ arch: x86_64
+ capabilities: profile:control
+ cpu: 4
+ disk: 41
+ ipmi_ip: 192.168.122.1
+ ipmi_pass: password
+ ipmi_user: admin
+ mac_address: 00:a8:58:29:f9:99
+ memory: 10240
+ pm_port: 6230
+ pm_type: pxe_ipmitool
+ node1:
+ arch: x86_64
+ capabilities: profile:compute
+ cpu: 4
+ disk: 41
+ ipmi_ip: 192.168.122.1
+ ipmi_pass: password
+ ipmi_user: admin
+ mac_address: 00:9d:c8:10:d9:64
+ memory: 8192
+ pm_port: 6231
+ pm_type: pxe_ipmitool
diff --git a/apex/tests/test_apex_clean.py b/apex/tests/test_apex_clean.py
index b6b9d428..b3ead6f7 100644
--- a/apex/tests/test_apex_clean.py
+++ b/apex/tests/test_apex_clean.py
@@ -100,3 +100,15 @@ class TestClean:
ml.listDefinedDomains.return_value = ['undercloud']
ml.lookupByName.return_value = dummy_domain()
assert clean.clean_vms() is None
+
+ @patch('apex.network.jumphost.detach_interface_from_ovs')
+ @patch('apex.network.jumphost.remove_ovs_bridge')
+ @patch('libvirt.open')
+ def test_clean_networks(self, mock_libvirt, mock_jumphost_ovs_remove,
+ mock_jumphost_detach):
+ ml = mock_libvirt.return_value
+ ml.listNetworks.return_value = ['admin', 'external', 'tenant', 'blah']
+ mock_net = ml.networkLookupByName.return_value
+ mock_net.isActive.return_value = True
+ clean.clean_networks()
+ assert_equal(mock_net.destroy.call_count, 3)
diff --git a/apex/tests/test_apex_inventory.py b/apex/tests/test_apex_inventory.py
index cca8068b..87e7d50b 100644
--- a/apex/tests/test_apex_inventory.py
+++ b/apex/tests/test_apex_inventory.py
@@ -16,7 +16,10 @@ from nose.tools import (
from apex import Inventory
from apex.inventory.inventory import InventoryException
-from apex.tests.constants import TEST_CONFIG_DIR
+from apex.tests.constants import (
+ TEST_CONFIG_DIR,
+ TEST_DUMMY_CONFIG
+)
inventory_files = ('intel_pod2_settings.yaml',
'nokia_pod1_settings.yaml',
@@ -40,26 +43,26 @@ class TestInventory:
def teardown(self):
"""This method is run once after _each_ test method is executed"""
- def test_init(self):
+ def test_inventory_baremetal(self):
for f in inventory_files:
i = Inventory(os.path.join(files_dir, f))
assert_equal(i.dump_instackenv_json(), None)
- # test virtual
- i = Inventory(i, virtual=True)
- assert_equal(i.dump_instackenv_json(), None)
+ def test_inventory_invalid_ha_count(self):
+ assert_raises(InventoryException, Inventory,
+ os.path.join(TEST_DUMMY_CONFIG, 'inventory-virt.yaml'),
+ virtual=True, ha=True)
- # Remove nodes to violate HA node count
- while len(i['nodes']) >= 5:
- i['nodes'].pop()
- assert_raises(InventoryException,
- Inventory, i)
+ def test_inventory_invalid_noha_count(self):
+ assert_raises(InventoryException, Inventory,
+ os.path.join(TEST_DUMMY_CONFIG,
+ 'inventory-virt-1-node.yaml'),
+ virtual=True, ha=False)
- # Remove nodes to violate non-HA node count
- while len(i['nodes']) >= 2:
- i['nodes'].pop()
- assert_raises(InventoryException,
- Inventory, i, ha=False)
+ def test_inventory_virtual(self):
+ i = Inventory(os.path.join(TEST_DUMMY_CONFIG, 'inventory-virt.yaml'),
+ virtual=True, ha=False)
+ assert_equal(i.dump_instackenv_json(), None)
def test_exception(self):
e = InventoryException("test")
diff --git a/apex/virtual/virtual_utils.py b/apex/virtual/virtual_utils.py
index 255d2c69..1fe2c399 100644
--- a/apex/virtual/virtual_utils.py
+++ b/apex/virtual/virtual_utils.py
@@ -14,6 +14,7 @@ import os
import platform
import pprint
import subprocess
+import xml.etree.ElementTree as ET
from apex.common import utils
from apex.virtual import configure_vm as vm_lib
@@ -26,6 +27,28 @@ DEFAULT_PASS = 'password'
DEFAULT_VIRT_IP = '192.168.122.1'
+def get_virt_ip():
+ try:
+ virsh_net_xml = subprocess.check_output(['virsh', 'net-dumpxml',
+ 'default'],
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ logging.warning('Unable to detect default virsh network IP. Will '
+ 'use 192.168.122.1')
+ return DEFAULT_VIRT_IP
+
+ tree = ET.fromstring(virsh_net_xml)
+ ip_tag = tree.find('ip')
+    if ip_tag is not None:
+ virsh_ip = ip_tag.get('address')
+ if virsh_ip:
+ logging.debug("Detected virsh default network ip: "
+ "{}".format(virsh_ip))
+ return virsh_ip
+
+ return DEFAULT_VIRT_IP
+
+
def generate_inventory(target_file, ha_enabled=False, num_computes=1,
controller_ram=DEFAULT_RAM, arch=platform.machine(),
compute_ram=DEFAULT_RAM, vcpus=4):
@@ -42,7 +65,7 @@ def generate_inventory(target_file, ha_enabled=False, num_computes=1,
"""
node = {'mac_address': '',
- 'ipmi_ip': DEFAULT_VIRT_IP,
+ 'ipmi_ip': get_virt_ip(),
'ipmi_user': DEFAULT_USER,
'ipmi_pass': DEFAULT_PASS,
'pm_type': 'pxe_ipmitool',
@@ -86,7 +109,7 @@ def host_setup(node):
vbmc_manager = vbmc_lib.VirtualBMCManager()
for name, port in node.items():
vbmc_manager.add(username=DEFAULT_USER, password=DEFAULT_PASS,
- port=port, address=DEFAULT_VIRT_IP, domain_name=name,
+ port=port, address=get_virt_ip(), domain_name=name,
libvirt_uri='qemu:///system',
libvirt_sasl_password=False,
libvirt_sasl_username=False)
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index cc335c82..b7711a06 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -115,6 +115,18 @@ enabled=1
gpgcheck=0
EOF
+# Kubernetes Repo
+cat > ${BUILD_DIR}/kubernetes.repo << EOF
+[kubernetes]
+name=Kubernetes
+baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOF
+
# Get Real Time Kernel from kvm4nfv
populate_cache $kvmfornfv_uri_base/$kvmfornfv_kernel_rpm
@@ -136,6 +148,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/puppet-fdio.tar.gz:/etc/puppet/modules \
--run-command "cd /etc/puppet/modules && tar xzf puppet-fdio.tar.gz" \
--upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/ \
+ --upload ${BUILD_DIR}/kubernetes.repo:/etc/yum.repos.d/ \
--run-command "mkdir /root/fdio" \
--upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
$fdio_pkg_str \
@@ -147,6 +160,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--install python2-networking-sfc \
--install python-etcd,puppet-etcd \
--install patch \
+ --install docker,kubelet,kubeadm,kubectl,kubernetes-cni \
-a overcloud-full_build.qcow2
# upload and install barometer packages