summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore10
-rw-r--r--apex/build.py25
-rw-r--r--apex/clean.py86
-rw-r--r--apex/common/exceptions.py8
-rw-r--r--apex/common/parsers.py25
-rw-r--r--apex/common/utils.py45
-rw-r--r--apex/deploy.py12
-rw-r--r--apex/network/jumphost.py236
-rw-r--r--apex/overcloud/config.py7
-rw-r--r--apex/overcloud/overcloud_deploy.py99
-rw-r--r--apex/settings/deploy_settings.py4
-rw-r--r--apex/settings/network_settings.py4
-rw-r--r--apex/tests/config/bad_ifcfg-br-external8
-rw-r--r--apex/tests/config/bad_nova_output.json23
-rw-r--r--apex/tests/config/ifcfg-br-dummy9
-rw-r--r--apex/tests/config/ifcfg-br-external10
-rw-r--r--apex/tests/config/ifcfg-dummy7
-rw-r--r--apex/tests/config/nova_output.json23
-rw-r--r--apex/tests/config/test_overcloudrc17
-rw-r--r--apex/tests/constants.py1
-rw-r--r--apex/tests/playbooks/test_failed_playbook.yaml5
-rwxr-xr-xapex/tests/smoke_tests/execute_smoke_tests.sh3
-rw-r--r--apex/tests/smoke_tests/execute_tests.yml11
-rw-r--r--apex/tests/smoke_tests/prepare_undercloud.yml9
-rw-r--r--apex/tests/smoke_tests/smoke_tests.yml3
-rw-r--r--apex/tests/test_apex_clean.py77
-rw-r--r--apex/tests/test_apex_common_parsers.py71
-rw-r--r--apex/tests/test_apex_common_utils.py41
-rw-r--r--apex/tests/test_apex_deploy_settings.py8
-rw-r--r--apex/tests/test_apex_inventory.py18
-rw-r--r--apex/tests/test_apex_ip_utils.py42
-rw-r--r--apex/tests/test_apex_network_environment.py21
-rw-r--r--apex/tests/test_apex_network_jumphost.py299
-rw-r--r--apex/tests/test_apex_network_settings.py8
-rw-r--r--apex/undercloud/undercloud.py27
-rwxr-xr-xapex/virtual/configure_vm.py27
-rw-r--r--apex/virtual/virtual_utils.py10
-rw-r--r--build/CentOS-Updates.repo5
-rw-r--r--build/Makefile6
-rw-r--r--build/baremetal-environment.yaml1
-rwxr-xr-xbuild/barometer-install.sh26
-rw-r--r--build/bash_completion_apex2
-rwxr-xr-xbuild/build_ovs_nsh.sh7
-rw-r--r--build/opnfv-environment.yaml12
-rwxr-xr-xbuild/overcloud-full.sh6
-rwxr-xr-xbuild/overcloud-opendaylight.sh3
-rw-r--r--build/rpm_specs/opnfv-apex-common.spec3
-rwxr-xr-xbuild/undercloud.sh2
-rwxr-xr-xci/build.sh16
-rwxr-xr-xci/clean.sh221
-rwxr-xr-xci/deploy.sh22
-rwxr-xr-xci/run_smoke_tests.sh13
-rwxr-xr-xci/test.sh31
-rw-r--r--config/deploy/os-odl-fdio-dvr-ha.yaml1
-rw-r--r--config/deploy/os-odl-fdio-dvr-noha.yaml1
-rw-r--r--config/deploy/os-odl-sfc-ha.yaml2
-rw-r--r--config/deploy/os-odl-sfc-noha.yaml2
-rw-r--r--config/network/network_settings_v6.yaml2
-rwxr-xr-xcontrib/dev_dep_check.sh (renamed from ci/dev_dep_check.sh)7
-rwxr-xr-xcontrib/simple_deploy.sh17
-rw-r--r--docs/release/release-notes/release-notes.rst50
-rw-r--r--docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst6
-rw-r--r--docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst6
-rw-r--r--docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst6
-rw-r--r--docs/release/scenarios/os-odl-csit-noha/index.rst (renamed from docs/release/scenarios/os-odl_l3-nofeature-ha/index.rst)10
-rw-r--r--docs/release/scenarios/os-odl-csit-noha/os-odl-csit-noha.rst (renamed from docs/release/scenarios/os-odl_l3-csit-noha/os-odl_l3-csit-noha.rst)8
-rw-r--r--docs/release/scenarios/os-odl-nofeature-ha/index.rst (renamed from docs/release/scenarios/os-odl_l3-csit-noha/index.rst)6
-rw-r--r--docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst (renamed from docs/release/scenarios/os-odl_l3-nofeature-ha/os-odl_l3-nofeature-ha.rst)10
-rw-r--r--docs/release/scenarios/os-odl-nofeature-noha/index.rst (renamed from docs/release/scenarios/os-odl_l3-nofeature-noha/index.rst)10
-rw-r--r--docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst (renamed from docs/release/scenarios/os-odl_l3-nofeature-noha/os-odl_l3-nofeature-noha.rst)8
-rw-r--r--docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst6
-rw-r--r--lib/ansible/playbooks/configure_undercloud.yml22
-rw-r--r--lib/ansible/playbooks/deploy_dependencies.yml19
-rw-r--r--lib/ansible/playbooks/deploy_overcloud.yml18
-rw-r--r--lib/ansible/playbooks/post_deploy_undercloud.yml1
-rw-r--r--setup.cfg1
76 files changed, 1235 insertions, 699 deletions
diff --git a/.gitignore b/.gitignore
index 47eaef64..f42d4c6e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,17 +1,11 @@
*~
*.pyc
-.*.sw?
-.coverage
/docs_build/
/docs_output/
/releng/
-.build/
-.cache/
-ci/apex_build.log
-ci/apex_deploy.log
-.tox/
apex.egg-info/
/apex/tests/playbooks/*.retry
coverage.xml
nosetests.xml
-ci/apex_clean.log
+ci/*.log
+.*
diff --git a/apex/build.py b/apex/build.py
index cda4e061..2d0786a8 100644
--- a/apex/build.py
+++ b/apex/build.py
@@ -15,6 +15,8 @@ import sys
import uuid
import yaml
+from apex.common import utils
+
CACHE_JOURNAL = 'cache_journal.yaml'
TMP_CACHE = '.cache'
BUILD_ROOT = 'build'
@@ -116,6 +118,12 @@ def build(build_root, version, iso=False, rpms=False):
make_args = ['RELEASE={}'.format(version)]
else:
make_args = []
+ logging.info('Running make clean...')
+ try:
+ subprocess.check_call(['make', '-C', build_root, 'clean'])
+ except subprocess.CalledProcessError:
+ logging.error('Failure to make clean')
+ raise
logging.info('Building targets: {}'.format(make_targets))
try:
output = subprocess.check_output(["make"] + make_args + ["-C",
@@ -216,13 +224,13 @@ if __name__ == '__main__':
console.setLevel(log_level)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console)
- apex_root = os.path.split(os.getcwd())[0]
- if 'apex/apex' in apex_root:
- apex_root = os.path.split(apex_root)[0]
- for root, dirs, files in os.walk(apex_root):
- if BUILD_ROOT in dirs and 'apex/apex' not in root:
- apex_root = root
- break
+ # Since we only support building inside of git repo this should be fine
+ try:
+ apex_root = subprocess.check_output(
+ ['git', 'rev-parse', '--show-toplevel']).decode('utf-8').strip()
+ except subprocess.CalledProcessError:
+ logging.error("Must be in an Apex git repo to execute build")
+ raise
apex_build_root = os.path.join(apex_root, BUILD_ROOT)
if os.path.isdir(apex_build_root):
cache_tmp_dir = os.path.join(apex_root, TMP_CACHE)
@@ -232,6 +240,9 @@ if __name__ == '__main__':
raise ApexBuildException("Invalid path for apex root: {}. Must be "
"invoked from within Apex code directory.".
format(apex_root))
+ dep_playbook = os.path.join(apex_root,
+ 'lib/ansible/playbooks/build_dependencies.yml')
+ utils.run_ansible(None, dep_playbook)
unpack_cache(cache_tmp_dir, args.cache_dir)
build(apex_build_root, args.build_version, args.iso, args.rpms)
build_cache(cache_tmp_dir, args.cache_dir)
diff --git a/apex/clean.py b/apex/clean.py
index af9e8ce0..9d0e648e 100644
--- a/apex/clean.py
+++ b/apex/clean.py
@@ -7,16 +7,21 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Clean will eventually be migrated to this file
-
import argparse
+import fileinput
+import libvirt
import logging
import os
import pyipmi
import pyipmi.interfaces
import sys
-from .common import utils
+from apex.common import (
+ constants,
+ utils)
+from apex.network import jumphost
+from apex.common.exceptions import ApexCleanException
+from virtualbmc import manager as vbmc_lib
def clean_nodes(inventory):
@@ -41,11 +46,59 @@ def clean_nodes(inventory):
sys.exit(1)
+def clean_vbmcs():
+ vbmc_manager = vbmc_lib.VirtualBMCManager()
+ vbmcs = vbmc_manager.list()
+ for vbmc in vbmcs:
+ logging.info("Deleting vbmc: {}".format(vbmc['domain_name']))
+ vbmc_manager.delete(vbmc['domain_name'])
+
+
+def clean_vms():
+ logging.info('Destroying all Apex VMs')
+ conn = libvirt.open('qemu:///system')
+ if not conn:
+ raise ApexCleanException('Unable to open libvirt connection')
+ pool = conn.storagePoolLookupByName('default')
+ domains = conn.listAllDomains()
+
+ for domain in domains:
+ vm = domain.name()
+ if vm != 'undercloud' and not vm.startswith('baremetal'):
+ continue
+ logging.info("Cleaning domain: {}".format(vm))
+ if domain.isActive():
+ logging.debug('Destroying domain')
+ domain.destroy()
+ domain.undefine()
+ # delete storage volume
+ try:
+ stgvol = pool.storageVolLookupByName("{}.qcow2".format(vm))
+ except libvirt.libvirtError:
+ logging.warning("Skipping volume cleanup as volume not found for "
+ "vm: {}".format(vm))
+ stgvol = None
+ if stgvol:
+ logging.info('Deleting storage volume')
+ stgvol.wipe(0)
+ stgvol.delete(0)
+ pool.refresh()
+
+
+def clean_ssh_keys(key_file='/root/.ssh/authorized_keys'):
+ logging.info('Removing any stack pub keys from root authorized keys')
+ for line in fileinput.input(key_file, inplace=True):
+ line = line.strip('\n')
+ if 'stack@undercloud' not in line:
+ print(line)
+
+
def main():
clean_parser = argparse.ArgumentParser()
- clean_parser.add_argument('-f',
+ clean_parser.add_argument('-i',
dest='inv_file',
- required=True,
+ required=False,
+ default=None,
help='File which contains inventory')
args = clean_parser.parse_args(sys.argv[1:])
os.makedirs(os.path.dirname('./apex_clean.log'), exist_ok=True)
@@ -58,7 +111,28 @@ def main():
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console)
- clean_nodes(args.inv_file)
+ if args.inv_file:
+ if not os.path.isfile(args.inv_file):
+ logging.error("Inventory file not found: {}".format(args.inv_file))
+ raise FileNotFoundError("Inventory file does not exist")
+ else:
+ logging.info("Shutting down baremetal nodes")
+ clean_nodes(args.inv_file)
+ # Delete all VMs
+ clean_vms()
+ # Delete vbmc
+ clean_vbmcs()
+ # Clean network config
+ for network in constants.ADMIN_NETWORK, constants.EXTERNAL_NETWORK:
+ logging.info("Cleaning Jump Host Network config for network "
+ "{}".format(network))
+ jumphost.detach_interface_from_ovs(network)
+ jumphost.remove_ovs_bridge(network)
+
+ # clean pub keys from root's auth keys
+ clean_ssh_keys()
+
+ logging.info('Apex clean complete!')
if __name__ == '__main__':
diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
index c660213f..54d99834 100644
--- a/apex/common/exceptions.py
+++ b/apex/common/exceptions.py
@@ -10,3 +10,11 @@
class ApexDeployException(Exception):
pass
+
+
+class JumpHostNetworkException(Exception):
+ pass
+
+
+class ApexCleanException(Exception):
+ pass
diff --git a/apex/common/parsers.py b/apex/common/parsers.py
index 8744c862..91b8905b 100644
--- a/apex/common/parsers.py
+++ b/apex/common/parsers.py
@@ -71,3 +71,28 @@ def parse_overcloudrc(in_file):
logging.debug("os cred not found in: {}".format(line))
return creds
+
+
+def parse_ifcfg_file(in_file):
+ """
+ Parses ifcfg file information
+ :param in_file:
+ :return: dictionary of ifcfg key value pairs
+ """
+ ifcfg_params = {
+ 'IPADDR': '',
+ 'NETMASK': '',
+ 'GATEWAY': '',
+ 'METRIC': '',
+ 'DNS1': '',
+ 'DNS2': '',
+ 'PREFIX': ''
+ }
+ with open(in_file, 'r') as fh:
+ for line in fh:
+ for param in ifcfg_params.keys():
+ match = re.search("^\s*{}=(.*)$".format(param), line)
+ if match:
+ ifcfg_params[param] = match.group(1)
+ break
+ return ifcfg_params
diff --git a/apex/common/utils.py b/apex/common/utils.py
index 848f2644..b1837b9b 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -76,7 +76,7 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
else:
conn_type = 'smart'
ansible_command = ['ansible-playbook', '--become', '-i', inv_host,
- '-u', user, '-c', conn_type, playbook, '-vvv']
+ '-u', user, '-c', conn_type, playbook, '-vv']
if dry_run:
ansible_command.append('--check')
@@ -95,13 +95,36 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
with open(ansible_tmp, 'w') as fh:
fh.write("ANSIBLE_HOST_KEY_CHECKING=FALSE {}".format(
' '.join(ansible_command)))
- try:
- my_env = os.environ.copy()
- my_env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
- logging.info("Executing playbook...this may take some time")
- logging.debug(subprocess.check_output(ansible_command, env=my_env,
- stderr=subprocess.STDOUT).decode('utf-8'))
- except subprocess.CalledProcessError as e:
- logging.error("Error executing ansible: {}".format(
- pprint.pformat(e.output.decode('utf-8'))))
- raise
+
+ my_env = os.environ.copy()
+ my_env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ logging.info("Executing playbook...this may take some time")
+ p = subprocess.Popen(ansible_command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ bufsize=1,
+ env=my_env,
+ universal_newlines=True)
+ # read first line
+ x = p.stdout.readline()
+ # initialize task
+ task = ''
+ while x:
+ # append lines to task
+ task += x
+ # log the line and read another
+ x = p.stdout.readline()
+ # deliver the task to info when we get a blank line
+ if not x.strip():
+ task += x
+ logging.info(task.replace('\\n', '\n'))
+ task = ''
+ x = p.stdout.readline()
+ # clean up and get return code
+ p.stdout.close()
+ rc = p.wait()
+ if rc:
+ # raise errors
+ e = "Ansible playbook failed. See Ansible logs for details."
+ logging.error(e)
+ raise Exception(e)
diff --git a/apex/deploy.py b/apex/deploy.py
index 7900170b..9ebc3f62 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -13,6 +13,7 @@ import argparse
import json
import logging
import os
+import platform
import pprint
import shutil
import sys
@@ -33,7 +34,7 @@ from apex.undercloud import undercloud as uc_lib
from apex.overcloud import config as oc_cfg
from apex.overcloud import overcloud_deploy
-APEX_TEMP_DIR = tempfile.mkdtemp()
+APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp')
ANSIBLE_PATH = 'ansible/playbooks'
SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
@@ -77,7 +78,6 @@ def build_vms(inventory, network_settings,
name = 'baremetal{}'.format(idx)
volume = name + ".qcow2"
volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
- # TODO(trozet): add back aarch64
# TODO(trozet): add error checking
vm_lib.create_vm(
name, volume_path,
@@ -125,22 +125,26 @@ def create_deploy_parser():
deploy_parser.add_argument('--virtual-computes',
dest='virt_compute_nodes',
default=1,
+ type=int,
help='Number of Virtual Compute nodes to create'
' and use during deployment (defaults to 1'
' for noha and 2 for ha)')
deploy_parser.add_argument('--virtual-cpus',
dest='virt_cpus',
default=4,
+ type=int,
help='Number of CPUs to use per Overcloud VM in'
' a virtual deployment (defaults to 4)')
deploy_parser.add_argument('--virtual-default-ram',
dest='virt_default_ram',
default=8,
+ type=int,
help='Amount of default RAM to use per '
'Overcloud VM in GB (defaults to 8).')
deploy_parser.add_argument('--virtual-compute-ram',
dest='virt_compute_ram',
default=None,
+ type=int,
help='Amount of RAM to use per Overcloud '
'Compute VM in GB (defaults to 8). '
'Overrides --virtual-default-ram arg for '
@@ -353,6 +357,7 @@ def main():
deploy_vars = dict()
deploy_vars['virtual'] = args.virtual
deploy_vars['debug'] = args.debug
+ deploy_vars['aarch64'] = platform.machine() == 'aarch64'
deploy_vars['dns_server_args'] = ''
deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
deploy_vars['stackrc'] = 'source /home/stack/stackrc'
@@ -364,10 +369,11 @@ def main():
utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
user='stack', tmp_dir=APEX_TEMP_DIR)
logging.info("Overcloud deployment complete")
- os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
except Exception:
logging.error("Deployment Failed. Please check log")
raise
+ finally:
+ os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
# Post install
logging.info("Executing post deploy configuration")
diff --git a/apex/network/jumphost.py b/apex/network/jumphost.py
index f3f06ad6..c28c105e 100644
--- a/apex/network/jumphost.py
+++ b/apex/network/jumphost.py
@@ -9,11 +9,11 @@
import logging
import os
-import re
import shutil
import subprocess
-from apex.common.exceptions import ApexDeployException
+from apex.common.exceptions import JumpHostNetworkException
+from apex.common import parsers
from apex.network import ip_utils
NET_MAP = {
@@ -24,6 +24,8 @@ NET_MAP = {
'api': 'br-api'
}
+NET_CFG_PATH = '/etc/sysconfig/network-scripts'
+
def configure_bridges(ns):
"""
@@ -68,81 +70,98 @@ def configure_bridges(ns):
except subprocess.CalledProcessError:
logging.error("Unable to configure IP address on "
"bridge {}".format(NET_MAP[network]))
+ raise
-def attach_interface_to_ovs(bridge, interface, network):
+def generate_ifcfg_params(if_file, network):
"""
- Attaches jumphost interface to OVS for baremetal deployments
- :param bridge: bridge to attach to
- :param interface: interface to attach to bridge
- :param network: Apex network type for these interfaces
- :return: None
+ Generates and validates ifcfg parameters required for a network
+ :param if_file: ifcfg file to parse
+ :param network: Apex network
+ :return: dictionary of generated/validated ifcfg params
"""
+ ifcfg_params = parsers.parse_ifcfg_file(if_file)
+ if not ifcfg_params['IPADDR']:
+ logging.error("IPADDR missing in {}".format(if_file))
+ raise JumpHostNetworkException("IPADDR missing in {}".format(if_file))
+ if not (ifcfg_params['NETMASK'] or ifcfg_params['PREFIX']):
+ logging.error("NETMASK/PREFIX missing in {}".format(if_file))
+ raise JumpHostNetworkException("NETMASK/PREFIX missing in {}".format(
+ if_file))
+ if network == 'external' and not ifcfg_params['GATEWAY']:
+ logging.error("GATEWAY is required to be in {} for external "
+ "network".format(if_file))
+ raise JumpHostNetworkException("GATEWAY is required to be in {} for "
+ "external network".format(if_file))
- net_cfg_path = '/etc/sysconfig/network-scripts'
- if_file = os.path.join(net_cfg_path, "ifcfg-{}".format(interface))
- ovs_file = os.path.join(net_cfg_path, "ifcfg-{}".format(bridge))
+ if ifcfg_params['DNS1'] or ifcfg_params['DNS2']:
+ ifcfg_params['PEERDNS'] = 'yes'
+ else:
+ ifcfg_params['PEERDNS'] = 'no'
+ return ifcfg_params
- logging.info("Attaching interface: {} to bridge: {} on network {}".format(
- bridge, interface, network
- ))
+def is_ovs_bridge(bridge):
+ """
+ Finds an OVS bridge
+ :param bridge: OVS bridge to find
+ :return: boolean if OVS bridge exists
+ """
try:
output = subprocess.check_output(['ovs-vsctl', 'show'],
stderr=subprocess.STDOUT)
if bridge not in output.decode('utf-8'):
- logging.debug("Bridge {} not found. Creating...".format(bridge))
- subprocess.check_call(['ovs-vsctl', 'add-br', bridge])
+ logging.debug("Bridge {} not found".format(bridge))
+ return False
else:
logging.debug("Bridge {} found".format(bridge))
+ return True
except subprocess.CalledProcessError:
- logging.error("Unable to validate/create OVS bridge {}".format(bridge))
+ logging.error("Unable to validate OVS bridge {}".format(bridge))
raise
+
+
+def dump_ovs_ports(bridge):
+ """
+ Returns
+ :param bridge: OVS bridge to list ports
+ :return: list of ports
+ """
try:
output = subprocess.check_output(['ovs-vsctl', 'list-ports', bridge],
stderr=subprocess.STDOUT)
- if interface in output.decode('utf-8'):
- logging.debug("Interface already attached to bridge")
- return
- except subprocess.CalledProcessError as e:
- logging.error("Unable to dump ports for bridge: {}".format(bridge))
- logging.error("Error output: {}".format(e.output))
+ except subprocess.CalledProcessError:
+ logging.error("Unable to show ports for {}".format(bridge))
raise
+ return output.decode('utf-8').strip().split('\n')
- if not os.path.isfile(if_file):
- logging.error("Interface ifcfg not found: {}".format(if_file))
- raise FileNotFoundError("Interface file missing: {}".format(if_file))
- ifcfg_params = {
- 'IPADDR': '',
- 'NETMASK': '',
- 'GATEWAY': '',
- 'METRIC': '',
- 'DNS1': '',
- 'DNS2': '',
- 'PREFIX': ''
- }
- with open(if_file, 'r') as fh:
- interface_output = fh.read()
-
- for param in ifcfg_params.keys():
- match = re.search("{}=(.*)\n".format(param), interface_output)
- if match:
- ifcfg_params[param] = match.group(1)
+def attach_interface_to_ovs(bridge, interface, network):
+ """
+ Attaches jumphost interface to OVS for baremetal deployments
+ :param bridge: bridge to attach to
+ :param interface: interface to attach to bridge
+ :param network: Apex network type for these interfaces
+ :return: None
+ """
- if not ifcfg_params['IPADDR']:
- logging.error("IPADDR missing in {}".format(if_file))
- raise ApexDeployException("IPADDR missing in {}".format(if_file))
- if not (ifcfg_params['NETMASK'] or ifcfg_params['PREFIX']):
- logging.error("NETMASK/PREFIX missing in {}".format(if_file))
- raise ApexDeployException("NETMASK/PREFIX missing in {}".format(
- if_file))
- if network == 'external' and not ifcfg_params['GATEWAY']:
- logging.error("GATEWAY is required to be in {} for external "
- "network".format(if_file))
- raise ApexDeployException("GATEWAY is required to be in {} for "
- "external network".format(if_file))
+ if_file = os.path.join(NET_CFG_PATH, "ifcfg-{}".format(interface))
+ ovs_file = os.path.join(NET_CFG_PATH, "ifcfg-{}".format(bridge))
+
+ logging.info("Attaching interface: {} to bridge: {} on network {}".format(
+ bridge, interface, network
+ ))
+ if not is_ovs_bridge(bridge):
+ subprocess.check_call(['ovs-vsctl', 'add-br', bridge])
+ elif interface in dump_ovs_ports(bridge):
+ logging.debug("Interface already attached to bridge")
+ return
+
+ if not os.path.isfile(if_file):
+ logging.error("Interface ifcfg not found: {}".format(if_file))
+ raise FileNotFoundError("Interface file missing: {}".format(if_file))
+ ifcfg_params = generate_ifcfg_params(if_file, network)
shutil.move(if_file, "{}.orig".format(if_file))
if_content = """DEVICE={}
DEVICETYPE=ovs
@@ -160,13 +179,9 @@ BOOTPROTO=static
ONBOOT=yes
TYPE=OVSBridge
PROMISC=yes""".format(bridge)
- peer_dns = 'no'
for param, value in ifcfg_params.items():
if value:
bridge_content += "\n{}={}".format(param, value)
- if param == 'DNS1' or param == 'DNS2':
- peer_dns = 'yes'
- bridge_content += "\n{}={}".format('PEERDNS', peer_dns)
logging.debug("New interface file content:\n{}".format(if_content))
logging.debug("New bridge file content:\n{}".format(bridge_content))
@@ -181,3 +196,108 @@ PROMISC=yes""".format(bridge)
except subprocess.CalledProcessError:
logging.error("Failed to restart Linux networking")
raise
+
+
+def detach_interface_from_ovs(network):
+ """
+ Detach interface from OVS for baremetal deployments
+ :param network: Apex network to detach single interface from
+ :return: None
+ """
+
+ bridge = NET_MAP[network]
+ logging.debug("Detaching interfaces from bridge on network: {}".format(
+ network))
+ # ensure bridge exists
+ if not is_ovs_bridge(bridge):
+ return
+
+ # check if real port is on bridge
+ for interface in dump_ovs_ports(bridge):
+ if interface and not interface.startswith('vnet'):
+ logging.debug("Interface found: {}".format(interface))
+ real_interface = interface
+ break
+ else:
+ logging.info("No jumphost interface exists on bridge {}".format(
+ bridge))
+ return
+
+ # check if original backup ifcfg file exists or create
+ orig_ifcfg_file = os.path.join(NET_CFG_PATH,
+ "ifcfg-{}.orig".format(real_interface))
+ ifcfg_file = orig_ifcfg_file[:-len('.orig')]
+ bridge_ifcfg_file = os.path.join(NET_CFG_PATH,
+ "ifcfg-{}".format(bridge))
+ if os.path.isfile(orig_ifcfg_file):
+ logging.debug("Original interface file found: "
+ "{}".format(orig_ifcfg_file))
+ else:
+ logging.info("No original ifcfg file found...will attempt to use "
+ "bridge ifcfg file and re-create")
+ if os.path.isfile(bridge_ifcfg_file):
+ ifcfg_params = generate_ifcfg_params(bridge_ifcfg_file, network)
+ if_content = """DEVICE={}
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=Ethernet
+NM_CONTROLLED=no""".format(real_interface)
+ for param, value in ifcfg_params.items():
+ if value:
+ if_content += "\n{}={}".format(param, value)
+ logging.debug("Interface file content:\n{}".format(if_content))
+ # write original backup
+ with open(orig_ifcfg_file, 'w') as fh:
+ fh.write(if_content)
+ logging.debug("Original interface file created: "
+ "{}".format(orig_ifcfg_file))
+ else:
+ logging.error("Unable to find original interface config file: {} "
+ "or bridge config file:{}".format(orig_ifcfg_file,
+ bridge_ifcfg_file))
+ raise FileNotFoundError("Unable to locate bridge or original "
+ "interface ifcfg file")
+
+ # move original file back and rewrite bridge ifcfg
+ shutil.move(orig_ifcfg_file, ifcfg_file)
+ bridge_content = """DEVICE={}
+DEVICETYPE=ovs
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes""".format(bridge)
+ with open(bridge_ifcfg_file, 'w') as fh:
+ fh.write(bridge_content)
+ # restart linux networking
+ logging.info("Restarting Linux networking")
+ try:
+ subprocess.check_call(['systemctl', 'restart', 'network'])
+ except subprocess.CalledProcessError:
+ logging.error("Failed to restart Linux networking")
+ raise
+
+
+def remove_ovs_bridge(network):
+ """
+ Unconfigure and remove an OVS bridge
+ :param network: Apex network to remove OVS bridge for
+ :return:
+ """
+ bridge = NET_MAP[network]
+ if is_ovs_bridge(bridge):
+ logging.info("Removing bridge: {}".format(bridge))
+ try:
+ subprocess.check_call(['ovs-vsctl', 'del-br', bridge])
+ except subprocess.CalledProcessError:
+ logging.error('Unable to destroy OVS bridge')
+ raise
+
+ logging.debug('Bridge destroyed')
+ bridge_ifcfg_file = os.path.join(NET_CFG_PATH,
+ "ifcfg-{}".format(bridge))
+ if os.path.isfile(bridge_ifcfg_file):
+ os.remove(bridge_ifcfg_file)
+ logging.debug("Bridge ifcfg file removed: {}".format(
+ bridge_ifcfg_file))
+ else:
+ logging.debug('Bridge ifcfg file not found')
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
index 6e116de2..e48b254f 100644
--- a/apex/overcloud/config.py
+++ b/apex/overcloud/config.py
@@ -44,7 +44,7 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
ovs_dpdk_br = ''
if ds['dataplane'] == 'fdio':
nets['tenant']['nic_mapping'][role]['phys_type'] = 'vpp_interface'
- if ds['sdn_controller'] == 'opendaylight':
+ if ds['sdn_controller'] == 'opendaylight' and role == 'compute':
nets['external'][0]['nic_mapping'][role]['phys_type'] = \
'vpp_interface'
ext_net = 'vpp_interface'
@@ -54,7 +54,7 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
.get('uio-driver')):
nets['tenant']['nic_mapping'][role]['uio-driver'] =\
ds['performance'][role.title()]['vpp']['uio-driver']
- if ds['sdn_controller'] == 'opendaylight':
+ if ds['sdn_controller'] == 'opendaylight' and role == 'compute':
nets['external'][0]['nic_mapping'][role]['uio-driver'] =\
ds['performance'][role.title()]['vpp']['uio-driver']
if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
@@ -62,6 +62,9 @@ def create_nic_template(network_settings, deploy_settings, role, template_dir,
nets['tenant']['nic_mapping'][role]['interface-options'] =\
ds['performance'][role.title()]['vpp']['interface-options']
+ if role == 'controller' and ds.get('sfc', None):
+ ext_net = 'interface'
+
template_output = template.render(
nets=nets,
role=role,
diff --git a/apex/overcloud/overcloud_deploy.py b/apex/overcloud/overcloud_deploy.py
index 3c108464..d37d73ca 100644
--- a/apex/overcloud/overcloud_deploy.py
+++ b/apex/overcloud/overcloud_deploy.py
@@ -30,7 +30,7 @@ from cryptography.hazmat.backends import default_backend as \
SDN_FILE_MAP = {
'opendaylight': {
- 'sfc': 'opendaylight_sfc.yaml',
+ 'sfc': 'neutron-sfc-opendaylight.yaml',
'vpn': 'neutron-bgpvpn-opendaylight.yaml',
'gluon': 'gluon.yaml',
'vpp': {
@@ -85,7 +85,7 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
if len(env_list) == 0:
try:
env_list.append(os.path.join(
- con.THT_ENV_DIR, sdn_map[ds['sdn_controller']]['default']))
+ con.THT_ENV_DIR, sdn_map['default']))
except KeyError:
logging.warning("Unable to find default file for SDN")
@@ -137,6 +137,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
logging.error("Detected 0 control or compute nodes. Control nodes: "
"{}, compute nodes{}".format(num_control, num_compute))
raise ApexDeployException("Invalid number of control or computes")
+ elif num_control > 1 and not ds['global_params']['ha_enabled']:
+ num_control = 1
cmd = "openstack overcloud deploy --templates --timeout {} " \
"--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
# build cmd env args
@@ -198,7 +200,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
}
- for mod_file, mod in uio_types:
+ for mod_file, mod in uio_types.items():
with open(mod_file, 'w') as fh:
fh.write('#!/bin/bash\n')
fh.write('exec /sbin/modprobe {}'.format(mod))
@@ -218,7 +220,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
{con.VIRT_RUN_CMD: "yum -y install "
"/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
"{}".format(OVS_NSH_KMOD_RPM)},
- {con.VIRT_RUN_CMD: "yum upgrade -y "
+ {con.VIRT_RUN_CMD: "yum downgrade -y "
"/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
"{}".format(OVS_NSH_RPM)}
])
@@ -234,7 +236,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
virt_cmds.extend([
{con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
{con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- con.DEFAULT_ODL_VERSION)},
+ ds_opts['odl_version'])},
{con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
{con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
"/root/puppet-opendaylight-"
@@ -337,37 +339,42 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
perf = False
# Modify OPNFV environment
+ # TODO: Change to build a dict and outputing yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
line = line.strip('\n')
+ output_line = line
if 'CloudDomain' in line:
- print(" CloudDomain: {}".format(ns['domain_name']))
- elif ds_opts['sdn_controller'] == 'opendaylight' and \
+ output_line = " CloudDomain: {}".format(ns['domain_name'])
+ elif 'replace_private_key' in line:
+ output_line = " key: '{}'".format(private_key)
+ elif 'replace_public_key' in line:
+ output_line = " key: '{}'".format(public_key)
+
+ if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts and ds_opts[
'odl_vpp_routing_node'] != 'dvr':
if 'opendaylight::vpp_routing_node' in line:
- print(" opendaylight::vpp_routing_node: ${}.${}".format(
- ds_opts['odl_vpp_routing_node'], ns['domain_name']))
+ output_line = (" opendaylight::vpp_routing_node: ${}.${}"
+ .format(ds_opts['odl_vpp_routing_node'],
+ ns['domain_name']))
elif 'ControllerExtraConfig' in line:
- print(" ControllerExtraConfig:\n "
- "tripleo::profile::base::neutron::agents::honeycomb"
- "::interface_role_mapping: ['{}:tenant-"
- "interface]'".format(tenant_ctrl_nic))
+ output_line = (" ControllerExtraConfig:\n "
+ "tripleo::profile::base::neutron::agents::"
+ "honeycomb::interface_role_mapping:"
+ " ['{}:tenant-interface]'"
+ .format(tenant_ctrl_nic))
elif 'NovaComputeExtraConfig' in line:
- print(" NovaComputeExtraConfig:\n "
- "tripleo::profile::base::neutron::agents::honeycomb"
- "::interface_role_mapping: ['{}:tenant-"
- "interface]'".format(tenant_comp_nic))
- else:
- print(line)
-
+ output_line = (" NovaComputeExtraConfig:\n "
+ "tripleo::profile::base::neutron::agents::"
+ "honeycomb::interface_role_mapping:"
+ " ['{}:tenant-interface]'"
+ .format(tenant_comp_nic))
elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
if 'NeutronVPPAgentPhysnets' in line:
- print(" NeutronVPPAgentPhysnets: 'datacentre:{}'".format(
- tenant_ctrl_nic))
- else:
- print(line)
- elif perf:
- line_printed = False
+ output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
+ format(tenant_ctrl_nic))
+
+ if perf:
for role in 'NovaCompute', 'Controller':
if role == 'NovaCompute':
perf_opts = perf_vpp_comp
@@ -375,42 +382,32 @@ def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
perf_opts = perf_vpp_ctrl
cfg = "{}ExtraConfig".format(role)
if cfg in line and perf_opts:
+ perf_line = ''
if 'main-core' in perf_opts:
- print(" {}:\n"
- " fdio::vpp_cpu_main_core: '{}'"
- "".format(cfg, perf_opts['main-core']))
- line_printed = True
- break
- elif 'corelist-workers' in perf_vpp_comp:
- print(" {}:\n"
- " fdio::vpp_cpu_corelist_workers: '{}'"
- "".format(cfg, perf_opts['corelist-workers']))
- line_printed = True
- break
+ perf_line += ("\n fdio::vpp_cpu_main_core: '{}'"
+ .format(perf_opts['main-core']))
+ if 'corelist-workers' in perf_opts:
+ perf_line += ("\n "
+ "fdio::vpp_cpu_corelist_workers: '{}'"
+ .format(perf_opts['corelist-workers']))
+ if perf_line:
+ output_line = (" {}:{}".format(cfg, perf_line))
# kernel args
# (FIXME) use compute's kernel settings for all nodes for now.
if 'ComputeKernelArgs' in line and perf_kern_comp:
kernel_args = ''
for k, v in perf_kern_comp.items():
- kernel_args += "{}={}".format(k, v)
+ kernel_args += "{}={} ".format(k, v)
if kernel_args:
- print("ComputeKernelArgs: '{}'".format(kernel_args))
- line_printed = True
+ output_line = " ComputeKernelArgs: '{}'".\
+ format(kernel_args)
elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
for k, v in OVS_PERF_MAP.items():
if k in line and v in perf_ovs_comp:
- print(" {}: {}".format(k, perf_ovs_comp[v]))
- line_printed = True
+ output_line = " {}: {}".format(k, perf_ovs_comp[v])
- if not line_printed:
- print(line)
- elif 'replace_private_key' in line:
- print(" key: '{}'".format(private_key))
- elif 'replace_public_key' in line:
- print(" key: '{}'".format(public_key))
- else:
- print(line)
+ print(output_line)
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
@@ -550,7 +547,7 @@ def create_congress_cmds(overcloud_file):
else:
cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
if driver == 'nova':
- cmd += '--config api_version="2.34"'
+ cmd += ' --config api_version="2.34"'
logging.debug("Congress command created: {}".format(cmd))
cmds.append(cmd)
return cmds
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index c8e347b7..793e43ac 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -43,10 +43,6 @@ VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'master']
class DeploySettings(dict):
"""
This class parses a APEX deploy settings yaml file into an object
-
- Currently the parsed object is dumped into a bash global definition file
- for deploy.sh consumption. This object will later be used directly as
- deployment script move to python.
"""
def __init__(self, filename):
if isinstance(filename, str):
diff --git a/apex/settings/network_settings.py b/apex/settings/network_settings.py
index 14870078..f6566834 100644
--- a/apex/settings/network_settings.py
+++ b/apex/settings/network_settings.py
@@ -35,10 +35,6 @@ class NetworkSettings(dict):
The resulting object will be used later to generate network environment
file as well as configuring post deployment networks.
-
- Currently the parsed object is dumped into a bash global definition file
- for deploy.sh consumption. This object will later be used directly as
- deployment script move to python.
"""
def __init__(self, filename):
init_dict = {}
diff --git a/apex/tests/config/bad_ifcfg-br-external b/apex/tests/config/bad_ifcfg-br-external
new file mode 100644
index 00000000..85b81959
--- /dev/null
+++ b/apex/tests/config/bad_ifcfg-br-external
@@ -0,0 +1,8 @@
+DEVICE=br-external
+DEVICETYPE=ovs
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes
+IPADDR=172.30.9.66
+NETMASK=255.255.255.0
diff --git a/apex/tests/config/bad_nova_output.json b/apex/tests/config/bad_nova_output.json
new file mode 100644
index 00000000..137750e5
--- /dev/null
+++ b/apex/tests/config/bad_nova_output.json
@@ -0,0 +1,23 @@
+[
+ {
+ "Status": "ACTIVE",
+ "Networks": "",
+ "ID": "a5ff8aeb-5fd0-467f-9d89-791dfbc6267b",
+ "Image Name": "overcloud-full",
+ "Name": "test3"
+ },
+ {
+ "Status": "ACTIVE",
+ "Networks": "",
+ "ID": "c8be26ae-6bef-4841-bb03-c7f336cfd785",
+ "Image Name": "overcloud-full",
+ "Name": "test2"
+ },
+ {
+ "Status": "ACTIVE",
+ "Networks": "",
+ "ID": "105d1c61-78d3-498f-9191-6b21823b8544",
+ "Image Name": "overcloud-full",
+ "Name": "test1"
+ }
+]
diff --git a/apex/tests/config/ifcfg-br-dummy b/apex/tests/config/ifcfg-br-dummy
new file mode 100644
index 00000000..117ca726
--- /dev/null
+++ b/apex/tests/config/ifcfg-br-dummy
@@ -0,0 +1,9 @@
+DEVICE=br-dummy
+DEVICETYPE=ovs
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes
+IPADDR=152.30.9.11
+NETMASK=255.255.255.0
+PEERDNS=no \ No newline at end of file
diff --git a/apex/tests/config/ifcfg-br-external b/apex/tests/config/ifcfg-br-external
new file mode 100644
index 00000000..9717d6e3
--- /dev/null
+++ b/apex/tests/config/ifcfg-br-external
@@ -0,0 +1,10 @@
+DEVICE=br-external
+DEVICETYPE=ovs
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes
+IPADDR=172.30.9.66
+NETMASK=255.255.255.0
+GATEWAY=172.30.9.1
+#DNS1=1.1.1.1
diff --git a/apex/tests/config/ifcfg-dummy b/apex/tests/config/ifcfg-dummy
new file mode 100644
index 00000000..f9ca21d4
--- /dev/null
+++ b/apex/tests/config/ifcfg-dummy
@@ -0,0 +1,7 @@
+DEVICE=enpfakes0
+TYPE=Ethernet
+ONBOOT=yes
+BOOTPROTO=static
+NM_CONTROLLED=no
+IPADDR=152.30.9.11
+NETMASK=255.255.255.0
diff --git a/apex/tests/config/nova_output.json b/apex/tests/config/nova_output.json
new file mode 100644
index 00000000..1348ef24
--- /dev/null
+++ b/apex/tests/config/nova_output.json
@@ -0,0 +1,23 @@
+[
+ {
+ "Status": "ACTIVE",
+ "Networks": "ctlplane=192.30.9.9",
+ "ID": "a5ff8aeb-5fd0-467f-9d89-791dfbc6267b",
+ "Image Name": "overcloud-full",
+ "Name": "overcloud-novacompute-1"
+ },
+ {
+ "Status": "ACTIVE",
+ "Networks": "ctlplane=192.30.9.10",
+ "ID": "c8be26ae-6bef-4841-bb03-c7f336cfd785",
+ "Image Name": "overcloud-full",
+ "Name": "overcloud-novacompute-0"
+ },
+ {
+ "Status": "ACTIVE",
+ "Networks": "ctlplane=192.30.9.8",
+ "ID": "105d1c61-78d3-498f-9191-6b21823b8544",
+ "Image Name": "overcloud-full",
+ "Name": "overcloud-controller-0"
+ }
+]
diff --git a/apex/tests/config/test_overcloudrc b/apex/tests/config/test_overcloudrc
new file mode 100644
index 00000000..2707184d
--- /dev/null
+++ b/apex/tests/config/test_overcloudrc
@@ -0,0 +1,17 @@
+# Clear any old environment that may conflict.
+for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
+export OS_USERNAME=admin
+export OS_BAREMETAL_API_VERSION=1.29
+export NOVA_VERSION=1.1
+export OS_PROJECT_NAME=admin
+export OS_PASSWORD=Wd8ruyf6qG8cmcms6dq2HM93f
+export OS_NO_CACHE=True
+export COMPUTE_API_VERSION=1.1
+export no_proxy=,172.30.9.29,192.30.9.5
+export OS_CLOUDNAME=overcloud
+export OS_AUTH_URL=http://172.30.9.29:5000/v2.0
+export IRONIC_API_VERSION=1.29
+export OS_AUTH_TYPE=password
+export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available"
+export OS_PROJECT_ID=4695721d82c1421094005ef4ab86d33a
+export OS_TENANT_NAME=admin
diff --git a/apex/tests/constants.py b/apex/tests/constants.py
index 47e63e2c..eec64296 100644
--- a/apex/tests/constants.py
+++ b/apex/tests/constants.py
@@ -10,3 +10,4 @@
TEST_CONFIG_DIR = 'config'
TEST_BUILD_DIR = 'build'
TEST_PLAYBOOK_DIR = 'playbooks'
+TEST_DUMMY_CONFIG = 'apex/tests/config'
diff --git a/apex/tests/playbooks/test_failed_playbook.yaml b/apex/tests/playbooks/test_failed_playbook.yaml
new file mode 100644
index 00000000..d12cefb1
--- /dev/null
+++ b/apex/tests/playbooks/test_failed_playbook.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ tasks:
+ - fail:
+ msg: "Failure to test with"
diff --git a/apex/tests/smoke_tests/execute_smoke_tests.sh b/apex/tests/smoke_tests/execute_smoke_tests.sh
deleted file mode 100755
index 27f95251..00000000
--- a/apex/tests/smoke_tests/execute_smoke_tests.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-python ~/snaps/snaps/test_runner.py -e ~stack/overcloudrc -n external -c -a -i -f -k -l INFO &> ~stack/smoke-tests.out \ No newline at end of file
diff --git a/apex/tests/smoke_tests/execute_tests.yml b/apex/tests/smoke_tests/execute_tests.yml
deleted file mode 100644
index 5042d230..00000000
--- a/apex/tests/smoke_tests/execute_tests.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: all
- become: yes
- become_method: sudo
- become_user: root
-
- tasks:
- - name: Copy execute_smoke_tests.sh
- copy: src=execute_smoke_tests.sh dest=~/execute_smoke_tests.sh mode=0755
- - name: Execute Tests
- command: sh ~/execute_smoke_tests.sh | tee ~/unit_tests.out \ No newline at end of file
diff --git a/apex/tests/smoke_tests/prepare_undercloud.yml b/apex/tests/smoke_tests/prepare_undercloud.yml
deleted file mode 100644
index 7ad769c0..00000000
--- a/apex/tests/smoke_tests/prepare_undercloud.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: all
- become: yes
- become_method: sudo
- become_user: root
-
- tasks:
- - git: repo=https://gerrit.opnfv.org/gerrit/snaps dest=~/snaps
- - command: pip install -e ~/snaps/
diff --git a/apex/tests/smoke_tests/smoke_tests.yml b/apex/tests/smoke_tests/smoke_tests.yml
deleted file mode 100644
index b67c194f..00000000
--- a/apex/tests/smoke_tests/smoke_tests.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- include: prepare_undercloud.yml
-- include: execute_tests.yml \ No newline at end of file
diff --git a/apex/tests/test_apex_clean.py b/apex/tests/test_apex_clean.py
index d0b87917..b6b9d428 100644
--- a/apex/tests/test_apex_clean.py
+++ b/apex/tests/test_apex_clean.py
@@ -8,34 +8,95 @@
##############################################################################
import mock
+import os
import pyipmi
import pyipmi.chassis
from mock import patch
-from nose import tools
+from nose.tools import (
+ assert_raises,
+ assert_equal
+)
from apex import clean_nodes
+from apex import clean
+from apex.tests import constants as con
-class TestClean(object):
+class dummy_domain:
+
+ def isActive(self):
+ return True
+
+ def destroy(self):
+ pass
+
+ def undefine(self):
+ pass
+
+
+class dummy_vol:
+
+ def wipe(self, *args):
+ pass
+
+ def delete(self, *args):
+ pass
+
+
+class dummy_pool:
+
+ def storageVolLookupByName(self, *args, **kwargs):
+ return dummy_vol()
+
+ def refresh(self):
+ pass
+
+
+class TestClean:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@classmethod
- def teardown_class(klass):
+ def teardown_class(cls):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
"""This method is run once after _each_ test method is executed"""
- def test_clean(self):
+ def test_clean_nodes(self):
with mock.patch.object(pyipmi.Session, 'establish') as mock_method:
with patch.object(pyipmi.chassis.Chassis,
'chassis_control_power_down') as mock_method2:
clean_nodes('apex/tests/config/inventory.yaml')
- tools.assert_equal(mock_method.call_count, 5)
- tools.assert_equal(mock_method2.call_count, 5)
+ assert_equal(mock_method.call_count, 5)
+ assert_equal(mock_method2.call_count, 5)
+
+ @patch('virtualbmc.manager.VirtualBMCManager.list',
+ return_value=[{'domain_name': 'dummy1'}, {'domain_name': 'dummy2'}])
+ @patch('virtualbmc.manager.VirtualBMCManager.delete')
+ def test_vmbc_clean(self, vbmc_del_func, vbmc_list_func):
+ assert clean.clean_vbmcs() is None
+
+ def test_clean_ssh_keys(self):
+ ssh_file = os.path.join(con.TEST_DUMMY_CONFIG, 'authorized_dummy')
+ with open(ssh_file, 'w') as fh:
+ fh.write('ssh-rsa 2LwlofGD8rNUFAlafY2/oUsKOf1mQ1 stack@undercloud')
+ assert clean.clean_ssh_keys(ssh_file) is None
+ with open(ssh_file, 'r') as fh:
+ output = fh.read()
+ assert 'stack@undercloud' not in output
+ if os.path.isfile(ssh_file):
+ os.remove(ssh_file)
+
+ @patch('libvirt.open')
+ def test_clean_vms(self, mock_libvirt):
+ ml = mock_libvirt.return_value
+ ml.storagePoolLookupByName.return_value = dummy_pool()
+ ml.listDefinedDomains.return_value = ['undercloud']
+ ml.lookupByName.return_value = dummy_domain()
+ assert clean.clean_vms() is None
diff --git a/apex/tests/test_apex_common_parsers.py b/apex/tests/test_apex_common_parsers.py
new file mode 100644
index 00000000..d272a749
--- /dev/null
+++ b/apex/tests/test_apex_common_parsers.py
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+
+from apex.tests import constants as con
+from apex.common import parsers as apex_parsers
+from apex.common.exceptions import ApexDeployException
+from nose.tools import (
+ assert_is_instance,
+ assert_dict_equal,
+ assert_raises
+)
+
+
+class TestCommonParsers:
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_parse_nova_output(self):
+ output = apex_parsers.parse_nova_output(
+ os.path.join(con.TEST_DUMMY_CONFIG, 'nova_output.json'))
+ assert_is_instance(output, dict)
+ nodes = {
+ 'overcloud-controller-0': '192.30.9.8',
+ 'overcloud-novacompute-0': '192.30.9.10',
+ 'overcloud-novacompute-1': '192.30.9.9'
+ }
+ assert_dict_equal(output, nodes)
+
+ def test_negative_parse_nova_output(self):
+ assert_raises(ApexDeployException, apex_parsers.parse_nova_output,
+ os.path.join(con.TEST_DUMMY_CONFIG,
+ 'bad_nova_output.json'))
+
+ def test_parse_overcloudrc(self):
+ output = apex_parsers.parse_overcloudrc(
+ os.path.join(con.TEST_DUMMY_CONFIG, 'test_overcloudrc'))
+ assert_is_instance(output, dict)
+ assert 'OS_AUTH_TYPE' in output.keys()
+ assert output['OS_AUTH_TYPE'] == 'password'
+ assert 'OS_PASSWORD' in output.keys()
+ assert output['OS_PASSWORD'] == 'Wd8ruyf6qG8cmcms6dq2HM93f'
+
+ def test_parse_ifcfg(self):
+ output = apex_parsers.parse_ifcfg_file(
+ os.path.join(con.TEST_DUMMY_CONFIG, 'ifcfg-br-external'))
+ assert_is_instance(output, dict)
+ assert 'IPADDR' in output.keys()
+ assert output['IPADDR'] == '172.30.9.66'
+ assert 'NETMASK' in output.keys()
+ assert output['NETMASK'] == '255.255.255.0'
+ assert 'DNS1' in output.keys()
+ assert not output['DNS1']
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index 357ad1b0..aee39a75 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -8,7 +8,6 @@
##############################################################################
import ipaddress
-import nose.tools
import os
from apex.common import utils
@@ -17,43 +16,53 @@ from apex.tests.constants import (
TEST_CONFIG_DIR,
TEST_PLAYBOOK_DIR)
+from nose.tools import (
+ assert_equal,
+ assert_is_instance,
+ assert_not_is_instance,
+ assert_raises)
+
NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
-class TestCommonUtils(object):
+class TestCommonUtils:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@classmethod
- def teardown_class(klass):
+ def teardown_class(cls):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
"""This method is run once after _each_ test method is executed"""
def test_str2bool(self):
- nose.tools.assert_equal(utils.str2bool(True), True)
- nose.tools.assert_equal(utils.str2bool(False), False)
- nose.tools.assert_equal(utils.str2bool("True"), True)
- nose.tools.assert_equal(utils.str2bool("YES"), True)
+ assert_equal(utils.str2bool(True), True)
+ assert_equal(utils.str2bool(False), False)
+ assert_equal(utils.str2bool("True"), True)
+ assert_equal(utils.str2bool("YES"), True)
def test_parse_yaml(self):
- nose.tools.assert_is_instance(utils.parse_yaml(NET_SETS), dict)
+ assert_is_instance(utils.parse_yaml(NET_SETS), dict)
def test_dict_to_string(self):
net_settings = NetworkSettings(NET_SETS)
output = utils.dict_objects_to_str(net_settings)
- nose.tools.assert_is_instance(output, dict)
+ assert_is_instance(output, dict)
for k, v in output.items():
- nose.tools.assert_is_instance(k, str)
- nose.tools.assert_not_is_instance(v, ipaddress.IPv4Address)
+ assert_is_instance(k, str)
+ assert_not_is_instance(v, ipaddress.IPv4Address)
def test_run_ansible(self):
playbook = 'apex/tests/playbooks/test_playbook.yaml'
- nose.tools.assert_equal(
- utils.run_ansible(None, os.path.join(playbook),
- dry_run=True), None)
+ assert_equal(utils.run_ansible(None, os.path.join(playbook),
+ dry_run=True), None)
+
+ def test_failed_run_ansible(self):
+ playbook = 'apex/tests/playbooks/test_failed_playbook.yaml'
+ assert_raises(Exception, utils.run_ansible, None,
+ os.path.join(playbook), dry_run=True)
diff --git a/apex/tests/test_apex_deploy_settings.py b/apex/tests/test_apex_deploy_settings.py
index 312c1f3a..0338087c 100644
--- a/apex/tests/test_apex_deploy_settings.py
+++ b/apex/tests/test_apex_deploy_settings.py
@@ -64,16 +64,16 @@ deploy_options:
""",)
-class TestIpUtils(object):
+class TestIpUtils:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@classmethod
- def teardown_class(klass):
+ def teardown_class(cls):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
diff --git a/apex/tests/test_apex_inventory.py b/apex/tests/test_apex_inventory.py
index ed95c53c..cca8068b 100644
--- a/apex/tests/test_apex_inventory.py
+++ b/apex/tests/test_apex_inventory.py
@@ -8,13 +8,11 @@
##############################################################################
import os
-import sys
-from io import StringIO
-from nose.tools import assert_equal
-from nose.tools import assert_is_instance
-from nose.tools import assert_raises
-from nose.tools import assert_regexp_matches
+from nose.tools import (
+ assert_equal,
+ assert_is_instance,
+ assert_raises)
from apex import Inventory
from apex.inventory.inventory import InventoryException
@@ -27,16 +25,16 @@ inventory_files = ('intel_pod2_settings.yaml',
files_dir = os.path.join(TEST_CONFIG_DIR, 'inventory')
-class TestInventory(object):
+class TestInventory:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@classmethod
- def teardown_class(klass):
+ def teardown_class(cls):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
diff --git a/apex/tests/test_apex_ip_utils.py b/apex/tests/test_apex_ip_utils.py
index 04a1b2bb..eb4549d2 100644
--- a/apex/tests/test_apex_ip_utils.py
+++ b/apex/tests/test_apex_ip_utils.py
@@ -12,19 +12,21 @@ import re
from ipaddress import IPv4Address
from ipaddress import ip_network
-from nose.tools import assert_equal
-from nose.tools import assert_false
-from nose.tools import assert_is_instance
-from nose.tools import assert_raises
-from nose.tools import assert_regexp_matches
-from nose.tools import assert_true
-
-from apex.network.ip_utils import IPUtilsException
-from apex.network.ip_utils import _validate_ip_range
-from apex.network.ip_utils import find_gateway
-from apex.network.ip_utils import get_interface
-from apex.network.ip_utils import get_ip
-from apex.network.ip_utils import get_ip_range
+from nose.tools import (
+ assert_equal,
+ assert_false,
+ assert_is_instance,
+ assert_raises,
+ assert_regexp_matches,
+ assert_true)
+
+from apex.network.ip_utils import (
+ IPUtilsException,
+ _validate_ip_range,
+ find_gateway,
+ get_interface,
+ get_ip,
+ get_ip_range)
ip4_pattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
ip4_range_pattern = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3},\d{1,'
@@ -40,19 +42,19 @@ def get_default_gateway_linux():
return fields[0]
-class TestIpUtils(object):
+class TestIpUtils:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
- klass.iface_name = get_default_gateway_linux()
- iface = get_interface(klass.iface_name)
- klass.iface = iface
+ cls.iface_name = get_default_gateway_linux()
+ iface = get_interface(cls.iface_name)
+ cls.iface = iface
@classmethod
- def teardown_class(klass):
+ def teardown_class(cls):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
diff --git a/apex/tests/test_apex_network_environment.py b/apex/tests/test_apex_network_environment.py
index 5047adbb..4e3ae111 100644
--- a/apex/tests/test_apex_network_environment.py
+++ b/apex/tests/test_apex_network_environment.py
@@ -11,10 +11,11 @@ import os
from copy import copy
-from nose.tools import assert_equal
-from nose.tools import assert_is_instance
-from nose.tools import assert_not_equal
-from nose.tools import assert_raises
+from nose.tools import (
+ assert_equal,
+ assert_is_instance,
+ assert_not_equal,
+ assert_raises)
from apex.common.constants import (
EXTERNAL_NETWORK,
@@ -29,23 +30,23 @@ from apex.tests.constants import TEST_CONFIG_DIR
from apex.tests.constants import TEST_BUILD_DIR
-class TestNetworkEnvironment(object):
+class TestNetworkEnvironment:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
- klass.ns = NetworkSettings(
+ cls.ns = NetworkSettings(
os.path.join(TEST_CONFIG_DIR, 'network/network_settings.yaml'))
- klass.ns_vlans = NetworkSettings(
+ cls.ns_vlans = NetworkSettings(
os.path.join(TEST_CONFIG_DIR,
'network/network_settings_vlans.yaml'))
- klass.ns_ipv6 = NetworkSettings(
+ cls.ns_ipv6 = NetworkSettings(
os.path.join(TEST_CONFIG_DIR, 'network/network_settings_v6.yaml'))
@classmethod
def teardown_class(klass):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
diff --git a/apex/tests/test_apex_network_jumphost.py b/apex/tests/test_apex_network_jumphost.py
new file mode 100644
index 00000000..da9703e3
--- /dev/null
+++ b/apex/tests/test_apex_network_jumphost.py
@@ -0,0 +1,299 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+import shutil
+import subprocess
+
+from apex import NetworkSettings
+from apex.tests import constants as con
+from apex.common import constants as apex_constants
+from apex.network import jumphost
+from apex.common.exceptions import JumpHostNetworkException
+from ipaddress import IPv4Interface
+from mock import patch
+from nose.tools import (
+ assert_is_instance,
+ assert_dict_equal,
+ assert_raises,
+ assert_true,
+ assert_false
+)
+
+
+def bridge_show_output(*args, **kwargs):
+ return b"""
+ b6f1b54a-b8ba-4e86-9c5b-733ab71b5712
+ Bridge br-admin
+ Port br-admin
+ Interface br-admin
+ type: internal
+ ovs_version: "2.5.0"
+"""
+
+
+def bridge_port_list(*args, **kwargs):
+ return b"""
+enp6s0
+vnet1
+"""
+
+
+def subprocess_exception(*args, **kwargs):
+ raise subprocess.CalledProcessError(returncode=2, cmd='dummy')
+
+
+class TestNetworkJumpHost:
+ @classmethod
+ def setup_class(cls):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(cls):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setup(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ @patch('subprocess.check_output', side_effect=bridge_show_output)
+ def test_is_ovs_bridge(self, bridge_output_function):
+ assert_true(jumphost.is_ovs_bridge('br-admin'))
+ assert_false(jumphost.is_ovs_bridge('br-blah'))
+
+ @patch('subprocess.check_output', side_effect=bridge_port_list)
+ def test_dump_ovs_ports(self, bridge_function):
+ output = jumphost.dump_ovs_ports('br-admin')
+ assert_is_instance(output, list)
+ assert 'enp6s0' in output
+
+ def test_generate_ifcfg_params(self):
+ output = jumphost.generate_ifcfg_params(
+ os.path.join(con.TEST_DUMMY_CONFIG, 'ifcfg-br-external'),
+ apex_constants.EXTERNAL_NETWORK)
+ assert_is_instance(output, dict)
+ assert output['IPADDR'] == '172.30.9.66'
+ assert output['PEERDNS'] == 'no'
+
+ def test_negative_generate_ifcfg_params(self):
+ assert_raises(JumpHostNetworkException, jumphost.generate_ifcfg_params,
+ os.path.join(con.TEST_DUMMY_CONFIG,
+ 'bad_ifcfg-br-external'),
+ apex_constants.EXTERNAL_NETWORK)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.ip_utils.get_interface', return_value=IPv4Interface(
+ '10.10.10.2'))
+ def test_configure_bridges_ip_exists(self, interface_function,
+ subprocess_func):
+ ns = NetworkSettings(os.path.join(con.TEST_CONFIG_DIR,
+ 'network', 'network_settings.yaml'))
+ assert jumphost.configure_bridges(ns) is None
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.ip_utils.get_interface', return_value=None)
+ def test_configure_bridges_no_ip(self, interface_function,
+ subprocess_func):
+ ns = NetworkSettings(os.path.join(con.TEST_CONFIG_DIR,
+ 'network', 'network_settings.yaml'))
+ assert jumphost.configure_bridges(ns) is None
+
+ @patch('subprocess.check_call', side_effect=subprocess_exception)
+ @patch('apex.network.ip_utils.get_interface', return_value=None)
+ def test_negative_configure_bridges(self, interface_function,
+ subprocess_func):
+ ns = NetworkSettings(os.path.join(con.TEST_CONFIG_DIR,
+ 'network', 'network_settings.yaml'))
+ assert_raises(subprocess.CalledProcessError,
+ jumphost.configure_bridges, ns)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=[])
+ def test_attach_interface(self, dump_ports_func, is_bridge_func,
+ subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-br-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ output = jumphost.attach_interface_to_ovs('br-admin', 'enpfakes0',
+ 'admin')
+ assert output is None
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0.orig'))
+
+ for ifcfg in ('ifcfg-enpfakes0', 'ifcfg-enpfakes0.orig',
+ 'ifcfg-br-admin'):
+ ifcfg_path = os.path.join(ifcfg_dir, ifcfg)
+ if os.path.isfile(ifcfg_path):
+ os.remove(ifcfg_path)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=['dummy_int'])
+ def test_already_attached_interface(self, dump_ports_func, is_bridge_func,
+ subprocess_func):
+ output = jumphost.attach_interface_to_ovs('br-dummy', 'dummy_int',
+ 'admin')
+ assert output is None
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=[])
+ def test_negative_attach_interface(self, dump_ports_func, is_bridge_func,
+ subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ assert_raises(FileNotFoundError, jumphost.attach_interface_to_ovs,
+ 'br-dummy', 'dummy_int', 'admin')
+
+ @patch('subprocess.check_call', side_effect=subprocess_exception)
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=[])
+ def test_negative_attach_interface_process_error(
+ self, dump_ports_func, is_bridge_func, subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-br-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ assert_raises(subprocess.CalledProcessError,
+ jumphost.attach_interface_to_ovs,
+ 'br-admin', 'enpfakes0', 'admin')
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0.orig'))
+
+ for ifcfg in ('ifcfg-enpfakes0', 'ifcfg-enpfakes0.orig',
+ 'ifcfg-br-admin'):
+ ifcfg_path = os.path.join(ifcfg_dir, ifcfg)
+ if os.path.isfile(ifcfg_path):
+ os.remove(ifcfg_path)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=['enpfakes0'])
+ def test_detach_interface(self, dump_ports_func, is_bridge_func,
+ subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-br-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ output = jumphost.detach_interface_from_ovs('admin')
+ assert output is None
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+
+ for ifcfg in ('ifcfg-enpfakes0', 'ifcfg-enpfakes0.orig',
+ 'ifcfg-br-admin'):
+ ifcfg_path = os.path.join(ifcfg_dir, ifcfg)
+ if os.path.isfile(ifcfg_path):
+ os.remove(ifcfg_path)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=['enpfakes0'])
+ def test_detach_interface_orig_exists(self, dump_ports_func,
+ is_bridge_func, subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-br-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-enpfakes0.orig'))
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ output = jumphost.detach_interface_from_ovs('admin')
+ assert output is None
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ assert not os.path.isfile(os.path.join(ifcfg_dir,
+ 'ifcfg-enpfakes0.orig'))
+ for ifcfg in ('ifcfg-enpfakes0', 'ifcfg-enpfakes0.orig',
+ 'ifcfg-br-admin'):
+ ifcfg_path = os.path.join(ifcfg_dir, ifcfg)
+ if os.path.isfile(ifcfg_path):
+ os.remove(ifcfg_path)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=False)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=[])
+ def test_detach_interface_no_bridge(self, dump_ports_func,
+ is_bridge_func, subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ output = jumphost.detach_interface_from_ovs('admin')
+ assert output is None
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=[])
+ def test_detach_interface_no_int_to_remove(self, dump_ports_func,
+ is_bridge_func,
+ subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ output = jumphost.detach_interface_from_ovs('admin')
+ assert output is None
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=['enpfakes0'])
+ def test_negative_detach_interface(self, dump_ports_func, is_bridge_func,
+ subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ assert_raises(FileNotFoundError, jumphost.detach_interface_from_ovs,
+ 'admin')
+
+ @patch('subprocess.check_call', side_effect=subprocess_exception)
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ @patch('apex.network.jumphost.dump_ovs_ports', return_value=['enpfakes0'])
+ def test_negative_detach_interface_process_error(
+ self, dump_ports_func, is_bridge_func, subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-br-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ assert_raises(subprocess.CalledProcessError,
+ jumphost.detach_interface_from_ovs, 'admin')
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-enpfakes0'))
+ assert os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+
+ for ifcfg in ('ifcfg-enpfakes0', 'ifcfg-enpfakes0.orig',
+ 'ifcfg-br-admin'):
+ ifcfg_path = os.path.join(ifcfg_dir, ifcfg)
+ if os.path.isfile(ifcfg_path):
+ os.remove(ifcfg_path)
+
+ @patch('subprocess.check_call')
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ def test_remove_ovs_bridge(self, is_bridge_func, subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ shutil.copyfile(os.path.join(ifcfg_dir, 'ifcfg-br-dummy'),
+ os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+ assert jumphost.remove_ovs_bridge(apex_constants.ADMIN_NETWORK) is None
+ assert not os.path.isfile(os.path.join(ifcfg_dir, 'ifcfg-br-admin'))
+
+ # test without file
+ assert jumphost.remove_ovs_bridge(apex_constants.ADMIN_NETWORK) is None
+
+ @patch('subprocess.check_call', side_effect=subprocess_exception)
+ @patch('apex.network.jumphost.is_ovs_bridge', return_value=True)
+ def test_negative_remove_ovs_bridge(self, is_bridge_func, subprocess_func):
+ ifcfg_dir = con.TEST_DUMMY_CONFIG
+ jumphost.NET_CFG_PATH = ifcfg_dir
+ assert_raises(subprocess.CalledProcessError,
+ jumphost.remove_ovs_bridge,
+ apex_constants.ADMIN_NETWORK)
diff --git a/apex/tests/test_apex_network_settings.py b/apex/tests/test_apex_network_settings.py
index adff8cff..5e2fa072 100644
--- a/apex/tests/test_apex_network_settings.py
+++ b/apex/tests/test_apex_network_settings.py
@@ -27,16 +27,16 @@ from apex.tests.constants import TEST_CONFIG_DIR
files_dir = os.path.join(TEST_CONFIG_DIR, 'network')
-class TestNetworkSettings(object):
+class TestNetworkSettings:
@classmethod
- def setup_class(klass):
+ def setup_class(cls):
"""This method is run once for each class before any tests are run"""
@classmethod
- def teardown_class(klass):
+ def teardown_class(cls):
"""This method is run once for each class _after_ all tests are run"""
- def setUp(self):
+ def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index a1af4d00..7b7c35f0 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -10,7 +10,9 @@
import libvirt
import logging
import os
+import platform
import shutil
+import subprocess
import time
from apex.virtual import virtual_utils as virt_utils
@@ -56,11 +58,13 @@ class Undercloud:
networks = ['admin']
if self.external_net:
networks.append('external')
+ console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0'
+
self.vm = vm_lib.create_vm(name='undercloud',
image=self.volume,
baremetal_interfaces=networks,
direct_boot='overcloud-full',
- kernel_args=['console=ttyS0',
+ kernel_args=['console={}'.format(console),
'root=/dev/sda'],
default_network=True,
template_dir=self.template_path)
@@ -107,14 +111,25 @@ class Undercloud:
def configure(self, net_settings, playbook, apex_temp_dir):
"""
Configures undercloud VM
- :return:
+        :param net_settings: Network settings for deployment
+ :param playbook: playbook to use to configure undercloud
+ :param apex_temp_dir: temporary apex directory to hold configs/logs
+ :return: None
"""
- # TODO(trozet): If undercloud install fails we can add a retry
+
logging.info("Configuring Undercloud...")
# run ansible
ansible_vars = Undercloud.generate_config(net_settings)
ansible_vars['apex_temp_dir'] = apex_temp_dir
- utils.run_ansible(ansible_vars, playbook, host=self.ip, user='stack')
+ try:
+ utils.run_ansible(ansible_vars, playbook, host=self.ip,
+ user='stack')
+ except subprocess.CalledProcessError:
+ logging.error(
+ "Failed to install undercloud..."
+ "please check log: {}".format(os.path.join(
+ apex_temp_dir, 'apex-undercloud-install.log')))
+ raise ApexUndercloudException('Failed to install undercloud')
logging.info("Undercloud installed!")
def setup_volumes(self):
@@ -203,7 +218,7 @@ class Undercloud:
"enabled": ns_external['enabled']
}
- # FIXME (trozet): for now hardcoding aarch64 to false
- config['aarch64'] = False
+ # Check if this is an ARM deployment
+ config['aarch64'] = platform.machine() == 'aarch64'
return config
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
index 3af7d1e8..3b2c4462 100755
--- a/apex/virtual/configure_vm.py
+++ b/apex/virtual/configure_vm.py
@@ -11,6 +11,7 @@ import libvirt
import logging
import math
import os
+import platform
import random
MAX_NUM_MACS = math.trunc(0xff / 2)
@@ -92,9 +93,9 @@ def create_vm_storage(domain, vol_path='/var/lib/libvirt/images'):
def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
- arch='x86_64', engine='kvm', memory=8192, bootdev='network',
- cpus=4, nic_driver='virtio', macs=[], direct_boot=None,
- kernel_args=None, default_network=False,
+ arch=platform.machine(), engine='kvm', memory=8192,
+ bootdev='network', cpus=4, nic_driver='virtio', macs=[],
+ direct_boot=None, kernel_args=None, default_network=False,
template_dir='/usr/share/opnfv-apex'):
# TODO(trozet): fix name here to be image since it is full path of qcow2
create_vm_storage(name)
@@ -117,6 +118,9 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
'user_interface': '',
}
+ # assign scsi as default for aarch64
+ if arch == 'aarch64' and diskbus == 'sata':
+ diskbus = 'scsi'
# Configure the bus type for the target disk device
params['diskbus'] = diskbus
nicparams = {
@@ -149,14 +153,6 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
<model type='%(nicdriver)s'/>
</interface>""" % bm_interface_params
- params['enable_serial_console'] = """
- <serial type='pty'>
- <target port='0'/>
- </serial>
- <console type='pty'>
- <target type='serial' port='0'/>
- </console>
- """
if direct_boot:
params['direct_boot'] = """
<kernel>/var/lib/libvirt/images/%(direct_boot)s.vmlinuz</kernel>
@@ -168,7 +164,6 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
""" % ' '.join(kernel_args)
if arch == 'aarch64':
-
params['direct_boot'] += """
<loader readonly='yes' \
type='pflash'>/usr/share/AAVMF/AAVMF_CODE.fd</loader>
@@ -190,6 +185,14 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'],
</channel>
"""
else:
+ params['enable_serial_console'] = """
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ """
params['user_interface'] = """
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'/>
diff --git a/apex/virtual/virtual_utils.py b/apex/virtual/virtual_utils.py
index 5ebb0582..255d2c69 100644
--- a/apex/virtual/virtual_utils.py
+++ b/apex/virtual/virtual_utils.py
@@ -11,6 +11,7 @@ import copy
import iptc
import logging
import os
+import platform
import pprint
import subprocess
@@ -26,7 +27,7 @@ DEFAULT_VIRT_IP = '192.168.122.1'
def generate_inventory(target_file, ha_enabled=False, num_computes=1,
- controller_ram=DEFAULT_RAM, arch='x86_64',
+ controller_ram=DEFAULT_RAM, arch=platform.machine(),
compute_ram=DEFAULT_RAM, vcpus=4):
"""
Generates inventory file for virtual deployments
@@ -91,8 +92,11 @@ def host_setup(node):
libvirt_sasl_username=False)
# TODO(trozet): add support for firewalld
- subprocess.call(['systemctl', 'stop', 'firewalld'])
-
+ try:
+ subprocess.check_call(['systemctl', 'stop', 'firewalld'])
+ subprocess.check_call(['systemctl', 'restart', 'libvirtd'])
+ except subprocess.CalledProcessError:
+ logging.warning('Failed to stop firewalld and restart libvirtd')
# iptables rule
rule = iptc.Rule()
rule.protocol = 'udp'
diff --git a/build/CentOS-Updates.repo b/build/CentOS-Updates.repo
new file mode 100644
index 00000000..8fc45d89
--- /dev/null
+++ b/build/CentOS-Updates.repo
@@ -0,0 +1,5 @@
+#released updates
+[updates-old]
+name=CentOS-$releasever - Old Updates
+baseurl=http://mirror.centos.org/centos/7.3.1611/updates/x86_64/
+gpgcheck=0
diff --git a/build/Makefile b/build/Makefile
index 7cc8736e..5f078339 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -271,7 +271,11 @@ iso: iso-clean images rpms $(CENTISO)
@ln $(RPMUDR) $(BUILD_DIR)/centos/Packages
@ln $(RPMODL) $(BUILD_DIR)/centos/Packages
# add packages to the centos packages
- cd $(BUILD_DIR)/centos/Packages && yumdownloader openvswitch jq python34 python34-libs python34-PyYAML python34-setuptools ipxe-roms-qemu python34-jinja2 python34-markupsafe ansible python34-six python34-cffi python34-idna python34-pycparser python-crypto python-httplib2 python-jinja2 python-keyczar python-paramiko sshpass python-ecdsa python34-ply
+ cd $(BUILD_DIR)/centos/Packages && yumdownloader openvswitch jq python34 python34-libs python34-PyYAML python34-setuptools
+ cd $(BUILD_DIR)/centos/Packages && yumdownloader python34-jinja2 python34-markupsafe ansible python34-six python34-cffi
+ cd $(BUILD_DIR)/centos/Packages && yumdownloader ipxe-roms-qemu python34-idna python34-pycparser python-crypto python-httplib2
+ cd $(BUILD_DIR)/centos/Packages && yumdownloader python-jinja2 python-keyczar python-paramiko sshpass python-ecdsa python34-ply
+ cd $(BUILD_DIR)/centos/Packages && yumdownloader libvirt-python python-lxml
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-asn1crypto-0.22.0-1.el7.centos.noarch.rpm
cd $(BUILD_DIR)/centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python34-cryptography-2.0.3-1.el7.centos.x86_64.rpm
diff --git a/build/baremetal-environment.yaml b/build/baremetal-environment.yaml
index 677b313a..c849655e 100644
--- a/build/baremetal-environment.yaml
+++ b/build/baremetal-environment.yaml
@@ -1,3 +1,4 @@
+---
parameter_defaults:
HeatWorkers: 12
CeilometerWorkers: 12
diff --git a/build/barometer-install.sh b/build/barometer-install.sh
index de47baff..9e5dfc73 100755
--- a/build/barometer-install.sh
+++ b/build/barometer-install.sh
@@ -15,11 +15,11 @@
# limitations under the License.
# Get and install packages needed for Barometer service.
-# These are: collectd rpm's and dependencies, collectd-ceilometer-plugin,
+# These are: collectd rpm's and dependencies, collectd-openstack-plugins,
# puppet-barometer module.
# Versions/branches
-COLLECTD_CEILOMETER_PLUGIN_BRANCH="stable/ocata"
+COLLECTD_OPENSTACK_PLUGINS_BRANCH="stable/ocata"
ARCH="6.el7.centos.x86_64.rpm"
# don't fail because of missing certificate
@@ -27,7 +27,7 @@ GETFLAG="--no-check-certificate"
# Locations of repos
ARTIFACTS_BAROM="artifacts.opnfv.org/barometer"
-COLLECTD_CEILOMETER_REPO="https://github.com/openstack/collectd-ceilometer-plugin"
+COLLECTD_OPENSTACK_REPO="https://github.com/openstack/collectd-ceilometer-plugin"
PUPPET_BAROMETER_REPO="https://github.com/johnhinman/puppet-barometer"
# upload barometer packages tar, extract, and install
@@ -76,12 +76,12 @@ function barometer_pkgs {
cp collectd.tar.gz ${BUILD_DIR}
popd > /dev/null
- # get collectd-ceilometer-plugin and tar it
- rm -rf collectd-ceilometer-plugin
- git clone https://github.com/openstack/collectd-ceilometer-plugin
- pushd collectd-ceilometer-plugin
- git checkout -b $COLLECTD_CEILOMETER_PLUGIN_BRANCH
- git archive --format=tar.gz HEAD > ${BUILD_DIR}/collectd-ceilometer-plugin.tar.gz
+ # get collectd-openstack-plugins and tar it
+ rm -rf collectd-openstack-plugins
+ git clone $COLLECTD_OPENSTACK_REPO collectd-openstack-plugins
+ pushd collectd-openstack-plugins
+ git checkout -b $COLLECTD_OPENSTACK_PLUGINS_BRANCH
+ git archive --format=tar.gz HEAD > ${BUILD_DIR}/collectd-openstack-plugins.tar.gz
popd > /dev/null
# get the barometer puppet module and tar it
@@ -103,7 +103,7 @@ function barometer_pkgs {
# install dependencies
LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/collectd.tar.gz:/opt/ \
- --upload ${BUILD_DIR}/collectd-ceilometer-plugin.tar.gz:/opt/ \
+ --upload ${BUILD_DIR}/collectd-openstack-plugins.tar.gz:/opt/ \
--upload ${BUILD_DIR}/puppet-barometer.tar.gz:/etc/puppet/modules/ \
--run-command 'tar xfz /opt/collectd.tar.gz -C /opt' \
--install libstatgrab,log4cplus,rrdtool,rrdtool-devel \
@@ -133,12 +133,12 @@ function barometer_pkgs {
/opt/collectd-virt-${SUFFIX}" \
-a $OVERCLOUD_IMAGE
- # install collectd-ceilometer plugin
+ # install collectd-openstack-plugins
# install puppet-barometer module
# make directories for config files and mibs
LIBGUESTFS_BACKEND=direct virt-customize \
- --run-command 'mkdir /opt/stack/collectd-ceilometer' \
- --run-command "tar xfz /opt/collectd-ceilometer-plugin.tar.gz -C /opt/stack/collectd-ceilometer" \
+ --run-command 'mkdir /opt/stack/collectd-openstack' \
+ --run-command "tar xfz /opt/collectd-openstack-plugins.tar.gz -C /opt/stack/collectd-openstack" \
--run-command "cd /etc/puppet/modules/ && mkdir barometer && \
tar xzf puppet-barometer.tar.gz -C barometer" \
--run-command 'mkdir /usr/share/mibs/' \
diff --git a/build/bash_completion_apex b/build/bash_completion_apex
index b3c963e3..59c6afb6 100644
--- a/build/bash_completion_apex
+++ b/build/bash_completion_apex
@@ -50,7 +50,7 @@ __apex_complete () {
}
# run completion setup
-__apex_complete ./deploy.sh __deploy_main
+__apex_complete ./deploy.py __deploy_main
__apex_complete opnfv-deploy __deploy_main
__apex_complete ./util.sh __util_main
__apex_complete opnfv-util __util_main
diff --git a/build/build_ovs_nsh.sh b/build/build_ovs_nsh.sh
index 52d4701f..2fba43f5 100755
--- a/build/build_ovs_nsh.sh
+++ b/build/build_ovs_nsh.sh
@@ -9,7 +9,10 @@
##############################################################################
set -e
-yum -y install rpm-build autoconf automake libtool systemd-units openssl openssl-devel python python-twisted-core python-zope-interface python-six desktop-file-utils groff graphviz procps-ng libcap-ng libcap-ng-devel PyQt4 selinux-policy-devel kernel-devel kernel-headers kernel-tools
+yum -y install rpm-build autoconf automake libtool systemd-units \
+openssl openssl-devel python python-twisted-core python-zope-interface \
+python-six desktop-file-utils groff graphviz procps-ng libcap-ng \
+libcap-ng-devel PyQt4 selinux-policy-devel
./boot.sh
libtoolize --force
aclocal
@@ -19,7 +22,7 @@ autoconf
./configure
yum -y install rpmdevtools
# hack due to build pulling in kernel vxlan header
-kernel_vxlan="/usr/src/kernels/$(rpm -q kernel-headers | grep -Eo '[0-9].*x86_64')/include/net/vxlan.h"
+kernel_vxlan="/usr/src/kernels/$(rpm -q kernel | grep -Eo '[0-9].*x86_64')/include/net/vxlan.h"
sed -i '/struct vxlan_metadata {/a\ u32 gpe;' $kernel_vxlan
make rpm-fedora RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort | tail -n -1 | sed 's/^kernel-//'`\" --without check"
make rpm-fedora-kmod RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort | tail -n -1 | sed 's/^kernel-//'`\""
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 0f3dd701..5b5d4500 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -3,7 +3,7 @@
# types
parameters:
- # # value updated via lib/overcloud-deploy-functions.sh
+ # value updated via lib/overcloud-deploy-functions.sh
# CloudDomain:
parameter_defaults:
@@ -11,7 +11,7 @@ parameter_defaults:
NeutronEnableForceMetadata: true
NeutronEnableDHCPMetadata: true
NeutronEnableIsolatedMetadata: true
- #NeutronDhcpAgentsPerNetwork: 3
+ # NeutronDhcpAgentsPerNetwork: 3
NeutronPluginExtensions: 'qos,port_security,data_plane_status'
# TODO: VLAN Ranges should be configurable from network settings
NeutronNetworkVLANRanges: 'datacentre:500:525'
@@ -120,12 +120,12 @@ parameter_defaults:
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::OpenDaylightApi
- OS::TripleO::Services::OpenDaylightOvs
-# - OS::TripleO::Services::ONOSApi
-# - OS::TripleO::Services::ONOSOvs
+ # - OS::TripleO::Services::ONOSApi
+ # - OS::TripleO::Services::ONOSOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::Etcd
-# - OS::TripleO::Services::Gluon
+ # - OS::TripleO::Services::Gluon
- OS::TripleO::Services::Tacker
- OS::TripleO::Services::NeutronHoneycombAgent
- OS::TripleO::Services::Congress
@@ -153,7 +153,7 @@ parameter_defaults:
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::NeutronSriovAgent
- OS::TripleO::Services::OpenDaylightOvs
-# - OS::TripleO::Services::ONOSOvs
+ # - OS::TripleO::Services::ONOSOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::NeutronHoneycombAgent
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 8efe8225..6fdc32d6 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -42,7 +42,7 @@ git clone https://gerrit.opnfv.org/gerrit/vswitchperf vsperf
tar czf vsperf.tar.gz vsperf
# Increase disk size by 1200MB to accommodate more packages
-qemu-img resize overcloud-full_build.qcow2 +1200MB
+qemu-img resize overcloud-full_build.qcow2 +1200M
# expand file system to max disk size
# installing forked apex-puppet-tripleo
@@ -170,6 +170,10 @@ LIBGUESTFS_BACKEND=direct virt-customize \
LIBGUESTFS_BACKEND=direct virt-customize \
+ --upload ${BUILD_ROOT}/CentOS-Updates.repo:/etc/yum.repos.d/ \
+ --run-command "yum -y install kernel-devel-\$(rpm -q --queryformat '%{VERSION}-%{RELEASE}' kernel)" \
+ --run-command "yum -y install kernel-headers-\$(rpm -q --queryformat '%{VERSION}-%{RELEASE}' kernel)" \
+ --run-command "yum -y install kernel-tools-\$(rpm -q --queryformat '%{VERSION}-%{RELEASE}' kernel)" \
--upload ${BUILD_ROOT}/build_ovs_nsh.sh:/root/ \
--upload ovs.tar.gz:/root/ \
--run-command "cd /root/ && tar xzf ovs.tar.gz" \
diff --git a/build/overcloud-opendaylight.sh b/build/overcloud-opendaylight.sh
index 96b43d87..c850005e 100755
--- a/build/overcloud-opendaylight.sh
+++ b/build/overcloud-opendaylight.sh
@@ -42,6 +42,8 @@ pushd puppet-opendaylight > /dev/null
git archive --format=tar.gz --prefix=opendaylight/ HEAD > ${BUILD_DIR}/puppet-opendaylight-carbon.tar.gz
git checkout master
git archive --format=tar.gz --prefix=opendaylight/ HEAD > ${BUILD_DIR}/puppet-opendaylight-master.tar.gz
+git checkout stable/nitrogen
+git archive --format=tar.gz --prefix=opendaylight/ HEAD > ${BUILD_DIR}/puppet-opendaylight-nitrogen.tar.gz
popd > /dev/null
# cache gluon
@@ -71,6 +73,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/puppet-opendaylight-carbon.tar.gz:/etc/puppet/modules/ \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight-carbon.tar.gz" \
--upload ${BUILD_DIR}/puppet-opendaylight-master.tar.gz:/root/ \
+ --upload ${BUILD_DIR}/puppet-opendaylight-nitrogen.tar.gz:/root/ \
--upload ${BUILD_DIR}/puppet-gluon.tar.gz:/etc/puppet/modules/ \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-gluon.tar.gz" \
--install python-click \
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index 37e32145..c2e2f14e 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -35,7 +35,6 @@ rst2html docs/release/release-notes/release-notes.rst docs/release/release-notes
%install
mkdir -p %{buildroot}%{_bindir}/
%py3_install
-install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
install ci/util.sh %{buildroot}%{_bindir}/opnfv-util
mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d/
@@ -113,6 +112,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Fri Sep 08 2017 Tim Rozet <trozet@redhat.com> - 5.0-6
+- Updates clean to use python
* Wed Aug 23 2017 Tim Rozet <trozet@redhat.com> - 5.0-5
- Updated requirements
* Mon Aug 14 2017 Tim Rozet <trozet@redhat.com> - 5.0-4
diff --git a/build/undercloud.sh b/build/undercloud.sh
index 3244e7f3..05cbf552 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -38,6 +38,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--install "openstack-utils" \
--install "ceph-common" \
--install "python2-networking-sfc" \
+ --install openstack-ironic-inspector,subunit-filters,docker-distribution,openstack-tripleo-validations \
--run-command "cd /usr/share && rm -rf openstack-tripleo-heat-templates && tar xzf apex-tripleo-heat-templates.tar.gz" \
--run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
--run-command "sed -i '/ComputeEnableCephStorage/c\\ ComputeEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
@@ -50,6 +51,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/baremetal-environment.yaml:/home/stack/ \
+ --uninstall "libvirt-client" \
--install "libguestfs-tools" \
-a undercloud_build.qcow2
diff --git a/ci/build.sh b/ci/build.sh
deleted file mode 100755
index 113f35d6..00000000
--- a/ci/build.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-##############################################################################
-# Copyright (c) 2016 Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-set -e
-rpm -q ansible || sudo yum -y install ansible
-ansible-playbook --become -i "localhost," -c local $DIR/../lib/ansible/playbooks/build_dependencies.yml -vvv
-make -C $DIR/../build clean
-python3 $DIR/../apex/build.py $@
diff --git a/ci/clean.sh b/ci/clean.sh
deleted file mode 100755
index ef810416..00000000
--- a/ci/clean.sh
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-#Clean script to uninstall provisioning server for Apex
-#author: Dan Radez (dradez@redhat.com)
-#author: Tim Rozet (trozet@redhat.com)
-
-reset=$(tput sgr0 || echo "")
-blue=$(tput setaf 4 || echo "")
-red=$(tput setaf 1 || echo "")
-green=$(tput setaf 2 || echo "")
-
-vm_index=4
-ovs_bridges="br-admin br-tenant br-external br-storage"
-ovs_bridges+=" br-private br-public" # Legacy names, remove in E river
-
-#OPNFV_NETWORK_TYPES=$(python3 -c 'from apex.common.constants import OPNFV_NETWORK_TYPES; print(" ".join(OPNFV_NETWORK_TYPES))')
-OPNFV_NETWORK_TYPES+=" admin tenant external storage api"
-OPNFV_NETWORK_TYPES+=" admin_network private_network public_network storage_network api_network" # Legecy names, remove in E river
-
-##detach interface from OVS and set the network config correctly
-##params: bridge to detach from
-##assumes only 1 real interface attached to OVS
-function detach_interface_from_ovs {
- local bridge
- local port_output ports_no_orig
- local net_path
- local if_ip if_mask if_gw if_prefix
- local if_metric if_dns1 if_dns2
-
- net_path=/etc/sysconfig/network-scripts/
- if [[ -z "$1" ]]; then
- return 1
- else
- bridge=$1
- fi
-
- # if no interfaces attached then return
- if ! ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*"; then
- return 0
- fi
-
- # look for .orig ifcfg files to use
- port_output=$(ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*")
- while read -r line; do
- if [ -z "$line" ]; then
- continue
- elif [ -e ${net_path}/ifcfg-${line}.orig ]; then
- mv -f ${net_path}/ifcfg-${line}.orig ${net_path}/ifcfg-${line}
- elif [ -e ${net_path}/ifcfg-${bridge} ]; then
- if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-
- if [ -z "$if_mask" ]; then
- if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${net_path}/ifcfg-${bridge})
- if_mask=$(prefix2mask ${if_prefix})
- fi
-
- if [[ -z "$if_ip" || -z "$if_mask" ]]; then
- echo "ERROR: IPADDR or PREFIX/NETMASK missing for ${bridge} and no .orig file for interface ${line}"
- return 1
- fi
-
- # create if cfg
- echo "DEVICE=${line}
-IPADDR=${if_ip}
-NETMASK=${if_mask}
-BOOTPROTO=static
-ONBOOT=yes
-TYPE=Ethernet
-NM_CONTROLLED=no
-PEERDNS=no" > ${net_path}/ifcfg-${line}
-
- if [ -n "$if_gw" ]; then
- echo "GATEWAY=${if_gw}" >> ${net_path}/ifcfg-${line}
- fi
-
- if [ -n "$if_metric" ]; then
- echo "METRIC=${if_metric}" >> ${net_path}/ifcfg-${line}
- fi
-
- if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
- sed -i '/PEERDNS/c\PEERDNS=yes' ${net_path}/ifcfg-${line}
-
- if [ -n "$if_dns1" ]; then
- echo "DNS1=${if_dns1}" >> ${net_path}/ifcfg-${line}
- fi
-
- if [ -n "$if_dns2" ]; then
- echo "DNS2=${if_dns2}" >> ${net_path}/ifcfg-${line}
- fi
- fi
- break
- else
- echo "ERROR: Real interface ${line} attached to bridge, but no interface or ${bridge} ifcfg file exists"
- return 1
- fi
-
- done <<< "$port_output"
-
- # modify the bridge ifcfg file
- # to remove IP params
- sudo sed -i 's/IPADDR=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/NETMASK=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/GATEWAY=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/DNS1=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/DNS2=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/METRIC=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/PEERDNS=.*//' ${net_path}/ifcfg-${bridge}
-
- sudo systemctl restart network
-}
-
-display_usage() {
- echo -e "Usage:\n$0 [arguments] \n"
- echo -e " -i|--inventory : Full path to inventory yaml file. Required only for baremetal node clean"
-}
-
-##translates the command line parameters into variables
-##params: $@ the entire command line is passed
-##usage: parse_cmd_line() "$@"
-parse_cmdline() {
- echo -e "\n\n${blue}This script is used to clean an Apex environment${reset}\n\n"
- echo "Use -h to display help"
- sleep 2
-
- while [ "${1:0:1}" = "-" ]
- do
- case "$1" in
- -h|--help)
- display_usage
- exit 0
- ;;
- -i|--inventory)
- INVENTORY_FILE=$2
- shift 2
- ;;
- *)
- display_usage
- exit 1
- ;;
- esac
- done
-
- if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
- echo -e "{$red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
- exit 1
- fi
-}
-
-parse_cmdline "$@"
-
-if [ -n "$INVENTORY_FILE" ]; then
- echo -e "${blue}INFO: Parsing inventory file...${reset}"
- # hack for now (until we switch fully over to clean.py) to tell if
- # we should install apex from python or if rpm is being used
- if ! rpm -q python34-opnfv-apex > /dev/null; then
- pushd ../ && python3 setup.py install > /dev/null
- popd
- fi
- if ! python3 -m apex.clean -f ${INVENTORY_FILE}; then
- echo -e "${red}WARN: Unable to shutdown all nodes! Please check /var/log/apex.log${reset}"
- else
- echo -e "${blue}INFO: Node shutdown complete...${reset}"
- fi
-fi
-
-# Clean off instack/undercloud VM
-for vm in instack undercloud; do
- virsh destroy $vm 2> /dev/null | xargs echo -n
- virsh undefine --nvram $vm 2> /dev/null | xargs echo -n
- /usr/bin/touch /var/lib/libvirt/images/${vm}.qcow2
- virsh vol-delete ${vm}.qcow2 --pool default 2> /dev/null | xargs echo -n
- rm -f /var/lib/libvirt/images/${vm}.qcow2 2> /dev/null
-done
-
-# Clean off baremetal VMs in case they exist
-for i in $(seq 0 $vm_index); do
- virsh destroy baremetal$i 2> /dev/null | xargs echo -n
- virsh undefine baremetal$i 2> /dev/null | xargs echo -n
- /usr/bin/touch /var/lib/libvirt/images/baremetal${i}.qcow2
- virsh vol-delete baremetal${i}.qcow2 --pool default 2> /dev/null | xargs echo -n
- rm -f /var/lib/libvirt/images/baremetal${i}.qcow2 2> /dev/null
- if [ -e /root/.vbmc/baremetal$i ]; then vbmc delete baremetal$i; fi
-done
-
-for network in ${OPNFV_NETWORK_TYPES}; do
- virsh net-destroy ${network} 2> /dev/null
- virsh net-undefine ${network} 2> /dev/null
-done
-
-# Clean off created bridges
-for bridge in ${ovs_bridges}; do
- if detach_interface_from_ovs ${bridge} 2> /dev/null; then
- ovs-vsctl del-br ${bridge} 2> /dev/null
- rm -f /etc/sysconfig/network-scripts/ifcfg-${bridge}
- fi
-done
-
-# clean pub keys from root's auth keys
-sed -i '/stack@undercloud.localdomain/d' /root/.ssh/authorized_keys
-
-
-# force storage cleanup
-virsh pool-refresh default
-
-# remove temporary files
-rm -f /tmp/network-environment.yaml
-
-echo "Cleanup Completed"
diff --git a/ci/deploy.sh b/ci/deploy.sh
deleted file mode 100755
index 0ba0c74b..00000000
--- a/ci/deploy.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Deploy script to install provisioning server for OPNFV Apex
-# author: Dan Radez (dradez@redhat.com)
-# author: Tim Rozet (trozet@redhat.com)
-#
-
-set -e
-yum -y install python34 python34-devel libvirt-devel python34-pip python-tox ansible
-mkdir -p /home/jenkins-ci/tmp
-mv -f .build /home/jenkins-ci/tmp/
-pip3 install --upgrade --force-reinstall .
-mv -f /home/jenkins-ci/tmp/.build .
-opnfv-deploy $@
diff --git a/ci/run_smoke_tests.sh b/ci/run_smoke_tests.sh
deleted file mode 100755
index 517822ef..00000000
--- a/ci/run_smoke_tests.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-export ANSIBLE_HOST_KEY_CHECKING=False
-
-./dev_dep_check.sh
-
-yum install python-devel -y
-yum install openssl-devel -y
-easy_install pip
-pip install ansible
-
-echo 'See ~stack/smoke-tests.out on the undercloud for result log'
-ansible-playbook -i "$(get_undercloud_ip)," ../tests/smoke_tests/smoke_tests.yml
diff --git a/ci/test.sh b/ci/test.sh
deleted file mode 100755
index 72de24e8..00000000
--- a/ci/test.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-##############################################################################
-# Copyright (c) 2016 Dan Radez (Red Hat)
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-set -e
-
-# Make sure python dependencies are installed
-for pkg in yamllint rpmlint iproute epel-release python34-devel python34-nose python34-PyYAML python-pep8 python34-mock python34-pip; do
- if ! rpm -q ${pkg} > /dev/null; then
- if ! sudo yum install -y ${pkg}; then
- echo "Failed to install ${pkg} package..."
- exit 1
- fi
- fi
-done
-
-# Make sure coverage is installed
-if ! python3 -c "import coverage" &> /dev/null; then sudo pip3.4 coverage; fi
-
-pushd ../build/ > /dev/null
-make rpmlint
-make python-pep8-check
-make yamllint
-make python-tests
-popd > /dev/null
diff --git a/config/deploy/os-odl-fdio-dvr-ha.yaml b/config/deploy/os-odl-fdio-dvr-ha.yaml
index 564cf0b6..6fcbec65 100644
--- a/config/deploy/os-odl-fdio-dvr-ha.yaml
+++ b/config/deploy/os-odl-fdio-dvr-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl-fdio-dvr-noha.yaml b/config/deploy/os-odl-fdio-dvr-noha.yaml
index 24c433bd..e8788d71 100644
--- a/config/deploy/os-odl-fdio-dvr-noha.yaml
+++ b/config/deploy/os-odl-fdio-dvr-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl-sfc-ha.yaml b/config/deploy/os-odl-sfc-ha.yaml
index 6cb153d3..3a87bfe3 100644
--- a/config/deploy/os-odl-sfc-ha.yaml
+++ b/config/deploy/os-odl-sfc-ha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
sdn_controller: opendaylight
- odl_version: carbon
+ odl_version: nitrogen
tacker: true
congress: true
sfc: true
diff --git a/config/deploy/os-odl-sfc-noha.yaml b/config/deploy/os-odl-sfc-noha.yaml
index c3b3379c..2b08af6c 100644
--- a/config/deploy/os-odl-sfc-noha.yaml
+++ b/config/deploy/os-odl-sfc-noha.yaml
@@ -4,7 +4,7 @@ global_params:
deploy_options:
sdn_controller: opendaylight
- odl_version: carbon
+ odl_version: nitrogen
tacker: true
congress: true
sfc: true
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 25aaee89..7dddf343 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -222,7 +222,7 @@ networks:
# Subnet in CIDR format
cidr: fd00:fd00:fd00:4000::/64
# VLAN tag to use for Overcloud hosts on this network
- #vlan: 13
+ # vlan: 13
# Api network MTU
mtu: 1500
# Mapping of network configuration for Overcloud Nodes
diff --git a/ci/dev_dep_check.sh b/contrib/dev_dep_check.sh
index 7a14563c..33f1319f 100755
--- a/ci/dev_dep_check.sh
+++ b/contrib/dev_dep_check.sh
@@ -32,10 +32,9 @@ if ! sudo yum update -y ipxe-roms-qemu; then
fi
# check for other packages
-for i in epel-release python34-PyYAML openvswitch libguestfs \
- libguestfs-tools-c libvirt-python python2-oslo-config \
- python2-debtcollector python34-devel libxslt-devel \
- libxml2-devel python-virtualbmc python34-jinja2 python34-pip \
+for i in epel-release openvswitch libguestfs \
+ libguestfs-tools-c libvirt-python libxslt-devel \
+ libxml2-devel ansible python34-pip \
rpm-build wget libvirt ntpdate; do
# Make sure deploy deps are installed
if ! rpm -q $i > /dev/null; then
diff --git a/contrib/simple_deploy.sh b/contrib/simple_deploy.sh
index 24b67904..8da92727 100755
--- a/contrib/simple_deploy.sh
+++ b/contrib/simple_deploy.sh
@@ -1,20 +1,13 @@
#!/bin/bash
set -e
apex_home=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../
-export BASE=$apex_home/build
-export LIB=$apex_home/lib
-export IMAGES=$apex_home/.build/
-export PYTHONPATH=$PYTHONPATH:$apex_home/lib/python
+export PYTHONPATH=$apex_home/apex:$PYTHONPATH
$apex_home/ci/dev_dep_check.sh || true
-$apex_home/ci/clean.sh
-pushd $apex_home/build
-make clean
-make undercloud
-make overcloud-opendaylight
-popd
-pushd $apex_home/ci
+pip3 install -r $apex_home/requirements.txt
+pushd $apex_home/apex
+python3 clean.py
echo "All further output will be piped to $PWD/nohup.out"
-(nohup ./deploy.sh -v -n $apex_home/config/network/network_settings.yaml -d $apex_home/config/deploy/os-odl-nofeature-noha.yaml &)
+(nohup python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-nosdn-nofeature-noha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build &)
[ -f nohup.out ] || sleep 3
tail -f nohup.out
popd
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 753ef818..7d8a78f9 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -1,6 +1,6 @@
-========================================================================
-OPNFV Release Notes for the Danube release of OPNFV Apex deployment tool
-========================================================================
+===========================================================================
+OPNFV Release Notes for the Euphrates release of OPNFV Apex deployment tool
+===========================================================================
.. contents:: Table of Contents
@@ -10,7 +10,7 @@ OPNFV Release Notes for the Danube release of OPNFV Apex deployment tool
Abstract
========
-This document provides the release notes for Danube release with the Apex
+This document provides the release notes for the Euphrates release with the Apex
deployment toolchain.
License
@@ -28,6 +28,8 @@ Version History
| **Date** | **Ver.** | **Authors** | **Comment** |
| | | | |
+-------------+-----------+-----------------+----------------------+
+| 2017-08-28 | 5.0 | Tim Rozet | Updates for Euphrates|
++-------------+-----------+-----------------+----------------------+
| 2017-03-30 | 4.0 | Tim Rozet | Updates for Danube |
+-------------+-----------+-----------------+----------------------+
| 2016-09-20 | 2.1.0 | Tim Rozet | More updates for |
@@ -42,7 +44,7 @@ Version History
Important Notes
===============
-This is the OPNFV Danube release that implements the deploy stage of the
+This is the OPNFV Euphrates release that implements the deploy stage of the
OPNFV CI pipeline via Apex.
Apex is based on RDO's Triple-O installation tool chain.
@@ -54,14 +56,14 @@ deploy OPNFV using Apex installer.
Summary
=======
-Danube release with the Apex deployment toolchain will establish an OPNFV
+The Euphrates release with the Apex deployment toolchain will establish an OPNFV
target system on a Pharos compliant lab infrastructure. The current definition
of an OPNFV target system is OpenStack Newton combined with an SDN
controller, such as OpenDaylight. The system is deployed with OpenStack High
Availability (HA) for most OpenStack services. SDN controllers are deployed
on every controller unless deploying with one the HA FD.IO scenarios. Ceph
storage is used as Cinder backend, and is the only supported storage for
-Danube. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
+Euphrates. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
node in an HA setup. Apex also supports non-HA deployments, which deploys a
single controller and n number of compute nodes. Furthermore, Apex is
capable of deploying scenarios in a bare metal or virtual fashion. Virtual
@@ -71,7 +73,7 @@ simulate the a bare metal deployment.
- Documentation is built by Jenkins
- .iso image is built by Jenkins
- .rpm packages are built by Jenkins
-- Jenkins deploys a Danube release with the Apex deployment toolchain
+- Jenkins deploys a Euphrates release with the Apex deployment toolchain
bare metal, which includes 3 control+network nodes, and 2 compute nodes.
Release Data
@@ -81,16 +83,16 @@ Release Data
| **Project** | apex |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | apex/danube.1.0 |
+| **Repo/tag** | apex/euphrates.1.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | danube.1.0 |
+| **Release designation** | 5.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2017-03-31 |
+| **Release date** | 2017-09-17 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Danube release |
+| **Purpose of the delivery** | OPNFV Euphrates release |
| | |
+--------------------------------------+--------------------------------------+
@@ -99,7 +101,7 @@ Version change
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the Danube release with the Apex
+This is the first tracked version of the Euphrates release with the Apex
deployment toolchain. It is based on following upstream versions:
- OpenStack (Newton release)
@@ -111,13 +113,13 @@ deployment toolchain. It is based on following upstream versions:
Document Version Changes
~~~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of Danube release with the Apex
+This is the first tracked version of the Euphrates release with the Apex
deployment toolchain.
The following documentation is provided with this release:
-- OPNFV Installation instructions for the Danube release with the Apex
+- OPNFV Installation instructions for the Euphrates release with the Apex
deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Danube release with the Apex deployment
+- OPNFV Release Notes for the Euphrates release with the Apex deployment
toolchain - ver. 1.0.0 (this document)
Feature Additions
@@ -241,17 +243,17 @@ Software Deliverables
- Apex overcloud .rpm (opnfv-apex) - For nosdn and OpenDaylight Scenarios
- Apex undercloud .rpm (opnfv-apex-undercloud)
- Apex common .rpm (opnfv-apex-common)
-- build.sh - Builds the above artifacts
+- build.py - Builds the above artifacts
- opnfv-deploy - Automatically deploys Target OPNFV System
- opnfv-clean - Automatically resets a Target OPNFV Deployment
- opnfv-util - Utility to connect to or debug Overcloud nodes + OpenDaylight
Documentation Deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Danube release with the Apex
- deployment toolchain - ver. 4.0
-- OPNFV Release Notes for the Danube release with the Apex deployment
- toolchain - ver. 4.0 (this document)
+- OPNFV Installation instructions for the Euphrates release with the Apex
+ deployment toolchain - ver. 5.0
+- OPNFV Release Notes for the Euphrates release with the Apex deployment
+ toolchain - ver. 5.0 (this document)
Known Limitations, Issues and Workarounds
=========================================
@@ -333,10 +335,10 @@ Apex installer.
References
==========
-For more information on the OPNFV Danube release, please see:
+For more information on the OPNFV Euphrates release, please see:
-http://wiki.opnfv.org/releases/Danube
+http://wiki.opnfv.org/releases/Euphrates
:Authors: Tim Rozet (trozet@redhat.com)
:Authors: Dan Radez (dradez@redhat.com)
-:Version: 4.0
+:Version: 5.0
diff --git a/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst b/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
index 9956f290..4d8870bb 100644
--- a/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
+++ b/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with no SDN controller and no extra features enabled.
.. contents::
@@ -42,6 +42,6 @@ None
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst b/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
index 6c5c0535..33a855d1 100644
--- a/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
+++ b/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with no SDN controller and no extra features enabled.
.. contents::
@@ -39,6 +39,6 @@ None
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst b/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst
index 8f941286..1319f427 100644
--- a/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst
+++ b/docs/release/scenarios/os-nosdn-performance-ha/os-nosdn-performance-ha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with no SDN controller and performance options enabled.
.. contents::
@@ -53,6 +53,6 @@ Limitations, Issues and Workarounds
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-odl_l3-nofeature-ha/index.rst b/docs/release/scenarios/os-odl-csit-noha/index.rst
index 7d02fff4..51cf903f 100644
--- a/docs/release/scenarios/os-odl_l3-nofeature-ha/index.rst
+++ b/docs/release/scenarios/os-odl-csit-noha/index.rst
@@ -1,15 +1,15 @@
-.. _os-odl_l3-nofeature-ha:
+.. _os-odl-csit-noha:
.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-===============================================
-os-odl_l3-nofeature-ha overview and description
-===============================================
+=========================================
+os-odl-csit-noha overview and description
+=========================================
.. toctree::
:numbered:
:maxdepth: 4
- os-odl_l3-nofeature-ha.rst
+ os-odl-csit-noha.rst
diff --git a/docs/release/scenarios/os-odl_l3-csit-noha/os-odl_l3-csit-noha.rst b/docs/release/scenarios/os-odl-csit-noha/os-odl-csit-noha.rst
index 7511a558..331d3b11 100644
--- a/docs/release/scenarios/os-odl_l3-csit-noha/os-odl_l3-csit-noha.rst
+++ b/docs/release/scenarios/os-odl-csit-noha/os-odl-csit-noha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with the OpenDaylight SDN controller and only CSIT relevant
features enabled.
@@ -33,7 +33,7 @@ CPU/Memory requirements.
Scenario usage overview
=======================
-Simply deploy this scenario by using the os-odl_l3-csit-noha.yaml deploy
+Simply deploy this scenario by using the os-odl-csit-noha.yaml deploy
settings file.
Limitations, Issues and Workarounds
@@ -53,6 +53,6 @@ Limitations, Issues and Workarounds
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-odl_l3-csit-noha/index.rst b/docs/release/scenarios/os-odl-nofeature-ha/index.rst
index 29483b0f..4c6a443f 100644
--- a/docs/release/scenarios/os-odl_l3-csit-noha/index.rst
+++ b/docs/release/scenarios/os-odl-nofeature-ha/index.rst
@@ -1,15 +1,15 @@
-.. _os-odl_l3-csit-noha:
+.. _os-odl-nofeature-ha:
.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
============================================
-os-odl_l3-csit-noha overview and description
+os-odl-nofeature-ha overview and description
============================================
.. toctree::
:numbered:
:maxdepth: 4
- os-odl_l3-csit-noha.rst
+ os-odl-nofeature-ha.rst
diff --git a/docs/release/scenarios/os-odl_l3-nofeature-ha/os-odl_l3-nofeature-ha.rst b/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
index 69df973c..1036f774 100644
--- a/docs/release/scenarios/os-odl_l3-nofeature-ha/os-odl_l3-nofeature-ha.rst
+++ b/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with the OpenDaylight SDN controller and no extra features enabled.
.. contents::
@@ -30,13 +30,13 @@ per service.
OpenDaylight is also enabled in HA, and forms a cluster. Neutron
communicates with a Virtual IP Address for OpenDaylight which is load
-balanced acrosss the OpenDaylight cluster. Every Open vSwitch node is
+balanced across the OpenDaylight cluster. Every Open vSwitch node is
connected to every OpenDaylight for High Availability.
Scenario usage overview
=======================
-Simply deploy this scenario by using the os-odl_l3-nofeature-ha.yaml deploy
+Simply deploy this scenario by using the os-odl-nofeature-ha.yaml deploy
settings file.
Limitations, Issues and Workarounds
@@ -56,6 +56,6 @@ Limitations, Issues and Workarounds
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-odl_l3-nofeature-noha/index.rst b/docs/release/scenarios/os-odl-nofeature-noha/index.rst
index 182174e3..c3576b65 100644
--- a/docs/release/scenarios/os-odl_l3-nofeature-noha/index.rst
+++ b/docs/release/scenarios/os-odl-nofeature-noha/index.rst
@@ -1,15 +1,15 @@
-.. _os-odl_l3-nofeature-noha:
+.. _os-odl-nofeature-noha:
.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-=================================================
-os-odl_l3-nofeature-noha overview and description
-=================================================
+==============================================
+os-odl-nofeature-noha overview and description
+==============================================
.. toctree::
:numbered:
:maxdepth: 4
- os-odl_l3-nofeature-noha.rst
+ os-odl-nofeature-noha.rst
diff --git a/docs/release/scenarios/os-odl_l3-nofeature-noha/os-odl_l3-nofeature-noha.rst b/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
index 7b4ebbee..490cfbd9 100644
--- a/docs/release/scenarios/os-odl_l3-nofeature-noha/os-odl_l3-nofeature-noha.rst
+++ b/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with the OpenDaylight SDN controller and no extra features enabled.
.. contents::
@@ -30,7 +30,7 @@ the OpenDaylight service on it.
Scenario usage overview
=======================
-Simply deploy this scenario by using the os-odl_l3-nofeature-noha.yaml deploy
+Simply deploy this scenario by using the os-odl-nofeature-noha.yaml deploy
settings file.
Limitations, Issues and Workarounds
@@ -50,6 +50,6 @@ Limitations, Issues and Workarounds
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst b/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst
index 24de23bb..c0f96ab7 100644
--- a/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst
+++ b/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst
@@ -2,7 +2,7 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) <optionally add copywriters name>
-This document provides scenario level details for Danube 1.0 of
+This document provides scenario level details for Euphrates 1.0 of
deployment with the OVN SDN controller and no extra features enabled.
.. contents::
@@ -39,6 +39,6 @@ Limitations, Issues and Workarounds
References
==========
-For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+For more information on the OPNFV Euphrates release, please visit
+http://www.opnfv.org/euphrates
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
index bd06c0fa..c0e1cd35 100644
--- a/lib/ansible/playbooks/configure_undercloud.yml
+++ b/lib/ansible/playbooks/configure_undercloud.yml
@@ -45,12 +45,24 @@
- lineinfile:
path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
regexp: '_link_ip_address_pxe_configs'
- line: '_link_mac_pxe_configs(task)'
+ line: ' _link_mac_pxe_configs(task)'
when: aarch64
- - name: undercloud install
- shell: openstack undercloud install &> apex-undercloud-install.log
- become: yes
- become_user: stack
+ - block:
+ - name: undercloud install
+ shell: openstack undercloud install &> apex-undercloud-install.log
+ become: yes
+ become_user: stack
+ rescue:
+ - name: undercloud install retry
+ shell: openstack undercloud install >> apex-undercloud-install.log 2>&1
+ become: yes
+ become_user: stack
+ always:
+ - name: fetch undercloud log
+ fetch:
+ src: /home/stack/apex-undercloud-install.log
+ dest: "{{ apex_temp_dir }}/"
+ flat: yes
- name: openstack-configs nova
shell: openstack-config --set /etc/nova/nova.conf DEFAULT {{ item }}
become: yes
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
index 6db94ab4..8a575216 100644
--- a/lib/ansible/playbooks/deploy_dependencies.yml
+++ b/lib/ansible/playbooks/deploy_dependencies.yml
@@ -1,6 +1,12 @@
---
- hosts: localhost
tasks:
+ - yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - python-lxml
+ - libvirt-python
- sysctl:
name: net.ipv4.ip_forward
state: present
@@ -30,21 +36,24 @@
state: present
with_items: '{{ virsh_enabled_networks }}'
- virt_net:
- command: define
+ state: present
name: '{{ item }}'
xml: '{{ lookup("template", "virsh_network_ovs.xml.j2") }}'
- autostart: yes
with_items: '{{ virsh_enabled_networks }}'
- virt_net:
- command: create
+ state: active
name: '{{ item }}'
+ autostart: yes
with_items: '{{ virsh_enabled_networks }}'
- virt_pool:
name: default
- command: define
autostart: yes
- state: active
+ state: present
xml: '{{ lookup("template", "virsh_pool.xml.j2") }}'
+ - virt_pool:
+ name: default
+ autostart: yes
+ state: active
- lineinfile:
path: /etc/modprobe.d/kvm_intel.conf
line: 'options kvm-intel nested=1'
diff --git a/lib/ansible/playbooks/deploy_overcloud.yml b/lib/ansible/playbooks/deploy_overcloud.yml
index 3313bc87..19e46380 100644
--- a/lib/ansible/playbooks/deploy_overcloud.yml
+++ b/lib/ansible/playbooks/deploy_overcloud.yml
@@ -32,11 +32,13 @@
become: yes
become_user: stack
- name: Import inventory (baremetal)
- shell: "{{ stackrc }} && {{ item }}"
- with_items:
- - openstack overcloud node import instackenv.json
- - openstack overcloud node introspect --all-manageable --provide
+ shell: "{{ stackrc }} && openstack overcloud node import instackenv.json"
when: not virtual
+ - name: Introspect inventory (baremetal)
+ shell: "{{ stackrc }} && openstack overcloud node introspect --all-manageable --provide"
+ when:
+ - not virtual
+ - not aarch64
- name: Import inventory (virtual)
shell: "{{ stackrc }} && openstack overcloud node import --provide instackenv.json"
when: virtual
@@ -48,8 +50,12 @@
- compute
- name: Configure DNS server for ctlplane network
shell: "{{ stackrc }} && openstack subnet set ctlplane-subnet {{ dns_server_args }}"
- - name: Execute Overcloud Deployment
- shell: "{{ stackrc }} && bash deploy_command"
+ - block:
+ - name: Execute Overcloud Deployment
+ shell: "{{ stackrc }} && bash deploy_command"
+ rescue:
+ - name: Show overcloud failures
+ shell: "{{ stackrc }} && openstack stack failures list overcloud --long"
- name: Show Keystone output
shell: "{{ overcloudrc }} && {{ item }}"
when: debug
diff --git a/lib/ansible/playbooks/post_deploy_undercloud.yml b/lib/ansible/playbooks/post_deploy_undercloud.yml
index ba0746b2..d6b8805a 100644
--- a/lib/ansible/playbooks/post_deploy_undercloud.yml
+++ b/lib/ansible/playbooks/post_deploy_undercloud.yml
@@ -115,4 +115,3 @@
become_user: stack
when: congress
with_items: "{{ congress_datasources }}"
- ignore_errors: yes
diff --git a/setup.cfg b/setup.cfg
index ee3105af..9c181f5e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,6 +23,7 @@ setup-hooks =
[entry_points]
console_scripts =
opnfv-deploy = apex.deploy:main
+ opnfv-clean = apex.clean:main
[files]
packages =