Diffstat (limited to 'apex')
-rw-r--r--  apex/__init__.py                                15
-rw-r--r--  apex/build.py                                  238
-rw-r--r--  apex/build/__init__.py                           0
-rw-r--r--  apex/build/build_utils.py                      108
-rw-r--r--  apex/clean.py                                   65
-rw-r--r--  apex/common/__init__.py                          0
-rw-r--r--  apex/common/constants.py                        46
-rw-r--r--  apex/common/exceptions.py                       12
-rw-r--r--  apex/common/parsers.py                          73
-rw-r--r--  apex/common/utils.py                           107
-rw-r--r--  apex/deploy.py                                 441
-rw-r--r--  apex/inventory/__init__.py                       0
-rw-r--r--  apex/inventory/inventory.py                     89
-rw-r--r--  apex/network/__init__.py                         0
-rw-r--r--  apex/network/ip_utils.py                       230
-rw-r--r--  apex/network/jumphost.py                       172
-rw-r--r--  apex/network/network_environment.py            218
-rw-r--r--  apex/overcloud/__init__.py                       0
-rw-r--r--  apex/overcloud/config.py                        76
-rw-r--r--  apex/overcloud/overcloud_deploy.py             556
-rw-r--r--  apex/settings/__init__.py                        0
-rw-r--r--  apex/settings/deploy_settings.py               188
-rw-r--r--  apex/settings/network_settings.py              327
-rw-r--r--  apex/tests/__init__.py                           0
-rw-r--r--  apex/tests/config/inventory.yaml                57
-rw-r--r--  apex/tests/constants.py                         12
-rw-r--r--  apex/tests/playbooks/test_playbook.yaml          5
-rwxr-xr-x  apex/tests/smoke_tests/execute_smoke_tests.sh    3
-rw-r--r--  apex/tests/smoke_tests/execute_tests.yml        11
-rw-r--r--  apex/tests/smoke_tests/prepare_undercloud.yml    9
-rw-r--r--  apex/tests/smoke_tests/smoke_tests.yml           3
-rw-r--r--  apex/tests/test_apex_clean.py                   41
-rw-r--r--  apex/tests/test_apex_common_utils.py            59
-rw-r--r--  apex/tests/test_apex_deploy_settings.py        101
-rw-r--r--  apex/tests/test_apex_inventory.py               69
-rw-r--r--  apex/tests/test_apex_ip_utils.py               132
-rw-r--r--  apex/tests/test_apex_network_environment.py    169
-rw-r--r--  apex/tests/test_apex_network_settings.py       156
-rw-r--r--  apex/undercloud/__init__.py                      0
-rw-r--r--  apex/undercloud/undercloud.py                  206
-rw-r--r--  apex/virtual/__init__.py                         0
-rwxr-xr-x  apex/virtual/configure_vm.py                   206
-rw-r--r--  apex/virtual/virtual_utils.py                  140
43 files changed, 4340 insertions, 0 deletions
diff --git a/apex/__init__.py b/apex/__init__.py
new file mode 100644
index 00000000..4db820d9
--- /dev/null
+++ b/apex/__init__.py
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+from apex.network.network_environment import NetworkEnvironment
+from apex.settings.deploy_settings import DeploySettings
+from apex.settings.network_settings import NetworkSettings
+from .clean import clean_nodes
+from .inventory.inventory import Inventory
diff --git a/apex/build.py b/apex/build.py
new file mode 100644
index 00000000..cda4e061
--- /dev/null
+++ b/apex/build.py
@@ -0,0 +1,238 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import uuid
+import yaml
+
+CACHE_JOURNAL = 'cache_journal.yaml'
+TMP_CACHE = '.cache'
+BUILD_ROOT = 'build'
+BUILD_LOG_FILE = './apex_build.log'
+
+
+class ApexBuildException(Exception):
+ pass
+
+
+def create_build_parser():
+ build_parser = argparse.ArgumentParser()
+ build_parser.add_argument('--debug', action='store_true', default=False,
+ help="Turn on debug messages")
+ build_parser.add_argument('-l', '--log-file',
+ default=BUILD_LOG_FILE,
+ dest='log_file', help="Log file to log to")
+ build_parser.add_argument('-c', '--cache-dir',
+ dest='cache_dir',
+ default=None,
+ help='Directory to store cache')
+ build_parser.add_argument('--iso', action='store_true',
+ default=False,
+ help='Build ISO image')
+ build_parser.add_argument('--rpms', action='store_true',
+ default=False,
+ help='Build RPMs')
+ build_parser.add_argument('-r', '--release',
+ dest='build_version',
+ help='Version to apply to build '
+ 'artifact label')
+
+ return build_parser
+
+
+def get_journal(cache_dir):
+ """
+ Search for the journal file and returns its contents
+ :param cache_dir: cache storage directory where journal file is
+ :return: content of journal file
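+
+    Illustrative journal contents (a YAML list of cache tarball names,
+    hypothetical values):
+        - apex-cache-<uuid>.tgz
+        - apex-cache-<uuid>.tgz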
+ """
+ journal_file = "{}/{}".format(cache_dir, CACHE_JOURNAL)
+    if not os.path.isfile(journal_file):
+ logging.info("Journal file not found {}, skipping cache search".format(
+ journal_file))
+ else:
+ with open(journal_file, 'r') as fh:
+ cache_journal = yaml.safe_load(fh)
+ assert isinstance(cache_journal, list)
+ return cache_journal
+
+
+def get_cache_file(cache_dir):
+ """
+ Searches for a valid cache entry in the cache journal
+ :param cache_dir: directory where cache and journal are located
+ :return: name of valid cache file
+ """
+ cache_journal = get_journal(cache_dir)
+ if cache_journal is not None:
+ valid_cache = cache_journal[-1]
+ if os.path.isfile(valid_cache):
+ return valid_cache
+
+
+def unpack_cache(cache_dest, cache_dir=None):
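+    """
+    Unpacks the most recent valid cache tarball into cache_dest
+    :param cache_dest: directory to unpack the cache into
+    :param cache_dir: cache storage directory holding tarballs and journal
+    :return: None
+    """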
+ if cache_dir is None:
+ logging.info("Cache directory not provided, skipping cache unpack")
+ return
+    elif not os.path.isdir(cache_dir):
+ logging.info("Cache Directory does not exist, skipping cache unpack")
+ return
+ else:
+ logging.info("Cache Directory Found: {}".format(cache_dir))
+ cache_file = get_cache_file(cache_dir)
+ if cache_file is None:
+ logging.info("No cache file detected, skipping cache unpack")
+ return
+ logging.info("Unpacking Cache {}".format(cache_file))
+ if not os.path.exists(cache_dest):
+ os.makedirs(cache_dest)
+ try:
+ subprocess.check_call(["tar", "xvf", cache_file, "-C", cache_dest])
+ except subprocess.CalledProcessError:
+ logging.warning("Cache unpack failed")
+ return
+ logging.info("Cache unpacked, contents are: {}",
+ os.listdir(cache_dest))
+
+
+def build(build_root, version, iso=False, rpms=False):
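+    """
+    Runs make against the Apex build root to produce build artifacts
+    :param build_root: directory containing the Apex Makefile
+    :param version: optional version label passed to make as RELEASE
+    :param iso: build the 'iso' target instead of the default targets
+    :param rpms: build the 'rpms' target instead of the default targets
+    :return: None
+    """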
+ if iso:
+ make_targets = ['iso']
+ elif rpms:
+ make_targets = ['rpms']
+ else:
+ make_targets = ['images', 'rpms-check']
+ if version is not None:
+ make_args = ['RELEASE={}'.format(version)]
+ else:
+ make_args = []
+ logging.info('Building targets: {}'.format(make_targets))
+ try:
+ output = subprocess.check_output(["make"] + make_args + ["-C",
+ build_root] + make_targets)
+ logging.info(output)
+ except subprocess.CalledProcessError as e:
+ logging.error("Failed to build Apex artifacts")
+ logging.error(e.output)
+ raise e
+
+
+def build_cache(cache_source, cache_dir):
+ """
+ Tar up new cache with unique name and store it in cache storage
+ directory. Also update journal file with new cache entry.
+ :param cache_source: source files to tar up when building cache file
+ :param cache_dir: cache storage location
+ :return: None
+ """
+ if cache_dir is None:
+ logging.info("No cache dir specified, will not build cache")
+ return
+ cache_name = 'apex-cache-{}.tgz'.format(str(uuid.uuid4()))
+ cache_full_path = os.path.join(cache_dir, cache_name)
+ os.makedirs(cache_dir, exist_ok=True)
+ try:
+ subprocess.check_call(['tar', '--atime-preserve', '--dereference',
+ '-caf', cache_full_path, '-C', cache_source,
+ '.'])
+ except BaseException as e:
+ logging.error("Unable to build new cache tarball")
+ if os.path.isfile(cache_full_path):
+ os.remove(cache_full_path)
+ raise e
+ if os.path.isfile(cache_full_path):
+ logging.info("Cache Build Complete")
+ # update journal
+ cache_entries = get_journal(cache_dir)
+ if cache_entries is None:
+ cache_entries = [cache_name]
+ else:
+ cache_entries.append(cache_name)
+ journal_file = os.path.join(cache_dir, CACHE_JOURNAL)
+ with open(journal_file, 'w') as fh:
+ yaml.safe_dump(cache_entries, fh, default_flow_style=False)
+ logging.info("Journal updated with new entry: {}".format(cache_name))
+ else:
+ logging.warning("Cache file did not build correctly")
+
+
+def prune_cache(cache_dir):
+ """
+ Remove older cache entries if there are more than 2
+ :param cache_dir: Cache storage directory
+ :return: None
+ """
+ if cache_dir is None:
+ return
+ cache_modified_flag = False
+    cache_entries = get_journal(cache_dir)
+    if cache_entries is None:
+        logging.debug("No cache journal found, nothing to prune")
+        return
+    while len(cache_entries) > 2:
+ logging.debug("Will remove older cache entries")
+ cache_to_rm = cache_entries[0]
+ cache_full_path = os.path.join(cache_dir, cache_to_rm)
+ if os.path.isfile(cache_full_path):
+ try:
+ os.remove(cache_full_path)
+ cache_entries.pop(0)
+ cache_modified_flag = True
+            except OSError:
+ logging.warning("Failed to remove cache file: {}".format(
+ cache_full_path))
+ break
+
+ else:
+ logging.debug("No more cache cleanup necessary")
+
+ if cache_modified_flag:
+ logging.debug("Updating cache journal")
+ journal_file = os.path.join(cache_dir, CACHE_JOURNAL)
+ with open(journal_file, 'w') as fh:
+ yaml.safe_dump(cache_entries, fh, default_flow_style=False)
+
+if __name__ == '__main__':
+ parser = create_build_parser()
+ args = parser.parse_args(sys.argv[1:])
+ if args.debug:
+ log_level = logging.DEBUG
+ else:
+ log_level = logging.INFO
+ os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
+ formatter = '%(asctime)s %(levelname)s: %(message)s'
+ logging.basicConfig(filename=args.log_file,
+ format=formatter,
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=log_level)
+ console = logging.StreamHandler()
+ console.setLevel(log_level)
+ console.setFormatter(logging.Formatter(formatter))
+ logging.getLogger('').addHandler(console)
+ apex_root = os.path.split(os.getcwd())[0]
+ if 'apex/apex' in apex_root:
+ apex_root = os.path.split(apex_root)[0]
+ for root, dirs, files in os.walk(apex_root):
+ if BUILD_ROOT in dirs and 'apex/apex' not in root:
+ apex_root = root
+ break
+ apex_build_root = os.path.join(apex_root, BUILD_ROOT)
+ if os.path.isdir(apex_build_root):
+ cache_tmp_dir = os.path.join(apex_root, TMP_CACHE)
+ else:
+ logging.error("You must execute this script inside of the Apex "
+ "local code repository")
+ raise ApexBuildException("Invalid path for apex root: {}. Must be "
+ "invoked from within Apex code directory.".
+ format(apex_root))
+ unpack_cache(cache_tmp_dir, args.cache_dir)
+ build(apex_build_root, args.build_version, args.iso, args.rpms)
+ build_cache(cache_tmp_dir, args.cache_dir)
+ prune_cache(args.cache_dir)
diff --git a/apex/build/__init__.py b/apex/build/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/build/__init__.py
diff --git a/apex/build/build_utils.py b/apex/build/build_utils.py
new file mode 100644
index 00000000..14327a90
--- /dev/null
+++ b/apex/build/build_utils.py
@@ -0,0 +1,108 @@
+##############################################################################
+# Copyright (c) 2017 Feng Pan (fpan@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import git
+import logging
+import os
+from pygerrit2.rest import GerritRestAPI
+import re
+import shutil
+import sys
+
+
+def clone_fork(args):
+ ref = None
+ logging.info("Cloning {}".format(args.repo))
+
+ try:
+ cm = git.Repo(search_parent_directories=True).commit().message
+ except git.exc.InvalidGitRepositoryError:
+ logging.debug('Current Apex directory is not a git repo: {}'
+ .format(os.getcwd()))
+ cm = ''
+
+ logging.info("Current commit message: {}".format(cm))
+    m = re.search(r'{}:\s*(\S+)'.format(args.repo), cm)
+
+ if m:
+ change_id = m.group(1)
+ logging.info("Using change ID {} from {}".format(change_id, args.repo))
+ rest = GerritRestAPI(url=args.url)
+ change_str = "changes/{}?o=CURRENT_REVISION".format(change_id)
+ change = rest.get(change_str)
+ try:
+            assert change['status'] not in ('ABANDONED', 'CLOSED'),\
+ 'Change {} is in {} state'.format(change_id, change['status'])
+ if change['status'] == 'MERGED':
+ logging.info('Change {} is merged, ignoring...'
+ .format(change_id))
+ else:
+ current_revision = change['current_revision']
+ ref = change['revisions'][current_revision]['ref']
+ logging.info('setting ref to {}'.format(ref))
+ except KeyError:
+            logging.error('Failed to get valid change data structure from '
+                          'url {}/{}, data returned: \n{}'
+                          .format(args.url, change_str, change))
+ raise
+
+ # remove existing file or directory named repo
+ if os.path.exists(args.repo):
+ if os.path.isdir(args.repo):
+ shutil.rmtree(args.repo)
+ else:
+ os.remove(args.repo)
+
+ ws = git.Repo.clone_from("{}/{}".format(args.url, args.repo),
+ args.repo, b=args.branch)
+ if ref:
+ git_cmd = ws.git
+ git_cmd.fetch("{}/{}".format(args.url, args.repo), ref)
+ git_cmd.checkout('FETCH_HEAD')
+ logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))
+
+
+def get_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--debug', action='store_true', default=False,
+ help="Turn on debug messages")
+ subparsers = parser.add_subparsers()
+ fork = subparsers.add_parser('clone-fork',
+ help='Clone fork of dependent repo')
+ fork.add_argument('-r', '--repo', required=True, help='Name of repository')
+ fork.add_argument('-u', '--url',
+ default='https://gerrit.opnfv.org/gerrit',
+ help='Gerrit URL of repository')
+ fork.add_argument('-b', '--branch',
+ default='master',
+ help='Branch to checkout')
+ fork.set_defaults(func=clone_fork)
+ return parser
+
+
+def main():
+ parser = get_parser()
+ args = parser.parse_args(sys.argv[1:])
+ if args.debug:
+ logging_level = logging.DEBUG
+ else:
+ logging_level = logging.INFO
+
+ logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=logging_level)
+ if hasattr(args, 'func'):
+ args.func(args)
+ else:
+ parser.print_help()
+        sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/apex/clean.py b/apex/clean.py
new file mode 100644
index 00000000..af9e8ce0
--- /dev/null
+++ b/apex/clean.py
@@ -0,0 +1,65 @@
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Clean will eventually be migrated to this file
+
+import argparse
+import logging
+import os
+import pyipmi
+import pyipmi.interfaces
+import sys
+
+from .common import utils
+
+
+def clean_nodes(inventory):
+ inv_dict = utils.parse_yaml(inventory)
+ if inv_dict is None or 'nodes' not in inv_dict:
+ logging.error("Inventory file is empty or missing nodes definition")
+ sys.exit(1)
+ for node, node_info in inv_dict['nodes'].items():
+ logging.info("Cleaning node: {}".format(node))
+ try:
+ interface = pyipmi.interfaces.create_interface(
+ 'ipmitool', interface_type='lanplus')
+ connection = pyipmi.create_connection(interface)
+ connection.session.set_session_type_rmcp(node_info['ipmi_ip'])
+ connection.target = pyipmi.Target(0x20)
+ connection.session.set_auth_type_user(node_info['ipmi_user'],
+ node_info['ipmi_pass'])
+ connection.session.establish()
+ connection.chassis_control_power_down()
+ except Exception as e:
+ logging.error("Failure while shutting down node {}".format(e))
+ sys.exit(1)
+
+
+def main():
+ clean_parser = argparse.ArgumentParser()
+ clean_parser.add_argument('-f',
+ dest='inv_file',
+ required=True,
+ help='File which contains inventory')
+ args = clean_parser.parse_args(sys.argv[1:])
+ os.makedirs(os.path.dirname('./apex_clean.log'), exist_ok=True)
+ formatter = '%(asctime)s %(levelname)s: %(message)s'
+ logging.basicConfig(filename='./apex_clean.log',
+ format=formatter,
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=logging.DEBUG)
+ console = logging.StreamHandler()
+ console.setLevel(logging.DEBUG)
+ console.setFormatter(logging.Formatter(formatter))
+ logging.getLogger('').addHandler(console)
+ clean_nodes(args.inv_file)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/apex/common/__init__.py b/apex/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/common/__init__.py
diff --git a/apex/common/constants.py b/apex/common/constants.py
new file mode 100644
index 00000000..0df71526
--- /dev/null
+++ b/apex/common/constants.py
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+
+ADMIN_NETWORK = 'admin'
+TENANT_NETWORK = 'tenant'
+EXTERNAL_NETWORK = 'external'
+STORAGE_NETWORK = 'storage'
+API_NETWORK = 'api'
+CONTROLLER = 'controller'
+COMPUTE = 'compute'
+
+OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
+ STORAGE_NETWORK, API_NETWORK]
+DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
+NTP_SERVER = ["pool.ntp.org"]
+ROLES = [COMPUTE, CONTROLLER]
+DOMAIN_NAME = 'localdomain.com'
+COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
+CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
+PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
+ "extraconfig/pre_deploy/"
+DEFAULT_ROOT_DEV = 'sda'
+LIBVIRT_VOLUME_PATH = '/var/lib/libvirt/images'
+
+VIRT_UPLOAD = '--upload'
+VIRT_INSTALL = '--install'
+VIRT_RUN_CMD = '--run-command'
+VIRT_PW = '--root-password'
+
+THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
+THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
+
+DEFAULT_ODL_VERSION = 'carbon'
+DEBUG_OVERCLOUD_PW = 'opnfvapex'
+NET_ENV_FILE = 'network-environment.yaml'
+DEPLOY_TIMEOUT = 90
diff --git a/apex/common/exceptions.py b/apex/common/exceptions.py
new file mode 100644
index 00000000..c660213f
--- /dev/null
+++ b/apex/common/exceptions.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexDeployException(Exception):
+ pass
diff --git a/apex/common/parsers.py b/apex/common/parsers.py
new file mode 100644
index 00000000..8744c862
--- /dev/null
+++ b/apex/common/parsers.py
@@ -0,0 +1,73 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import json
+import logging
+import pprint
+import os
+import re
+
+from apex.common.exceptions import ApexDeployException
+
+"""Parser functions for overcloud/openstack output"""
+
+
+def parse_nova_output(in_file):
+ """
+ Parses nova list output into a dictionary format for node name and ip
+ :param in_file: json format from openstack server list
+ :return: dictionary format for {"node name": "node ip"}
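+
+    Illustrative entry (hypothetical values) from
+    'openstack server list -f json':
+        {"Name": "overcloud-novacompute-0",
+         "Networks": "ctlplane=192.0.2.8"}
+    yields {"overcloud-novacompute-0": "192.0.2.8"}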
+ """
+ if not os.path.isfile(in_file):
+ raise FileNotFoundError(in_file)
+ node_dict = dict()
+ with open(in_file, 'r') as fh:
+ nova_list = json.load(fh)
+
+ for server in nova_list:
+        ip_match = re.search(r'([0-9]+\.){3}[0-9]+', server['Networks'])
+ if ip_match is None:
+ logging.error("Unable to find IP in nova output "
+ "{}".format(pprint.pformat(server, indent=4)))
+ raise ApexDeployException("Unable to parse IP from nova output")
+ else:
+ node_dict[server['Name']] = ip_match.group(0)
+
+ if not node_dict:
+ raise ApexDeployException("No overcloud nodes found in: {}".format(
+ in_file))
+ return node_dict
+
+
+def parse_overcloudrc(in_file):
+ """
+ Parses overcloudrc into a dictionary format for key and value
+ :param in_file:
+ :return: dictionary format for {"variable": "value"}
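+
+    Illustrative input line: "export OS_USERNAME=admin" yields
+    {"OS_USERNAME": "admin"}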
+ """
+ logging.debug("Parsing overcloudrc file {}".format(in_file))
+ if not os.path.isfile(in_file):
+ raise FileNotFoundError(in_file)
+ creds = {}
+ with open(in_file, 'r') as fh:
+ lines = fh.readlines()
+    kv_pattern = re.compile(r'^export\s+([^\s]+)=([^\s]+)$')
+ for line in lines:
+ if 'export' not in line:
+ continue
+ else:
+ res = re.search(kv_pattern, line.strip())
+ if res:
+ creds[res.group(1)] = res.group(2)
+ logging.debug("os cred found: {}, {}".format(res.group(1),
+ res.group(2)))
+ else:
+ logging.debug("os cred not found in: {}".format(line))
+
+ return creds
diff --git a/apex/common/utils.py b/apex/common/utils.py
new file mode 100644
index 00000000..848f2644
--- /dev/null
+++ b/apex/common/utils.py
@@ -0,0 +1,107 @@
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import json
+import logging
+import os
+import pprint
+import subprocess
+import yaml
+
+
+def str2bool(var):
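+    """
+    Coerces a string to a boolean, e.g. 'True' or 'yes' map to True and
+    anything else maps to False; booleans pass through unchanged.
+    """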
+ if isinstance(var, bool):
+ return var
+ else:
+ return var.lower() in ("true", "yes")
+
+
+def parse_yaml(yaml_file):
+ with open(yaml_file) as f:
+ parsed_dict = yaml.safe_load(f)
+ return parsed_dict
+
+
+def dump_yaml(data, file):
+ """
+ Dumps data to a file as yaml
+ :param data: yaml to be written to file
+ :param file: filename to write to
+ :return:
+ """
+ logging.debug("Writing file {} with "
+ "yaml data:\n{}".format(file, yaml.safe_dump(data)))
+ with open(file, "w") as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
+
+
+def dict_objects_to_str(dictionary):
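+    """
+    Recursively converts non-boolean leaf values of a dict or list to
+    strings, e.g. an ipaddress object becomes its string form.
+    """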
+ if isinstance(dictionary, list):
+ tmp_list = []
+ for element in dictionary:
+ if isinstance(element, dict):
+ tmp_list.append(dict_objects_to_str(element))
+ else:
+ tmp_list.append(str(element))
+ return tmp_list
+ elif not isinstance(dictionary, dict):
+ if not isinstance(dictionary, bool):
+ return str(dictionary)
+ else:
+ return dictionary
+ return dict((k, dict_objects_to_str(v)) for
+ k, v in dictionary.items())
+
+
+def run_ansible(ansible_vars, playbook, host='localhost', user='root',
+ tmp_dir=None, dry_run=False):
+ """
+ Executes ansible playbook and checks for errors
+ :param ansible_vars: dictionary of variables to inject into ansible run
+    :param playbook: playbook to execute
+    :param host: host to run the playbook against (defaults to localhost)
+    :param user: user to connect to the host as
+    :param tmp_dir: temp directory to store ansible command
+ :param dry_run: Do not actually apply changes
+ :return: None
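+
+    Illustrative command assembled for a local run (shell form):
+        ansible-playbook --become -i localhost, -u root -c local \
+            <playbook> -vvv --extra-vars '<json vars>'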
+ """
+ logging.info("Executing ansible playbook: {}".format(playbook))
+ inv_host = "{},".format(host)
+ if host == 'localhost':
+ conn_type = 'local'
+ else:
+ conn_type = 'smart'
+ ansible_command = ['ansible-playbook', '--become', '-i', inv_host,
+ '-u', user, '-c', conn_type, playbook, '-vvv']
+ if dry_run:
+ ansible_command.append('--check')
+
+ if isinstance(ansible_vars, dict) and ansible_vars:
+ logging.debug("Ansible variables to be set:\n{}".format(
+ pprint.pformat(ansible_vars)))
+ ansible_command.append('--extra-vars')
+ ansible_command.append(json.dumps(ansible_vars))
+ if tmp_dir:
+ ansible_tmp = os.path.join(tmp_dir,
+ os.path.basename(playbook) + '.rerun')
+ # FIXME(trozet): extra vars are printed without single quotes
+ # so a dev has to add them manually to the command to rerun
+ # the playbook. Need to test if we can just add the single quotes
+ # to the json dumps to the ansible command and see if that works
+ with open(ansible_tmp, 'w') as fh:
+ fh.write("ANSIBLE_HOST_KEY_CHECKING=FALSE {}".format(
+ ' '.join(ansible_command)))
+ try:
+ my_env = os.environ.copy()
+ my_env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ logging.info("Executing playbook...this may take some time")
+ logging.debug(subprocess.check_output(ansible_command, env=my_env,
+ stderr=subprocess.STDOUT).decode('utf-8'))
+ except subprocess.CalledProcessError as e:
+ logging.error("Error executing ansible: {}".format(
+ pprint.pformat(e.output.decode('utf-8'))))
+ raise
diff --git a/apex/deploy.py b/apex/deploy.py
new file mode 100644
index 00000000..76708e96
--- /dev/null
+++ b/apex/deploy.py
@@ -0,0 +1,441 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import json
+import logging
+import os
+import pprint
+import shutil
+import sys
+import tempfile
+
+import apex.virtual.configure_vm as vm_lib
+import apex.virtual.virtual_utils as virt_utils
+from apex import DeploySettings
+from apex import Inventory
+from apex import NetworkEnvironment
+from apex import NetworkSettings
+from apex.common import utils
+from apex.common import constants
+from apex.common import parsers
+from apex.common.exceptions import ApexDeployException
+from apex.network import jumphost
+from apex.undercloud import undercloud as uc_lib
+from apex.overcloud import config as oc_cfg
+from apex.overcloud import overcloud_deploy
+
+APEX_TEMP_DIR = tempfile.mkdtemp()
+ANSIBLE_PATH = 'ansible/playbooks'
+SDN_IMAGE = 'overcloud-full-opendaylight.qcow2'
+
+
+def deploy_quickstart(args, deploy_settings_file, network_settings_file,
+ inventory_file=None):
+ pass
+
+
+def validate_cross_settings(deploy_settings, net_settings, inventory):
+ """
+ Used to validate compatibility across settings file.
+ :param deploy_settings: parsed settings for deployment
+ :param net_settings: parsed settings for network
+ :param inventory: parsed inventory file
+ :return: None
+ """
+
+ if deploy_settings['deploy_options']['dataplane'] != 'ovs' and 'tenant' \
+ not in net_settings.enabled_network_list:
+ raise ApexDeployException("Setting a DPDK based dataplane requires"
+ "a dedicated NIC for tenant network")
+
+ # TODO(trozet): add more checks here like RAM for ODL, etc
+ # check if odl_vpp_netvirt is true and vpp is set
+ # Check if fdio and nosdn:
+ # tenant_nic_mapping_controller_members" ==
+ # "$tenant_nic_mapping_compute_members
+
+
+def build_vms(inventory, network_settings):
+ """
+ Creates VMs and configures vbmc and host
+ :param inventory:
+ :param network_settings:
+ :return:
+ """
+
+ for idx, node in enumerate(inventory['nodes']):
+ name = 'baremetal{}'.format(idx)
+ volume = name + ".qcow2"
+ volume_path = os.path.join(constants.LIBVIRT_VOLUME_PATH, volume)
+ # TODO(trozet): add back aarch64
+ # TODO(trozet): add error checking
+ vm_lib.create_vm(
+ name, volume_path,
+ baremetal_interfaces=network_settings.enabled_network_list,
+ memory=node['memory'], cpus=node['cpu'],
+ macs=[node['mac_address']])
+ virt_utils.host_setup({name: node['pm_port']})
+
+
+def create_deploy_parser():
+ deploy_parser = argparse.ArgumentParser()
+ deploy_parser.add_argument('--debug', action='store_true', default=False,
+ help="Turn on debug messages")
+ deploy_parser.add_argument('-l', '--log-file',
+ default='./apex_deploy.log',
+ dest='log_file', help="Log file to log to")
+ deploy_parser.add_argument('-d', '--deploy-settings',
+ dest='deploy_settings_file',
+ required=True,
+ help='File which contains Apex deploy settings')
+ deploy_parser.add_argument('-n', '--network-settings',
+ dest='network_settings_file',
+ required=True,
+ help='File which contains Apex network '
+ 'settings')
+ deploy_parser.add_argument('-i', '--inventory-file',
+ dest='inventory_file',
+ default=None,
+ help='Inventory file which contains POD '
+ 'definition')
+ deploy_parser.add_argument('-e', '--environment-file',
+ dest='env_file',
+ default='opnfv-environment.yaml',
+ help='Provide alternate base env file')
+ deploy_parser.add_argument('-v', '--virtual', action='store_true',
+ default=False,
+ dest='virtual',
+ help='Enable virtual deployment')
+ deploy_parser.add_argument('--interactive', action='store_true',
+ default=False,
+ help='Enable interactive deployment mode which '
+ 'requires user to confirm steps of '
+ 'deployment')
+    deploy_parser.add_argument('--virtual-computes',
+                               dest='virt_compute_nodes',
+                               type=int,
+                               default=1,
+                               help='Number of Virtual Compute nodes to create'
+                                    ' and use during deployment (defaults to 1'
+                                    ' for noha and 2 for ha)')
+    deploy_parser.add_argument('--virtual-cpus',
+                               dest='virt_cpus',
+                               type=int,
+                               default=4,
+                               help='Number of CPUs to use per Overcloud VM in'
+                                    ' a virtual deployment (defaults to 4)')
+    deploy_parser.add_argument('--virtual-default-ram',
+                               dest='virt_default_ram',
+                               type=int,
+                               default=8,
+                               help='Amount of default RAM to use per '
+                                    'Overcloud VM in GB (defaults to 8).')
+    deploy_parser.add_argument('--virtual-compute-ram',
+                               dest='virt_compute_ram',
+                               type=int,
+                               default=None,
+                               help='Amount of RAM to use per Overcloud '
+                                    'Compute VM in GB (defaults to '
+                                    '--virtual-default-ram). Overrides '
+                                    '--virtual-default-ram arg for computes')
+ deploy_parser.add_argument('--deploy-dir',
+ default='/usr/share/opnfv-apex',
+ help='Directory to deploy from which contains '
+ 'base config files for deployment')
+ deploy_parser.add_argument('--image-dir',
+ default='/var/opt/opnfv/images',
+ help='Directory which contains '
+ 'base disk images for deployment')
+ deploy_parser.add_argument('--lib-dir',
+ default='/usr/share/opnfv-apex',
+ help='Directory path for apex ansible '
+ 'and third party libs')
+ deploy_parser.add_argument('--quickstart', action='store_true',
+ default=False,
+ help='Use tripleo-quickstart to deploy')
+ return deploy_parser
+
+
+def validate_deploy_args(args):
+ """
+ Validates arguments for deploy
+ :param args:
+ :return: None
+ """
+
+ logging.debug('Validating arguments for deployment')
+ if args.virtual and args.inventory_file is not None:
+ logging.error("Virtual enabled but inventory file also given")
+ raise ApexDeployException('You should not specify an inventory file '
+ 'with virtual deployments')
+ elif args.virtual:
+ args.inventory_file = os.path.join(APEX_TEMP_DIR,
+ 'inventory-virt.yaml')
+    elif not os.path.isfile(args.inventory_file):
+ logging.error("Specified inventory file does not exist: {}".format(
+ args.inventory_file))
+ raise ApexDeployException('Specified inventory file does not exist')
+
+ for settings_file in (args.deploy_settings_file,
+ args.network_settings_file):
+        if not os.path.isfile(settings_file):
+ logging.error("Specified settings file does not "
+ "exist: {}".format(settings_file))
+ raise ApexDeployException('Specified settings file does not '
+ 'exist: {}'.format(settings_file))
+
+
+def main():
+ parser = create_deploy_parser()
+ args = parser.parse_args(sys.argv[1:])
+ # FIXME (trozet): this is only needed as a workaround for CI. Remove
+ # when CI is changed
+ if os.getenv('IMAGES', False):
+ args.image_dir = os.getenv('IMAGES')
+ if args.debug:
+ log_level = logging.DEBUG
+ else:
+ log_level = logging.INFO
+ os.makedirs(os.path.dirname(args.log_file), exist_ok=True)
+ formatter = '%(asctime)s %(levelname)s: %(message)s'
+ logging.basicConfig(filename=args.log_file,
+ format=formatter,
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=log_level)
+ console = logging.StreamHandler()
+ console.setLevel(log_level)
+ console.setFormatter(logging.Formatter(formatter))
+ logging.getLogger('').addHandler(console)
+ validate_deploy_args(args)
+ # Parse all settings
+ deploy_settings = DeploySettings(args.deploy_settings_file)
+ logging.info("Deploy settings are:\n {}".format(pprint.pformat(
+ deploy_settings)))
+ net_settings = NetworkSettings(args.network_settings_file)
+ logging.info("Network settings are:\n {}".format(pprint.pformat(
+ net_settings)))
+ net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
+ net_env = NetworkEnvironment(net_settings, net_env_file)
+ net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
+ utils.dump_yaml(dict(net_env), net_env_target)
+ ha_enabled = deploy_settings['global_params']['ha_enabled']
+ if args.virtual:
+ if args.virt_compute_ram is None:
+ compute_ram = args.virt_default_ram
+ else:
+ compute_ram = args.virt_compute_ram
+ if deploy_settings['deploy_options']['sdn_controller'] == \
+ 'opendaylight' and args.virt_default_ram < 12:
+ control_ram = 12
+ logging.warning('RAM per controller is too low. OpenDaylight '
+ 'requires at least 12GB per controller.')
+ logging.info('Increasing RAM per controller to 12GB')
+ elif args.virt_default_ram < 10:
+ control_ram = 10
+ logging.warning('RAM per controller is too low. nosdn '
+ 'requires at least 10GB per controller.')
+ logging.info('Increasing RAM per controller to 10GB')
+ else:
+ control_ram = args.virt_default_ram
+ if ha_enabled and args.virt_compute_nodes < 2:
+ logging.debug('HA enabled, bumping number of compute nodes to 2')
+ args.virt_compute_nodes = 2
+ virt_utils.generate_inventory(args.inventory_file, ha_enabled,
+ num_computes=args.virt_compute_nodes,
+ controller_ram=control_ram * 1024,
+ compute_ram=compute_ram * 1024,
+ vcpus=args.virt_cpus
+ )
+ inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+
+ validate_cross_settings(deploy_settings, net_settings, inventory)
+
+ if args.quickstart:
+ deploy_settings_file = os.path.join(APEX_TEMP_DIR,
+ 'apex_deploy_settings.yaml')
+ utils.dump_yaml(utils.dict_objects_to_str(deploy_settings),
+ deploy_settings_file)
+ logging.info("File created: {}".format(deploy_settings_file))
+ network_settings_file = os.path.join(APEX_TEMP_DIR,
+ 'apex_network_settings.yaml')
+ utils.dump_yaml(utils.dict_objects_to_str(net_settings),
+ network_settings_file)
+ logging.info("File created: {}".format(network_settings_file))
+ deploy_quickstart(args, deploy_settings_file, network_settings_file,
+ args.inventory_file)
+ else:
+ # TODO (trozet): add logic back from:
+ # Iedb75994d35b5dc1dd5d5ce1a57277c8f3729dfd (FDIO DVR)
+ ansible_args = {
+ 'virsh_enabled_networks': net_settings.enabled_network_list
+ }
+ ansible_path = os.path.join(args.lib_dir, ANSIBLE_PATH)
+        utils.run_ansible(ansible_args,
+                          os.path.join(ansible_path,
+                                       'deploy_dependencies.yml'))
+ uc_external = False
+ if 'external' in net_settings.enabled_network_list:
+ uc_external = True
+ if args.virtual:
+ # create all overcloud VMs
+ build_vms(inventory, net_settings)
+ else:
+ # Attach interfaces to jumphost for baremetal deployment
+ jump_networks = ['admin']
+ if uc_external:
+ jump_networks.append('external')
+ for network in jump_networks:
+                iface = net_settings['networks'][network]['installer_vm'][
+                    'members'][0]
+ bridge = "br-{}".format(network)
+ jumphost.attach_interface_to_ovs(bridge, iface, network)
+ # Dump all settings out to temp bash files to be sourced
+ instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
+ with open(instackenv_json, 'w') as fh:
+ json.dump(inventory, fh)
+
+ # Create and configure undercloud
+ if args.debug:
+ root_pw = constants.DEBUG_OVERCLOUD_PW
+ else:
+ root_pw = None
+ undercloud = uc_lib.Undercloud(args.image_dir,
+ root_pw=root_pw,
+ external_network=uc_external)
+ undercloud.start()
+
+ # Generate nic templates
+ for role in 'compute', 'controller':
+ oc_cfg.create_nic_template(net_settings, deploy_settings, role,
+ args.deploy_dir, APEX_TEMP_DIR)
+ # Install Undercloud
+        undercloud.configure(net_settings,
+                             os.path.join(ansible_path,
+                                          'configure_undercloud.yml'),
+                             APEX_TEMP_DIR)
+
+ # Prepare overcloud-full.qcow2
+ logging.info("Preparing Overcloud for deployment...")
+ sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
+ overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
+ root_pw=root_pw)
+ opnfv_env = os.path.join(args.deploy_dir, args.env_file)
+ overcloud_deploy.prep_env(deploy_settings, net_settings, opnfv_env,
+ net_env_target, APEX_TEMP_DIR)
+ overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings,
+ inventory, APEX_TEMP_DIR,
+ args.virtual, args.env_file)
+        deploy_playbook = os.path.join(ansible_path,
+                                       'deploy_overcloud.yml')
+ virt_env = 'virtual-environment.yaml'
+ bm_env = 'baremetal-environment.yaml'
+ for p_env in virt_env, bm_env:
+ shutil.copyfile(os.path.join(args.deploy_dir, p_env),
+ os.path.join(APEX_TEMP_DIR, p_env))
+
+ # Start Overcloud Deployment
+ logging.info("Executing Overcloud Deployment...")
+ deploy_vars = dict()
+ deploy_vars['virtual'] = args.virtual
+ deploy_vars['debug'] = args.debug
+ deploy_vars['dns_server_args'] = ''
+ deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
+ deploy_vars['stackrc'] = 'source /home/stack/stackrc'
+ deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
+ for dns_server in net_settings['dns_servers']:
+ deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
+ dns_server)
+ try:
+ utils.run_ansible(deploy_vars, deploy_playbook, host=undercloud.ip,
+ user='stack', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Overcloud deployment complete")
+ os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+ except Exception:
+ logging.error("Deployment Failed. Please check log")
+ raise
+
+ # Post install
+ logging.info("Executing post deploy configuration")
+ jumphost.configure_bridges(net_settings)
+ nova_output = os.path.join(APEX_TEMP_DIR, 'nova_output')
+ deploy_vars['overcloud_nodes'] = parsers.parse_nova_output(
+ nova_output)
+ deploy_vars['SSH_OPTIONS'] = '-o StrictHostKeyChecking=no -o ' \
+ 'GlobalKnownHostsFile=/dev/null -o ' \
+ 'UserKnownHostsFile=/dev/null -o ' \
+ 'LogLevel=error'
+ deploy_vars['external_network_cmds'] = \
+ overcloud_deploy.external_network_cmds(net_settings)
+ # TODO(trozet): just parse all ds_opts as deploy vars one time
+ ds_opts = deploy_settings['deploy_options']
+ deploy_vars['gluon'] = ds_opts['gluon']
+ deploy_vars['sdn'] = ds_opts['sdn_controller']
+ for dep_option in 'yardstick', 'dovetail', 'vsperf':
+ if dep_option in ds_opts:
+ deploy_vars[dep_option] = ds_opts[dep_option]
+ else:
+ deploy_vars[dep_option] = False
+ deploy_vars['dataplane'] = ds_opts['dataplane']
+ overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc')
+ if ds_opts['congress']:
+ deploy_vars['congress_datasources'] = \
+ overcloud_deploy.create_congress_cmds(overcloudrc)
+ deploy_vars['congress'] = True
+ else:
+ deploy_vars['congress'] = False
+ # TODO(trozet): this is probably redundant with getting external
+ # network info from undercloud.py
+ if 'external' in net_settings.enabled_network_list:
+ ext_cidr = net_settings['networks']['external'][0]['cidr']
+ else:
+ ext_cidr = net_settings['networks']['admin']['cidr']
+ deploy_vars['external_cidr'] = str(ext_cidr)
+ if ext_cidr.version == 6:
+ deploy_vars['external_network_ipv6'] = True
+ else:
+ deploy_vars['external_network_ipv6'] = False
+        post_undercloud = os.path.join(ansible_path,
+                                       'post_deploy_undercloud.yml')
+ logging.info("Executing post deploy configuration undercloud playbook")
+ try:
+ utils.run_ansible(deploy_vars, post_undercloud, host=undercloud.ip,
+ user='stack', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Post Deploy Undercloud Configuration Complete")
+ except Exception:
+ logging.error("Post Deploy Undercloud Configuration failed. "
+ "Please check log")
+ raise
+ # Post deploy overcloud node configuration
+ # TODO(trozet): just parse all ds_opts as deploy vars one time
+ deploy_vars['sfc'] = ds_opts['sfc']
+ deploy_vars['vpn'] = ds_opts['vpn']
+ # TODO(trozet): pull all logs and store in tmp dir in overcloud
+ # playbook
+        post_overcloud = os.path.join(ansible_path,
+                                      'post_deploy_overcloud.yml')
+ # Run per overcloud node
+ for node, ip in deploy_vars['overcloud_nodes'].items():
+ logging.info("Executing Post deploy overcloud playbook on "
+ "node {}".format(node))
+ try:
+ utils.run_ansible(deploy_vars, post_overcloud, host=ip,
+ user='heat-admin', tmp_dir=APEX_TEMP_DIR)
+ logging.info("Post Deploy Overcloud Configuration Complete "
+ "for node {}".format(node))
+ except Exception:
+ logging.error("Post Deploy Overcloud Configuration failed "
+ "for node {}. Please check log".format(node))
+ raise
+ logging.info("Apex deployment complete")
+ logging.info("Undercloud IP: {}, please connect by doing "
+ "'opnfv-util undercloud'".format(undercloud.ip))
+ # TODO(trozet): add logging here showing controller VIP and horizon url
+if __name__ == '__main__':
+ main()
diff --git a/apex/inventory/__init__.py b/apex/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/inventory/__init__.py
diff --git a/apex/inventory/inventory.py b/apex/inventory/inventory.py
new file mode 100644
index 00000000..dd731a83
--- /dev/null
+++ b/apex/inventory/inventory.py
@@ -0,0 +1,89 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import json
+import platform
+
+import yaml
+
+from apex.common import constants
+from apex.common import utils
+
+
+class Inventory(dict):
+ """
+ This class parses an APEX inventory yaml file into an object. It
+ generates or detects all missing fields for deployment.
+
+ It then collapses one level of identification from the object to
+ convert it to a structure that can be dumped into a json file formatted
+    such that TripleO can read the resulting json as an instackenv.json file.
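+
+    Illustrative collapse (hypothetical values): a yaml entry
+        nodes:
+          node1:
+            ipmi_ip: 192.0.2.1
+            ipmi_user: admin
+    becomes one element of the 'nodes' list, with the ipmi_* keys
+    renamed to pm_addr/pm_user/pm_password and mac_address wrapped
+    into a 'mac' list.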
+ """
+ def __init__(self, source, ha=True, virtual=False):
+ init_dict = {}
+ self.root_device = constants.DEFAULT_ROOT_DEV
+ if isinstance(source, str):
+ with open(source, 'r') as inventory_file:
+ yaml_dict = yaml.safe_load(inventory_file)
+ # collapse node identifiers from the structure
+ init_dict['nodes'] = list(map(lambda n: n[1],
+ yaml_dict['nodes'].items()))
+ else:
+ # assume input is a dict to build from
+ init_dict = source
+
+ # move ipmi_* to pm_*
+ # make mac a list
+ def munge_nodes(node):
+ node['pm_addr'] = node['ipmi_ip']
+ node['pm_password'] = node['ipmi_pass']
+ node['pm_user'] = node['ipmi_user']
+ node['mac'] = [node['mac_address']]
+ if 'cpus' in node:
+ node['cpu'] = node['cpus']
+
+ for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
+ 'disk_device'):
+                if i == 'disk_device':
+                    if 'disk_device' not in node.keys():
+                        continue
+                    self.root_device = node[i]
+                del node[i]
+
+ return node
+
+ super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
+
+ # verify number of nodes
+ if ha and len(self['nodes']) < 5 and not virtual:
+ raise InventoryException('You must provide at least 5 '
+ 'nodes for HA baremetal deployment')
+ elif len(self['nodes']) < 2:
+ raise InventoryException('You must provide at least 2 nodes '
+ 'for non-HA baremetal deployment')
+
+ if virtual:
+ self['arch'] = platform.machine()
+ self['host-ip'] = '192.168.122.1'
+ self['power_manager'] = \
+ 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager'
+ self['seed-ip'] = ''
+ self['ssh-key'] = 'INSERT_STACK_USER_PRIV_KEY'
+ self['ssh-user'] = 'root'
+
+ def dump_instackenv_json(self):
+ print(json.dumps(dict(self), sort_keys=True, indent=4))
+
+
+class InventoryException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
diff --git a/apex/network/__init__.py b/apex/network/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/network/__init__.py
diff --git a/apex/network/ip_utils.py b/apex/network/ip_utils.py
new file mode 100644
index 00000000..ae60b705
--- /dev/null
+++ b/apex/network/ip_utils.py
@@ -0,0 +1,230 @@
+##############################################################################
+# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+import ipaddress
+import subprocess
+import re
+import logging
+
+
+def get_ip_range(start_offset=None, count=None, end_offset=None,
+ cidr=None, interface=None):
+ """
+ Generate IP range for a network (cidr) or an interface.
+
+    If CIDR is provided, it will take precedence over interface. In this
+    case, the entire CIDR IP address space is considered usable.
+    start_offset will be calculated from the network address, and
+    end_offset will be calculated from the last address in the subnet.
+
+ If interface is provided, the interface IP will be used to calculate
+ offsets:
+ - If the interface IP is in the first half of the address space,
+ start_offset will be calculated from the interface IP, and end_offset
+ will be calculated from end of address space.
+ - If the interface IP is in the second half of the address space,
+ start_offset will be calculated from the network address in the address
+ space, and end_offset will be calculated from the interface IP.
+
+    Exactly 2 of start_offset, end_offset and count must be provided:
+ - If start_offset and end_offset are provided, a range from
+ start_offset to end_offset will be returned.
+ - If count is provided, a range from either start_offset to
+ (start_offset+count) or (end_offset-count) to end_offset will be
+ returned. The IP range returned will be of size <count>.
+ Both start_offset and end_offset must be greater than 0.
+
+ Returns IP range in the format of "first_addr,second_addr" or exception
+ is raised.
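+
+    Illustrative example:
+        >>> import ipaddress
+        >>> get_ip_range(start_offset=1, count=10,
+        ...              cidr=ipaddress.ip_network('192.0.2.0/24'))
+        '192.0.2.1,192.0.2.10'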
+ """
+ if cidr:
+ if count and start_offset and not end_offset:
+ start_index = start_offset
+ end_index = start_offset + count - 1
+ elif count and end_offset and not start_offset:
+            end_index = -1 - end_offset
+            start_index = end_index - count + 1
+ elif start_offset and end_offset and not count:
+ start_index = start_offset
+ end_index = -1 - end_offset
+ else:
+ raise IPUtilsException("Argument error: must pass in exactly 2 of"
+ " start_offset, end_offset and count")
+
+ start_ip = cidr[start_index]
+ end_ip = cidr[end_index]
+ network = cidr
+ elif interface:
+ network = interface.network
+ number_of_addr = network.num_addresses
+ if interface.ip < network[int(number_of_addr / 2)]:
+ if count and start_offset and not end_offset:
+ start_ip = interface.ip + start_offset
+ end_ip = start_ip + count - 1
+ elif count and end_offset and not start_offset:
+ end_ip = network[-1 - end_offset]
+ start_ip = end_ip - count + 1
+ elif start_offset and end_offset and not count:
+ start_ip = interface.ip + start_offset
+ end_ip = network[-1 - end_offset]
+ else:
+ raise IPUtilsException(
+ "Argument error: must pass in exactly 2 of"
+ " start_offset, end_offset and count")
+ else:
+ if count and start_offset and not end_offset:
+ start_ip = network[start_offset]
+ end_ip = start_ip + count - 1
+ elif count and end_offset and not start_offset:
+ end_ip = interface.ip - end_offset
+ start_ip = end_ip - count + 1
+ elif start_offset and end_offset and not count:
+ start_ip = network[start_offset]
+ end_ip = interface.ip - end_offset
+ else:
+ raise IPUtilsException(
+ "Argument error: must pass in exactly 2 of"
+ " start_offset, end_offset and count")
+
+ else:
+ raise IPUtilsException("Must pass in cidr or interface to generate"
+ "ip range")
+
+ range_result = _validate_ip_range(start_ip, end_ip, network)
+ if range_result:
+ ip_range = "{},{}".format(start_ip, end_ip)
+ return ip_range
+ else:
+ raise IPUtilsException("Invalid IP range: {},{} for network {}"
+ .format(start_ip, end_ip, network))
+
+
+def get_ip(offset, cidr=None, interface=None):
+ """
+ Returns an IP in a network given an offset.
+
+ Either cidr or interface must be provided, cidr takes precedence.
+
+ If cidr is provided, offset is calculated from network address.
+ If interface is provided, offset is calculated from interface IP.
+
+ offset can be positive or negative, but the resulting IP address must also
+ be contained in the same subnet, otherwise an exception will be raised.
+
+    Returns the IP address as a string.
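+
+    Illustrative example:
+        >>> import ipaddress
+        >>> get_ip(1, cidr=ipaddress.ip_network('192.0.2.0/24'))
+        '192.0.2.1'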
+ """
+ if cidr:
+ ip = cidr[0 + offset]
+ network = cidr
+ elif interface:
+ ip = interface.ip + offset
+ network = interface.network
+ else:
+ raise IPUtilsException("Must pass in cidr or interface to generate IP")
+
+ if ip not in network:
+ raise IPUtilsException("IP {} not in network {}".format(ip, network))
+ else:
+ return str(ip)
+
+
+def get_interface(nic, address_family=4):
+ """
+ Returns interface object for a given NIC name in the system
+
+ Only global address will be returned at the moment.
+
+ Returns interface object if an address is found for the given nic,
+ otherwise returns None.
+ """
+ if not nic.strip():
+ logging.error("empty nic name specified")
+ return None
+ output = subprocess.getoutput("/usr/sbin/ip -{} addr show {} scope global"
+ .format(address_family, nic))
+ if address_family == 4:
+ pattern = re.compile("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}")
+ elif address_family == 6:
+ pattern = re.compile("([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}")
+ else:
+ raise IPUtilsException("Invalid address family: {}"
+ .format(address_family))
+ match = re.search(pattern, output)
+ if match:
+ logging.info("found interface {} ip: {}".format(nic, match.group()))
+ return ipaddress.ip_interface(match.group())
+ else:
+ logging.info("interface ip not found! ip address output:\n{}"
+ .format(output))
+ return None
+
+
+def find_gateway(interface):
+ """
+ Validate gateway on the system
+
+    Ensures that the provided interface object is in fact configured as the
+    default route on the system.
+
+ Returns gateway IP (reachable from interface) if default route is found,
+ otherwise returns None.
+ """
+
+ address_family = interface.version
+ output = subprocess.getoutput("/usr/sbin/ip -{} route".format(
+ address_family))
+
+ pattern = re.compile("default\s+via\s+(\S+)\s+")
+ match = re.search(pattern, output)
+
+ if match:
+ gateway_ip = match.group(1)
+ reverse_route_output = subprocess.getoutput("/usr/sbin/ip route get {}"
+ .format(gateway_ip))
+ pattern = re.compile("{}.+src\s+{}".format(gateway_ip, interface.ip))
+ if not re.search(pattern, reverse_route_output):
+ logging.warning("Default route doesn't match interface specified: "
+ "{}".format(reverse_route_output))
+ return None
+ else:
+ return gateway_ip
+ else:
+ logging.warning("Can't find gateway address on system")
+ return None
+
+
+def _validate_ip_range(start_ip, end_ip, cidr):
+ """
+ Validates an IP range is in good order and the range is part of cidr.
+
+ Returns True if validation succeeds, False otherwise.
+ """
+ ip_range = "{},{}".format(start_ip, end_ip)
+ if end_ip <= start_ip:
+ logging.warning("IP range {} is invalid: end_ip should be greater "
+ "than starting ip".format(ip_range))
+ return False
+ if start_ip not in ipaddress.ip_network(cidr):
+ logging.warning('start_ip {} is not in network {}'
+ .format(start_ip, cidr))
+ return False
+ if end_ip not in ipaddress.ip_network(cidr):
+ logging.warning('end_ip {} is not in network {}'.format(end_ip, cidr))
+ return False
+
+ return True
+
+
+class IPUtilsException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
diff --git a/apex/network/jumphost.py b/apex/network/jumphost.py
new file mode 100644
index 00000000..81562c7a
--- /dev/null
+++ b/apex/network/jumphost.py
@@ -0,0 +1,172 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import os
+import re
+import shutil
+import subprocess
+
+from apex.common.exceptions import ApexDeployException
+from apex.network import ip_utils
+
+NET_MAP = {
+ 'admin': 'br-admin',
+ 'tenant': 'br-tenant',
+ 'external': 'br-external',
+ 'storage': 'br-storage',
+ 'api': 'br-api'
+}
+
+
+def configure_bridges(ns):
+ """
+ Configures IP on jumphost bridges
+ :param ns: network_settings
+ :return: None
+ """
+ bridge_networks = ['admin']
+ if 'external' in ns.enabled_network_list:
+ bridge_networks.append('external')
+ for network in bridge_networks:
+ if network == 'external':
+ net_config = ns['networks'][network][0]
+ else:
+ net_config = ns['networks'][network]
+ cidr = net_config['cidr']
+ interface = ip_utils.get_interface(NET_MAP[network], cidr.version)
+
+ if interface:
+ logging.info("Bridge {} already configured with IP: {}".format(
+ NET_MAP[network], interface.ip))
+ else:
+ logging.info("Will configure IP for {}".format(NET_MAP[network]))
+ ovs_ip = net_config['overcloud_ip_range'][1]
+ if cidr.version == 6:
+ ipv6_br_path = "/proc/sys/net/ipv6/conf/{}/disable_" \
+ "ipv6".format(NET_MAP[network])
+                try:
+                    with open(ipv6_br_path, 'w') as fh:
+                        fh.write('0')
+                except OSError:
+                    logging.error("Unable to enable ipv6 on "
+                                  "bridge {}".format(NET_MAP[network]))
+                    raise
+ try:
+ ip_prefix = "{}/{}".format(ovs_ip, cidr.prefixlen)
+ subprocess.check_call(['ip', 'addr', 'add', ip_prefix, 'dev',
+ NET_MAP[network]])
+ subprocess.check_call(['ip', 'link', 'set', 'up', NET_MAP[
+ network]])
+ logging.info("IP configured: {} on bridge {}".format(ovs_ip,
+ NET_MAP[network]))
+ except subprocess.CalledProcessError:
+ logging.error("Unable to configure IP address on "
+ "bridge {}".format(NET_MAP[network]))
+
+
+def attach_interface_to_ovs(bridge, interface, network):
+ """
+ Attaches jumphost interface to OVS for baremetal deployments
+ :param bridge: bridge to attach to
+ :param interface: interface to attach to bridge
+ :param network: Apex network type for these interfaces
+ :return: None
+ """
+
+ net_cfg_path = '/etc/sysconfig/network-scripts'
+ if_file = os.path.join(net_cfg_path, "ifcfg-{}".format(interface))
+ ovs_file = os.path.join(net_cfg_path, "ifcfg-{}".format(bridge))
+
+ logging.info("Attaching interface: {} to bridge: {} on network {}".format(
+ bridge, interface, network
+ ))
+
+ try:
+ output = subprocess.check_output(['ovs-vsctl', 'list-ports', bridge],
+ stderr=subprocess.STDOUT)
+        if interface in output.decode('utf-8'):
+ logging.debug("Interface already attached to bridge")
+ return
+ except subprocess.CalledProcessError as e:
+ logging.error("Unable to dump ports for bridge: {}".format(bridge))
+ logging.error("Error output: {}".format(e.output))
+ raise
+
+ if not os.path.isfile(if_file):
+ logging.error("Interface ifcfg not found: {}".format(if_file))
+ raise FileNotFoundError("Interface file missing: {}".format(if_file))
+
+ ifcfg_params = {
+ 'IPADDR': '',
+ 'NETMASK': '',
+ 'GATEWAY': '',
+ 'METRIC': '',
+ 'DNS1': '',
+ 'DNS2': '',
+ 'PREFIX': ''
+ }
+ with open(if_file, 'r') as fh:
+ interface_output = fh.read()
+
+ for param in ifcfg_params.keys():
+ match = re.search("{}=(.*)\n".format(param), interface_output)
+ if match:
+ ifcfg_params[param] = match.group(1)
+
+ if not ifcfg_params['IPADDR']:
+ logging.error("IPADDR missing in {}".format(if_file))
+ raise ApexDeployException("IPADDR missing in {}".format(if_file))
+ if not (ifcfg_params['NETMASK'] or ifcfg_params['PREFIX']):
+ logging.error("NETMASK/PREFIX missing in {}".format(if_file))
+ raise ApexDeployException("NETMASK/PREFIX missing in {}".format(
+ if_file))
+ if network == 'external' and not ifcfg_params['GATEWAY']:
+ logging.error("GATEWAY is required to be in {} for external "
+ "network".format(if_file))
+ raise ApexDeployException("GATEWAY is required to be in {} for "
+ "external network".format(if_file))
+
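+ # preserve the original ifcfg so the interface can be restored
+ # by hand if needed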
+ shutil.move(if_file, "{}.orig".format(if_file))
+ if_content = """DEVICE={}
+DEVICETYPE=ovs
+TYPE=OVSPort
+PEERDNS=no
+BOOTPROTO=static
+NM_CONTROLLED=no
+ONBOOT=yes
+OVS_BRIDGE={}
+PROMISC=yes""".format(interface, bridge)
+
+ bridge_content = """DEVICE={}
+DEVICETYPE=ovs
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes""".format(bridge)
+ peer_dns = 'no'
+ for param, value in ifcfg_params.items():
+ if value:
+ bridge_content += "\n{}={}".format(param, value)
+ if param == 'DNS1' or param == 'DNS2':
+ peer_dns = 'yes'
+ bridge_content += "\n{}={}".format('PEERDNS', peer_dns)
+
+ logging.debug("New interface file content:\n{}".format(if_content))
+ logging.debug("New bridge file content:\n{}".format(bridge_content))
+ with open(if_file, 'w') as fh:
+ fh.write(if_content)
+ with open(ovs_file, 'w') as fh:
+ fh.write(bridge_content)
+ logging.info("New network ifcfg files written")
+ logging.info("Restarting Linux networking")
+ try:
+ subprocess.check_call(['systemctl', 'restart', 'network'])
+ except subprocess.CalledProcessError:
+ logging.error("Failed to restart Linux networking")
+ raise
diff --git a/apex/network/network_environment.py b/apex/network/network_environment.py
new file mode 100644
index 00000000..c2e9991a
--- /dev/null
+++ b/apex/network/network_environment.py
@@ -0,0 +1,218 @@
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import re
+
+import yaml
+
+from apex.settings.network_settings import NetworkSettings
+from apex.common.constants import (
+ CONTROLLER,
+ COMPUTE,
+ ADMIN_NETWORK,
+ TENANT_NETWORK,
+ STORAGE_NETWORK,
+ EXTERNAL_NETWORK,
+ API_NETWORK
+)
+
+HEAT_NONE = 'OS::Heat::None'
+PORTS = '/ports'
+# Resources defined by <resource name>: <prefix>
+EXTERNAL_RESOURCES = {'OS::TripleO::Network::External': None,
+ 'OS::TripleO::Network::Ports::ExternalVipPort': PORTS,
+ 'OS::TripleO::Controller::Ports::ExternalPort': PORTS,
+ 'OS::TripleO::Compute::Ports::ExternalPort': PORTS}
+TENANT_RESOURCES = {'OS::TripleO::Network::Tenant': None,
+ 'OS::TripleO::Controller::Ports::TenantPort': PORTS,
+ 'OS::TripleO::Compute::Ports::TenantPort': PORTS}
+STORAGE_RESOURCES = {'OS::TripleO::Network::Storage': None,
+ 'OS::TripleO::Network::Ports::StorageVipPort': PORTS,
+ 'OS::TripleO::Controller::Ports::StoragePort': PORTS,
+ 'OS::TripleO::Compute::Ports::StoragePort': PORTS}
+API_RESOURCES = {'OS::TripleO::Network::InternalApi': None,
+ 'OS::TripleO::Network::Ports::InternalApiVipPort': PORTS,
+ 'OS::TripleO::Controller::Ports::InternalApiPort': PORTS,
+ 'OS::TripleO::Compute::Ports::InternalApiPort': PORTS}
+
+# A list of flags that will be set to true when IPv6 is enabled
+IPV6_FLAGS = ["NovaIPv6", "MongoDbIPv6", "CorosyncIPv6", "CephIPv6",
+ "RabbitIPv6", "MemcachedIPv6"]
+
+reg = 'resource_registry'
+param_def = 'parameter_defaults'
+
+
+class NetworkEnvironment(dict):
+ """
+ This class creates a Network Environment to be used in TripleO Heat
+ Templates.
+
+ The class builds upon an existing network-environment file and
+ modifies it based on a NetworkSettings object.
+ """
+ def __init__(self, net_settings, filename, compute_pre_config=False,
+ controller_pre_config=False):
+ """
+ Create Network Environment according to Network Settings
+ """
+ init_dict = {}
+ if isinstance(filename, str):
+ with open(filename, 'r') as net_env_fh:
+ init_dict = yaml.safe_load(net_env_fh)
+
+ super().__init__(init_dict)
+ if not isinstance(net_settings, NetworkSettings):
+ raise NetworkEnvException('Invalid Network Settings object')
+
+ self._set_tht_dir()
+
+ nets = net_settings['networks']
+
+ admin_cidr = nets[ADMIN_NETWORK]['cidr']
+ admin_prefix = str(admin_cidr.prefixlen)
+ self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
+ self[param_def]['ControlPlaneDefaultRoute'] = \
+ nets[ADMIN_NETWORK]['installer_vm']['ip']
+ self[param_def]['EC2MetadataIp'] = \
+ nets[ADMIN_NETWORK]['installer_vm']['ip']
+ self[param_def]['DnsServers'] = net_settings['dns_servers']
+
+ if EXTERNAL_NETWORK in net_settings.enabled_network_list:
+ external_cidr = net_settings.get_network(EXTERNAL_NETWORK)['cidr']
+ self[param_def]['ExternalNetCidr'] = str(external_cidr)
+ external_vlan = self._get_vlan(net_settings.get_network(
+ EXTERNAL_NETWORK))
+ if isinstance(external_vlan, int):
+ self[param_def]['NeutronExternalNetworkBridge'] = '""'
+ self[param_def]['ExternalNetworkVlanID'] = external_vlan
+ external_range = net_settings.get_network(EXTERNAL_NETWORK)[
+ 'overcloud_ip_range']
+ self[param_def]['ExternalAllocationPools'] = \
+ [{'start': str(external_range[0]),
+ 'end': str(external_range[1])}]
+ self[param_def]['ExternalInterfaceDefaultRoute'] = \
+ net_settings.get_network(EXTERNAL_NETWORK)['gateway']
+
+ if external_cidr.version == 6:
+ postfix = '/external_v6.yaml'
+ else:
+ postfix = '/external.yaml'
+ else:
+ postfix = '/noop.yaml'
+
+ # apply resource registry update for EXTERNAL_RESOURCES
+ self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
+
+ if TENANT_NETWORK in net_settings.enabled_network_list:
+ tenant_range = nets[TENANT_NETWORK]['overcloud_ip_range']
+ self[param_def]['TenantAllocationPools'] = \
+ [{'start': str(tenant_range[0]),
+ 'end': str(tenant_range[1])}]
+ tenant_cidr = nets[TENANT_NETWORK]['cidr']
+ self[param_def]['TenantNetCidr'] = str(tenant_cidr)
+ if tenant_cidr.version == 6:
+ postfix = '/tenant_v6.yaml'
+ # set overlay_ip_version option in Neutron ML2 config
+ self[param_def]['NeutronOverlayIPVersion'] = "6"
+ else:
+ postfix = '/tenant.yaml'
+
+ tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
+ if isinstance(tenant_vlan, int):
+ self[param_def]['TenantNetworkVlanID'] = tenant_vlan
+ else:
+ postfix = '/noop.yaml'
+
+ # apply resource registry update for TENANT_RESOURCES
+ self._config_resource_reg(TENANT_RESOURCES, postfix)
+
+ if STORAGE_NETWORK in net_settings.enabled_network_list:
+ storage_range = nets[STORAGE_NETWORK]['overcloud_ip_range']
+ self[param_def]['StorageAllocationPools'] = \
+ [{'start': str(storage_range[0]),
+ 'end': str(storage_range[1])}]
+ storage_cidr = nets[STORAGE_NETWORK]['cidr']
+ self[param_def]['StorageNetCidr'] = str(storage_cidr)
+ if storage_cidr.version == 6:
+ postfix = '/storage_v6.yaml'
+ else:
+ postfix = '/storage.yaml'
+ storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
+ if isinstance(storage_vlan, int):
+ self[param_def]['StorageNetworkVlanID'] = storage_vlan
+ else:
+ postfix = '/noop.yaml'
+
+ # apply resource registry update for STORAGE_RESOURCES
+ self._config_resource_reg(STORAGE_RESOURCES, postfix)
+
+ if API_NETWORK in net_settings.enabled_network_list:
+ api_range = nets[API_NETWORK]['overcloud_ip_range']
+ self[param_def]['InternalApiAllocationPools'] = \
+ [{'start': str(api_range[0]),
+ 'end': str(api_range[1])}]
+ api_cidr = nets[API_NETWORK]['cidr']
+ self[param_def]['InternalApiNetCidr'] = str(api_cidr)
+ if api_cidr.version == 6:
+ postfix = '/internal_api_v6.yaml'
+ else:
+ postfix = '/internal_api.yaml'
+ api_vlan = self._get_vlan(nets[API_NETWORK])
+ if isinstance(api_vlan, int):
+ self[param_def]['InternalApiNetworkVlanID'] = api_vlan
+ else:
+ postfix = '/noop.yaml'
+
+ # apply resource registry update for API_RESOURCES
+ self._config_resource_reg(API_RESOURCES, postfix)
+
+ # Set IPv6 related flags to True. Note that we do not set those to
+ # False when IPv4 is configured; we'll use the default or whatever
+ # the user may have set.
+ if net_settings.get_ip_addr_family() == 6:
+ for flag in IPV6_FLAGS:
+ self[param_def][flag] = True
+
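+ # controller VLAN wins if both roles define an integer VLAN ID;
+ # 'native' means the network runs untagged on the interface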
+ def _get_vlan(self, network):
+ if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
+ return network['nic_mapping'][CONTROLLER]['vlan']
+ elif isinstance(network['nic_mapping'][COMPUTE]['vlan'], int):
+ return network['nic_mapping'][COMPUTE]['vlan']
+ else:
+ return 'native'
+
+ def _set_tht_dir(self):
+ self.tht_dir = None
+ for key, prefix in TENANT_RESOURCES.items():
+ if prefix is None:
+ prefix = ''
+ m = re.split(r'%s/\w+\.yaml' % prefix, self[reg][key])
+ if m is not None and len(m) > 1:
+ self.tht_dir = m[0]
+ break
+ if not self.tht_dir:
+ raise NetworkEnvException('Unable to parse THT Directory')
+
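+ # a None prefix normally maps the resource directly into the THT
+ # dir; for disabled networks (noop postfix) the resource itself is
+ # nulled out with OS::Heat::None instead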
+ def _config_resource_reg(self, resources, postfix):
+ for key, prefix in resources.items():
+ if prefix is None:
+ if postfix == '/noop.yaml':
+ self[reg][key] = HEAT_NONE
+ continue
+ prefix = ''
+ self[reg][key] = self.tht_dir + prefix + postfix
+
+
+class NetworkEnvException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
diff --git a/apex/overcloud/__init__.py b/apex/overcloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/overcloud/__init__.py
diff --git a/apex/overcloud/config.py b/apex/overcloud/config.py
new file mode 100644
index 00000000..6e116de2
--- /dev/null
+++ b/apex/overcloud/config.py
@@ -0,0 +1,76 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+"""
+Utilities for generating overcloud configuration
+"""
+
+import logging
+import os
+
+from jinja2 import Environment
+from jinja2 import FileSystemLoader
+from apex.common.exceptions import ApexDeployException
+
+
+def create_nic_template(network_settings, deploy_settings, role, template_dir,
+ target_dir):
+ """
+ Creates NIC heat template files
+ :param network_settings: network settings object
+ :param deploy_settings: deploy settings object
+ :param role: controller or compute
+ :param template_dir: directory where base templates are stored
+ :param target_dir: to store rendered nic template
+ :return:
+ """
+ # TODO(trozet): rather than use Jinja2 to build these files, build
+ # them natively in Python
+ if role not in ['controller', 'compute']:
+ raise ApexDeployException("Invalid type for overcloud node: {"
+ "}".format(type))
+ logging.info("Creating template for {}".format(role))
+ template_file = 'nics-template.yaml.jinja2'
+ nets = network_settings.get('networks')
+ env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
+ template = env.get_template(template_file)
+ ds = deploy_settings.get('deploy_options')
+ ext_net = 'br-ex'
+ ovs_dpdk_br = ''
+ if ds['dataplane'] == 'fdio':
+ nets['tenant']['nic_mapping'][role]['phys_type'] = 'vpp_interface'
+ if ds['sdn_controller'] == 'opendaylight':
+ nets['external'][0]['nic_mapping'][role]['phys_type'] = \
+ 'vpp_interface'
+ ext_net = 'vpp_interface'
+ elif ds['dataplane'] == 'ovs_dpdk':
+ ovs_dpdk_br = 'br-phy'
+ if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
+ .get('uio-driver')):
+ nets['tenant']['nic_mapping'][role]['uio-driver'] =\
+ ds['performance'][role.title()]['vpp']['uio-driver']
+ if ds['sdn_controller'] == 'opendaylight':
+ nets['external'][0]['nic_mapping'][role]['uio-driver'] =\
+ ds['performance'][role.title()]['vpp']['uio-driver']
+ if (ds.get('performance', {}).get(role.title(), {}).get('vpp', {})
+ .get('interface-options')):
+ nets['tenant']['nic_mapping'][role]['interface-options'] =\
+ ds['performance'][role.title()]['vpp']['interface-options']
+
+ template_output = template.render(
+ nets=nets,
+ role=role,
+ external_net_af=network_settings.get_ip_addr_family(),
+ external_net_type=ext_net,
+ ovs_dpdk_bridge=ovs_dpdk_br)
+
+ logging.debug("Template output: {}".format(template_output))
+ target = os.path.join(target_dir, "{}.yaml".format(role))
+ with open(target, "w") as f:
+ f.write(template_output)
+ logging.info("Wrote template {}".format(target))
diff --git a/apex/overcloud/overcloud_deploy.py b/apex/overcloud/overcloud_deploy.py
new file mode 100644
index 00000000..3c108464
--- /dev/null
+++ b/apex/overcloud/overcloud_deploy.py
@@ -0,0 +1,556 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import base64
+import fileinput
+import logging
+import os
+import re
+import shutil
+import uuid
+import struct
+import time
+
+from apex.common import constants as con
+from apex.common.exceptions import ApexDeployException
+from apex.common import parsers
+from apex.virtual import virtual_utils as virt_utils
+from cryptography.hazmat.primitives import serialization as \
+ crypto_serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.backends import default_backend as \
+ crypto_default_backend
+
+
+SDN_FILE_MAP = {
+ 'opendaylight': {
+ 'sfc': 'opendaylight_sfc.yaml',
+ 'vpn': 'neutron-bgpvpn-opendaylight.yaml',
+ 'gluon': 'gluon.yaml',
+ 'vpp': {
+ 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml',
+ 'default': 'neutron-opendaylight-honeycomb.yaml'
+ },
+ 'default': 'neutron-opendaylight.yaml',
+ },
+ 'onos': {
+ 'sfc': 'neutron-onos-sfc.yaml',
+ 'default': 'neutron-onos.yaml'
+ },
+ 'ovn': 'neutron-ml2-ovn.yaml',
+ False: {
+ 'vpp': 'neutron-ml2-vpp.yaml',
+ 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml')
+ }
+}
+
+OTHER_FILE_MAP = {
+ 'tacker': 'enable_tacker.yaml',
+ 'congress': 'enable_congress.yaml',
+ 'barometer': 'enable_barometer.yaml',
+ 'rt_kvm': 'enable_rt_kvm.yaml'
+}
+
+OVS_PERF_MAP = {
+ 'HostCpusList': 'dpdk_cores',
+ 'NeutronDpdkCoreList': 'pmd_cores',
+ 'NeutronDpdkSocketMemory': 'socket_memory',
+ 'NeutronDpdkMemoryChannels': 'memory_channels'
+}
+
+OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
+OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
+ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
+ ".noarch.rpm"
+
+
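+# SDN_FILE_MAP values resolve as follows: a plain string is a THT env
+# file, a nested dict recurses on feature flags for that controller,
+# and a tuple matches when ds[key] equals the tuple's first element;
+# 'default' is used only when nothing else matched.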
+def build_sdn_env_list(ds, sdn_map, env_list=None):
+ if env_list is None:
+ env_list = list()
+ for k, v in sdn_map.items():
+ if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+ if isinstance(v, dict):
+ env_list.extend(build_sdn_env_list(ds, v))
+ else:
+ env_list.append(os.path.join(con.THT_ENV_DIR, v))
+ elif isinstance(v, tuple):
+ if ds[k] == v[0]:
+ env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
+ if len(env_list) == 0:
+ try:
+ env_list.append(os.path.join(
+ con.THT_ENV_DIR, sdn_map[ds['sdn_controller']]['default']))
+ except KeyError:
+ logging.warning("Unable to find default file for SDN")
+
+ return env_list
+
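+# Illustrative example: deploy_options with sdn_controller:
+# 'opendaylight' and sfc: True resolves to
+# [<THT_ENV_DIR>/opendaylight_sfc.yaml], while no matching feature
+# flags falls back to [<THT_ENV_DIR>/neutron-opendaylight.yaml].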
+
+def create_deploy_cmd(ds, ns, inv, tmp_dir,
+ virtual, env_file='opnfv-environment.yaml'):
+
+ logging.info("Creating deployment command")
+ deploy_options = [env_file, 'network-environment.yaml']
+ ds_opts = ds['deploy_options']
+ deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+ # TODO(trozet): make sure rt kvm file is in tht dir
+ for k, v in OTHER_FILE_MAP.items():
+ if k in ds_opts and ds_opts[k]:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+
+ if ds_opts['ceph']:
+ prep_storage_env(ds, tmp_dir)
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'storage-environment.yaml'))
+ if ds['global_params']['ha_enabled']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'puppet-pacemaker.yaml'))
+
+ if virtual:
+ deploy_options.append('virtual-environment.yaml')
+ else:
+ deploy_options.append('baremetal-environment.yaml')
+
+ nodes = inv['nodes']
+ num_control = 0
+ num_compute = 0
+ for node in nodes:
+ if node['capabilities'] == 'profile:control':
+ num_control += 1
+ elif node['capabilities'] == 'profile:compute':
+ num_compute += 1
+ else:
+ # TODO(trozet) do we want to allow capabilities to not exist?
+ logging.error("Every node must include a 'capabilities' key "
+ "tagged with either 'profile:control' or "
+ "'profile:compute'")
+ raise ApexDeployException("Node missing capabilities "
+ "key: {}".format(node))
+ if num_control == 0 or num_compute == 0:
+ logging.error("Detected 0 control or compute nodes. Control nodes: "
+ "{}, compute nodes{}".format(num_control, num_compute))
+ raise ApexDeployException("Invalid number of control or computes")
+ cmd = "openstack overcloud deploy --templates --timeout {} " \
+ "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT)
+ # build cmd env args
+ for option in deploy_options:
+ cmd += " -e {}".format(option)
+ cmd += " --ntp-server {}".format(ns['ntp'][0])
+ cmd += " --control-scale {}".format(num_control)
+ cmd += " --compute-scale {}".format(num_compute)
+ cmd += ' --control-flavor control --compute-flavor compute'
+ logging.info("Deploy command set: {}".format(cmd))
+
+ with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
+ fh.write(cmd)
+ return cmd
+
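+# The assembled command looks roughly like (illustrative values):
+# openstack overcloud deploy --templates --timeout <timeout>
+# --libvirt-type kvm -e opnfv-environment.yaml
+# -e network-environment.yaml ... --ntp-server <ntp> --control-scale 3
+# --compute-scale 2 --control-flavor control --compute-flavor compute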
+
+def prep_image(ds, img, tmp_dir, root_pw=None):
+ """
+ Locates sdn image and preps for deployment.
+ :param ds: deploy settings
+ :param img: sdn image
+ :param tmp_dir: dir to store modified sdn image
+ :param root_pw: password to configure for overcloud image
+ :return: None
+ """
+ # TODO(trozet): Come up with a better way to organize this logic in this
+ # function
+ logging.info("Preparing image: {} for deployment".format(img))
+ if not os.path.isfile(img):
+ logging.error("Missing SDN image {}".format(img))
+ raise ApexDeployException("Missing SDN image file: {}".format(img))
+
+ ds_opts = ds['deploy_options']
+ virt_cmds = list()
+ sdn = ds_opts['sdn_controller']
+ # we need this due to rhbz #1436021
+ # fixed in systemd-219-37.el7
+ if sdn is not False:
+ logging.info("Neutron openvswitch-agent disabled")
+ virt_cmds.extend([{
+ con.VIRT_RUN_CMD:
+ "rm -f /etc/systemd/system/multi-user.target.wants/"
+ "neutron-openvswitch-agent.service"},
+ {
+ con.VIRT_RUN_CMD:
+ "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent"
+ ".service"
+ }])
+
+ if ds_opts['vpn']:
+ virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"})
+ logging.info("ZRPC and Quagga enabled")
+
+ dataplane = ds_opts['dataplane']
+ if dataplane == 'ovs_dpdk' or dataplane == 'fdio':
+ logging.info("Enabling kernel modules for dpdk")
+ # file to module mapping
+ uio_types = {
+ os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci',
+ os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic'
+ }
+ for mod_file, mod in uio_types.items():
+ with open(mod_file, 'w') as fh:
+ fh.write('#!/bin/bash\n')
+ fh.write('exec /sbin/modprobe {}'.format(mod))
+
+ virt_cmds.extend([
+ {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format(
+ mod_file)},
+ {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/"
+ "{}".format(os.path.basename(mod_file))}
+ ])
+ if root_pw:
+ pw_op = "password:{}".format(root_pw)
+ virt_cmds.append({con.VIRT_PW: pw_op})
+ if ds_opts['sfc'] and dataplane == 'ovs':
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum -y install "
+ "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
+ "{}".format(OVS_NSH_KMOD_RPM)},
+ {con.VIRT_RUN_CMD: "yum upgrade -y "
+ "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
+ "{}".format(OVS_NSH_RPM)}
+ ])
+ if dataplane == 'fdio':
+ # Patch neutron with using OVS external interface for router
+ # and add generic linux NS interface driver
+ virt_cmds.append(
+ {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch "
+ "-p1 < neutron-patch-NSDriver.patch"})
+
+ if sdn == 'opendaylight':
+ if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
+ {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+ con.DEFAULT_ODL_VERSION)},
+ {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
+ {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
+ "/root/puppet-opendaylight-"
+ "{}.tar.gz".format(ds_opts['odl_version'])}
+ ])
+ elif 'odl_vpp_netvirt' in ds_opts and ds_opts['odl_vpp_netvirt']:
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
+ {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
+ ODL_NETVIRT_VPP_RPM)}
+ ])
+
+ if sdn == 'ovn':
+ virt_cmds.extend([
+ {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
+ "*openvswitch*"},
+ {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
+ "*openvswitch*"}
+ ])
+
+ tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+ shutil.copyfile(img, tmp_oc_image)
+ logging.debug("Temporary overcloud image stored as: {}".format(
+ tmp_oc_image))
+ virt_utils.virt_customize(virt_cmds, tmp_oc_image)
+ logging.info("Overcloud image customization complete")
+
+
+def make_ssh_key():
+ """
+ Creates a public/private ssh key pair using 1024-bit RSA
+ :return: private, public key
+ """
+ key = rsa.generate_private_key(
+ backend=crypto_default_backend(),
+ public_exponent=65537,
+ key_size=1024
+ )
+
+ private_key = key.private_bytes(
+ crypto_serialization.Encoding.PEM,
+ crypto_serialization.PrivateFormat.PKCS8,
+ crypto_serialization.NoEncryption())
+ public_key = key.public_key().public_bytes(
+ crypto_serialization.Encoding.OpenSSH,
+ crypto_serialization.PublicFormat.OpenSSH
+ )
+ pub_key = re.sub(r'ssh-rsa\s*', '', public_key.decode('utf-8'))
+ return private_key.decode('utf-8'), pub_key
+
+
+def prep_env(ds, ns, opnfv_env, net_env, tmp_dir):
+ """
+ Creates modified opnfv/network environments for deployment
+ :param ds: deploy settings
+ :param ns: network settings
+ :param opnfv_env: file path for opnfv-environment file
+ :param net_env: file path for network-environment file
+ :param tmp_dir: Apex tmp dir
+ :return:
+ """
+
+ logging.info("Preparing opnfv-environment and network-environment files")
+ ds_opts = ds['deploy_options']
+ tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env))
+ shutil.copyfile(opnfv_env, tmp_opnfv_env)
+ tenant_nic_map = ns['networks']['tenant']['nic_mapping']
+ tenant_ctrl_nic = tenant_nic_map['controller']['members'][0]
+ tenant_comp_nic = tenant_nic_map['compute']['members'][0]
+
+ # SSH keys
+ private_key, public_key = make_ssh_key()
+
+ # Make easier/faster variables to index in the file editor
+ if 'performance' in ds_opts:
+ perf = True
+ # vpp
+ if 'vpp' in ds_opts['performance']['Compute']:
+ perf_vpp_comp = ds_opts['performance']['Compute']['vpp']
+ else:
+ perf_vpp_comp = None
+ if 'vpp' in ds_opts['performance']['Controller']:
+ perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp']
+ else:
+ perf_vpp_ctrl = None
+
+ # ovs
+ if 'ovs' in ds_opts['performance']['Compute']:
+ perf_ovs_comp = ds_opts['performance']['Compute']['ovs']
+ else:
+ perf_ovs_comp = None
+
+ # kernel
+ if 'kernel' in ds_opts['performance']['Compute']:
+ perf_kern_comp = ds_opts['performance']['Compute']['kernel']
+ else:
+ perf_kern_comp = None
+ else:
+ perf = False
+
+ # Modify OPNFV environment
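+ # fileinput with inplace=True redirects stdout into the file being
+ # read, so each print() below rewrites the current line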
+ for line in fileinput.input(tmp_opnfv_env, inplace=True):
+ line = line.strip('\n')
+ if 'CloudDomain' in line:
+ print(" CloudDomain: {}".format(ns['domain_name']))
+ elif ds_opts['sdn_controller'] == 'opendaylight' and \
+ 'odl_vpp_routing_node' in ds_opts and ds_opts[
+ 'odl_vpp_routing_node'] != 'dvr':
+ if 'opendaylight::vpp_routing_node' in line:
+ print(" opendaylight::vpp_routing_node: ${}.${}".format(
+ ds_opts['odl_vpp_routing_node'], ns['domain_name']))
+ elif 'ControllerExtraConfig' in line:
+ print(" ControllerExtraConfig:\n "
+ "tripleo::profile::base::neutron::agents::honeycomb"
+ "::interface_role_mapping: ['{}:tenant-"
+ "interface]'".format(tenant_ctrl_nic))
+ elif 'NovaComputeExtraConfig' in line:
+ print(" NovaComputeExtraConfig:\n "
+ "tripleo::profile::base::neutron::agents::honeycomb"
+ "::interface_role_mapping: ['{}:tenant-"
+ "interface]'".format(tenant_comp_nic))
+ else:
+ print(line)
+
+ elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
+ if 'NeutronVPPAgentPhysnets' in line:
+ print(" NeutronVPPAgentPhysnets: 'datacentre:{}'".format(
+ tenant_ctrl_nic))
+ else:
+ print(line)
+ elif perf:
+ line_printed = False
+ for role in 'NovaCompute', 'Controller':
+ if role == 'NovaCompute':
+ perf_opts = perf_vpp_comp
+ else:
+ perf_opts = perf_vpp_ctrl
+ cfg = "{}ExtraConfig".format(role)
+ if cfg in line and perf_opts:
+ if 'main-core' in perf_opts:
+ print(" {}:\n"
+ " fdio::vpp_cpu_main_core: '{}'"
+ "".format(cfg, perf_opts['main-core']))
+ line_printed = True
+ break
+ elif 'corelist-workers' in perf_opts:
+ print(" {}:\n"
+ " fdio::vpp_cpu_corelist_workers: '{}'"
+ "".format(cfg, perf_opts['corelist-workers']))
+ line_printed = True
+ break
+
+ # kernel args
+ # (FIXME) use compute's kernel settings for all nodes for now.
+ if 'ComputeKernelArgs' in line and perf_kern_comp:
+ kernel_args = ''
+ for k, v in perf_kern_comp.items():
+ kernel_args += "{}={}".format(k, v)
+ if kernel_args:
+ print("ComputeKernelArgs: '{}'".format(kernel_args))
+ line_printed = True
+ elif ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
+ for k, v in OVS_PERF_MAP.items():
+ if k in line and v in perf_ovs_comp:
+ print(" {}: {}".format(k, perf_ovs_comp[v]))
+ line_printed = True
+
+ if not line_printed:
+ print(line)
+ else:
+ print(line)
+
+ logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+
+ # Modify Network environment
+ for line in fileinput.input(net_env, inplace=True):
+ line = line.strip('\n')
+ if ds_opts['dataplane'] == 'ovs_dpdk':
+ if 'ComputeExtraConfigPre' in line:
+ print(' OS::TripleO::ComputeExtraConfigPre: '
+ './ovs-dpdk-preconfig.yaml')
+ else:
+ print(line)
+ elif perf and perf_kern_comp:
+ if 'resource_registry' in line:
+ print("resource_registry:\n"
+ " OS::TripleO::NodeUserData: first-boot.yaml")
+ elif 'NovaSchedulerDefaultFilters' in line:
+ print(" NovaSchedulerDefaultFilters: 'RamFilter,"
+ "ComputeFilter,AvailabilityZoneFilter,"
+ "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
+ "NUMATopologyFilter'")
+ else:
+ print(line)
+ else:
+ print(line)
+
+ logging.info("network-environment file written to {}".format(net_env))
+
+
+def generate_ceph_key():
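+ # binary key blob: little-endian type (2 bytes), creation time in
+ # secs/nsecs (4+4 bytes), key length (2 bytes), then the random key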
+ key = os.urandom(16)
+ header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
+ return base64.b64encode(header + key)
+
+
+def prep_storage_env(ds, tmp_dir):
+ """
+ Creates storage environment file for deployment. Source file is copied by
+ undercloud playbook to host.
+ :param ds:
+ :param tmp_dir:
+ :return:
+ """
+ ds_opts = ds['deploy_options']
+ storage_file = os.path.join(tmp_dir, 'storage-environment.yaml')
+ if not os.path.isfile(storage_file):
+ logging.error("storage-environment file is not in tmp directory: {}. "
+ "Check if file was copied from "
+ "undercloud".format(tmp_dir))
+ raise ApexDeployException("storage-environment file not copied from "
+ "undercloud")
+ for line in fileinput.input(storage_file, inplace=True):
+ line = line.strip('\n')
+ if 'CephClusterFSID' in line:
+ print(" CephClusterFSID: {}".format(str(uuid.uuid4())))
+ elif 'CephMonKey' in line:
+ print(" CephMonKey: {}".format(generate_ceph_key().decode(
+ 'utf-8')))
+ elif 'CephAdminKey' in line:
+ print(" CephAdminKey: {}".format(generate_ceph_key().decode(
+ 'utf-8')))
+ else:
+ print(line)
+ if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
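+ # append hieradata mapping the raw device to an empty osd options
+ # hash, e.g. ceph::profile::params::osds: {/dev/sdb: {}}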
+ with open(storage_file, 'a') as fh:
+ fh.write(' ExtraConfig:\n')
+ fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
+ ds_opts['ceph_device']
+ ))
+
+
+def external_network_cmds(ns):
+ """
+ Generates external network openstack commands
+ :param ns: network settings
+ :return: list of commands to configure external network
+ """
+ if 'external' in ns.enabled_network_list:
+ net_config = ns['networks']['external'][0]
+ external = True
+ pool_start, pool_end = net_config['floating_ip_range']
+ else:
+ net_config = ns['networks']['admin']
+ external = False
+ pool_start, pool_end = ns['apex']['networks']['admin'][
+ 'introspection_range']
+ nic_config = net_config['nic_mapping']
+ gateway = net_config['gateway']
+ cmds = list()
+ # create network command
+ if nic_config['compute']['vlan'] == 'native':
+ ext_type = 'flat'
+ else:
+ ext_type = "vlan --provider-segment {}".format(nic_config[
+ 'compute']['vlan'])
+ cmds.append("openstack network create external --project service "
+ "--external --provider-network-type {} "
+ "--provider-physical-network datacentre".format(ext_type))
+ # create subnet command
+ cidr = net_config['cidr']
+ subnet_cmd = "openstack subnet create external-subnet --project " \
+ "service --network external --no-dhcp --gateway {} " \
+ "--allocation-pool start={},end={} --subnet-range " \
+ "{}".format(gateway, pool_start, pool_end, str(cidr))
+ if external and cidr.version == 6:
+ subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
+ '--ipv6-address-mode slaac'
+ cmds.append(subnet_cmd)
+ logging.debug("Neutron external network commands determined "
+ "as: {}".format(cmds))
+ return cmds
+
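+# Illustrative output for a flat external network:
+# ['openstack network create external --project service --external '
+# '--provider-network-type flat --provider-physical-network datacentre',
+# 'openstack subnet create external-subnet --project service '
+# '--network external --no-dhcp --gateway <gw> '
+# '--allocation-pool start=<start>,end=<end> --subnet-range <cidr>']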
+
+def create_congress_cmds(overcloud_file):
+ drivers = ['nova', 'neutronv2', 'cinder', 'glancev2', 'keystone', 'doctor']
+ overcloudrc = parsers.parse_overcloudrc(overcloud_file)
+ logging.info("Creating congress commands")
+ try:
+ ds_cfg = [
+ "username={}".format(overcloudrc['OS_USERNAME']),
+ "tenant_name={}".format(overcloudrc['OS_PROJECT_NAME']),
+ "password={}".format(overcloudrc['OS_PASSWORD']),
+ "auth_url={}".format(overcloudrc['OS_AUTH_URL'])
+ ]
+ except KeyError:
+ logging.error("Unable to find all keys required for congress in "
+ "overcloudrc: OS_USERNAME, OS_PROJECT_NAME, "
+ "OS_PASSWORD, OS_AUTH_URL. Please check overcloudrc "
+ "file: {}".format(overcloud_file))
+ raise
+ cmds = list()
+ ds_cfg = '--config ' + ' --config '.join(ds_cfg)
+
+ for driver in drivers:
+ if driver == 'doctor':
+ cmd = "{} \"{}\"".format(driver, driver)
+ else:
+ cmd = "{} \"{}\" {}".format(driver, driver, ds_cfg)
+ if driver == 'nova':
+ cmd += '--config api_version="2.34"'
+ logging.debug("Congress command created: {}".format(cmd))
+ cmds.append(cmd)
+ return cmds
diff --git a/apex/settings/__init__.py b/apex/settings/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/settings/__init__.py
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
new file mode 100644
index 00000000..c8e347b7
--- /dev/null
+++ b/apex/settings/deploy_settings.py
@@ -0,0 +1,188 @@
+##############################################################################
+# Copyright (c) 2016 Michael Chapman (michapma@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+import yaml
+
+from apex.common import utils
+from apex.common import constants
+
+REQ_DEPLOY_SETTINGS = ['sdn_controller',
+ 'odl_version',
+ 'tacker',
+ 'congress',
+ 'dataplane',
+ 'sfc',
+ 'vpn',
+ 'vpp',
+ 'ceph',
+ 'gluon',
+ 'rt_kvm']
+
+OPT_DEPLOY_SETTINGS = ['performance',
+ 'vsperf',
+ 'ceph_device',
+ 'yardstick',
+ 'dovetail',
+ 'odl_vpp_routing_node',
+ 'odl_vpp_netvirt',
+ 'barometer']
+
+VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
+VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
+VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'master']
+
+
+class DeploySettings(dict):
+ """
+ This class parses an APEX deploy settings yaml file into an object
+
+ Currently the parsed object is dumped into a bash global definition file
+ for deploy.sh consumption. This object will later be used directly
+ as the deployment scripts move to python.
+ """
+ def __init__(self, filename):
+ if isinstance(filename, str):
+ with open(filename, 'r') as deploy_settings_file:
+ init_dict = yaml.safe_load(deploy_settings_file)
+ else:
+ # assume input is a dict to build from
+ init_dict = filename
+
+ super().__init__(init_dict)
+ self._validate_settings()
+
+ def _validate_settings(self):
+ """
+ Validates the deploy settings file provided
+
+ DeploySettingsException will be raised if validation fails.
+ """
+
+ if 'deploy_options' not in self:
+ raise DeploySettingsException("No deploy options provided in"
+ " deploy settings file")
+ if 'global_params' not in self:
+ raise DeploySettingsException("No global options provided in"
+ " deploy settings file")
+
+ deploy_options = self['deploy_options']
+ if not isinstance(deploy_options, dict):
+ raise DeploySettingsException("deploy_options should be a list")
+
+ if ('gluon' in self['deploy_options'] and
+ 'vpn' in self['deploy_options']):
+ if (self['deploy_options']['gluon'] is True and
+ self['deploy_options']['vpn'] is False):
+ raise DeploySettingsException(
+ "Invalid deployment configuration: "
+ "If gluon is enabled, "
+ "vpn also needs to be enabled")
+
+ for setting, value in deploy_options.items():
+ if setting not in REQ_DEPLOY_SETTINGS + OPT_DEPLOY_SETTINGS:
+ raise DeploySettingsException("Invalid deploy_option {} "
+ "specified".format(setting))
+ if setting == 'dataplane':
+ if value not in VALID_DATAPLANES:
+ planes = ' '.join(VALID_DATAPLANES)
+ raise DeploySettingsException(
+ "Invalid dataplane {} specified. Valid dataplanes:"
+ " {}".format(value, planes))
+
+ for req_set in REQ_DEPLOY_SETTINGS:
+ if req_set not in deploy_options:
+ if req_set == 'dataplane':
+ self['deploy_options'][req_set] = 'ovs'
+ elif req_set == 'ceph':
+ self['deploy_options'][req_set] = True
+ elif req_set == 'odl_version':
+ self['deploy_options'][req_set] = \
+ constants.DEFAULT_ODL_VERSION
+ else:
+ self['deploy_options'][req_set] = False
+ elif req_set == 'odl_version' and self['deploy_options'][
+ 'odl_version'] not in VALID_ODL_VERSIONS:
+ raise DeploySettingsException(
+ "Invalid ODL version: {}".format(self[deploy_options][
+ 'odl_version']))
+
+ if 'performance' in deploy_options:
+ if not isinstance(deploy_options['performance'], dict):
+ raise DeploySettingsException("Performance deploy_option"
+ "must be a dictionary.")
+ for role, role_perf_sets in deploy_options['performance'].items():
+ if role not in VALID_ROLES:
+ raise DeploySettingsException("Performance role {}"
+ "is not valid, choose"
+ "from {}".format(
+ role,
+ " ".join(VALID_ROLES)
+ ))
+
+ for key in role_perf_sets:
+ if key not in VALID_PERF_OPTS:
+ raise DeploySettingsException("Performance option {} "
+ "is not valid, choose"
+ "from {}".format(
+ key,
+ " ".join(
+ VALID_PERF_OPTS)
+ ))
+
+ def _dump_performance(self):
+ """
+ Creates performance settings string for bash consumption.
+
+ Output will be in the form of a list that can be iterated over in
+ bash, with each string being the direct input to the performance
+ setting script in the form <role> <category> <key> <value> to
+ facilitate modification of the correct image.
+ """
+ bash_str = 'performance_options=(\n'
+ deploy_options = self['deploy_options']
+ for role, settings in deploy_options['performance'].items():
+ for category, options in settings.items():
+ for key, value in options.items():
+ bash_str += "\"{} {} {} {}\"\n".format(role,
+ category,
+ key,
+ value)
+ bash_str += ')\n'
+ bash_str += '\n'
+ bash_str += 'performance_roles=(\n'
+ for role in self['deploy_options']['performance']:
+ bash_str += role + '\n'
+ bash_str += ')\n'
+ bash_str += '\n'
+
+ return bash_str
+
+ def _dump_deploy_options_array(self):
+ """
+ Creates deploy settings array in bash syntax.
+ """
+ bash_str = ''
+ for key, value in self['deploy_options'].items():
+ if not isinstance(value, bool):
+ bash_str += "deploy_options_array[{}]=\"{}\"\n".format(key,
+ value)
+ else:
+ bash_str += "deploy_options_array[{}]={}\n".format(key,
+ value)
+ return bash_str
+
+
+class DeploySettingsException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
diff --git a/apex/settings/network_settings.py b/apex/settings/network_settings.py
new file mode 100644
index 00000000..14870078
--- /dev/null
+++ b/apex/settings/network_settings.py
@@ -0,0 +1,327 @@
+##############################################################################
+# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import ipaddress
+import logging
+from copy import copy
+
+import yaml
+
+from apex.common import utils
+from apex.common.constants import (
+ CONTROLLER,
+ COMPUTE,
+ ROLES,
+ DOMAIN_NAME,
+ DNS_SERVERS,
+ NTP_SERVER,
+ ADMIN_NETWORK,
+ EXTERNAL_NETWORK,
+ OPNFV_NETWORK_TYPES,
+)
+from apex.network import ip_utils
+
+
+class NetworkSettings(dict):
+ """
+ This class parses an APEX network settings yaml file into an object.
+ It
+ generates or detects all missing fields for deployment.
+
+ The resulting object will be used later to generate the network
+ environment file as well as to configure post-deployment networks.
+
+ Currently the parsed object is dumped into a bash global definition file
+ for deploy.sh consumption. This object will later be used directly
+ as the deployment scripts move to python.
+ """
+ def __init__(self, filename):
+ init_dict = {}
+ if isinstance(filename, str):
+ with open(filename, 'r') as network_settings_file:
+ init_dict = yaml.safe_load(network_settings_file)
+ else:
+ # assume input is a dict to build from
+ init_dict = filename
+ super().__init__(init_dict)
+
+ if 'apex' in self:
+ # merge two dicts Non-destructively
+ def merge(pri, sec):
+ for key, val in sec.items():
+ if key in pri:
+ if isinstance(val, dict):
+ merge(pri[key], val)
+ # else
+ # do not overwrite what's already there
+ else:
+ pri[key] = val
+ # merge the apex specific config into the first class settings
+ merge(self, copy(self['apex']))
+
+ self.enabled_network_list = []
+ self.nics = {COMPUTE: {}, CONTROLLER: {}}
+ self.nics_specified = {COMPUTE: False, CONTROLLER: False}
+ self._validate_input()
+
+ def get_network(self, network):
+ if network == EXTERNAL_NETWORK and self['networks'][network]:
+ for net in self['networks'][network]:
+ if 'public' in net:
+ return net
+
+ raise NetworkSettingsException("The external network, "
+ "'public', should be defined "
+ "when external networks are "
+ "enabled")
+ else:
+ return self['networks'][network]
+
+ def _validate_input(self):
+ """
+ Validates the network settings file and populates all fields.
+
+ NetworkSettingsException will be raised if validation fails.
+ """
+ if not self['networks'].get(ADMIN_NETWORK, {}).get('enabled', False):
+ raise NetworkSettingsException("You must enable admin network "
+ "and configure it explicitly or "
+ "use auto-detection")
+
+ for network in OPNFV_NETWORK_TYPES:
+ if network in self['networks']:
+ _network = self.get_network(network)
+ if _network.get('enabled', True):
+ logging.info("{} enabled".format(network))
+ self._config_required_settings(network)
+ nicmap = _network['nic_mapping']
+ self._validate_overcloud_nic_order(network)
+ iface = nicmap[CONTROLLER]['members'][0]
+ self._config_ip_range(network=network,
+ interface=iface,
+ ip_range='overcloud_ip_range',
+ start_offset=21, end_offset=21)
+ self.enabled_network_list.append(network)
+ # TODO self._config_optional_settings(network)
+ else:
+ logging.info("{} disabled, will collapse with "
+ "admin network".format(network))
+ else:
+ logging.info("{} is not in specified, will collapse with "
+ "admin network".format(network))
+
+ if 'dns-domain' not in self:
+ self['domain_name'] = DOMAIN_NAME
+ else:
+ self['domain_name'] = self['dns-domain']
+ self['dns_servers'] = self.get('dns_nameservers', DNS_SERVERS)
+ self['ntp_servers'] = self.get('ntp', NTP_SERVER)
+
+ def _validate_overcloud_nic_order(self, network):
+ """
+ Detects if nic order is specified per profile (compute/controller)
+ for network
+
+ If nic order is specified in a network for a profile, it should be
+ specified for every network with that profile other than admin network
+
+ Duplicate nic names are also not allowed across different networks
+
+ :param network: network to detect if nic order present
+ :return: None
+ """
+ for role in ROLES:
+ _network = self.get_network(network)
+ _nicmap = _network.get('nic_mapping', {})
+ _role = _nicmap.get(role, {})
+ interfaces = _role.get('members', [])
+
+ if interfaces:
+ interface = interfaces[0]
+ if not isinstance(_role.get('vlan', 'native'), int) and \
+ any(y == interface for x, y in self.nics[role].items()):
+ raise NetworkSettingsException(
+ "Duplicate {} already specified for "
+ "another network".format(interface))
+ self.nics[role][network] = interface
+ self.nics_specified[role] = True
+ logging.info("{} nic order specified for network {"
+ "}".format(role, network))
+ else:
+ raise NetworkSettingsException(
+ "Interface members are not supplied for {} network "
+ "for the {} role. Please add nic assignments"
+ "".format(network, role))
+
+ def _config_required_settings(self, network):
+ """
+ Configures either CIDR or bridged_interface setting
+
+ cidr takes precedence if both cidr and bridged_interface are specified
+ for a given network.
+
+ When using bridged_interface, we will detect network setting on the
+ given NIC in the system. The resulting config in settings object will
+ be an ipaddress.network object, replacing the NIC name.
+ """
+ _network = self.get_network(network)
+ # if vlan not defined then default it to native
+ if network != ADMIN_NETWORK:
+ for role in ROLES:
+ if 'vlan' not in _network['nic_mapping'][role]:
+ _network['nic_mapping'][role]['vlan'] = 'native'
+
+ cidr = _network.get('cidr')
+
+ if cidr:
+ cidr = ipaddress.ip_network(_network['cidr'])
+ _network['cidr'] = cidr
+ logging.info("{}_cidr: {}".format(network, cidr))
+ elif 'installer_vm' in _network:
+ ucloud_if_list = _network['installer_vm']['members']
+ # If cidr is not specified, we need to know if we should find
+ # IPv6 or IPv4 address on the interface
+ ip = ipaddress.ip_address(_network['installer_vm']['ip'])
+ nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
+ if nic_if:
+ logging.info("{}_bridged_interface: {}".
+ format(network, nic_if))
+ else:
+ raise NetworkSettingsException(
+ "Auto detection failed for {}: Unable to find valid "
+ "ip for interface {}".format(network, ucloud_if_list[0]))
+
+ else:
+ raise NetworkSettingsException(
+ "Auto detection failed for {}: either installer_vm "
+ "members or cidr must be specified".format(network))
+
+ # undercloud settings
+ if network == ADMIN_NETWORK:
+ provisioner_ip = _network['installer_vm']['ip']
+ iface = _network['installer_vm']['members'][0]
+ if not provisioner_ip:
+ _network['installer_vm']['ip'] = self._gen_ip(network, 1)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='dhcp_range',
+ start_offset=2, count=9)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='introspection_range',
+ start_offset=11, count=9)
+ elif network == EXTERNAL_NETWORK:
+ provisioner_ip = _network['installer_vm']['ip']
+ iface = _network['installer_vm']['members'][0]
+ if not provisioner_ip:
+ _network['installer_vm']['ip'] = self._gen_ip(network, 1)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='floating_ip_range',
+ end_offset=2, count=20)
+
+ gateway = _network['gateway']
+ interface = _network['installer_vm']['ip']
+ self._config_gateway(network, gateway, interface)
+
+ def _config_ip_range(self, network, ip_range, interface=None,
+ start_offset=None, end_offset=None, count=None):
+ """
+ Configures IP range for a given setting.
+ If the setting is already specified, no change will be made.
+ The spec for start_offset, end_offset and count are identical to
+ ip_utils.get_ip_range.
+ """
+ _network = self.get_network(network)
+ if ip_range not in _network:
+ cidr = _network.get('cidr')
+ _ip_range = ip_utils.get_ip_range(start_offset=start_offset,
+ end_offset=end_offset,
+ count=count,
+ cidr=cidr,
+ interface=interface)
+ _network[ip_range] = _ip_range.split(',')
+
+ logging.info("Config IP Range: {} {}".format(network, ip_range))
+
+ def _gen_ip(self, network, offset):
+ """
+ Generates an IP at the given offset within the network
+ """
+ _network = self.get_network(network)
+ cidr = _network.get('cidr')
+ ip = ip_utils.get_ip(offset, cidr)
+ logging.info("Config IP: {} {}".format(network, ip))
+ return ip
+
+ def _config_optional_settings(self, network):
+ """
+ Configures optional settings:
+ - admin_network:
+ - provisioner_ip
+ - dhcp_range
+ - introspection_range
+ - public_network:
+ - provisioner_ip
+ - floating_ip_range
+ - gateway
+ """
+ if network == ADMIN_NETWORK:
+ # FIXME: _config_ip function does not exist!
+ self._config_ip(network, None, 'provisioner_ip', 1)
+ self._config_ip_range(network=network,
+ ip_range='dhcp_range',
+ start_offset=2, count=9)
+ self._config_ip_range(network=network,
+ ip_range='introspection_range',
+ start_offset=11, count=9)
+ elif network == EXTERNAL_NETWORK:
+ # FIXME: _config_ip function does not exist!
+ self._config_ip(network, None, 'provisioner_ip', 1)
+ self._config_ip_range(network=network,
+ ip_range='floating_ip_range',
+ end_offset=2, count=20)
+ self._config_gateway(network)
+
+ def _config_gateway(self, network, gateway, interface):
+ """
+ Configures gateway setting for a given network.
+
+ If cidr is specified, we always use the first address in the address
+ space for gateway. Otherwise, we detect the system gateway.
+ """
+ _network = self.get_network(network)
+ if not gateway:
+ cidr = _network.get('cidr')
+ if cidr:
+ _gateway = ip_utils.get_ip(1, cidr)
+ else:
+ _gateway = ip_utils.find_gateway(interface)
+
+ if _gateway:
+ _network['gateway'] = _gateway
+ else:
+ raise NetworkSettingsException("Failed to set gateway")
+
+ logging.info("Config Gateway: {} {}".format(network, gateway))
+
+ def get_ip_addr_family(self):
+ """
+ Returns IP address family for current deployment.
+
+ If any enabled network has IPv6 CIDR, the deployment is classified as
+ IPv6.
+ """
+ return max([
+ ipaddress.ip_network(self.get_network(n)['cidr']).version
+ for n in self.enabled_network_list])
+
+
+class NetworkSettingsException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
diff --git a/apex/tests/__init__.py b/apex/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/tests/__init__.py
diff --git a/apex/tests/config/inventory.yaml b/apex/tests/config/inventory.yaml
new file mode 100644
index 00000000..2abe0fc9
--- /dev/null
+++ b/apex/tests/config/inventory.yaml
@@ -0,0 +1,57 @@
+---
+nodes:
+ node1:
+ mac_address: "00:25:B5:cc:00:1e"
+ ipmi_ip: 72.30.8.69
+ ipmi_user: admin
+ ipmi_pass: octopus
+ pm_type: "pxe_ipmitool"
+ cpus: 2
+ memory: 8192
+ disk: 40
+ arch: "x86_64"
+ capabilities: "profile:control"
+ node2:
+ mac_address: "00:25:B5:cc:00:5d"
+ ipmi_ip: 72.30.8.78
+ ipmi_user: admin
+ ipmi_pass: octopus
+ pm_type: "pxe_ipmitool"
+ cpus: 2
+ memory: 8192
+ disk: 40
+ arch: "x86_64"
+ capabilities: "profile:control"
+ node3:
+ mac_address: "00:25:B5:cc:00:1d"
+ ipmi_ip: 72.30.8.67
+ ipmi_user: admin
+ ipmi_pass: octopus
+ pm_type: "pxe_ipmitool"
+ cpus: 2
+ memory: 8192
+ disk: 40
+ arch: "x86_64"
+ capabilities: "profile:control"
+ node4:
+ mac_address: "00:25:B5:cc:00:3c"
+ ipmi_ip: 72.30.8.76
+ ipmi_user: admin
+ ipmi_pass: octopus
+ pm_type: "pxe_ipmitool"
+ cpus: 2
+ memory: 8192
+ disk: 40
+ arch: "x86_64"
+ capabilities: "profile:compute"
+ node5:
+ mac_address: "00:25:B5:cc:00:5b"
+ ipmi_ip: 72.30.8.71
+ ipmi_user: admin
+ ipmi_pass: octopus
+ pm_type: "pxe_ipmitool"
+ cpus: 2
+ memory: 8192
+ disk: 40
+ arch: "x86_64"
+ capabilities: "profile:compute"
diff --git a/apex/tests/constants.py b/apex/tests/constants.py
new file mode 100644
index 00000000..47e63e2c
--- /dev/null
+++ b/apex/tests/constants.py
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TEST_CONFIG_DIR = 'config'
+TEST_BUILD_DIR = 'build'
+TEST_PLAYBOOK_DIR = 'playbooks'
diff --git a/apex/tests/playbooks/test_playbook.yaml b/apex/tests/playbooks/test_playbook.yaml
new file mode 100644
index 00000000..800d8fde
--- /dev/null
+++ b/apex/tests/playbooks/test_playbook.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ tasks:
+ - debug:
+ msg: "Test playbook"
diff --git a/apex/tests/smoke_tests/execute_smoke_tests.sh b/apex/tests/smoke_tests/execute_smoke_tests.sh
new file mode 100755
index 00000000..27f95251
--- /dev/null
+++ b/apex/tests/smoke_tests/execute_smoke_tests.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+python ~/snaps/snaps/test_runner.py -e ~stack/overcloudrc -n external -c -a -i -f -k -l INFO &> ~stack/smoke-tests.out \ No newline at end of file
diff --git a/apex/tests/smoke_tests/execute_tests.yml b/apex/tests/smoke_tests/execute_tests.yml
new file mode 100644
index 00000000..5042d230
--- /dev/null
+++ b/apex/tests/smoke_tests/execute_tests.yml
@@ -0,0 +1,11 @@
+---
+- hosts: all
+ become: yes
+ become_method: sudo
+ become_user: root
+
+ tasks:
+ - name: Copy execute_smoke_tests.sh
+ copy: src=execute_smoke_tests.sh dest=~/execute_smoke_tests.sh mode=0755
+ - name: Execute Tests
+ shell: sh ~/execute_smoke_tests.sh | tee ~/unit_tests.out \ No newline at end of file
diff --git a/apex/tests/smoke_tests/prepare_undercloud.yml b/apex/tests/smoke_tests/prepare_undercloud.yml
new file mode 100644
index 00000000..7ad769c0
--- /dev/null
+++ b/apex/tests/smoke_tests/prepare_undercloud.yml
@@ -0,0 +1,9 @@
+---
+- hosts: all
+ become: yes
+ become_method: sudo
+ become_user: root
+
+ tasks:
+ - git: repo=https://gerrit.opnfv.org/gerrit/snaps dest=~/snaps
+ - command: pip install -e ~/snaps/
diff --git a/apex/tests/smoke_tests/smoke_tests.yml b/apex/tests/smoke_tests/smoke_tests.yml
new file mode 100644
index 00000000..b67c194f
--- /dev/null
+++ b/apex/tests/smoke_tests/smoke_tests.yml
@@ -0,0 +1,3 @@
+---
+- include: prepare_undercloud.yml
+- include: execute_tests.yml \ No newline at end of file
diff --git a/apex/tests/test_apex_clean.py b/apex/tests/test_apex_clean.py
new file mode 100644
index 00000000..d0b87917
--- /dev/null
+++ b/apex/tests/test_apex_clean.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import mock
+import pyipmi
+import pyipmi.chassis
+from mock import patch
+from nose import tools
+
+from apex import clean_nodes
+
+
+class TestClean(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_clean(self):
+ with mock.patch.object(pyipmi.Session, 'establish') as mock_method:
+ with patch.object(pyipmi.chassis.Chassis,
+ 'chassis_control_power_down') as mock_method2:
+ clean_nodes('apex/tests/config/inventory.yaml')
+
+ tools.assert_equal(mock_method.call_count, 5)
+ tools.assert_equal(mock_method2.call_count, 5)
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
new file mode 100644
index 00000000..357ad1b0
--- /dev/null
+++ b/apex/tests/test_apex_common_utils.py
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import ipaddress
+import nose.tools
+import os
+
+from apex.common import utils
+from apex.settings.network_settings import NetworkSettings
+from apex.tests.constants import (
+ TEST_CONFIG_DIR,
+ TEST_PLAYBOOK_DIR)
+
+NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
+
+
+class TestCommonUtils(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_str2bool(self):
+ nose.tools.assert_equal(utils.str2bool(True), True)
+ nose.tools.assert_equal(utils.str2bool(False), False)
+ nose.tools.assert_equal(utils.str2bool("True"), True)
+ nose.tools.assert_equal(utils.str2bool("YES"), True)
+
+ def test_parse_yaml(self):
+ nose.tools.assert_is_instance(utils.parse_yaml(NET_SETS), dict)
+
+ def test_dict_to_string(self):
+ net_settings = NetworkSettings(NET_SETS)
+ output = utils.dict_objects_to_str(net_settings)
+ nose.tools.assert_is_instance(output, dict)
+ for k, v in output.items():
+ nose.tools.assert_is_instance(k, str)
+ nose.tools.assert_not_is_instance(v, ipaddress.IPv4Address)
+
+ def test_run_ansible(self):
+ playbook = 'apex/tests/playbooks/test_playbook.yaml'
+ nose.tools.assert_equal(
+ utils.run_ansible(None, os.path.join(playbook),
+ dry_run=True), None)
diff --git a/apex/tests/test_apex_deploy_settings.py b/apex/tests/test_apex_deploy_settings.py
new file mode 100644
index 00000000..312c1f3a
--- /dev/null
+++ b/apex/tests/test_apex_deploy_settings.py
@@ -0,0 +1,101 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# https://docs.python.org/3/library/io.html
+import os
+import tempfile
+
+from nose.tools import assert_equal
+from nose.tools import assert_is_instance
+from nose.tools import assert_raises
+
+from apex.settings.deploy_settings import DeploySettings
+from apex.settings.deploy_settings import DeploySettingsException
+from apex.tests.constants import TEST_CONFIG_DIR
+
+deploy_files = ('deploy_settings.yaml',
+ 'os-nosdn-nofeature-noha.yaml',
+ 'os-nosdn-ovs_dpdk-noha.yaml',
+ 'os-ocl-nofeature-ha.yaml',
+ 'os-odl-bgpvpn-ha.yaml',
+ 'os-odl-bgpvpn-noha.yaml',
+ 'os-odl-nofeature-ha.yaml',
+ 'os-nosdn-nofeature-ha.yaml',
+ 'os-nosdn-ovs_dpdk-ha.yaml',
+ 'os-nosdn-performance-ha.yaml',
+ 'os-onos-nofeature-ha.yaml',
+ 'os-onos-sfc-ha.yaml')
+
+test_deploy_content = (
+ 'global_params:',
+ 'deploy_options: string',
+ """deploy_options: string
+global_params:""",
+ """global_params:
+deploy_options:
+ error: error
+""",
+ """global_params:
+deploy_options:
+ performance: string
+""",
+ """global_params:
+deploy_options:
+ dataplane: invalid
+""",
+ """global_params:
+deploy_options:
+ performance:
+ Controller:
+ error: error
+""",
+ """global_params:
+deploy_options:
+ performance:
+ InvalidRole:
+ error: error
+""",)
+
+
+class TestDeploySettings(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_init(self):
+ for f in deploy_files:
+ ds = DeploySettings(os.path.join(TEST_CONFIG_DIR, 'deploy', f))
+ ds = DeploySettings(ds)
+
+    def test__validate_settings(self):
+        for c in test_deploy_content:
+            # create the file outside the try block so the finally clause
+            # cannot reference an unbound name if NamedTemporaryFile fails
+            f = tempfile.NamedTemporaryFile(mode='w')
+            try:
+                f.write(c)
+                f.flush()
+                assert_raises(DeploySettingsException,
+                              DeploySettings, f.name)
+            finally:
+                f.close()
+
+ def test_exception(self):
+ e = DeploySettingsException("test")
+ print(e)
+ assert_is_instance(e, DeploySettingsException)
diff --git a/apex/tests/test_apex_inventory.py b/apex/tests/test_apex_inventory.py
new file mode 100644
index 00000000..ed95c53c
--- /dev/null
+++ b/apex/tests/test_apex_inventory.py
@@ -0,0 +1,69 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+
+from nose.tools import assert_equal
+from nose.tools import assert_is_instance
+from nose.tools import assert_raises
+
+from apex import Inventory
+from apex.inventory.inventory import InventoryException
+from apex.tests.constants import TEST_CONFIG_DIR
+
+inventory_files = ('intel_pod2_settings.yaml',
+ 'nokia_pod1_settings.yaml',
+ 'pod_example_settings.yaml')
+
+files_dir = os.path.join(TEST_CONFIG_DIR, 'inventory')
+
+
+class TestInventory(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_init(self):
+ for f in inventory_files:
+ i = Inventory(os.path.join(files_dir, f))
+ assert_equal(i.dump_instackenv_json(), None)
+
+ # test virtual
+ i = Inventory(i, virtual=True)
+ assert_equal(i.dump_instackenv_json(), None)
+
+ # Remove nodes to violate HA node count
+ while len(i['nodes']) >= 5:
+ i['nodes'].pop()
+ assert_raises(InventoryException,
+ Inventory, i)
+
+ # Remove nodes to violate non-HA node count
+ while len(i['nodes']) >= 2:
+ i['nodes'].pop()
+ assert_raises(InventoryException,
+ Inventory, i, ha=False)
+
+ def test_exception(self):
+ e = InventoryException("test")
+ print(e)
+ assert_is_instance(e, InventoryException)
diff --git a/apex/tests/test_apex_ip_utils.py b/apex/tests/test_apex_ip_utils.py
new file mode 100644
index 00000000..04a1b2bb
--- /dev/null
+++ b/apex/tests/test_apex_ip_utils.py
@@ -0,0 +1,132 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import ipaddress
+import re
+from ipaddress import IPv4Address
+from ipaddress import ip_network
+
+from nose.tools import assert_equal
+from nose.tools import assert_false
+from nose.tools import assert_is_instance
+from nose.tools import assert_raises
+from nose.tools import assert_regexp_matches
+from nose.tools import assert_true
+
+from apex.network.ip_utils import IPUtilsException
+from apex.network.ip_utils import _validate_ip_range
+from apex.network.ip_utils import find_gateway
+from apex.network.ip_utils import get_interface
+from apex.network.ip_utils import get_ip
+from apex.network.ip_utils import get_ip_range
+
+ip4_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
+ip4_range_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3},'
+                               r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
+
+
+def get_default_gateway_linux():
+ """Read the default gateway directly from /proc."""
+ with open("/proc/net/route") as fh:
+ for line in fh:
+ fields = line.strip().split()
+ if fields[2] not in ('00000000', 'Gateway'):
+ return fields[0]
+
+
+class TestIpUtils(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+ klass.iface_name = get_default_gateway_linux()
+ iface = get_interface(klass.iface_name)
+ klass.iface = iface
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_get_interface(self):
+ assert_equal(get_interface(''), None)
+ assert_equal(get_interface('notreal'), None)
+ assert_is_instance(get_interface(self.iface_name,
+ address_family=4),
+ IPv4Address)
+ # can't enable this until there's a v6 address on the ci hosts
+ # assert_is_instance(get_interface(
+ # self.iface_name,
+ # address_family=6), IPv6Address)
+ assert_raises(IPUtilsException,
+ get_interface, self.iface_name, 0)
+
+ def test_find_gateway(self):
+ assert_is_instance(find_gateway(self.iface), str)
+ iface_virbr0 = get_interface('virbr0')
+ assert_equal(find_gateway(iface_virbr0), None)
+
+ def test_get_ip(self):
+ cidr = ipaddress.ip_network("10.10.10.0/24")
+ assert_equal(get_ip(1, cidr=cidr), "10.10.10.1")
+ assert_raises(IPUtilsException, get_ip, 1000, interface=self.iface)
+ assert_regexp_matches(get_ip(1, interface=self.iface), ip4_pattern)
+ assert_raises(IPUtilsException, get_ip, 1)
+
+ def test_get_ip_range_raises(self):
+ assert_raises(IPUtilsException, get_ip_range)
+ assert_raises(IPUtilsException, get_ip_range, interface=self.iface)
+
+ def test_get_ip_range_with_interface(self):
+ assert_regexp_matches(get_ip_range(interface=self.iface,
+ start_offset=1, end_offset=20),
+ ip4_range_pattern)
+ assert_regexp_matches(get_ip_range(interface=self.iface,
+ start_offset=1, count=10),
+ ip4_range_pattern)
+ assert_regexp_matches(get_ip_range(interface=self.iface, end_offset=20,
+ count=10), ip4_range_pattern)
+
+ def test_get_ip_range_with_cidr(self):
+ cidr = ip_network('10.10.10.0/24')
+ assert_raises(IPUtilsException, get_ip_range, cidr=cidr)
+ assert_regexp_matches(get_ip_range(cidr=cidr, start_offset=1,
+ end_offset=20), ip4_pattern)
+ assert_regexp_matches(get_ip_range(cidr=cidr, start_offset=1,
+ count=10), ip4_pattern)
+ assert_regexp_matches(get_ip_range(cidr=cidr, end_offset=20,
+ count=10), ip4_pattern)
+
+ def test__validate_ip_range(self):
+ cidr = ip_network('10.10.10.0/24')
+ assert_true(_validate_ip_range(
+ start_ip=ipaddress.IPv4Address('10.10.10.1'),
+ end_ip=ipaddress.IPv4Address('10.10.10.10'),
+ cidr=cidr))
+ assert_false(_validate_ip_range(
+ start_ip=ipaddress.IPv4Address('10.10.10.10'),
+ end_ip=ipaddress.IPv4Address('10.10.10.1'),
+ cidr=cidr))
+ assert_false(_validate_ip_range(
+ start_ip=ipaddress.IPv4Address('10.10.0.1'),
+ end_ip=ipaddress.IPv4Address('10.10.10.10'),
+ cidr=cidr))
+ assert_false(_validate_ip_range(
+ start_ip=ipaddress.IPv4Address('10.10.10.1'),
+ end_ip=ipaddress.IPv4Address('10.10.11.10'),
+ cidr=cidr))
+
+ def test_exception(self):
+ e = IPUtilsException("test")
+ print(e)
+ assert_is_instance(e, IPUtilsException)
diff --git a/apex/tests/test_apex_network_environment.py b/apex/tests/test_apex_network_environment.py
new file mode 100644
index 00000000..5047adbb
--- /dev/null
+++ b/apex/tests/test_apex_network_environment.py
@@ -0,0 +1,169 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+
+from copy import copy
+
+from nose.tools import assert_equal
+from nose.tools import assert_is_instance
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+
+from apex.common.constants import (
+ EXTERNAL_NETWORK,
+ TENANT_NETWORK,
+ STORAGE_NETWORK,
+ API_NETWORK,
+ NET_ENV_FILE)
+from apex import NetworkEnvironment
+from apex.network.network_environment import NetworkEnvException
+from apex import NetworkSettings
+from apex.tests.constants import TEST_CONFIG_DIR
+from apex.tests.constants import TEST_BUILD_DIR
+
+
+class TestNetworkEnvironment(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+ klass.ns = NetworkSettings(
+ os.path.join(TEST_CONFIG_DIR, 'network/network_settings.yaml'))
+ klass.ns_vlans = NetworkSettings(
+ os.path.join(TEST_CONFIG_DIR,
+ 'network/network_settings_vlans.yaml'))
+ klass.ns_ipv6 = NetworkSettings(
+ os.path.join(TEST_CONFIG_DIR, 'network/network_settings_v6.yaml'))
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_init(self):
+ assert_raises(NetworkEnvException, NetworkEnvironment,
+ None, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+
+ def test_netenv_settings_external_network_vlans(self):
+ # test vlans
+ ne = NetworkEnvironment(self.ns_vlans,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ assert_equal(ne['parameter_defaults']['NeutronExternalNetworkBridge'],
+ '""')
+ assert_equal(ne['parameter_defaults']['ExternalNetworkVlanID'], 501)
+
+ def test_netenv_settings_external_network_ipv6(self):
+ # Test IPv6
+ ne = NetworkEnvironment(self.ns_ipv6,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::External']
+ assert_equal(regstr.split('/')[-1], 'external_v6.yaml')
+
+ def test_netenv_settings_external_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing EXTERNAL_NETWORK
+ ns.enabled_network_list.remove(EXTERNAL_NETWORK)
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::External']
+ assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
+
+ def test_netenv_settings_tenant_network_vlans(self):
+ # test vlans
+ ne = NetworkEnvironment(self.ns_vlans,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ assert_equal(ne['parameter_defaults']['TenantNetworkVlanID'], 401)
+
+# Apex does not support v6 tenant networks,
+# though there is code that would fire if a
+# v6 cidr were passed in; uncomment this to
+# cover that code
+# def test_netenv_settings_tenant_network_v6(self):
+# # Test IPv6
+# ne = NetworkEnvironment(self.ns_ipv6,
+# '../build/network-environment.yaml')
+# regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
+# assert_equal(regstr.split('/')[-1], 'tenant_v6.yaml')
+
+ def test_netenv_settings_tenant_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing TENANT_NETWORK
+ ns.enabled_network_list.remove(TENANT_NETWORK)
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::Tenant']
+ assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
+
+ def test_netenv_settings_storage_network_vlans(self):
+ # test vlans
+ ne = NetworkEnvironment(self.ns_vlans,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ assert_equal(ne['parameter_defaults']['StorageNetworkVlanID'], 201)
+
+ def test_netenv_settings_storage_network_v6(self):
+ # Test IPv6
+ ne = NetworkEnvironment(self.ns_ipv6,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::Storage']
+ assert_equal(regstr.split('/')[-1], 'storage_v6.yaml')
+
+ def test_netenv_settings_storage_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing STORAGE_NETWORK
+ ns.enabled_network_list.remove(STORAGE_NETWORK)
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::Storage']
+ assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
+
+ def test_netenv_settings_api_network_v4(self):
+ ns = copy(self.ns_vlans)
+ ns['networks'][API_NETWORK]['enabled'] = True
+ ns['networks'][API_NETWORK]['cidr'] = '10.11.12.0/24'
+ ns = NetworkSettings(ns)
+        # vlan id should still apply when an explicit v4 cidr is set
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
+
+ def test_netenv_settings_api_network_vlans(self):
+ ns = copy(self.ns_vlans)
+ ns['networks'][API_NETWORK]['enabled'] = True
+ ns = NetworkSettings(ns)
+ # test vlans
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
+
+ def test_netenv_settings_api_network_v6(self):
+ # Test IPv6
+ ne = NetworkEnvironment(self.ns_ipv6,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::InternalApi']
+ assert_equal(regstr.split('/')[-1], 'internal_api_v6.yaml')
+
+ def test_netenv_settings_api_network_removed(self):
+ ns = copy(self.ns)
+ # API_NETWORK is not in the default network settings file
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ regstr = ne['resource_registry']['OS::TripleO::Network::InternalApi']
+ assert_equal(regstr.split('/')[-1], 'OS::Heat::None')
+
+ def test_numa_configs(self):
+ ne = NetworkEnvironment(self.ns,
+ os.path.join(TEST_BUILD_DIR, NET_ENV_FILE),
+ compute_pre_config=True,
+ controller_pre_config=True)
+ assert_is_instance(ne, dict)
+ assert_not_equal(ne, {})
+
+ def test_exception(self):
+ e = NetworkEnvException("test")
+ print(e)
+ assert_is_instance(e, NetworkEnvException)
diff --git a/apex/tests/test_apex_network_settings.py b/apex/tests/test_apex_network_settings.py
new file mode 100644
index 00000000..adff8cff
--- /dev/null
+++ b/apex/tests/test_apex_network_settings.py
@@ -0,0 +1,156 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+
+from nose.tools import (
+ assert_equal,
+ assert_is_instance,
+ assert_raises
+)
+
+from apex.common.constants import (
+ EXTERNAL_NETWORK,
+ STORAGE_NETWORK,
+ ADMIN_NETWORK,
+)
+from apex import NetworkSettings
+from apex.settings.network_settings import NetworkSettingsException
+from apex.tests.constants import TEST_CONFIG_DIR
+
+files_dir = os.path.join(TEST_CONFIG_DIR, 'network')
+
+
+class TestNetworkSettings(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_init(self):
+ assert_is_instance(
+ NetworkSettings(os.path.join(files_dir, 'network_settings.yaml')),
+ NetworkSettings)
+
+ def test_init_vlans(self):
+ assert_is_instance(
+ NetworkSettings(os.path.join(files_dir,
+ 'network_settings_vlans.yaml')),
+ NetworkSettings)
+
+# TODO: v6 test is stuck
+ # def test_init_v6(self):
+ # assert_is_instance(
+ # NetworkSettings(files_dir+'network_settings_v6.yaml', True),
+ # NetworkSettings)
+
+ def test_init_admin_disabled_or_missing(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ # remove admin, apex section will re-add it
+ ns['networks'].pop('admin', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+ # remove admin and apex
+ ns.pop('apex', None)
+ ns['networks'].pop('admin', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_init_collapse_storage(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ # remove storage
+ ns['networks'].pop('storage', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_init_missing_dns_domain(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+        # remove dns-domain
+ ns.pop('dns-domain', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_get_network_settings(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ assert_is_instance(ns, NetworkSettings)
+ for role in ['controller', 'compute']:
+ nic_index = 0
+ print(ns.nics)
+ for network in ns.enabled_network_list:
+ nic = 'eth' + str(nic_index)
+ assert_equal(ns.nics[role][network], nic)
+ nic_index += 1
+
+ def test_get_enabled_networks(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ assert_is_instance(ns.enabled_network_list, list)
+
+ def test_invalid_nic_members(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
+ # set duplicate nic
+ storage_net_nicmap['controller']['members'][0] = 'eth0'
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+ # remove nic members
+ storage_net_nicmap['controller']['members'] = []
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_missing_vlan(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
+ # remove vlan from storage net
+ storage_net_nicmap['compute'].pop('vlan', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+# TODO
+# need to manipulate interfaces somehow,
+# e.g. have ip_utils return something that would pass this
+# def test_admin_auto_detect(self):
+# ns = NetworkSettings(files_dir+'network_settings.yaml')
+# # remove cidr to force autodetection
+# ns['networks'][ADMIN_NETWORK].pop('cidr', None)
+# assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_admin_fail_auto_detect(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ # remove cidr and installer_vm to fail autodetect
+ ns['networks'][ADMIN_NETWORK].pop('cidr', None)
+ ns['networks'][ADMIN_NETWORK].pop('installer_vm', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_exception(self):
+ e = NetworkSettingsException("test")
+ print(e)
+ assert_is_instance(e, NetworkSettingsException)
+
+ def test_config_ip(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ # set the provisioner ip to None to force _gen_ip to generate one
+ ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'] = None
+ ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'] = None
+ # Now rebuild network settings object and check for repopulated values
+ ns = NetworkSettings(ns)
+ assert_equal(ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'],
+ '192.0.2.1')
+ assert_equal(ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'],
+ '192.168.37.1')
+
+ def test_config_gateway(self):
+ ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+ # set the gateway ip to None to force _config_gateway to generate one
+ ns['networks'][EXTERNAL_NETWORK][0]['gateway'] = None
+ # Now rebuild network settings object and check for a repopulated value
+ ns = NetworkSettings(ns)
+ assert_equal(ns['networks'][EXTERNAL_NETWORK][0]['gateway'],
+ '192.168.37.1')
diff --git a/apex/undercloud/__init__.py b/apex/undercloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/undercloud/__init__.py
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
new file mode 100644
index 00000000..7efc2cb3
--- /dev/null
+++ b/apex/undercloud/undercloud.py
@@ -0,0 +1,206 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import logging
+import os
+import shutil
+import time
+
+from apex.virtual import virtual_utils as virt_utils
+from apex.virtual import configure_vm as vm_lib
+from apex.common import constants
+from apex.common import utils
+
+
+class ApexUndercloudException(Exception):
+ pass
+
+
+class Undercloud:
+ """
+ This class represents an Apex Undercloud VM
+ """
+ def __init__(self, image_path, root_pw=None, external_network=False):
+ self.ip = None
+ self.root_pw = root_pw
+ self.external_net = external_network
+ self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
+ 'undercloud.qcow2')
+ self.image_path = image_path
+ self.vm = None
+ if Undercloud._get_vm():
+ logging.error("Undercloud VM already exists. Please clean "
+ "before creating")
+ raise ApexUndercloudException("Undercloud VM already exists!")
+ self.create()
+
+ @staticmethod
+ def _get_vm():
+ conn = libvirt.open('qemu:///system')
+ try:
+ vm = conn.lookupByName('undercloud')
+ return vm
+ except libvirt.libvirtError:
+ logging.debug("No undercloud VM exists")
+
+ def create(self):
+ networks = ['admin']
+ if self.external_net:
+ networks.append('external')
+ self.vm = vm_lib.create_vm(name='undercloud',
+ image=self.volume,
+ baremetal_interfaces=networks,
+ direct_boot='overcloud-full',
+ kernel_args=['console=ttyS0',
+ 'root=/dev/sda'],
+ default_network=True)
+ self.setup_volumes()
+ self.inject_auth()
+
+ def _set_ip(self):
+ ip_out = self.vm.interfaceAddresses(
+ libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE, 0)
+ if ip_out:
+ for (name, val) in ip_out.items():
+ for ipaddr in val['addrs']:
+ if ipaddr['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
+ self.ip = ipaddr['addr']
+ return True
+
+ def start(self):
+ """
+ Start Undercloud VM
+ :return: None
+ """
+ if self.vm.isActive():
+ logging.info("Undercloud already started")
+ else:
+ logging.info("Starting undercloud")
+ self.vm.create()
+ # give 10 seconds to come up
+ time.sleep(10)
+ # set IP
+ for x in range(5):
+ if self._set_ip():
+ logging.info("Undercloud started. IP Address: {}".format(
+ self.ip))
+ break
+ logging.debug("Did not find undercloud IP in {} "
+ "attempts...".format(x))
+ time.sleep(10)
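+            # for/else: the else clause runs only when the loop exhausts
+            # all five attempts without break, i.e. no IP was ever found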
+ else:
+ logging.error("Cannot find IP for Undercloud")
+ raise ApexUndercloudException(
+ "Unable to find IP for undercloud. Check if VM booted "
+ "correctly")
+
+ def configure(self, net_settings, playbook, apex_temp_dir):
+ """
+ Configures undercloud VM
+ :return:
+ """
+ # TODO(trozet): If undercloud install fails we can add a retry
+ logging.info("Configuring Undercloud...")
+ # run ansible
+ ansible_vars = Undercloud.generate_config(net_settings)
+ ansible_vars['apex_temp_dir'] = apex_temp_dir
+ utils.run_ansible(ansible_vars, playbook, host=self.ip, user='stack')
+ logging.info("Undercloud installed!")
+
+ def setup_volumes(self):
+ for img_file in ('overcloud-full.vmlinuz', 'overcloud-full.initrd',
+ 'undercloud.qcow2'):
+ src_img = os.path.join(self.image_path, img_file)
+ dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
+ if not os.path.isfile(src_img):
+ raise ApexUndercloudException(
+ "Required source file does not exist:{}".format(src_img))
+ if os.path.exists(dest_img):
+ os.remove(dest_img)
+ shutil.copyfile(src_img, dest_img)
+
+        # TODO(trozet): check if resize is needed; right now size is 50GB
+ # there is a lib called vminspect which has some dependencies and is
+ # not yet available in pip. Consider switching to this lib later.
+ # execute ansible playbook
+
+ def inject_auth(self):
+ virt_ops = list()
+ # virt-customize keys/pws
+ if self.root_pw:
+ pw_op = "password:{}".format(self.root_pw)
+ virt_ops.append({constants.VIRT_PW: pw_op})
+ # ssh key setup
+ virt_ops.append({constants.VIRT_RUN_CMD:
+ 'mkdir -p /root/.ssh'})
+ virt_ops.append({constants.VIRT_UPLOAD:
+ '/root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys'})
+ run_cmds = [
+ 'chmod 600 /root/.ssh/authorized_keys',
+ 'restorecon /root/.ssh/authorized_keys',
+ 'cp /root/.ssh/authorized_keys /home/stack/.ssh/',
+ 'chown stack:stack /home/stack/.ssh/authorized_keys',
+ 'chmod 600 /home/stack/.ssh/authorized_keys'
+ ]
+ for cmd in run_cmds:
+ virt_ops.append({constants.VIRT_RUN_CMD: cmd})
+ virt_utils.virt_customize(virt_ops, self.volume)
+
+ @staticmethod
+ def generate_config(ns):
+ """
+ Generates a dictionary of settings for configuring undercloud
+ :param ns: network settings to derive undercloud settings
+ :return: dictionary of settings
+ """
+
+ ns_admin = ns['networks']['admin']
+ intro_range = ns['apex']['networks']['admin']['introspection_range']
+ config = dict()
+ config['undercloud_config'] = [
+ "enable_ui false",
+ "undercloud_update_packages false",
+ "undercloud_debug false",
+ "undercloud_hostname undercloud.{}".format(ns['dns-domain']),
+ "local_ip {}/{}".format(str(ns_admin['installer_vm']['ip']),
+ str(ns_admin['cidr']).split('/')[1]),
+ "network_gateway {}".format(str(ns_admin['installer_vm']['ip'])),
+ "network_cidr {}".format(str(ns_admin['cidr'])),
+ "dhcp_start {}".format(str(ns_admin['dhcp_range'][0])),
+ "dhcp_end {}".format(str(ns_admin['dhcp_range'][1])),
+ "inspection_iprange {}".format(','.join(intro_range))
+ ]
+
+ config['ironic_config'] = [
+ "disk_utils iscsi_verify_attempts 30",
+ "disk_partitioner check_device_max_retries 40"
+ ]
+
+ config['nova_config'] = [
+ "dns_domain {}".format(ns['dns-domain']),
+ "dhcp_domain {}".format(ns['dns-domain'])
+ ]
+
+ config['neutron_config'] = [
+ "dns_domain {}".format(ns['dns-domain']),
+ ]
+ # FIXME(trozet): possible bug here with not using external network
+ ns_external = ns['networks']['external'][0]
+ config['external_network'] = {
+ "vlan": ns_external['installer_vm']['vlan'],
+ "ip": ns_external['installer_vm']['ip'],
+ "prefix": str(ns_external['cidr']).split('/')[1],
+ "enabled": ns_external['enabled']
+ }
+
+ # FIXME (trozet): for now hardcoding aarch64 to false
+ config['aarch64'] = False
+
+ return config
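+
+# Illustrative sketch (not executed): generate_config() expects a
+# NetworkSettings-like mapping; the key shapes shown below are assumptions
+# inferred from the lookups above.
+#
+#   ns = NetworkSettings('network_settings.yaml')
+#   cfg = Undercloud.generate_config(ns)
+#   # cfg['undercloud_config'] -> ["enable_ui false", ..., "local_ip ..."]
+#   # cfg['external_network']  -> {'vlan': ..., 'ip': ..., 'prefix': ...}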
diff --git a/apex/virtual/__init__.py b/apex/virtual/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/apex/virtual/__init__.py
diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py
new file mode 100755
index 00000000..3af7d1e8
--- /dev/null
+++ b/apex/virtual/configure_vm.py
@@ -0,0 +1,206 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import libvirt
+import logging
+import math
+import os
+import random
+
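+# each generated MAC consumes two slots in the final octet (the generator
+# steps by two), so one base prefix holds at most 0xff / 2 MACs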
+MAX_NUM_MACS = math.trunc(0xff / 2)
+
+
+def generate_baremetal_macs(count=1):
+ """Generate an Ethernet MAC address suitable for baremetal testing."""
+    # NOTE(dprince): We generate our own bare metal MAC addresses here
+ # instead of relying on libvirt so that we can ensure the
+ # locally administered bit is set low. (The libvirt default is
+ # to set the 2nd MSB high.) This effectively allows our
+ # fake baremetal VMs to more accurately behave like real hardware
+ # and fixes issues with bridge/DHCP configurations which rely
+ # on the fact that bridges assume the MAC address of the lowest
+ # attached NIC.
+ # MACs generated for a given machine will also be in sequential
+ # order, which matches how most BM machines are laid out as well.
+ # Additionally we increment each MAC by two places.
+ macs = []
+
+ if count > MAX_NUM_MACS:
+ raise ValueError("The MAX num of MACS supported is %i." % MAX_NUM_MACS)
+
+ base_nums = [0x00,
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
+
+ start = random.randint(0x00, 0xff)
+ if (start + (count * 2)) > 0xff:
+ # leave room to generate macs in sequence
+ start = 0xff - count * 2
+ for num in range(0, count * 2, 2):
+ mac = start + num
+ macs.append(base_mac + ":" + ("%02x" % mac))
+ return macs
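+
+# Illustrative output: generate_baremetal_macs(2) might return
+#   ['00:5c:11:92:ae:10', '00:5c:11:92:ae:12']
+# -- sequential, stepped by two in the final octet, with a zeroed first
+# octet so the locally administered bit stays low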
+
+
+def create_vm_storage(domain, vol_path='/var/lib/libvirt/images'):
+ volume_name = domain + '.qcow2'
+ stgvol_xml = """
+ <volume>
+ <name>{}</name>
+ <allocation>0</allocation>
+ <capacity unit="G">41</capacity>
+ <target>
+ <format type='qcow2'/>
+ <path>{}</path>
+ <permissions>
+ <owner>107</owner>
+ <group>107</group>
+ <mode>0744</mode>
+ <label>virt_image_t</label>
+ </permissions>
+ </target>
+ </volume>""".format(volume_name, os.path.join(vol_path, volume_name))
+
+ conn = libvirt.open('qemu:///system')
+ pool = conn.storagePoolLookupByName('default')
+ if pool is None:
+ raise Exception("Default libvirt storage pool missing")
+ # TODO(trozet) create default storage pool
+
+ if pool.isActive() == 0:
+ pool.create()
+ try:
+ vol = pool.storageVolLookupByName(volume_name)
+ vol.wipe(0)
+ vol.delete(0)
+ except libvirt.libvirtError as e:
+ if e.get_error_code() != libvirt.VIR_ERR_NO_STORAGE_VOL:
+ raise
+ new_vol = pool.createXML(stgvol_xml)
+ if new_vol is None:
+ raise Exception("Unable to create new volume")
+ logging.debug("Created new storage volume: {}".format(volume_name))
+
+
+def create_vm(name, image, diskbus='sata', baremetal_interfaces=None,
+              arch='x86_64', engine='kvm', memory=8192, bootdev='network',
+              cpus=4, nic_driver='virtio', macs=None, direct_boot=None,
+              kernel_args=None, default_network=False,
+              template_dir='/usr/share/opnfv-apex'):
+    # avoid mutable default arguments; a shared default list would
+    # accumulate generated MACs across calls via 'macs +=' below
+    if baremetal_interfaces is None:
+        baremetal_interfaces = ['admin']
+    macs = list(macs) if macs else []
+    # TODO(trozet): fix name here to be image since it is full path of qcow2
+    create_vm_storage(name)
+ with open(os.path.join(template_dir, 'domain.xml'), 'r') as f:
+ source_template = f.read()
+ imagefile = os.path.realpath(image)
+ memory = int(memory) * 1024
+ params = {
+ 'name': name,
+ 'imagefile': imagefile,
+ 'engine': engine,
+ 'arch': arch,
+ 'memory': str(memory),
+ 'cpus': str(cpus),
+ 'bootdev': bootdev,
+ 'network': '',
+ 'enable_serial_console': '',
+ 'direct_boot': '',
+ 'kernel_args': '',
+ 'user_interface': '',
+ }
+
+ # Configure the bus type for the target disk device
+ params['diskbus'] = diskbus
+ nicparams = {
+ 'nicdriver': nic_driver,
+ }
+ if default_network:
+ params['network'] = """
+ <!-- regular natted network, for access to the vm -->
+ <interface type='network'>
+ <source network='default'/>
+ <model type='%(nicdriver)s'/>
+ </interface>""" % nicparams
+ else:
+ params['network'] = ''
+ while len(macs) < len(baremetal_interfaces):
+ macs += generate_baremetal_macs(1)
+
+ params['bm_network'] = ""
+ for bm_interface, mac in zip(baremetal_interfaces, macs):
+ bm_interface_params = {
+ 'bminterface': bm_interface,
+ 'bmmacaddress': mac,
+ 'nicdriver': nic_driver,
+ }
+ params['bm_network'] += """
+ <!-- bridged 'bare metal' network on %(bminterface)s -->
+ <interface type='network'>
+ <mac address='%(bmmacaddress)s'/>
+ <source network='%(bminterface)s'/>
+ <model type='%(nicdriver)s'/>
+ </interface>""" % bm_interface_params
+
+ params['enable_serial_console'] = """
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ """
+ if direct_boot:
+ params['direct_boot'] = """
+ <kernel>/var/lib/libvirt/images/%(direct_boot)s.vmlinuz</kernel>
+ <initrd>/var/lib/libvirt/images/%(direct_boot)s.initrd</initrd>
+ """ % {'direct_boot': direct_boot}
+ if kernel_args:
+ params['kernel_args'] = """
+ <cmdline>%s</cmdline>
+ """ % ' '.join(kernel_args)
+
+ if arch == 'aarch64':
+
+ params['direct_boot'] += """
+ <loader readonly='yes' \
+ type='pflash'>/usr/share/AAVMF/AAVMF_CODE.fd</loader>
+ <nvram>/var/lib/libvirt/qemu/nvram/centos7.0_VARS.fd</nvram>
+ """
+ params['user_interface'] = """
+ <controller type='virtio-serial' index='0'>
+ <address type='virtio-mmio'/>
+ </controller>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <channel type='unix'>
+ <target type='virtio' name='org.qemu.guest_agent.0'/>
+ <address type='virtio-serial' controller='0' bus='0' port='1'/>
+ </channel>
+ """
+ else:
+ params['user_interface'] = """
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ </video>
+ """
+
+ libvirt_template = source_template % params
+ logging.debug("libvirt template is {}".format(libvirt_template))
+ conn = libvirt.open('qemu:///system')
+ vm = conn.defineXML(libvirt_template)
+ logging.info("Created machine %s with UUID %s" % (name, vm.UUIDString()))
+ return vm
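+
+# Illustrative usage (assumes an 'admin' libvirt network exists and
+# domain.xml is present under template_dir):
+#
+#   vm = create_vm(name='baremetal0',
+#                  image='/var/lib/libvirt/images/baremetal0.qcow2',
+#                  baremetal_interfaces=['admin'])
+#   vm.create()  # boot the newly defined domain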
diff --git a/apex/virtual/virtual_utils.py b/apex/virtual/virtual_utils.py
new file mode 100644
index 00000000..5ebb0582
--- /dev/null
+++ b/apex/virtual/virtual_utils.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import copy
+import iptc
+import logging
+import os
+import pprint
+import subprocess
+
+from apex.common import utils
+from apex.virtual import configure_vm as vm_lib
+from virtualbmc import manager as vbmc_lib
+
+DEFAULT_RAM = 8192
+DEFAULT_PM_PORT = 6230
+DEFAULT_USER = 'admin'
+DEFAULT_PASS = 'password'
+DEFAULT_VIRT_IP = '192.168.122.1'
+
+
+def generate_inventory(target_file, ha_enabled=False, num_computes=1,
+ controller_ram=DEFAULT_RAM, arch='x86_64',
+ compute_ram=DEFAULT_RAM, vcpus=4):
+ """
+ Generates inventory file for virtual deployments
+ :param target_file:
+ :param ha_enabled:
+ :param num_computes:
+ :param controller_ram:
+ :param arch:
+ :param compute_ram:
+ :param vcpus:
+ :return:
+ """
+
+ node = {'mac_address': '',
+ 'ipmi_ip': DEFAULT_VIRT_IP,
+ 'ipmi_user': DEFAULT_USER,
+ 'ipmi_pass': DEFAULT_PASS,
+ 'pm_type': 'pxe_ipmitool',
+ 'pm_port': '',
+ 'cpu': vcpus,
+ 'memory': DEFAULT_RAM,
+ 'disk': 41,
+ 'arch': arch,
+ 'capabilities': ''
+ }
+
+ inv_output = {'nodes': {}}
+ if ha_enabled:
+ num_ctrlrs = 3
+ else:
+ num_ctrlrs = 1
+
+ for idx in range(num_ctrlrs + num_computes):
+ tmp_node = copy.deepcopy(node)
+ tmp_node['mac_address'] = vm_lib.generate_baremetal_macs(1)[0]
+ tmp_node['pm_port'] = DEFAULT_PM_PORT + idx
+ if idx < num_ctrlrs:
+ tmp_node['capabilities'] = 'profile:control'
+ tmp_node['memory'] = controller_ram
+ else:
+ tmp_node['capabilities'] = 'profile:compute'
+ tmp_node['memory'] = compute_ram
+ inv_output['nodes']['node{}'.format(idx)] = copy.deepcopy(tmp_node)
+
+ utils.dump_yaml(inv_output, target_file)
+
+ logging.info('Virtual environment file created: {}'.format(target_file))
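+
+# Illustrative usage: with ha_enabled, a 3-controller/2-compute inventory
+# is written, nodes node0..node4 with pm_ports 6230..6234:
+#
+#   generate_inventory('/tmp/inventory.yaml', ha_enabled=True,
+#                      num_computes=2)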
+
+
+def host_setup(node):
+ """
+ Handles configuring vmbc and firewalld/iptables
+ :param node: dictionary of domain names and ports for ipmi
+ :return:
+ """
+    vbmc_manager = vbmc_lib.VirtualBMCManager()
+    # TODO(trozet): add support for firewalld
+    subprocess.call(['systemctl', 'stop', 'firewalld'])
+
+    for name, port in node.items():
+        vbmc_manager.add(username=DEFAULT_USER, password=DEFAULT_PASS,
+                         port=port, address=DEFAULT_VIRT_IP, domain_name=name,
+                         libvirt_uri='qemu:///system',
+                         libvirt_sasl_password=False,
+                         libvirt_sasl_username=False)
+
+        # iptables rule to open this domain's ipmi port; creating the rule
+        # and starting vbmc inside the loop ensures every domain is handled,
+        # not just the last one iterated
+        rule = iptc.Rule()
+        rule.protocol = 'udp'
+        match = rule.create_match('udp')
+        match.dport = str(port)
+        rule.add_match(match)
+        rule.target = iptc.Target(rule, "ACCEPT")
+        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
+        chain.insert_rule(rule)
+        try:
+            subprocess.check_call(['vbmc', 'start', name])
+            logging.debug("Started vbmc for domain {}".format(name))
+        except subprocess.CalledProcessError:
+            logging.error("Failed to start vbmc for {}".format(name))
+            raise
+    logging.debug('vbmcs setup: {}'.format(vbmc_manager.list()))
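+
+# Illustrative usage: keys are libvirt domain names, values the ipmi ports
+# assigned in the inventory (pm_port):
+#
+#   host_setup({'baremetal0': 6230, 'baremetal1': 6231})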
+
+
+def virt_customize(ops, target):
+ """
+ Helper function to virt customize disks
+    :param ops: list of operations and arguments
+ :param target: target disk to modify
+ :return: None
+ """
+ logging.info("Virt customizing target disk: {}".format(target))
+ virt_cmd = ['virt-customize']
+ for op in ops:
+ for op_cmd, op_arg in op.items():
+ virt_cmd.append(op_cmd)
+ virt_cmd.append(op_arg)
+ virt_cmd.append('-a')
+ virt_cmd.append(target)
+    if not os.path.isfile(target):
+        raise FileNotFoundError(target)
+ my_env = os.environ.copy()
+ my_env['LIBGUESTFS_BACKEND'] = 'direct'
+ logging.debug("Virt-customizing with: \n{}".format(virt_cmd))
+ try:
+ logging.debug(subprocess.check_output(virt_cmd, env=my_env,
+ stderr=subprocess.STDOUT))
+ except subprocess.CalledProcessError as e:
+ logging.error("Error executing virt-customize: {}".format(
+ pprint.pformat(e.output)))
+ raise
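+
+# Illustrative usage (assumes constants.VIRT_RUN_CMD maps to the matching
+# virt-customize flag, e.g. '--run-command'):
+#
+#   virt_customize([{'--run-command': 'touch /tmp/apex_test'}],
+#                  '/var/lib/libvirt/images/undercloud.qcow2')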