author     Tim Rozet <trozet@redhat.com>   2017-06-25 21:25:36 -0400
committer  Tim Rozet <trozet@redhat.com>   2017-08-23 08:59:54 -0400
commit     f4d388ea508ba00771e43a219ac64e0d430b73bd (patch)
tree       4f61a89664474154c3d6f7adecfbb0396617199c /lib/python
parent     807fad268c90649f2901c5f5c4cdeb788a0308e0 (diff)
Migrates Apex to Python
Removes all bash libraries and converts almost all of the code to a mixture of Python and Ansible. utils.sh and clean.sh still exist. clean.sh will be migrated fully to clean.py in another patch. The Apex Python package is now built into the opnfv-apex-common RPM.

To install locally do 'pip3 install .'. To deploy:
opnfv-deploy -d <file> -n <file> --image-dir /root/apex/.build -v --debug

Non-python files (THT yaml, settings files, ansible playbooks) are all installed into /usr/share/opnfv-apex/. The RPM will copy settings files into /etc/opnfv-apex/.

JIRA: APEX-317

Change-Id: I3232f0329bcd13bce5a28da6a8c9c84d0b048024
Signed-off-by: Tim Rozet <trozet@redhat.com>
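The removed lib/python package exposed a small top-level API (see the apex/__init__.py diff below). A minimal usage sketch, assuming the pre-migration package is importable; the yaml file names are placeholders, not files shipped by this patch:

    from apex import NetworkSettings, DeploySettings, Inventory

    # Hypothetical settings/inventory files provided by the operator
    net_settings = NetworkSettings('network-settings.yaml')
    deploy_settings = DeploySettings('deploy-settings.yaml')
    inventory = Inventory('inventory.yaml', ha=True, virtual=False)

    # Dump bash-consumable globals, as deploy.sh expected before this migration
    net_settings.dump_bash()
    deploy_settings.dump_bash()
    inventory.dump_instackenv_json()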
Diffstat (limited to 'lib/python')
-rw-r--r--  lib/python/apex/__init__.py               15
-rw-r--r--  lib/python/apex/clean.py                  39
-rw-r--r--  lib/python/apex/common/__init__.py         0
-rw-r--r--  lib/python/apex/common/constants.py       30
-rw-r--r--  lib/python/apex/common/utils.py           31
-rw-r--r--  lib/python/apex/deploy_settings.py       195
-rw-r--r--  lib/python/apex/inventory.py              98
-rw-r--r--  lib/python/apex/ip_utils.py              230
-rw-r--r--  lib/python/apex/network_environment.py   219
-rw-r--r--  lib/python/apex/network_settings.py      360
-rwxr-xr-x  lib/python/apex_python_utils.py          265
-rw-r--r--  lib/python/build_utils.py                108
12 files changed, 0 insertions, 1590 deletions
diff --git a/lib/python/apex/__init__.py b/lib/python/apex/__init__.py
deleted file mode 100644
index b2a45f7d..00000000
--- a/lib/python/apex/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-from .network_settings import NetworkSettings
-from .deploy_settings import DeploySettings
-from .network_environment import NetworkEnvironment
-from .clean import clean_nodes
-from .inventory import Inventory
diff --git a/lib/python/apex/clean.py b/lib/python/apex/clean.py
deleted file mode 100644
index 184b5ec9..00000000
--- a/lib/python/apex/clean.py
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Clean will eventually be migrated to this file
-
-import logging
-import pyipmi
-import pyipmi.interfaces
-import sys
-
-from .common import utils
-
-
-def clean_nodes(inventory):
- inv_dict = utils.parse_yaml(inventory)
- if inv_dict is None or 'nodes' not in inv_dict:
- logging.error("Inventory file is empty or missing nodes definition")
- sys.exit(1)
- for node, node_info in inv_dict['nodes'].items():
- logging.info("Cleaning node: {}".format(node))
- try:
- interface = pyipmi.interfaces.create_interface(
- 'ipmitool', interface_type='lanplus')
- connection = pyipmi.create_connection(interface)
- connection.session.set_session_type_rmcp(node_info['ipmi_ip'])
- connection.target = pyipmi.Target(0x20)
- connection.session.set_auth_type_user(node_info['ipmi_user'],
- node_info['ipmi_pass'])
- connection.session.establish()
- connection.chassis_control_power_down()
- except Exception as e:
- logging.error("Failure while shutting down node {}".format(e))
- sys.exit(1)
diff --git a/lib/python/apex/common/__init__.py b/lib/python/apex/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/lib/python/apex/common/__init__.py
+++ /dev/null
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
deleted file mode 100644
index 3aa28eab..00000000
--- a/lib/python/apex/common/constants.py
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-ADMIN_NETWORK = 'admin'
-TENANT_NETWORK = 'tenant'
-EXTERNAL_NETWORK = 'external'
-STORAGE_NETWORK = 'storage'
-API_NETWORK = 'api'
-CONTROLLER = 'controller'
-COMPUTE = 'compute'
-
-OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
- STORAGE_NETWORK, API_NETWORK]
-DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
-NTP_SERVER = ["pool.ntp.org"]
-COMPUTE = 'compute'
-CONTROLLER = 'controller'
-ROLES = [COMPUTE, CONTROLLER]
-DOMAIN_NAME = 'localdomain.com'
-COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
-CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
-PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
- "extraconfig/pre_deploy/"
-DEFAULT_ROOT_DEV = 'sda'
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
deleted file mode 100644
index 8e6896fa..00000000
--- a/lib/python/apex/common/utils.py
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-
-
-def str2bool(var):
- if isinstance(var, bool):
- return var
- else:
- return var.lower() in ("true", "yes")
-
-
-def parse_yaml(yaml_file):
- with open(yaml_file) as f:
- parsed_dict = yaml.safe_load(f)
- return parsed_dict
-
-
-def write_str(bash_str, path=None):
- if path:
- with open(path, 'w') as file:
- file.write(bash_str)
- else:
- print(bash_str)
diff --git a/lib/python/apex/deploy_settings.py b/lib/python/apex/deploy_settings.py
deleted file mode 100644
index 06185941..00000000
--- a/lib/python/apex/deploy_settings.py
+++ /dev/null
@@ -1,195 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Michael Chapman (michapma@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import yaml
-import logging
-
-from .common import utils
-
-REQ_DEPLOY_SETTINGS = ['sdn_controller',
- 'odl_version',
- 'tacker',
- 'congress',
- 'dataplane',
- 'sfc',
- 'vpn',
- 'vpp',
- 'ceph',
- 'gluon',
- 'rt_kvm']
-
-OPT_DEPLOY_SETTINGS = ['performance',
- 'vsperf',
- 'ceph_device',
- 'yardstick',
- 'dovetail',
- 'odl_vpp_routing_node',
- 'odl_vpp_netvirt',
- 'barometer']
-
-VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
-VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
-VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
-
-
-class DeploySettings(dict):
- """
-    This class parses an APEX deploy settings yaml file into an object
-
- Currently the parsed object is dumped into a bash global definition file
- for deploy.sh consumption. This object will later be used directly as
-    the deployment scripts move to Python.
- """
- def __init__(self, filename):
- init_dict = {}
- if isinstance(filename, str):
- with open(filename, 'r') as deploy_settings_file:
- init_dict = yaml.safe_load(deploy_settings_file)
- else:
- # assume input is a dict to build from
- init_dict = filename
-
- super().__init__(init_dict)
- self._validate_settings()
-
- def _validate_settings(self):
- """
- Validates the deploy settings file provided
-
- DeploySettingsException will be raised if validation fails.
- """
-
- if 'deploy_options' not in self:
- raise DeploySettingsException("No deploy options provided in"
- " deploy settings file")
- if 'global_params' not in self:
- raise DeploySettingsException("No global options provided in"
- " deploy settings file")
-
- deploy_options = self['deploy_options']
- if not isinstance(deploy_options, dict):
- raise DeploySettingsException("deploy_options should be a list")
-
- if ('gluon' in self['deploy_options'] and
- 'vpn' in self['deploy_options']):
- if (self['deploy_options']['gluon'] is True and
- self['deploy_options']['vpn'] is False):
- raise DeploySettingsException(
- "Invalid deployment configuration: "
- "If gluon is enabled, "
- "vpn also needs to be enabled")
-
- for setting, value in deploy_options.items():
- if setting not in REQ_DEPLOY_SETTINGS + OPT_DEPLOY_SETTINGS:
- raise DeploySettingsException("Invalid deploy_option {} "
- "specified".format(setting))
- if setting == 'dataplane':
- if value not in VALID_DATAPLANES:
- planes = ' '.join(VALID_DATAPLANES)
- raise DeploySettingsException(
- "Invalid dataplane {} specified. Valid dataplanes:"
- " {}".format(value, planes))
-
- for req_set in REQ_DEPLOY_SETTINGS:
- if req_set not in deploy_options:
- if req_set == 'dataplane':
- self['deploy_options'][req_set] = 'ovs'
- elif req_set == 'ceph':
- self['deploy_options'][req_set] = True
- else:
- self['deploy_options'][req_set] = False
-
- if 'performance' in deploy_options:
- if not isinstance(deploy_options['performance'], dict):
-                raise DeploySettingsException("Performance deploy_option "
- "must be a dictionary.")
- for role, role_perf_sets in deploy_options['performance'].items():
- if role not in VALID_ROLES:
- raise DeploySettingsException("Performance role {}"
- "is not valid, choose"
- "from {}".format(
- role,
- " ".join(VALID_ROLES)
- ))
-
- for key in role_perf_sets:
- if key not in VALID_PERF_OPTS:
- raise DeploySettingsException("Performance option {} "
- "is not valid, choose"
- "from {}".format(
- key,
- " ".join(
- VALID_PERF_OPTS)
- ))
-
- def _dump_performance(self):
- """
- Creates performance settings string for bash consumption.
-
- Output will be in the form of a list that can be iterated over in
- bash, with each string being the direct input to the performance
- setting script in the form <role> <category> <key> <value> to
- facilitate modification of the correct image.
- """
- bash_str = 'performance_options=(\n'
- deploy_options = self['deploy_options']
- for role, settings in deploy_options['performance'].items():
- for category, options in settings.items():
- for key, value in options.items():
- bash_str += "\"{} {} {} {}\"\n".format(role,
- category,
- key,
- value)
- bash_str += ')\n'
- bash_str += '\n'
- bash_str += 'performance_roles=(\n'
- for role in self['deploy_options']['performance']:
- bash_str += role + '\n'
- bash_str += ')\n'
- bash_str += '\n'
-
- return bash_str
-
- def _dump_deploy_options_array(self):
- """
- Creates deploy settings array in bash syntax.
- """
- bash_str = ''
- for key, value in self['deploy_options'].items():
- if not isinstance(value, bool):
- bash_str += "deploy_options_array[{}]=\"{}\"\n".format(key,
- value)
- else:
- bash_str += "deploy_options_array[{}]={}\n".format(key,
- value)
- return bash_str
-
- def dump_bash(self, path=None):
- """
- Prints settings for bash consumption.
-
- If optional path is provided, bash string will be written to the file
- instead of stdout.
- """
- bash_str = ''
- for key, value in self['global_params'].items():
- bash_str += "{}={}\n".format(key, value)
- if 'performance' in self['deploy_options']:
- bash_str += self._dump_performance()
- bash_str += self._dump_deploy_options_array()
- utils.write_str(bash_str, path)
-
-
-class DeploySettingsException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
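A minimal sketch of driving the DeploySettings class above from a dict instead of a yaml file; the option values here are illustrative assumptions, and any required option left out is filled with its default during validation:

    from apex.deploy_settings import DeploySettings

    settings = DeploySettings({
        'global_params': {'ha_enabled': True},
        'deploy_options': {'sdn_controller': 'opendaylight',
                           'dataplane': 'ovs'},
    })
    # Unspecified REQ_DEPLOY_SETTINGS are defaulted (ceph to True, others False)
    settings.dump_bash()  # prints global params plus deploy_options_array lines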
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
deleted file mode 100644
index 64f47b49..00000000
--- a/lib/python/apex/inventory.py
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Dan Radez (dradez@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import json
-import platform
-
-from .common import constants
-from .common import utils
-
-
-class Inventory(dict):
- """
- This class parses an APEX inventory yaml file into an object. It
- generates or detects all missing fields for deployment.
-
- It then collapses one level of identification from the object to
- convert it to a structure that can be dumped into a json file formatted
-    such that TripleO can read the resulting json as an instackenv.json file.
- """
- def __init__(self, source, ha=True, virtual=False):
- init_dict = {}
- self.root_device = constants.DEFAULT_ROOT_DEV
- if isinstance(source, str):
- with open(source, 'r') as inventory_file:
- yaml_dict = yaml.safe_load(inventory_file)
- # collapse node identifiers from the structure
- init_dict['nodes'] = list(map(lambda n: n[1],
- yaml_dict['nodes'].items()))
- else:
- # assume input is a dict to build from
- init_dict = source
-
- # move ipmi_* to pm_*
- # make mac a list
- def munge_nodes(node):
- node['pm_addr'] = node['ipmi_ip']
- node['pm_password'] = node['ipmi_pass']
- node['pm_user'] = node['ipmi_user']
- node['mac'] = [node['mac_address']]
- if 'cpus' in node:
- node['cpu'] = node['cpus']
-
- for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
- 'disk_device'):
- if i == 'disk_device' and 'disk_device' in node.keys():
- self.root_device = node[i]
- else:
- continue
- del node[i]
-
- return node
-
- super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
-
- # verify number of nodes
- if ha and len(self['nodes']) < 5:
- raise InventoryException('You must provide at least 5 '
- 'nodes for HA baremetal deployment')
- elif len(self['nodes']) < 2:
- raise InventoryException('You must provide at least 2 nodes '
- 'for non-HA baremetal deployment')
-
- if virtual:
- self['arch'] = platform.machine()
- self['host-ip'] = '192.168.122.1'
- self['power_manager'] = \
- 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager'
- self['seed-ip'] = ''
- self['ssh-key'] = 'INSERT_STACK_USER_PRIV_KEY'
- self['ssh-user'] = 'root'
-
- def dump_instackenv_json(self):
- print(json.dumps(dict(self), sort_keys=True, indent=4))
-
- def dump_bash(self, path=None):
- """
- Prints settings for bash consumption.
-
- If optional path is provided, bash string will be written to the file
- instead of stdout.
- """
- bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
- utils.write_str(bash_str, path)
-
-
-class InventoryException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
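A sketch of feeding the Inventory class above a dict rather than a yaml file; the IPMI credentials, addresses and MACs are placeholders:

    from apex.inventory import Inventory

    nodes = [{'ipmi_ip': '192.0.2.{}'.format(10 + i),  # placeholder BMC address
              'ipmi_user': 'admin',
              'ipmi_pass': 'password',
              'mac_address': '52:54:00:aa:bb:0{}'.format(i),
              'cpus': 2} for i in range(2)]
    # two nodes only meets the non-HA minimum; HA requires at least five
    inventory = Inventory({'nodes': nodes}, ha=False, virtual=False)
    inventory.dump_instackenv_json()  # instackenv.json-style output for TripleO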
diff --git a/lib/python/apex/ip_utils.py b/lib/python/apex/ip_utils.py
deleted file mode 100644
index ae60b705..00000000
--- a/lib/python/apex/ip_utils.py
+++ /dev/null
@@ -1,230 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import ipaddress
-import subprocess
-import re
-import logging
-
-
-def get_ip_range(start_offset=None, count=None, end_offset=None,
- cidr=None, interface=None):
- """
- Generate IP range for a network (cidr) or an interface.
-
- If CIDR is provided, it will take precedence over interface. In this case,
- The entire CIDR IP address space is considered usable. start_offset will be
- calculated from the network address, and end_offset will be calculated from
- the last address in subnet.
-
- If interface is provided, the interface IP will be used to calculate
- offsets:
- - If the interface IP is in the first half of the address space,
- start_offset will be calculated from the interface IP, and end_offset
- will be calculated from end of address space.
- - If the interface IP is in the second half of the address space,
- start_offset will be calculated from the network address in the address
- space, and end_offset will be calculated from the interface IP.
-
- 2 of start_offset, end_offset and count options must be provided:
- - If start_offset and end_offset are provided, a range from
- start_offset to end_offset will be returned.
- - If count is provided, a range from either start_offset to
- (start_offset+count) or (end_offset-count) to end_offset will be
- returned. The IP range returned will be of size <count>.
- Both start_offset and end_offset must be greater than 0.
-
-    Returns an IP range in the format "first_addr,second_addr"; otherwise an
-    exception is raised.
- """
- if cidr:
- if count and start_offset and not end_offset:
- start_index = start_offset
- end_index = start_offset + count - 1
- elif count and end_offset and not start_offset:
- end_index = -1 - end_offset
- start_index = -1 - end_index - count + 1
- elif start_offset and end_offset and not count:
- start_index = start_offset
- end_index = -1 - end_offset
- else:
- raise IPUtilsException("Argument error: must pass in exactly 2 of"
- " start_offset, end_offset and count")
-
- start_ip = cidr[start_index]
- end_ip = cidr[end_index]
- network = cidr
- elif interface:
- network = interface.network
- number_of_addr = network.num_addresses
- if interface.ip < network[int(number_of_addr / 2)]:
- if count and start_offset and not end_offset:
- start_ip = interface.ip + start_offset
- end_ip = start_ip + count - 1
- elif count and end_offset and not start_offset:
- end_ip = network[-1 - end_offset]
- start_ip = end_ip - count + 1
- elif start_offset and end_offset and not count:
- start_ip = interface.ip + start_offset
- end_ip = network[-1 - end_offset]
- else:
- raise IPUtilsException(
- "Argument error: must pass in exactly 2 of"
- " start_offset, end_offset and count")
- else:
- if count and start_offset and not end_offset:
- start_ip = network[start_offset]
- end_ip = start_ip + count - 1
- elif count and end_offset and not start_offset:
- end_ip = interface.ip - end_offset
- start_ip = end_ip - count + 1
- elif start_offset and end_offset and not count:
- start_ip = network[start_offset]
- end_ip = interface.ip - end_offset
- else:
- raise IPUtilsException(
- "Argument error: must pass in exactly 2 of"
- " start_offset, end_offset and count")
-
- else:
- raise IPUtilsException("Must pass in cidr or interface to generate"
-                               " ip range")
-
- range_result = _validate_ip_range(start_ip, end_ip, network)
- if range_result:
- ip_range = "{},{}".format(start_ip, end_ip)
- return ip_range
- else:
- raise IPUtilsException("Invalid IP range: {},{} for network {}"
- .format(start_ip, end_ip, network))
-
-
-def get_ip(offset, cidr=None, interface=None):
- """
- Returns an IP in a network given an offset.
-
- Either cidr or interface must be provided, cidr takes precedence.
-
- If cidr is provided, offset is calculated from network address.
- If interface is provided, offset is calculated from interface IP.
-
- offset can be positive or negative, but the resulting IP address must also
- be contained in the same subnet, otherwise an exception will be raised.
-
-    Returns an IP address string.
- """
- if cidr:
- ip = cidr[0 + offset]
- network = cidr
- elif interface:
- ip = interface.ip + offset
- network = interface.network
- else:
- raise IPUtilsException("Must pass in cidr or interface to generate IP")
-
- if ip not in network:
- raise IPUtilsException("IP {} not in network {}".format(ip, network))
- else:
- return str(ip)
-
-
-def get_interface(nic, address_family=4):
- """
- Returns interface object for a given NIC name in the system
-
-    Only a global address will be returned at the moment.
-
- Returns interface object if an address is found for the given nic,
- otherwise returns None.
- """
- if not nic.strip():
- logging.error("empty nic name specified")
- return None
- output = subprocess.getoutput("/usr/sbin/ip -{} addr show {} scope global"
- .format(address_family, nic))
- if address_family == 4:
- pattern = re.compile("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}")
- elif address_family == 6:
- pattern = re.compile("([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}")
- else:
- raise IPUtilsException("Invalid address family: {}"
- .format(address_family))
- match = re.search(pattern, output)
- if match:
- logging.info("found interface {} ip: {}".format(nic, match.group()))
- return ipaddress.ip_interface(match.group())
- else:
- logging.info("interface ip not found! ip address output:\n{}"
- .format(output))
- return None
-
-
-def find_gateway(interface):
- """
- Validate gateway on the system
-
- Ensures that the provided interface object is in fact configured as default
- route on the system.
-
- Returns gateway IP (reachable from interface) if default route is found,
- otherwise returns None.
- """
-
- address_family = interface.version
- output = subprocess.getoutput("/usr/sbin/ip -{} route".format(
- address_family))
-
- pattern = re.compile("default\s+via\s+(\S+)\s+")
- match = re.search(pattern, output)
-
- if match:
- gateway_ip = match.group(1)
- reverse_route_output = subprocess.getoutput("/usr/sbin/ip route get {}"
- .format(gateway_ip))
- pattern = re.compile("{}.+src\s+{}".format(gateway_ip, interface.ip))
- if not re.search(pattern, reverse_route_output):
- logging.warning("Default route doesn't match interface specified: "
- "{}".format(reverse_route_output))
- return None
- else:
- return gateway_ip
- else:
- logging.warning("Can't find gateway address on system")
- return None
-
-
-def _validate_ip_range(start_ip, end_ip, cidr):
- """
- Validates an IP range is in good order and the range is part of cidr.
-
- Returns True if validation succeeds, False otherwise.
- """
- ip_range = "{},{}".format(start_ip, end_ip)
- if end_ip <= start_ip:
- logging.warning("IP range {} is invalid: end_ip should be greater "
- "than starting ip".format(ip_range))
- return False
- if start_ip not in ipaddress.ip_network(cidr):
- logging.warning('start_ip {} is not in network {}'
- .format(start_ip, cidr))
- return False
- if end_ip not in ipaddress.ip_network(cidr):
- logging.warning('end_ip {} is not in network {}'.format(end_ip, cidr))
- return False
-
- return True
-
-
-class IPUtilsException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
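A sketch of the offset semantics documented in get_ip_range above, using the 192.0.2.0/24 documentation prefix purely as an example:

    import ipaddress
    from apex.ip_utils import get_ip, get_ip_range

    cidr = ipaddress.ip_network('192.0.2.0/24')
    # start_offset + count: nine addresses starting at offset 2
    print(get_ip_range(start_offset=2, count=9, cidr=cidr))
    # -> 192.0.2.2,192.0.2.10
    # start_offset + end_offset: trim both ends of the subnet
    print(get_ip_range(start_offset=21, end_offset=21, cidr=cidr))
    # -> 192.0.2.21,192.0.2.234
    print(get_ip(1, cidr=cidr))  # -> 192.0.2.1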
diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py
deleted file mode 100644
index dd9530b8..00000000
--- a/lib/python/apex/network_environment.py
+++ /dev/null
@@ -1,219 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import re
-from .common.constants import (
- CONTROLLER,
- COMPUTE,
- ADMIN_NETWORK,
- TENANT_NETWORK,
- STORAGE_NETWORK,
- EXTERNAL_NETWORK,
- API_NETWORK,
- CONTROLLER_PRE,
- COMPUTE_PRE,
- PRE_CONFIG_DIR
-)
-from .network_settings import NetworkSettings
-
-HEAT_NONE = 'OS::Heat::None'
-PORTS = '/ports'
-# Resources defined by <resource name>: <prefix>
-EXTERNAL_RESOURCES = {'OS::TripleO::Network::External': None,
- 'OS::TripleO::Network::Ports::ExternalVipPort': PORTS,
- 'OS::TripleO::Controller::Ports::ExternalPort': PORTS,
- 'OS::TripleO::Compute::Ports::ExternalPort': PORTS}
-TENANT_RESOURCES = {'OS::TripleO::Network::Tenant': None,
- 'OS::TripleO::Controller::Ports::TenantPort': PORTS,
- 'OS::TripleO::Compute::Ports::TenantPort': PORTS}
-STORAGE_RESOURCES = {'OS::TripleO::Network::Storage': None,
- 'OS::TripleO::Network::Ports::StorageVipPort': PORTS,
- 'OS::TripleO::Controller::Ports::StoragePort': PORTS,
- 'OS::TripleO::Compute::Ports::StoragePort': PORTS}
-API_RESOURCES = {'OS::TripleO::Network::InternalApi': None,
- 'OS::TripleO::Network::Ports::InternalApiVipPort': PORTS,
- 'OS::TripleO::Controller::Ports::InternalApiPort': PORTS,
- 'OS::TripleO::Compute::Ports::InternalApiPort': PORTS}
-
-# A list of flags that will be set to true when IPv6 is enabled
-IPV6_FLAGS = ["NovaIPv6", "MongoDbIPv6", "CorosyncIPv6", "CephIPv6",
- "RabbitIPv6", "MemcachedIPv6"]
-
-reg = 'resource_registry'
-param_def = 'parameter_defaults'
-
-
-class NetworkEnvironment(dict):
- """
- This class creates a Network Environment to be used in TripleO Heat
- Templates.
-
- The class builds upon an existing network-environment file and modifies
- based on a NetworkSettings object.
- """
- def __init__(self, net_settings, filename, compute_pre_config=False,
- controller_pre_config=False):
- """
- Create Network Environment according to Network Settings
- """
- init_dict = {}
- if isinstance(filename, str):
- with open(filename, 'r') as net_env_fh:
- init_dict = yaml.safe_load(net_env_fh)
-
- super().__init__(init_dict)
- if not isinstance(net_settings, NetworkSettings):
- raise NetworkEnvException('Invalid Network Settings object')
-
- self._set_tht_dir()
-
- nets = net_settings['networks']
-
- admin_cidr = nets[ADMIN_NETWORK]['cidr']
- admin_prefix = str(admin_cidr.prefixlen)
- self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
- self[param_def]['ControlPlaneDefaultRoute'] = \
- nets[ADMIN_NETWORK]['installer_vm']['ip']
- self[param_def]['EC2MetadataIp'] = \
- nets[ADMIN_NETWORK]['installer_vm']['ip']
- self[param_def]['DnsServers'] = net_settings['dns_servers']
-
- if EXTERNAL_NETWORK in net_settings.enabled_network_list:
- external_cidr = net_settings.get_network(EXTERNAL_NETWORK)['cidr']
- self[param_def]['ExternalNetCidr'] = str(external_cidr)
- external_vlan = self._get_vlan(net_settings.get_network(
- EXTERNAL_NETWORK))
- if isinstance(external_vlan, int):
- self[param_def]['NeutronExternalNetworkBridge'] = '""'
- self[param_def]['ExternalNetworkVlanID'] = external_vlan
- external_range = net_settings.get_network(EXTERNAL_NETWORK)[
- 'overcloud_ip_range']
- self[param_def]['ExternalAllocationPools'] = \
- [{'start': str(external_range[0]),
- 'end': str(external_range[1])}]
- self[param_def]['ExternalInterfaceDefaultRoute'] = \
- net_settings.get_network(EXTERNAL_NETWORK)['gateway']
-
- if external_cidr.version == 6:
- postfix = '/external_v6.yaml'
- else:
- postfix = '/external.yaml'
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for EXTERNAL_RESOURCES
- self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
-
- if TENANT_NETWORK in net_settings.enabled_network_list:
- tenant_range = nets[TENANT_NETWORK]['overcloud_ip_range']
- self[param_def]['TenantAllocationPools'] = \
- [{'start': str(tenant_range[0]),
- 'end': str(tenant_range[1])}]
- tenant_cidr = nets[TENANT_NETWORK]['cidr']
- self[param_def]['TenantNetCidr'] = str(tenant_cidr)
- if tenant_cidr.version == 6:
- postfix = '/tenant_v6.yaml'
- # set overlay_ip_version option in Neutron ML2 config
- self[param_def]['NeutronOverlayIPVersion'] = "6"
- else:
- postfix = '/tenant.yaml'
-
- tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
- if isinstance(tenant_vlan, int):
- self[param_def]['TenantNetworkVlanID'] = tenant_vlan
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for TENANT_RESOURCES
- self._config_resource_reg(TENANT_RESOURCES, postfix)
-
- if STORAGE_NETWORK in net_settings.enabled_network_list:
- storage_range = nets[STORAGE_NETWORK]['overcloud_ip_range']
- self[param_def]['StorageAllocationPools'] = \
- [{'start': str(storage_range[0]),
- 'end': str(storage_range[1])}]
- storage_cidr = nets[STORAGE_NETWORK]['cidr']
- self[param_def]['StorageNetCidr'] = str(storage_cidr)
- if storage_cidr.version == 6:
- postfix = '/storage_v6.yaml'
- else:
- postfix = '/storage.yaml'
- storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
- if isinstance(storage_vlan, int):
- self[param_def]['StorageNetworkVlanID'] = storage_vlan
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for STORAGE_RESOURCES
- self._config_resource_reg(STORAGE_RESOURCES, postfix)
-
- if API_NETWORK in net_settings.enabled_network_list:
- api_range = nets[API_NETWORK]['overcloud_ip_range']
- self[param_def]['InternalApiAllocationPools'] = \
- [{'start': str(api_range[0]),
- 'end': str(api_range[1])}]
- api_cidr = nets[API_NETWORK]['cidr']
- self[param_def]['InternalApiNetCidr'] = str(api_cidr)
- if api_cidr.version == 6:
- postfix = '/internal_api_v6.yaml'
- else:
- postfix = '/internal_api.yaml'
- api_vlan = self._get_vlan(nets[API_NETWORK])
- if isinstance(api_vlan, int):
- self[param_def]['InternalApiNetworkVlanID'] = api_vlan
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for API_RESOURCES
- self._config_resource_reg(API_RESOURCES, postfix)
-
-        # Set IPv6 related flags to True. Note that we don't set those to False
- # when IPv4 is configured, we'll use the default or whatever the user
- # may have set.
- if net_settings.get_ip_addr_family() == 6:
- for flag in IPV6_FLAGS:
- self[param_def][flag] = True
-
- def _get_vlan(self, network):
- if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
- return network['nic_mapping'][CONTROLLER]['vlan']
- elif isinstance(network['nic_mapping'][COMPUTE]['vlan'], int):
- return network['nic_mapping'][COMPUTE]['vlan']
- else:
- return 'native'
-
- def _set_tht_dir(self):
- self.tht_dir = None
- for key, prefix in TENANT_RESOURCES.items():
- if prefix is None:
- prefix = ''
- m = re.split('%s/\w+\.yaml' % prefix, self[reg][key])
- if m is not None and len(m) > 1:
- self.tht_dir = m[0]
- break
- if not self.tht_dir:
- raise NetworkEnvException('Unable to parse THT Directory')
-
- def _config_resource_reg(self, resources, postfix):
- for key, prefix in resources.items():
- if prefix is None:
- if postfix == '/noop.yaml':
- self[reg][key] = HEAT_NONE
- continue
- prefix = ''
- self[reg][key] = self.tht_dir + prefix + postfix
-
-
-class NetworkEnvException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
deleted file mode 100644
index 79b0a9d1..00000000
--- a/lib/python/apex/network_settings.py
+++ /dev/null
@@ -1,360 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import logging
-import ipaddress
-
-from copy import copy
-from .common import utils
-from . import ip_utils
-from .common.constants import (
- CONTROLLER,
- COMPUTE,
- ROLES,
- DOMAIN_NAME,
- DNS_SERVERS,
- NTP_SERVER,
- ADMIN_NETWORK,
- EXTERNAL_NETWORK,
- OPNFV_NETWORK_TYPES,
-)
-
-
-class NetworkSettings(dict):
- """
-    This class parses an APEX network settings yaml file into an object. It
- generates or detects all missing fields for deployment.
-
-    The resulting object will be used later to generate the network
-    environment file as well as to configure post-deployment networks.
-
- Currently the parsed object is dumped into a bash global definition file
- for deploy.sh consumption. This object will later be used directly as
-    the deployment scripts move to Python.
- """
- def __init__(self, filename):
- init_dict = {}
- if isinstance(filename, str):
- with open(filename, 'r') as network_settings_file:
- init_dict = yaml.safe_load(network_settings_file)
- else:
- # assume input is a dict to build from
- init_dict = filename
- super().__init__(init_dict)
-
- if 'apex' in self:
-            # merge two dicts non-destructively
- def merge(pri, sec):
- for key, val in sec.items():
- if key in pri:
- if isinstance(val, dict):
- merge(pri[key], val)
- # else
- # do not overwrite what's already there
- else:
- pri[key] = val
- # merge the apex specific config into the first class settings
- merge(self, copy(self['apex']))
-
- self.enabled_network_list = []
- self.nics = {COMPUTE: {}, CONTROLLER: {}}
- self.nics_specified = {COMPUTE: False, CONTROLLER: False}
- self._validate_input()
-
- def get_network(self, network):
- if network == EXTERNAL_NETWORK and self['networks'][network]:
- for net in self['networks'][network]:
- if 'public' in net:
- return net
-
- raise NetworkSettingsException("The external network, "
- "'public', should be defined "
- "when external networks are "
- "enabled")
- else:
- return self['networks'][network]
-
- def _validate_input(self):
- """
- Validates the network settings file and populates all fields.
-
- NetworkSettingsException will be raised if validation fails.
- """
- if not self['networks'].get(ADMIN_NETWORK, {}).get('enabled', False):
- raise NetworkSettingsException("You must enable admin network "
- "and configure it explicitly or "
- "use auto-detection")
-
- for network in OPNFV_NETWORK_TYPES:
- if network in self['networks']:
- _network = self.get_network(network)
- if _network.get('enabled', True):
- logging.info("{} enabled".format(network))
- self._config_required_settings(network)
- nicmap = _network['nic_mapping']
- self._validate_overcloud_nic_order(network)
- iface = nicmap[CONTROLLER]['members'][0]
- self._config_ip_range(network=network,
- interface=iface,
- ip_range='overcloud_ip_range',
- start_offset=21, end_offset=21)
- self.enabled_network_list.append(network)
- # TODO self._config_optional_settings(network)
- else:
- logging.info("{} disabled, will collapse with "
- "admin network".format(network))
- else:
- logging.info("{} is not in specified, will collapse with "
- "admin network".format(network))
-
- if 'dns-domain' not in self:
- self['domain_name'] = DOMAIN_NAME
- self['dns_servers'] = self.get('dns_nameservers', DNS_SERVERS)
- self['ntp_servers'] = self.get('ntp', NTP_SERVER)
-
- def _validate_overcloud_nic_order(self, network):
- """
- Detects if nic order is specified per profile (compute/controller)
- for network
-
- If nic order is specified in a network for a profile, it should be
- specified for every network with that profile other than admin network
-
- Duplicate nic names are also not allowed across different networks
-
- :param network: network to detect if nic order present
- :return: None
- """
- for role in ROLES:
- _network = self.get_network(network)
- _nicmap = _network.get('nic_mapping', {})
- _role = _nicmap.get(role, {})
- interfaces = _role.get('members', [])
-
- if interfaces:
- interface = interfaces[0]
- if not isinstance(_role.get('vlan', 'native'), int) and \
- any(y == interface for x, y in self.nics[role].items()):
- raise NetworkSettingsException(
- "Duplicate {} already specified for "
- "another network".format(interface))
- self.nics[role][network] = interface
- self.nics_specified[role] = True
- logging.info("{} nic order specified for network {"
- "}".format(role, network))
- else:
- raise NetworkSettingsException(
- "Interface members are not supplied for {} network "
- "for the {} role. Please add nic assignments"
- "".format(network, role))
-
- def _config_required_settings(self, network):
- """
- Configures either CIDR or bridged_interface setting
-
- cidr takes precedence if both cidr and bridged_interface are specified
- for a given network.
-
- When using bridged_interface, we will detect network setting on the
- given NIC in the system. The resulting config in settings object will
- be an ipaddress.network object, replacing the NIC name.
- """
- _network = self.get_network(network)
- # if vlan not defined then default it to native
-        if network != ADMIN_NETWORK:
- for role in ROLES:
- if 'vlan' not in _network['nic_mapping'][role]:
- _network['nic_mapping'][role]['vlan'] = 'native'
-
- cidr = _network.get('cidr')
-
- if cidr:
- cidr = ipaddress.ip_network(_network['cidr'])
- _network['cidr'] = cidr
- logging.info("{}_cidr: {}".format(network, cidr))
- elif 'installer_vm' in _network:
- ucloud_if_list = _network['installer_vm']['members']
- # If cidr is not specified, we need to know if we should find
- # IPv6 or IPv4 address on the interface
- ip = ipaddress.ip_address(_network['installer_vm']['ip'])
- nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
- if nic_if:
- logging.info("{}_bridged_interface: {}".
- format(network, nic_if))
- else:
- raise NetworkSettingsException(
- "Auto detection failed for {}: Unable to find valid "
- "ip for interface {}".format(network, ucloud_if_list[0]))
-
- else:
- raise NetworkSettingsException(
- "Auto detection failed for {}: either installer_vm "
- "members or cidr must be specified".format(network))
-
- # undercloud settings
- if network == ADMIN_NETWORK:
- provisioner_ip = _network['installer_vm']['ip']
- iface = _network['installer_vm']['members'][0]
- if not provisioner_ip:
- _network['installer_vm']['ip'] = self._gen_ip(network, 1)
- self._config_ip_range(network=network, interface=iface,
- ip_range='dhcp_range',
- start_offset=2, count=9)
- self._config_ip_range(network=network, interface=iface,
- ip_range='introspection_range',
- start_offset=11, count=9)
- elif network == EXTERNAL_NETWORK:
- provisioner_ip = _network['installer_vm']['ip']
- iface = _network['installer_vm']['members'][0]
- if not provisioner_ip:
- _network['installer_vm']['ip'] = self._gen_ip(network, 1)
- self._config_ip_range(network=network, interface=iface,
- ip_range='floating_ip_range',
- end_offset=2, count=20)
-
- gateway = _network['gateway']
- interface = _network['installer_vm']['ip']
- self._config_gateway(network, gateway, interface)
-
- def _config_ip_range(self, network, ip_range, interface=None,
- start_offset=None, end_offset=None, count=None):
- """
- Configures IP range for a given setting.
- If the setting is already specified, no change will be made.
- The spec for start_offset, end_offset and count are identical to
- ip_utils.get_ip_range.
- """
- _network = self.get_network(network)
- if ip_range not in _network:
- cidr = _network.get('cidr')
- _ip_range = ip_utils.get_ip_range(start_offset=start_offset,
- end_offset=end_offset,
- count=count,
- cidr=cidr,
- interface=interface)
- _network[ip_range] = _ip_range.split(',')
-
- logging.info("Config IP Range: {} {}".format(network, ip_range))
-
- def _gen_ip(self, network, offset):
- """
-        Generate an IP at the given offset within the given network
- """
- _network = self.get_network(network)
- cidr = _network.get('cidr')
- ip = ip_utils.get_ip(offset, cidr)
- logging.info("Config IP: {} {}".format(network, ip))
- return ip
-
- def _config_optional_settings(self, network):
- """
- Configures optional settings:
- - admin_network:
- - provisioner_ip
- - dhcp_range
- - introspection_range
- - public_network:
- - provisioner_ip
- - floating_ip_range
- - gateway
- """
- if network == ADMIN_NETWORK:
- self._config_ip(network, None, 'provisioner_ip', 1)
- self._config_ip_range(network=network,
- ip_range='dhcp_range',
- start_offset=2, count=9)
- self._config_ip_range(network=network,
- ip_range='introspection_range',
- start_offset=11, count=9)
- elif network == EXTERNAL_NETWORK:
- self._config_ip(network, None, 'provisioner_ip', 1)
- self._config_ip_range(network=network,
- ip_range='floating_ip_range',
- end_offset=2, count=20)
- self._config_gateway(network)
-
- def _config_gateway(self, network, gateway, interface):
- """
- Configures gateway setting for a given network.
-
- If cidr is specified, we always use the first address in the address
- space for gateway. Otherwise, we detect the system gateway.
- """
- _network = self.get_network(network)
- if not gateway:
- cidr = _network.get('cidr')
- if cidr:
- _gateway = ip_utils.get_ip(1, cidr)
- else:
- _gateway = ip_utils.find_gateway(interface)
-
- if _gateway:
- _network['gateway'] = _gateway
- else:
- raise NetworkSettingsException("Failed to set gateway")
-
- logging.info("Config Gateway: {} {}".format(network, gateway))
-
- def dump_bash(self, path=None):
- """
- Prints settings for bash consumption.
-
- If optional path is provided, bash string will be written to the file
- instead of stdout.
- """
- def flatten(name, obj, delim=','):
- """
-            flatten lists to delim-separated strings
-            flatten dicts to underscored key names and string values
- """
- if isinstance(obj, list):
- return "{}=\'{}\'\n".format(name,
- delim.join(map(lambda x: str(x),
- obj)))
- elif isinstance(obj, dict):
- flat_str = ''
- for k in obj:
- flat_str += flatten("{}_{}".format(name, k), obj[k])
- return flat_str
- elif isinstance(obj, str):
- return "{}='{}'\n".format(name, obj)
- else:
- return "{}={}\n".format(name, str(obj))
-
- bash_str = ''
- for network in self.enabled_network_list:
- _network = self.get_network(network)
- bash_str += flatten(network, _network)
- bash_str += flatten('enabled_network_list',
- self.enabled_network_list, ' ')
- bash_str += flatten('ip_addr_family', self.get_ip_addr_family())
- bash_str += flatten('dns_servers', self['dns_servers'], ' ')
- bash_str += flatten('domain_name', self['dns-domain'], ' ')
- bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
- utils.write_str(bash_str, path)
-
-    def get_ip_addr_family(self):
- """
- Returns IP address family for current deployment.
-
- If any enabled network has IPv6 CIDR, the deployment is classified as
- IPv6.
- """
- return max([
- ipaddress.ip_network(self.get_network(n)['cidr']).version
- for n in self.enabled_network_list])
-
-
-class NetworkSettingsException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
deleted file mode 100755
index 70fc592d..00000000
--- a/lib/python/apex_python_utils.py
+++ /dev/null
@@ -1,265 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com), Dan Radez (dradez@redhat.com)
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import apex
-import argparse
-import sys
-import logging
-import os
-import yaml
-
-from jinja2 import Environment
-from jinja2 import FileSystemLoader
-
-from apex import NetworkSettings
-from apex import NetworkEnvironment
-from apex import DeploySettings
-from apex import Inventory
-from apex import ip_utils
-
-
-def parse_net_settings(args):
- """
- Parse OPNFV Apex network_settings.yaml config file
- and dump bash syntax to set environment variables
-
- Args:
- - file: string
-      path to the network_settings.yaml file
- """
- settings = NetworkSettings(args.net_settings_file)
- net_env = NetworkEnvironment(settings, args.net_env_file,
- args.compute_pre_config,
- args.controller_pre_config)
- target = args.target_dir.split('/')
- target.append('network-environment.yaml')
- dump_yaml(dict(net_env), '/'.join(target))
- settings.dump_bash()
-
-
-def dump_yaml(data, file):
- """
- Dumps data to a file as yaml
- :param data: yaml to be written to file
- :param file: filename to write to
- :return:
- """
- with open(file, "w") as fh:
- yaml.dump(data, fh, default_flow_style=False)
-
-
-def parse_deploy_settings(args):
- settings = DeploySettings(args.file)
- settings.dump_bash()
-
-
-def run_clean(args):
- apex.clean_nodes(args.file)
-
-
-def parse_inventory(args):
- inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
- if args.export_bash is True:
- inventory.dump_bash()
- else:
- inventory.dump_instackenv_json()
-
-
-def find_ip(args):
- """
- Get and print the IP from a specific interface
-
- Args:
- - interface: string
- network interface name
- - address_family: int
- 4 or 6, respective to ipv4 or ipv6
- """
- interface = ip_utils.get_interface(args.interface,
- args.address_family)
- if interface:
- print(interface.ip)
-
-
-def build_nic_template(args):
- """
-    Build and print a TripleO nic template from a jinja template
-
- Args:
- - template: string
- path to jinja template to load
- - enabled_networks: comma delimited list
- list of networks defined in net_env.py
- - ext_net_type: string
- interface or br-ex, defines the external network configuration
- - address_family: string
- 4 or 6, respective to ipv4 or ipv6
- - ovs_dpdk_bridge: string
- bridge name to use as ovs_dpdk
- """
- template_dir, template = args.template.rsplit('/', 1)
-
- netsets = NetworkSettings(args.net_settings_file)
- nets = netsets.get('networks')
- ds = DeploySettings(args.deploy_settings_file).get('deploy_options')
- env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
- template = env.get_template(template)
-
- if ds['dataplane'] == 'fdio':
- nets['tenant']['nic_mapping'][args.role]['phys_type'] = 'vpp_interface'
- if ds['sdn_controller'] == 'opendaylight':
- nets['external'][0]['nic_mapping'][args.role]['phys_type'] =\
- 'vpp_interface'
- if ds.get('odl_vpp_routing_node') == 'dvr':
- nets['admin']['nic_mapping'][args.role]['phys_type'] =\
- 'linux_bridge'
- if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
- .get('uio-driver'):
- nets['tenant']['nic_mapping'][args.role]['uio-driver'] =\
- ds['performance'][args.role.title()]['vpp']['uio-driver']
- if ds['sdn_controller'] == 'opendaylight':
- nets['external'][0]['nic_mapping'][args.role]['uio-driver'] =\
- ds['performance'][args.role.title()]['vpp']['uio-driver']
- if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
- .get('interface-options'):
- nets['tenant']['nic_mapping'][args.role]['interface-options'] =\
- ds['performance'][args.role.title()]['vpp']['interface-options']
-
- print(template.render(nets=nets,
- role=args.role,
- external_net_af=netsets.get_ip_addr_family(),
- external_net_type=args.ext_net_type,
- ovs_dpdk_bridge=args.ovs_dpdk_bridge))
-
-
-def get_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument('--debug', action='store_true', default=False,
- help="Turn on debug messages")
- parser.add_argument('-l', '--log-file', default='/var/log/apex/apex.log',
- dest='log_file', help="Log file to log to")
- subparsers = parser.add_subparsers()
- # parse-net-settings
- net_settings = subparsers.add_parser('parse-net-settings',
- help='Parse network settings file')
- net_settings.add_argument('-s', '--net-settings-file',
- default='network-settings.yaml',
- dest='net_settings_file',
- help='path to network settings file')
- net_settings.add_argument('-e', '--net-env-file',
- default="network-environment.yaml",
- dest='net_env_file',
- help='path to network environment file')
- net_settings.add_argument('-td', '--target-dir',
- default="/tmp",
- dest='target_dir',
-                              help='directory to write the '
- 'network-environment.yaml file')
- net_settings.add_argument('--compute-pre-config',
- default=False,
- action='store_true',
- dest='compute_pre_config',
- help='Boolean to enable Compute Pre Config')
- net_settings.add_argument('--controller-pre-config',
- action='store_true',
- default=False,
- dest='controller_pre_config',
- help='Boolean to enable Controller Pre Config')
-
- net_settings.set_defaults(func=parse_net_settings)
- # find-ip
- get_int_ip = subparsers.add_parser('find-ip',
- help='Find interface ip')
- get_int_ip.add_argument('-i', '--interface', required=True,
- help='Interface name')
- get_int_ip.add_argument('-af', '--address-family', default=4, type=int,
- choices=[4, 6], dest='address_family',
- help='IP Address family')
- get_int_ip.set_defaults(func=find_ip)
- # nic-template
- nic_template = subparsers.add_parser('nic-template',
- help='Build NIC templates')
- nic_template.add_argument('-r', '--role', required=True,
- choices=['controller', 'compute'],
- help='Role template generated for')
- nic_template.add_argument('-t', '--template', required=True,
- dest='template',
- help='Template file to process')
- nic_template.add_argument('-s', '--net-settings-file',
- default='network-settings.yaml',
- dest='net_settings_file',
- help='path to network settings file')
- nic_template.add_argument('-e', '--ext-net-type', default='interface',
- dest='ext_net_type',
- choices=['interface', 'vpp_interface', 'br-ex'],
- help='External network type')
- nic_template.add_argument('-d', '--ovs-dpdk-bridge',
- default=None, dest='ovs_dpdk_bridge',
- help='OVS DPDK Bridge Name')
- nic_template.add_argument('--deploy-settings-file',
- help='path to deploy settings file')
-
- nic_template.set_defaults(func=build_nic_template)
- # parse-deploy-settings
- deploy_settings = subparsers.add_parser('parse-deploy-settings',
- help='Parse deploy settings file')
- deploy_settings.add_argument('-f', '--file',
- default='deploy_settings.yaml',
- help='path to deploy settings file')
- deploy_settings.set_defaults(func=parse_deploy_settings)
- # parse-inventory
- inventory = subparsers.add_parser('parse-inventory',
- help='Parse inventory file')
- inventory.add_argument('-f', '--file',
- default='deploy_settings.yaml',
-                           help='path to inventory file')
- inventory.add_argument('--ha',
- default=False,
- action='store_true',
- help='Indicate if deployment is HA or not')
- inventory.add_argument('--virtual',
- default=False,
- action='store_true',
- help='Indicate if deployment inventory is virtual')
- inventory.add_argument('--export-bash',
- default=False,
- dest='export_bash',
- action='store_true',
- help='Export bash variables from inventory')
- inventory.set_defaults(func=parse_inventory)
-
- clean = subparsers.add_parser('clean',
-                                  help='Power down nodes in inventory file')
- clean.add_argument('-f', '--file',
- help='path to inventory file')
- clean.set_defaults(func=run_clean)
-
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args(sys.argv[1:])
- if args.debug:
- logging.basicConfig(level=logging.DEBUG)
- else:
- apex_log_filename = args.log_file
- os.makedirs(os.path.dirname(apex_log_filename), exist_ok=True)
- logging.basicConfig(filename=apex_log_filename,
- format='%(asctime)s %(levelname)s: %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=logging.DEBUG)
- if hasattr(args, 'func'):
- args.func(args)
- else:
- parser.print_help()
- exit(1)
-
-if __name__ == "__main__":
- main()
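The apex_python_utils.py module above was the CLI entry point for these helpers. A sketch of exercising its parser in-process, where 'eth0' is only a hypothetical interface name:

    from apex_python_utils import get_parser

    parser = get_parser()
    args = parser.parse_args(['find-ip', '-i', 'eth0', '-af', '4'])
    args.func(args)  # prints the global IPv4 address found on eth0, if any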
diff --git a/lib/python/build_utils.py b/lib/python/build_utils.py
deleted file mode 100644
index 14327a90..00000000
--- a/lib/python/build_utils.py
+++ /dev/null
@@ -1,108 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import argparse
-import git
-import logging
-import os
-from pygerrit2.rest import GerritRestAPI
-import re
-import shutil
-import sys
-
-
-def clone_fork(args):
- ref = None
- logging.info("Cloning {}".format(args.repo))
-
- try:
- cm = git.Repo(search_parent_directories=True).commit().message
- except git.exc.InvalidGitRepositoryError:
- logging.debug('Current Apex directory is not a git repo: {}'
- .format(os.getcwd()))
- cm = ''
-
- logging.info("Current commit message: {}".format(cm))
- m = re.search('{}:\s*(\S+)'.format(args.repo), cm)
-
- if m:
- change_id = m.group(1)
- logging.info("Using change ID {} from {}".format(change_id, args.repo))
- rest = GerritRestAPI(url=args.url)
- change_str = "changes/{}?o=CURRENT_REVISION".format(change_id)
- change = rest.get(change_str)
- try:
-            assert change['status'] not in ('ABANDONED', 'CLOSED'),\
- 'Change {} is in {} state'.format(change_id, change['status'])
- if change['status'] == 'MERGED':
- logging.info('Change {} is merged, ignoring...'
- .format(change_id))
- else:
- current_revision = change['current_revision']
- ref = change['revisions'][current_revision]['ref']
- logging.info('setting ref to {}'.format(ref))
- except KeyError:
- logging.error('Failed to get valid change data structure from url '
- '{}/{}, data returned: \n{}'
- .format(change_id, change_str, change))
- raise
-
- # remove existing file or directory named repo
- if os.path.exists(args.repo):
- if os.path.isdir(args.repo):
- shutil.rmtree(args.repo)
- else:
- os.remove(args.repo)
-
- ws = git.Repo.clone_from("{}/{}".format(args.url, args.repo),
- args.repo, b=args.branch)
- if ref:
- git_cmd = ws.git
- git_cmd.fetch("{}/{}".format(args.url, args.repo), ref)
- git_cmd.checkout('FETCH_HEAD')
- logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))
-
-
-def get_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument('--debug', action='store_true', default=False,
- help="Turn on debug messages")
- subparsers = parser.add_subparsers()
- fork = subparsers.add_parser('clone-fork',
- help='Clone fork of dependent repo')
- fork.add_argument('-r', '--repo', required=True, help='Name of repository')
- fork.add_argument('-u', '--url',
- default='https://gerrit.opnfv.org/gerrit',
- help='Gerrit URL of repository')
- fork.add_argument('-b', '--branch',
- default='master',
- help='Branch to checkout')
- fork.set_defaults(func=clone_fork)
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args(sys.argv[1:])
- if args.debug:
- logging_level = logging.DEBUG
- else:
- logging_level = logging.INFO
-
- logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=logging_level)
- if hasattr(args, 'func'):
- args.func(args)
- else:
- parser.print_help()
- exit(1)
-
-if __name__ == "__main__":
- main()
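Similarly, build_utils.py above was used at build time to clone a dependent repository, optionally checking out a Gerrit change referenced from the current commit message. A sketch, with 'releng' as an illustrative repository name:

    from build_utils import get_parser

    parser = get_parser()
    args = parser.parse_args(['clone-fork', '-r', 'releng'])
    # Clones <gerrit-url>/releng (branch master) into ./releng; if the current
    # commit message contains "releng: <change-id>", that change's ref is
    # fetched and checked out instead of the branch head.
    args.func(args)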