-rw-r--r--   build/build_perf_image.sh                                        7
-rw-r--r--   build/neutron/agent/interface/interface.py (renamed from build/interface.py)    0
-rw-r--r--   build/neutron/agent/l3/namespaces.py                           142
-rw-r--r--   build/neutron/agent/l3/router_info.py                          996
-rwxr-xr-x   build/overcloud-full.sh                                          6
-rw-r--r--   build/rpm_specs/openstack-congress.spec                          2
-rw-r--r--   build/rpm_specs/opnfv-apex-common.spec                           2
-rwxr-xr-x   build/undercloud.sh                                              2
-rwxr-xr-x   ci/deploy.sh                                                    28
-rw-r--r--   config/deploy/os-nosdn-fdio-noha.yaml                            2
-rw-r--r--   config/deploy/os-nosdn-ovs-ha.yaml                               2
-rw-r--r--   config/deploy/os-nosdn-ovs-noha.yaml                             2
-rw-r--r--   config/deploy/os-odl_l2-fdio-ha.yaml                             2
-rw-r--r--   config/deploy/os-odl_l2-fdio-noha.yaml                           2
-rw-r--r--   docs/installationprocedure/baremetal.rst                         3
-rwxr-xr-x   lib/overcloud-deploy-functions.sh                                5
-rwxr-xr-x   lib/parse-functions.sh                                          86
-rwxr-xr-x   lib/post-install-functions.sh                                    2
-rw-r--r--   lib/python/apex/__init__.py                                      1
-rw-r--r--   lib/python/apex/inventory.py                                    76
-rwxr-xr-x   lib/python/apex_python_utils.py                                 29
-rwxr-xr-x   lib/undercloud-functions.sh                                      5
-rwxr-xr-x   lib/virtual-setup-functions.sh                                  51
-rw-r--r--   tests/test_apex_inventory.py                                    61
-rw-r--r--   tests/test_apex_python_utils_py.py                               7
25 files changed, 1369 insertions, 152 deletions
diff --git a/build/build_perf_image.sh b/build/build_perf_image.sh
index a6ca066b..68a1804f 100644
--- a/build/build_perf_image.sh
+++ b/build/build_perf_image.sh
@@ -32,5 +32,12 @@ fi
if [ "$CATEGORY" == "kernel" ]; then
echo "${KEY}=${VALUE}" >> $ROLE-kernel_params.txt
+ if [[ "$dataplane" == 'fdio' && "$KEY" == 'hugepages' ]]; then
+ # set kernel hugepages params for fdio
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf" \
+ --run-command "echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$VALUE | bc)) >> /usr/lib/sysctl.d/00-system.conf" \
+ --run-command "echo kernel.shmmax=$((VALUE * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf" \
+ -a ${IMAGE}
+ fi
fi
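
For illustration only (not part of the patch), the arithmetic behind the two derived sysctl values, assuming 2M hugepages and a hypothetical hugepages VALUE of 2048:

    # vm.max_map_count is 2.2x the hugepage count, rounded to an integer;
    # kernel.shmmax is the total hugepage memory in bytes (2 MiB per page).
    VALUE = 2048                              # hypothetical 'hugepages' value
    vm_max_map_count = round(2.2 * VALUE)     # 4506, same as printf "%.0f"
    kernel_shmmax = VALUE * 2 * 1024 * 1024   # 4294967296 bytes (4 GiB)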
diff --git a/build/interface.py b/build/neutron/agent/interface/interface.py
index 709fd677..709fd677 100644
--- a/build/interface.py
+++ b/build/neutron/agent/interface/interface.py
diff --git a/build/neutron/agent/l3/namespaces.py b/build/neutron/agent/l3/namespaces.py
new file mode 100644
index 00000000..aa282052
--- /dev/null
+++ b/build/neutron/agent/l3/namespaces.py
@@ -0,0 +1,142 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import functools
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from neutron.agent.linux.interface import OVSInterfaceDriver
+from neutron._i18n import _LE, _LW
+from neutron.agent.linux import ip_lib
+
+LOG = logging.getLogger(__name__)
+
+NS_PREFIX = 'qrouter-'
+INTERNAL_DEV_PREFIX = 'qr-'
+EXTERNAL_DEV_PREFIX = 'qg-'
+# TODO(Carl) It is odd that this file needs this. It is a dvr detail.
+ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
+
+
+def build_ns_name(prefix, identifier):
+ """Builds a namespace name from the given prefix and identifier
+
+ :param prefix: The prefix which must end with '-' for legacy reasons
+ :param identifier: The id associated with the namespace
+ """
+ return prefix + identifier
+
+
+def get_prefix_from_ns_name(ns_name):
+ """Parses prefix from prefix-identifier
+
+ :param ns_name: The name of a namespace
+ :returns: The prefix ending with a '-' or None if there is no '-'
+ """
+ dash_index = ns_name.find('-')
+ if 0 <= dash_index:
+ return ns_name[:dash_index + 1]
+
+
+def get_id_from_ns_name(ns_name):
+ """Parses identifier from prefix-identifier
+
+ :param ns_name: The name of a namespace
+ :returns: Identifier or None if there is no - to end the prefix
+ """
+ dash_index = ns_name.find('-')
+ if 0 <= dash_index:
+ return ns_name[dash_index + 1:]
+
+
+def check_ns_existence(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if not self.exists():
+ LOG.warning(_LW('Namespace %(name)s does not exist. Skipping '
+ '%(func)s'),
+ {'name': self.name, 'func': f.__name__})
+ return
+ try:
+ return f(self, *args, **kwargs)
+ except RuntimeError:
+ with excutils.save_and_reraise_exception() as ctx:
+ if not self.exists():
+ LOG.debug('Namespace %(name)s was concurrently deleted',
+ self.name)
+ ctx.reraise = False
+ return wrapped
+
+
+class Namespace(object):
+
+ def __init__(self, name, agent_conf, driver, use_ipv6):
+ self.name = name
+ self.ip_wrapper_root = ip_lib.IPWrapper()
+ self.agent_conf = agent_conf
+ self.driver = driver
+ self.use_ipv6 = use_ipv6
+
+ def create(self):
+ ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name)
+ cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
+ ip_wrapper.netns.execute(cmd)
+ if self.use_ipv6:
+ cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']
+ ip_wrapper.netns.execute(cmd)
+
+ def delete(self):
+ try:
+ self.ip_wrapper_root.netns.delete(self.name)
+ except RuntimeError:
+ msg = _LE('Failed trying to delete namespace: %s')
+ LOG.exception(msg, self.name)
+
+ def exists(self):
+ return self.ip_wrapper_root.netns.exists(self.name)
+
+
+class RouterNamespace(Namespace):
+
+ def __init__(self, router_id, agent_conf, driver, use_ipv6, ovs_driver):
+ self.router_id = router_id
+ self.ovs_driver = ovs_driver
+ name = self._get_ns_name(router_id)
+ super(RouterNamespace, self).__init__(
+ name, agent_conf, driver, use_ipv6)
+
+ @classmethod
+ def _get_ns_name(cls, router_id):
+ return build_ns_name(NS_PREFIX, router_id)
+
+ @check_ns_existence
+ def delete(self):
+ ns_ip = ip_lib.IPWrapper(namespace=self.name)
+ for d in ns_ip.get_devices(exclude_loopback=True):
+ if d.name.startswith(INTERNAL_DEV_PREFIX):
+ # device is on default bridge
+ self.driver.unplug(d.name, namespace=self.name,
+ prefix=INTERNAL_DEV_PREFIX)
+ elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
+ ns_ip.del_veth(d.name)
+ elif d.name.startswith(EXTERNAL_DEV_PREFIX):
+ self.ovs_driver.unplug(
+ d.name,
+ bridge=self.agent_conf.external_network_bridge,
+ namespace=self.name,
+ prefix=EXTERNAL_DEV_PREFIX)
+
+ super(RouterNamespace, self).delete()
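
For illustration only (not part of the patch), how the namespace-name helpers at the top of this module compose and decompose a router namespace name:

    # build_ns_name / get_prefix_from_ns_name / get_id_from_ns_name round trip
    ns = build_ns_name(NS_PREFIX, 'a1b2c3d4')   # 'qrouter-a1b2c3d4'
    get_prefix_from_ns_name(ns)                 # 'qrouter-' (up to the first '-')
    get_id_from_ns_name(ns)                     # 'a1b2c3d4'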
diff --git a/build/neutron/agent/l3/router_info.py b/build/neutron/agent/l3/router_info.py
new file mode 100644
index 00000000..0ddd1db5
--- /dev/null
+++ b/build/neutron/agent/l3/router_info.py
@@ -0,0 +1,996 @@
+# Copyright (c) 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import netaddr
+from oslo_log import log as logging
+
+from neutron._i18n import _, _LE, _LW
+from neutron.agent.l3 import namespaces
+from neutron.agent.linux import ip_lib
+from neutron.agent.linux import iptables_manager
+from neutron.agent.linux import ra
+from neutron.common import constants as l3_constants
+from neutron.common import exceptions as n_exc
+from neutron.common import ipv6_utils
+from neutron.common import utils as common_utils
+from neutron.ipam import utils as ipam_utils
+from neutron.agent.linux.interface import OVSInterfaceDriver
+
+LOG = logging.getLogger(__name__)
+INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
+EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
+
+FLOATINGIP_STATUS_NOCHANGE = object()
+ADDRESS_SCOPE_MARK_MASK = "0xffff0000"
+ADDRESS_SCOPE_MARK_ID_MIN = 1024
+ADDRESS_SCOPE_MARK_ID_MAX = 2048
+DEFAULT_ADDRESS_SCOPE = "noscope"
+
+
+class RouterInfo(object):
+
+ def __init__(self,
+ router_id,
+ router,
+ agent_conf,
+ interface_driver,
+ use_ipv6=False):
+ self.ovs_driver = OVSInterfaceDriver(agent_conf)
+ self.router_id = router_id
+ self.ex_gw_port = None
+ self._snat_enabled = None
+ self.fip_map = {}
+ self.internal_ports = []
+ self.floating_ips = set()
+ # Invoke the setter for establishing initial SNAT action
+ self.router = router
+ self.use_ipv6 = use_ipv6
+ ns = namespaces.RouterNamespace(
+ router_id, agent_conf, interface_driver, use_ipv6, self.ovs_driver)
+ self.router_namespace = ns
+ self.ns_name = ns.name
+ self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN,
+ ADDRESS_SCOPE_MARK_ID_MAX))
+ self._address_scope_to_mark_id = {
+ DEFAULT_ADDRESS_SCOPE: self.available_mark_ids.pop()}
+ self.iptables_manager = iptables_manager.IptablesManager(
+ use_ipv6=use_ipv6,
+ namespace=self.ns_name)
+ self.routes = []
+ self.agent_conf = agent_conf
+ self.driver = interface_driver
+ # radvd is a neutron.agent.linux.ra.DaemonMonitor
+ self.radvd = None
+
+ def initialize(self, process_monitor):
+ """Initialize the router on the system.
+
+ This differs from __init__ in that this method actually affects the
+ system creating namespaces, starting processes, etc. The other merely
+ initializes the python object. This separates in-memory object
+ initialization from methods that actually go do stuff to the system.
+
+ :param process_monitor: The agent's process monitor instance.
+ """
+ self.process_monitor = process_monitor
+ self.radvd = ra.DaemonMonitor(self.router_id,
+ self.ns_name,
+ process_monitor,
+ self.get_internal_device_name,
+ self.agent_conf)
+
+ self.router_namespace.create()
+
+ @property
+ def router(self):
+ return self._router
+
+ @router.setter
+ def router(self, value):
+ self._router = value
+ if not self._router:
+ return
+ # enable_snat by default if it wasn't specified by plugin
+ self._snat_enabled = self._router.get('enable_snat', True)
+
+ def get_internal_device_name(self, port_id):
+ return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
+
+ def get_external_device_name(self, port_id):
+ return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
+
+ def get_external_device_interface_name(self, ex_gw_port):
+ return self.get_external_device_name(ex_gw_port['id'])
+
+ def _update_routing_table(self, operation, route, namespace):
+ cmd = ['ip', 'route', operation, 'to', route['destination'],
+ 'via', route['nexthop']]
+ ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
+ ip_wrapper.netns.execute(cmd, check_exit_code=False)
+
+ def update_routing_table(self, operation, route):
+ self._update_routing_table(operation, route, self.ns_name)
+
+ def routes_updated(self, old_routes, new_routes):
+ adds, removes = common_utils.diff_list_of_dict(old_routes,
+ new_routes)
+ for route in adds:
+ LOG.debug("Added route entry is '%s'", route)
+ # remove replaced route from deleted route
+ for del_route in removes:
+ if route['destination'] == del_route['destination']:
+ removes.remove(del_route)
+ # 'replace' succeeds even if there is no existing route
+ self.update_routing_table('replace', route)
+ for route in removes:
+ LOG.debug("Removed route entry is '%s'", route)
+ self.update_routing_table('delete', route)
+
+ def get_ex_gw_port(self):
+ return self.router.get('gw_port')
+
+ def get_floating_ips(self):
+ """Filter Floating IPs to be hosted on this agent."""
+ return self.router.get(l3_constants.FLOATINGIP_KEY, [])
+
+ def floating_forward_rules(self, floating_ip, fixed_ip):
+ return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' %
+ (floating_ip, fixed_ip)),
+ ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' %
+ (floating_ip, fixed_ip)),
+ ('float-snat', '-s %s/32 -j SNAT --to-source %s' %
+ (fixed_ip, floating_ip))]
+
+ def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark):
+ mark_traffic_to_floating_ip = (
+ 'floatingip', '-d %s -j MARK --set-xmark %s' % (
+ floating_ip, internal_mark))
+ mark_traffic_from_fixed_ip = (
+ 'FORWARD', '-s %s -j $float-snat' % fixed_ip)
+ return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip]
+
+ def get_address_scope_mark_mask(self, address_scope=None):
+ if not address_scope:
+ address_scope = DEFAULT_ADDRESS_SCOPE
+
+ if address_scope not in self._address_scope_to_mark_id:
+ self._address_scope_to_mark_id[address_scope] = (
+ self.available_mark_ids.pop())
+
+ mark_id = self._address_scope_to_mark_id[address_scope]
+ # NOTE: Address scopes use only the upper 16 bits of the 32 fwmark
+ return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK)
+
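
A worked example of the fwmark string this method builds (illustration only, not part of the patch), using a hypothetical mark id of 1024, the low end of the allocated range:

    # get_address_scope_mark_mask() shifts the mark id into the upper 16 bits
    mark_id = 1024
    hex(mark_id << 16)                                        # '0x4000000'
    "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK)   # '0x4000000/0xffff0000'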
+ def get_port_address_scope_mark(self, port):
+ """Get the IP version 4 and 6 address scope mark for the port
+
+ :param port: A port dict from the RPC call
+ :returns: A dict mapping the address family to the address scope mark
+ """
+ port_scopes = port.get('address_scopes', {})
+
+ address_scope_mark_masks = (
+ (int(k), self.get_address_scope_mark_mask(v))
+ for k, v in port_scopes.items())
+ return collections.defaultdict(self.get_address_scope_mark_mask,
+ address_scope_mark_masks)
+
+ def process_floating_ip_nat_rules(self):
+ """Configure NAT rules for the router's floating IPs.
+
+ Configures iptables rules for the floating ips of the given router
+ """
+ # Clear out all iptables rules for floating ips
+ self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
+
+ floating_ips = self.get_floating_ips()
+ # Loop once to ensure that floating ips are configured.
+ for fip in floating_ips:
+ # Rebuild iptables rules for the floating ip.
+ fixed = fip['fixed_ip_address']
+ fip_ip = fip['floating_ip_address']
+ for chain, rule in self.floating_forward_rules(fip_ip, fixed):
+ self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
+ tag='floating_ip')
+
+ self.iptables_manager.apply()
+
+ def process_floating_ip_address_scope_rules(self):
+ """Configure address scope related iptables rules for the router's
+ floating IPs.
+ """
+
+ # Clear out all iptables rules for floating ips
+ self.iptables_manager.ipv4['mangle'].clear_rules_by_tag('floating_ip')
+ all_floating_ips = self.get_floating_ips()
+ ext_scope = self._get_external_address_scope()
+ # Filter out the floating ips that have fixed ip in the same address
+ # scope. Because the packets for them will always be in one address
+ # scope, no need to manipulate MARK/CONNMARK for them.
+ floating_ips = [fip for fip in all_floating_ips
+ if fip.get('fixed_ip_address_scope') != ext_scope]
+ if floating_ips:
+ ext_scope_mark = self.get_address_scope_mark_mask(ext_scope)
+ ports_scopemark = self._get_address_scope_mark()
+ devices_in_ext_scope = {
+ device for device, mark
+ in ports_scopemark[l3_constants.IP_VERSION_4].items()
+ if mark == ext_scope_mark}
+ # Add address scope for floatingip egress
+ for device in devices_in_ext_scope:
+ self.iptables_manager.ipv4['mangle'].add_rule(
+ 'float-snat',
+ '-o %s -j MARK --set-xmark %s'
+ % (device, ext_scope_mark),
+ tag='floating_ip')
+
+ # Loop once to ensure that floating ips are configured.
+ for fip in floating_ips:
+ # Rebuild iptables rules for the floating ip.
+ fip_ip = fip['floating_ip_address']
+ # Send the floating ip traffic to the right address scope
+ fixed_ip = fip['fixed_ip_address']
+ fixed_scope = fip.get('fixed_ip_address_scope')
+ internal_mark = self.get_address_scope_mark_mask(fixed_scope)
+ mangle_rules = self.floating_mangle_rules(
+ fip_ip, fixed_ip, internal_mark)
+ for chain, rule in mangle_rules:
+ self.iptables_manager.ipv4['mangle'].add_rule(
+ chain, rule, tag='floating_ip')
+
+ def process_snat_dnat_for_fip(self):
+ try:
+ self.process_floating_ip_nat_rules()
+ except Exception:
+ # TODO(salv-orlando): Less broad catching
+ msg = _('L3 agent failure to setup NAT for floating IPs')
+ LOG.exception(msg)
+ raise n_exc.FloatingIpSetupException(msg)
+
+ def _add_fip_addr_to_device(self, fip, device):
+ """Configures the floating ip address on the device.
+ """
+ try:
+ ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
+ device.addr.add(ip_cidr)
+ return True
+ except RuntimeError:
+ # any exception occurred here should cause the floating IP
+ # to be set in error state
+ LOG.warning(_LW("Unable to configure IP address for "
+ "floating IP: %s"), fip['id'])
+
+ def add_floating_ip(self, fip, interface_name, device):
+ raise NotImplementedError()
+
+ def remove_floating_ip(self, device, ip_cidr):
+ device.delete_addr_and_conntrack_state(ip_cidr)
+
+ def move_floating_ip(self, fip):
+ return l3_constants.FLOATINGIP_STATUS_ACTIVE
+
+ def remove_external_gateway_ip(self, device, ip_cidr):
+ device.delete_addr_and_conntrack_state(ip_cidr)
+
+ def get_router_cidrs(self, device):
+ return set([addr['cidr'] for addr in device.addr.list()])
+
+ def process_floating_ip_addresses(self, interface_name):
+ """Configure IP addresses on router's external gateway interface.
+
+ Ensures addresses for existing floating IPs and cleans up
+ those that should no longer be configured.
+ """
+
+ fip_statuses = {}
+ if interface_name is None:
+ LOG.debug('No Interface for floating IPs router: %s',
+ self.router['id'])
+ return fip_statuses
+
+ device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
+ existing_cidrs = self.get_router_cidrs(device)
+ new_cidrs = set()
+
+ floating_ips = self.get_floating_ips()
+ # Loop once to ensure that floating ips are configured.
+ for fip in floating_ips:
+ fip_ip = fip['floating_ip_address']
+ ip_cidr = common_utils.ip_to_cidr(fip_ip)
+ new_cidrs.add(ip_cidr)
+ fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
+ if ip_cidr not in existing_cidrs:
+ fip_statuses[fip['id']] = self.add_floating_ip(
+ fip, interface_name, device)
+ LOG.debug('Floating ip %(id)s added, status %(status)s',
+ {'id': fip['id'],
+ 'status': fip_statuses.get(fip['id'])})
+ elif (fip_ip in self.fip_map and
+ self.fip_map[fip_ip] != fip['fixed_ip_address']):
+ LOG.debug("Floating IP was moved from fixed IP "
+ "%(old)s to %(new)s",
+ {'old': self.fip_map[fip_ip],
+ 'new': fip['fixed_ip_address']})
+ fip_statuses[fip['id']] = self.move_floating_ip(fip)
+ elif fip_statuses[fip['id']] == fip['status']:
+ # mark the status as not changed. we can't remove it because
+ # that's how the caller determines that it was removed
+ fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE
+ fips_to_remove = (
+ ip_cidr for ip_cidr in existing_cidrs - new_cidrs
+ if common_utils.is_cidr_host(ip_cidr))
+ for ip_cidr in fips_to_remove:
+ LOG.debug("Removing floating ip %s from interface %s in "
+ "namespace %s", ip_cidr, interface_name, self.ns_name)
+ self.remove_floating_ip(device, ip_cidr)
+
+ return fip_statuses
+
+ def configure_fip_addresses(self, interface_name):
+ try:
+ return self.process_floating_ip_addresses(interface_name)
+ except Exception:
+ # TODO(salv-orlando): Less broad catching
+ msg = _('L3 agent failure to setup floating IPs')
+ LOG.exception(msg)
+ raise n_exc.FloatingIpSetupException(msg)
+
+ def put_fips_in_error_state(self):
+ fip_statuses = {}
+ for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
+ fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
+ return fip_statuses
+
+ def delete(self, agent):
+ self.router['gw_port'] = None
+ self.router[l3_constants.INTERFACE_KEY] = []
+ self.router[l3_constants.FLOATINGIP_KEY] = []
+ self.process_delete(agent)
+ self.disable_radvd()
+ self.router_namespace.delete()
+
+ def _internal_network_updated(self, port, subnet_id, prefix, old_prefix,
+ updated_cidrs):
+ interface_name = self.get_internal_device_name(port['id'])
+ if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX:
+ fixed_ips = port['fixed_ips']
+ for fixed_ip in fixed_ips:
+ if fixed_ip['subnet_id'] == subnet_id:
+ v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'],
+ fixed_ip.get('prefixlen'))
+ if v6addr not in updated_cidrs:
+ self.driver.add_ipv6_addr(interface_name, v6addr,
+ self.ns_name)
+ else:
+ self.driver.delete_ipv6_addr_with_prefix(interface_name,
+ old_prefix,
+ self.ns_name)
+
+ def _internal_network_added(self, ns_name, network_id, port_id,
+ fixed_ips, mac_address,
+ interface_name, prefix, mtu=None):
+ LOG.debug("adding internal network: prefix(%s), port(%s)",
+ prefix, port_id)
+ self.driver.plug(network_id, port_id, interface_name, mac_address,
+ namespace=ns_name,
+ prefix=prefix, mtu=mtu)
+
+ ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
+ self.driver.init_router_port(
+ interface_name, ip_cidrs, namespace=ns_name)
+ for fixed_ip in fixed_ips:
+ ip_lib.send_ip_addr_adv_notif(ns_name,
+ interface_name,
+ fixed_ip['ip_address'],
+ self.agent_conf)
+
+ def internal_network_added(self, port):
+ network_id = port['network_id']
+ port_id = port['id']
+ fixed_ips = port['fixed_ips']
+ mac_address = port['mac_address']
+
+ interface_name = self.get_internal_device_name(port_id)
+
+ self._internal_network_added(self.ns_name,
+ network_id,
+ port_id,
+ fixed_ips,
+ mac_address,
+ interface_name,
+ INTERNAL_DEV_PREFIX,
+ mtu=port.get('mtu'))
+
+ def internal_network_removed(self, port):
+ interface_name = self.get_internal_device_name(port['id'])
+ LOG.debug("removing internal network: port(%s) interface(%s)",
+ port['id'], interface_name)
+ if ip_lib.device_exists(interface_name, namespace=self.ns_name):
+ self.driver.unplug(interface_name, namespace=self.ns_name,
+ prefix=INTERNAL_DEV_PREFIX)
+
+ def _get_existing_devices(self):
+ ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
+ ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
+ return [ip_dev.name for ip_dev in ip_devs]
+
+ @staticmethod
+ def _get_updated_ports(existing_ports, current_ports):
+ updated_ports = dict()
+ current_ports_dict = {p['id']: p for p in current_ports}
+ for existing_port in existing_ports:
+ current_port = current_ports_dict.get(existing_port['id'])
+ if current_port:
+ if (sorted(existing_port['fixed_ips'],
+ key=common_utils.safe_sort_key) !=
+ sorted(current_port['fixed_ips'],
+ key=common_utils.safe_sort_key)):
+ updated_ports[current_port['id']] = current_port
+ return updated_ports
+
+ @staticmethod
+ def _port_has_ipv6_subnet(port):
+ if 'subnets' in port:
+ for subnet in port['subnets']:
+ if (netaddr.IPNetwork(subnet['cidr']).version == 6 and
+ subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX):
+ return True
+
+ def enable_radvd(self, internal_ports=None):
+ LOG.debug('Spawning radvd daemon in router device: %s', self.router_id)
+ if not internal_ports:
+ internal_ports = self.internal_ports
+ self.radvd.enable(internal_ports)
+
+ def disable_radvd(self):
+ LOG.debug('Terminating radvd daemon in router device: %s',
+ self.router_id)
+ self.radvd.disable()
+
+ def internal_network_updated(self, interface_name, ip_cidrs):
+ self.driver.init_router_port(
+ interface_name,
+ ip_cidrs=ip_cidrs,
+ namespace=self.ns_name)
+
+ def address_scope_mangle_rule(self, device_name, mark_mask):
+ return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask)
+
+ def address_scope_filter_rule(self, device_name, mark_mask):
+ return '-o %s -m mark ! --mark %s -j DROP' % (
+ device_name, mark_mask)
+
+ def _process_internal_ports(self, pd):
+ existing_port_ids = set(p['id'] for p in self.internal_ports)
+
+ internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
+ current_port_ids = set(p['id'] for p in internal_ports
+ if p['admin_state_up'])
+
+ new_port_ids = current_port_ids - existing_port_ids
+ new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
+ old_ports = [p for p in self.internal_ports
+ if p['id'] not in current_port_ids]
+ updated_ports = self._get_updated_ports(self.internal_ports,
+ internal_ports)
+
+ enable_ra = False
+ for p in new_ports:
+ self.internal_network_added(p)
+ LOG.debug("appending port %s to internal_ports cache", p)
+ self.internal_ports.append(p)
+ enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+ for subnet in p['subnets']:
+ if ipv6_utils.is_ipv6_pd_enabled(subnet):
+ interface_name = self.get_internal_device_name(p['id'])
+ pd.enable_subnet(self.router_id, subnet['id'],
+ subnet['cidr'],
+ interface_name, p['mac_address'])
+
+ for p in old_ports:
+ self.internal_network_removed(p)
+ LOG.debug("removing port %s from internal_ports cache", p)
+ self.internal_ports.remove(p)
+ enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+ for subnet in p['subnets']:
+ if ipv6_utils.is_ipv6_pd_enabled(subnet):
+ pd.disable_subnet(self.router_id, subnet['id'])
+
+ updated_cidrs = []
+ if updated_ports:
+ for index, p in enumerate(internal_ports):
+ if not updated_ports.get(p['id']):
+ continue
+ self.internal_ports[index] = updated_ports[p['id']]
+ interface_name = self.get_internal_device_name(p['id'])
+ ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
+ LOG.debug("updating internal network for port %s", p)
+ updated_cidrs += ip_cidrs
+ self.internal_network_updated(interface_name, ip_cidrs)
+ enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+
+ # Check if there is any pd prefix update
+ for p in internal_ports:
+ if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
+ for subnet in p.get('subnets', []):
+ if ipv6_utils.is_ipv6_pd_enabled(subnet):
+ old_prefix = pd.update_subnet(self.router_id,
+ subnet['id'],
+ subnet['cidr'])
+ if old_prefix:
+ self._internal_network_updated(p, subnet['id'],
+ subnet['cidr'],
+ old_prefix,
+ updated_cidrs)
+ enable_ra = True
+
+ # Enable RA
+ if enable_ra:
+ self.enable_radvd(internal_ports)
+
+ existing_devices = self._get_existing_devices()
+ current_internal_devs = set(n for n in existing_devices
+ if n.startswith(INTERNAL_DEV_PREFIX))
+ current_port_devs = set(self.get_internal_device_name(port_id)
+ for port_id in current_port_ids)
+ stale_devs = current_internal_devs - current_port_devs
+ for stale_dev in stale_devs:
+ LOG.debug('Deleting stale internal router device: %s',
+ stale_dev)
+ pd.remove_stale_ri_ifname(self.router_id, stale_dev)
+ self.driver.unplug(stale_dev,
+ namespace=self.ns_name,
+ prefix=INTERNAL_DEV_PREFIX)
+
+ def _list_floating_ip_cidrs(self):
+ # Compute a list of addresses this router is supposed to have.
+ # This avoids unnecessarily removing those addresses and
+ # causing a momentary network outage.
+ floating_ips = self.get_floating_ips()
+ return [common_utils.ip_to_cidr(ip['floating_ip_address'])
+ for ip in floating_ips]
+
+ def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
+ self.ovs_driver.plug(ex_gw_port['network_id'],
+ ex_gw_port['id'],
+ interface_name,
+ ex_gw_port['mac_address'],
+ bridge=self.agent_conf.external_network_bridge,
+ namespace=ns_name,
+ prefix=EXTERNAL_DEV_PREFIX,
+ mtu=ex_gw_port.get('mtu'))
+
+ def _get_external_gw_ips(self, ex_gw_port):
+ gateway_ips = []
+ if 'subnets' in ex_gw_port:
+ gateway_ips = [subnet['gateway_ip']
+ for subnet in ex_gw_port['subnets']
+ if subnet['gateway_ip']]
+ if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
+ # No IPv6 gateway is available, but IPv6 is enabled.
+ if self.agent_conf.ipv6_gateway:
+ # ipv6_gateway configured, use address for default route.
+ gateway_ips.append(self.agent_conf.ipv6_gateway)
+ return gateway_ips
+
+ def _add_route_to_gw(self, ex_gw_port, device_name,
+ namespace, preserve_ips):
+ # Note: ipv6_gateway is an ipv6 LLA
+ # and so doesn't need a special route
+ for subnet in ex_gw_port.get('subnets', []):
+ is_gateway_not_in_subnet = (subnet['gateway_ip'] and
+ not ipam_utils.check_subnet_ip(
+ subnet['cidr'],
+ subnet['gateway_ip']))
+ if is_gateway_not_in_subnet:
+ preserve_ips.append(subnet['gateway_ip'])
+ device = ip_lib.IPDevice(device_name, namespace=namespace)
+ device.route.add_route(subnet['gateway_ip'], scope='link')
+
+ def _external_gateway_added(self, ex_gw_port, interface_name,
+ ns_name, preserve_ips):
+ LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)",
+ ex_gw_port, interface_name, ns_name)
+ self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
+
+ # Build up the interface and gateway IP addresses that
+ # will be added to the interface.
+ ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
+
+ gateway_ips = self._get_external_gw_ips(ex_gw_port)
+ enable_ra_on_gw = False
+ if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
+ # There is no IPv6 gw_ip, use RouterAdvt for default route.
+ enable_ra_on_gw = True
+
+ self._add_route_to_gw(ex_gw_port, device_name=interface_name,
+ namespace=ns_name, preserve_ips=preserve_ips)
+ self.ovs_driver.init_router_port(
+ interface_name,
+ ip_cidrs,
+ namespace=ns_name,
+ extra_subnets=ex_gw_port.get('extra_subnets', []),
+ preserve_ips=preserve_ips,
+ clean_connections=True)
+
+ device = ip_lib.IPDevice(interface_name, namespace=ns_name)
+ for ip in gateway_ips or []:
+ device.route.add_gateway(ip)
+
+ if enable_ra_on_gw:
+ self.driver.configure_ipv6_ra(ns_name, interface_name)
+
+ for fixed_ip in ex_gw_port['fixed_ips']:
+ ip_lib.send_ip_addr_adv_notif(ns_name,
+ interface_name,
+ fixed_ip['ip_address'],
+ self.agent_conf)
+
+ def is_v6_gateway_set(self, gateway_ips):
+ """Check to see if list of gateway_ips has an IPv6 gateway.
+ """
+ # Note - don't require a try-except here as all
+ # gateway_ips elements are valid addresses, if they exist.
+ return any(netaddr.IPAddress(gw_ip).version == 6
+ for gw_ip in gateway_ips)
+
+ def external_gateway_added(self, ex_gw_port, interface_name):
+ preserve_ips = self._list_floating_ip_cidrs()
+ self._external_gateway_added(
+ ex_gw_port, interface_name, self.ns_name, preserve_ips)
+
+ def external_gateway_updated(self, ex_gw_port, interface_name):
+ preserve_ips = self._list_floating_ip_cidrs()
+ self._external_gateway_added(
+ ex_gw_port, interface_name, self.ns_name, preserve_ips)
+
+ def external_gateway_removed(self, ex_gw_port, interface_name):
+ LOG.debug("External gateway removed: port(%s), interface(%s)",
+ ex_gw_port, interface_name)
+ device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
+ for ip_addr in ex_gw_port['fixed_ips']:
+ self.remove_external_gateway_ip(device,
+ common_utils.ip_to_cidr(
+ ip_addr['ip_address'],
+ ip_addr['prefixlen']))
+ self.ovs_driver.unplug(interface_name,
+ bridge=self.agent_conf.external_network_bridge,
+ namespace=self.ns_name,
+ prefix=EXTERNAL_DEV_PREFIX)
+
+ @staticmethod
+ def _gateway_ports_equal(port1, port2):
+ return port1 == port2
+
+ def _process_external_gateway(self, ex_gw_port, pd):
+ # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
+ ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
+ self.ex_gw_port and self.ex_gw_port['id'])
+
+ interface_name = None
+ if ex_gw_port_id:
+ interface_name = self.get_external_device_name(ex_gw_port_id)
+ if ex_gw_port:
+ if not self.ex_gw_port:
+ self.external_gateway_added(ex_gw_port, interface_name)
+ pd.add_gw_interface(self.router['id'], interface_name)
+ elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
+ self.external_gateway_updated(ex_gw_port, interface_name)
+ elif not ex_gw_port and self.ex_gw_port:
+ self.external_gateway_removed(self.ex_gw_port, interface_name)
+ pd.remove_gw_interface(self.router['id'])
+
+ existing_devices = self._get_existing_devices()
+ stale_devs = [dev for dev in existing_devices
+ if dev.startswith(EXTERNAL_DEV_PREFIX)
+ and dev != interface_name]
+ for stale_dev in stale_devs:
+ LOG.debug('Deleting stale external router device: %s', stale_dev)
+ pd.remove_gw_interface(self.router['id'])
+ self.ovs_driver.unplug(stale_dev,
+ bridge=self.agent_conf.external_network_bridge,
+ namespace=self.ns_name,
+ prefix=EXTERNAL_DEV_PREFIX)
+
+ # Process SNAT rules for external gateway
+ gw_port = self._router.get('gw_port')
+ self._handle_router_snat_rules(gw_port, interface_name)
+
+ def _prevent_snat_for_internal_traffic_rule(self, interface_name):
+ return (
+ 'POSTROUTING', '! -i %(interface_name)s '
+ '! -o %(interface_name)s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' %
+ {'interface_name': interface_name})
+
+ def external_gateway_nat_fip_rules(self, ex_gw_ip, interface_name):
+ dont_snat_traffic_to_internal_ports_if_not_to_floating_ip = (
+ self._prevent_snat_for_internal_traffic_rule(interface_name))
+ # Makes replies come back through the router to reverse DNAT
+ ext_in_mark = self.agent_conf.external_ingress_mark
+ snat_internal_traffic_to_floating_ip = (
+ 'snat', '-m mark ! --mark %s/%s '
+ '-m conntrack --ctstate DNAT '
+ '-j SNAT --to-source %s'
+ % (ext_in_mark, l3_constants.ROUTER_MARK_MASK, ex_gw_ip))
+ return [dont_snat_traffic_to_internal_ports_if_not_to_floating_ip,
+ snat_internal_traffic_to_floating_ip]
+
+ def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name):
+ snat_normal_external_traffic = (
+ 'snat', '-o %s -j SNAT --to-source %s' %
+ (interface_name, ex_gw_ip))
+ return [snat_normal_external_traffic]
+
+ def external_gateway_mangle_rules(self, interface_name):
+ mark = self.agent_conf.external_ingress_mark
+ mark_packets_entering_external_gateway_port = (
+ 'mark', '-i %s -j MARK --set-xmark %s/%s' %
+ (interface_name, mark, l3_constants.ROUTER_MARK_MASK))
+ return [mark_packets_entering_external_gateway_port]
+
+ def _empty_snat_chains(self, iptables_manager):
+ iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
+ iptables_manager.ipv4['nat'].empty_chain('snat')
+ iptables_manager.ipv4['mangle'].empty_chain('mark')
+ iptables_manager.ipv4['mangle'].empty_chain('POSTROUTING')
+
+ def _add_snat_rules(self, ex_gw_port, iptables_manager,
+ interface_name):
+ self.process_external_port_address_scope_routing(iptables_manager)
+
+ if ex_gw_port:
+ # ex_gw_port should not be None in this case
+ # NAT rules are added only if ex_gw_port has an IPv4 address
+ for ip_addr in ex_gw_port['fixed_ips']:
+ ex_gw_ip = ip_addr['ip_address']
+ if netaddr.IPAddress(ex_gw_ip).version == 4:
+ if self._snat_enabled:
+ rules = self.external_gateway_nat_snat_rules(
+ ex_gw_ip, interface_name)
+ for rule in rules:
+ iptables_manager.ipv4['nat'].add_rule(*rule)
+
+ rules = self.external_gateway_nat_fip_rules(
+ ex_gw_ip, interface_name)
+ for rule in rules:
+ iptables_manager.ipv4['nat'].add_rule(*rule)
+ rules = self.external_gateway_mangle_rules(interface_name)
+ for rule in rules:
+ iptables_manager.ipv4['mangle'].add_rule(*rule)
+
+ break
+
+ def _handle_router_snat_rules(self, ex_gw_port, interface_name):
+ self._empty_snat_chains(self.iptables_manager)
+
+ self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
+
+ self._add_snat_rules(ex_gw_port,
+ self.iptables_manager,
+ interface_name)
+
+ def _process_external_on_delete(self, agent):
+ fip_statuses = {}
+ try:
+ ex_gw_port = self.get_ex_gw_port()
+ self._process_external_gateway(ex_gw_port, agent.pd)
+ if not ex_gw_port:
+ return
+
+ interface_name = self.get_external_device_interface_name(
+ ex_gw_port)
+ fip_statuses = self.configure_fip_addresses(interface_name)
+
+ except (n_exc.FloatingIpSetupException):
+ # All floating IPs must be put in error state
+ LOG.exception(_LE("Failed to process floating IPs."))
+ fip_statuses = self.put_fips_in_error_state()
+ finally:
+ self.update_fip_statuses(agent, fip_statuses)
+
+ def process_external(self, agent):
+ fip_statuses = {}
+ try:
+ with self.iptables_manager.defer_apply():
+ ex_gw_port = self.get_ex_gw_port()
+ self._process_external_gateway(ex_gw_port, agent.pd)
+ if not ex_gw_port:
+ return
+
+ # Process SNAT/DNAT rules and addresses for floating IPs
+ self.process_snat_dnat_for_fip()
+
+ # Once NAT rules for floating IPs are safely in place
+ # configure their addresses on the external gateway port
+ interface_name = self.get_external_device_interface_name(
+ ex_gw_port)
+ fip_statuses = self.configure_fip_addresses(interface_name)
+
+ except (n_exc.FloatingIpSetupException,
+ n_exc.IpTablesApplyException):
+ # All floating IPs must be put in error state
+ LOG.exception(_LE("Failed to process floating IPs."))
+ fip_statuses = self.put_fips_in_error_state()
+ finally:
+ self.update_fip_statuses(agent, fip_statuses)
+
+ def update_fip_statuses(self, agent, fip_statuses):
+ # Identify floating IPs which were disabled
+ existing_floating_ips = self.floating_ips
+ self.floating_ips = set(fip_statuses.keys())
+ for fip_id in existing_floating_ips - self.floating_ips:
+ fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
+ # filter out statuses that didn't change
+ fip_statuses = {f: stat for f, stat in fip_statuses.items()
+ if stat != FLOATINGIP_STATUS_NOCHANGE}
+ if not fip_statuses:
+ return
+ LOG.debug('Sending floating ip statuses: %s', fip_statuses)
+ # Update floating IP status on the neutron server
+ agent.plugin_rpc.update_floatingip_statuses(
+ agent.context, self.router_id, fip_statuses)
+
+ def _get_port_devicename_scopemark(self, ports, name_generator):
+ devicename_scopemark = {l3_constants.IP_VERSION_4: dict(),
+ l3_constants.IP_VERSION_6: dict()}
+ for p in ports:
+ device_name = name_generator(p['id'])
+ ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
+ port_as_marks = self.get_port_address_scope_mark(p)
+ for ip_version in {ip_lib.get_ip_version(cidr)
+ for cidr in ip_cidrs}:
+ devicename_scopemark[ip_version][device_name] = (
+ port_as_marks[ip_version])
+
+ return devicename_scopemark
+
+ def _get_address_scope_mark(self):
+ # Prepare address scope iptables rule for internal ports
+ internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
+ ports_scopemark = self._get_port_devicename_scopemark(
+ internal_ports, self.get_internal_device_name)
+
+ # Prepare address scope iptables rule for external port
+ external_port = self.get_ex_gw_port()
+ if external_port:
+ external_port_scopemark = self._get_port_devicename_scopemark(
+ [external_port], self.get_external_device_name)
+ for ip_version in (l3_constants.IP_VERSION_4,
+ l3_constants.IP_VERSION_6):
+ ports_scopemark[ip_version].update(
+ external_port_scopemark[ip_version])
+ return ports_scopemark
+
+ def _add_address_scope_mark(self, iptables_manager, ports_scopemark):
+ external_device_name = None
+ external_port = self.get_ex_gw_port()
+ if external_port:
+ external_device_name = self.get_external_device_name(
+ external_port['id'])
+
+ # Process address scope iptables rules
+ for ip_version in (l3_constants.IP_VERSION_4,
+ l3_constants.IP_VERSION_6):
+ scopemarks = ports_scopemark[ip_version]
+ iptables = iptables_manager.get_tables(ip_version)
+ iptables['mangle'].empty_chain('scope')
+ iptables['filter'].empty_chain('scope')
+ dont_block_external = (ip_version == l3_constants.IP_VERSION_4
+ and self._snat_enabled and external_port)
+ for device_name, mark in scopemarks.items():
+ # Add address scope iptables rule
+ iptables['mangle'].add_rule(
+ 'scope',
+ self.address_scope_mangle_rule(device_name, mark))
+ if dont_block_external and device_name == external_device_name:
+ continue
+ iptables['filter'].add_rule(
+ 'scope',
+ self.address_scope_filter_rule(device_name, mark))
+
+ def process_ports_address_scope_iptables(self):
+ ports_scopemark = self._get_address_scope_mark()
+ self._add_address_scope_mark(self.iptables_manager, ports_scopemark)
+
+ def _get_external_address_scope(self):
+ external_port = self.get_ex_gw_port()
+ if not external_port:
+ return
+
+ scopes = external_port.get('address_scopes', {})
+ return scopes.get(str(l3_constants.IP_VERSION_4))
+
+ def process_external_port_address_scope_routing(self, iptables_manager):
+ if not self._snat_enabled:
+ return
+
+ external_port = self.get_ex_gw_port()
+ if not external_port:
+ return
+
+ external_devicename = self.get_external_device_name(
+ external_port['id'])
+
+ # Saves the originating address scope by saving the packet MARK to
+ # the CONNMARK for new connections so that returning traffic can be
+ # matched to it.
+ rule = ('-o %s -m connmark --mark 0x0/0xffff0000 '
+ '-j CONNMARK --save-mark '
+ '--nfmask 0xffff0000 --ctmask 0xffff0000' %
+ external_devicename)
+
+ iptables_manager.ipv4['mangle'].add_rule('POSTROUTING', rule)
+
+ address_scope = self._get_external_address_scope()
+ if not address_scope:
+ return
+
+ # Prevents snat within the same address scope
+ rule = '-o %s -m connmark --mark %s -j ACCEPT' % (
+ external_devicename,
+ self.get_address_scope_mark_mask(address_scope))
+ iptables_manager.ipv4['nat'].add_rule('snat', rule)
+
+ def process_address_scope(self):
+ with self.iptables_manager.defer_apply():
+ self.process_ports_address_scope_iptables()
+ self.process_floating_ip_address_scope_rules()
+
+ @common_utils.exception_logger()
+ def process_delete(self, agent):
+ """Process the delete of this router
+
+ This method is the point where the agent requests that this router
+ be deleted. This is a separate code path from process in that it
+ avoids any changes to the qrouter namespace that will be removed
+ at the end of the operation.
+
+ :param agent: Passes the agent in order to send RPC messages.
+ """
+ LOG.debug("process router delete")
+ if self.router_namespace.exists():
+ self._process_internal_ports(agent.pd)
+ agent.pd.sync_router(self.router['id'])
+ self._process_external_on_delete(agent)
+ else:
+ LOG.warning(_LW("Can't gracefully delete the router %s: "
+ "no router namespace found."), self.router['id'])
+
+ @common_utils.exception_logger()
+ def process(self, agent):
+ """Process updates to this router
+
+ This method is the point where the agent requests that updates be
+ applied to this router.
+
+ :param agent: Passes the agent in order to send RPC messages.
+ """
+ LOG.debug("process router updates")
+ self._process_internal_ports(agent.pd)
+ agent.pd.sync_router(self.router['id'])
+ self.process_external(agent)
+ self.process_address_scope()
+ # Process static routes for router
+ self.routes_updated(self.routes, self.router['routes'])
+ self.routes = self.router['routes']
+
+ # Update ex_gw_port and enable_snat on the router info cache
+ self.ex_gw_port = self.get_ex_gw_port()
+ self.fip_map = dict([(fip['floating_ip_address'],
+ fip['fixed_ip_address'])
+ for fip in self.get_floating_ips()])
+ # TODO(Carl) FWaaS uses this. Why is it set after processing is done?
+ self.enable_snat = self.router.get('enable_snat')
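
For a concrete view of the NAT tuples built by floating_forward_rules() earlier in this file, an illustration only (not part of the patch) with a hypothetical floating/fixed IP pair:

    # (chain, rule) tuples added to the ipv4 'nat' table, tagged 'floating_ip'
    floating_ip, fixed_ip = '198.51.100.10', '10.0.0.5'
    rules = [
        ('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)),
        ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)),
        ('float-snat', '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip)),
    ]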
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 5166bff1..0357ba05 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -130,6 +130,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "cd /usr/lib/python2.7/site-packages/congress/datasources && curl -O $doctor_driver" \
--run-command "sed -i \"s/'--detailed-exitcodes',/'--detailed-exitcodes','-l','syslog','-l','console',/g\" /var/lib/heat-config/hooks/puppet" \
--run-command "yum install -y /root/fdio/*.rpm" \
+ --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
--run-command "tar zxvf /root/fdio/vpp_papi*.tar.gz -C /" \
--install unzip \
--upload puppet-fdio.tar.gz:/etc/puppet/modules \
@@ -144,7 +145,10 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "cd /etc/puppet/modules/ && tar xzf puppet-tacker.tar.gz" \
--run-command "yum install -y https://dl.dropboxusercontent.com/u/7079970/rabbitmq-server-3.6.3-5.el7ost.noarch.rpm" \
--run-command "pip install python-senlinclient" \
- --upload ../interface.py:/usr/lib/python2.7/site-packages/neutron/agent/linux/ \
+ --upload ../neutron/agent/interface/interface.py:/usr/lib/python2.7/site-packages/neutron/agent/linux/ \
+ --run-command "mkdir /root/fdio_neutron_l3" \
+ --upload ../neutron/agent/l3/namespaces.py:/root/fdio_neutron_l3/ \
+ --upload ../neutron/agent/l3/router_info.py:/root/fdio_neutron_l3/ \
-a overcloud-full_build.qcow2
mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
diff --git a/build/rpm_specs/openstack-congress.spec b/build/rpm_specs/openstack-congress.spec
index 0dd3491c..4a109fb2 100644
--- a/build/rpm_specs/openstack-congress.spec
+++ b/build/rpm_specs/openstack-congress.spec
@@ -11,7 +11,7 @@ URL: https://wiki.openstack.org/wiki/Congress/Installation
Source0: openstack-congress.tar.gz
BuildArch: noarch
-BuildRequires: python-setuptools python2-oslo-config python2-debtcollector
+BuildRequires: python-setuptools python2-oslo-config python2-debtcollector libffi-devel python-devel openssl-devel
#Requires: pbr>=0.8 Paste PasteDeploy>=1.5.0 Routes>=1.12.3!=2.0 anyjson>=0.3.3 argparse
#Requires: Babel>=1.3 eventlet>=0.16.1!=0.17.0 greenlet>=0.3.2 httplib2>=0.7.5 requests>=2.2.0!=2.4.0
#Requires: iso8601>=0.1.9 kombu>=2.5.0 netaddr>=0.7.12 SQLAlchemy<1.1.0>=0.9.7
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index f3d14277..8fd241b4 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -11,7 +11,7 @@ Source0: opnfv-apex-common.tar.gz
BuildArch: noarch
BuildRequires: python-docutils python34-devel
Requires: openstack-tripleo opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools
-Requires: initscripts net-tools iputils iproute iptables python34 python34-yaml python3-jinja2
+Requires: initscripts net-tools iputils iproute iptables python34 python34-yaml python3-jinja2 python3-ipmi
Requires: ipxe-roms-qemu >= 20160127-1
%description
diff --git a/build/undercloud.sh b/build/undercloud.sh
index 10c46e74..a4d008ee 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -51,7 +51,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "sed -i '/SERVICE_LIST/a\\ \x27tacker\x27: {\x27password_field\x27: \x27OVERCLOUD_TACKER_PASSWORD\x27},' /usr/lib/python2.7/site-packages/tripleoclient/constants.py" \
--run-command "sed -i '/PASSWORD_NAMES =/a\\ \"OVERCLOUD_TACKER_PASSWORD\",' /usr/lib/python2.7/site-packages/tripleoclient/utils.py" \
--run-command "sed -i '/AodhPassword/a\\ parameters\[\x27TackerPassword\x27\] = passwords\[\x27OVERCLOUD_TACKER_PASSWORD\x27\]' /usr/lib/python2.7/site-packages/tripleoclient/v1/overcloud_deploy.py" \
- --run-command "sed -i '/^SERVICES/a\ \x27tacker\x27: {\x27description\x27: \x27Tacker Service\x27, \x27type\x27: \x27servicevm\x27, \x27path\x27: \x27/\x27, \x27port\x27: 1789 },' /usr/lib/python2.7/site-packages/os_cloud_config/keystone.py" \
+ --run-command "sed -i '/^SERVICES/a\ \x27tacker\x27: {\x27description\x27: \x27Tacker Service\x27, \x27type\x27: \x27servicevm\x27, \x27path\x27: \x27/\x27, \x27port\x27: 8888 },' /usr/lib/python2.7/site-packages/os_cloud_config/keystone.py" \
--upload ../noarch/python-tackerclient-2015.2-1.trozet.noarch.rpm:/root/ \
--install /root/python-tackerclient-2015.2-1.trozet.noarch.rpm \
--install "python2-aodhclient" \
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 91663dfc..00441975 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -179,8 +179,19 @@ parse_cmdline() {
exit 1
fi
- if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
- echo -e "${red}ERROR: You should not specify an inventory with virtual deployments${reset}"
+ # inventory file usage validation
+ if [[ -n "$virtual" ]]; then
+ if [[ -n "$INVENTORY_FILE" ]]; then
+ echo -e "${red}ERROR: You should not specify an inventory file with virtual deployments${reset}"
+ exit 1
+ else
+ INVENTORY_FILE='/tmp/inventory-virt.yaml'
+ fi
+ elif [[ -z "$INVENTORY_FILE" ]]; then
+ echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
+ exit 1
+ elif [[ ! -f "$INVENTORY_FILE" ]]; then
+ echo -e "${red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
exit 1
fi
@@ -194,16 +205,6 @@ parse_cmdline() {
exit 1
fi
- if [[ ! -z "$INVENTORY_FILE" && ! -f "$INVENTORY_FILE" ]]; then
- echo -e "{$red}ERROR: Inventory File: ${INVENTORY_FILE} does not exist! Exiting...${reset}"
- exit 1
- fi
-
- if [[ -z "$virtual" && -z "$INVENTORY_FILE" ]]; then
- echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
- exit 1
- fi
-
if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
post_config="FALSE"
@@ -226,9 +227,8 @@ main() {
setup_undercloud_vm
if [ "$virtual" == "TRUE" ]; then
setup_virtual_baremetal $VM_CPUS $VM_RAM
- elif [ -n "$INVENTORY_FILE" ]; then
- parse_inventory_file
fi
+ parse_inventory_file
configure_undercloud
overcloud_deploy
if [ "$post_config" == "TRUE" ]; then
diff --git a/config/deploy/os-nosdn-fdio-noha.yaml b/config/deploy/os-nosdn-fdio-noha.yaml
index 81ff781d..4d27ae87 100644
--- a/config/deploy/os-nosdn-fdio-noha.yaml
+++ b/config/deploy/os-nosdn-fdio-noha.yaml
@@ -16,8 +16,6 @@ deploy_options:
hugepages: 1024
hugepagesz: 2M
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
diff --git a/config/deploy/os-nosdn-ovs-ha.yaml b/config/deploy/os-nosdn-ovs-ha.yaml
index 89be0c3d..a72fef35 100644
--- a/config/deploy/os-nosdn-ovs-ha.yaml
+++ b/config/deploy/os-nosdn-ovs-ha.yaml
@@ -15,8 +15,6 @@ deploy_options:
hugepages: 1024
hugepagesz: 2M
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
diff --git a/config/deploy/os-nosdn-ovs-noha.yaml b/config/deploy/os-nosdn-ovs-noha.yaml
index d13fd9ca..7d054cec 100644
--- a/config/deploy/os-nosdn-ovs-noha.yaml
+++ b/config/deploy/os-nosdn-ovs-noha.yaml
@@ -15,8 +15,6 @@ deploy_options:
hugepagesz: 2M
hugepages: 1024
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
diff --git a/config/deploy/os-odl_l2-fdio-ha.yaml b/config/deploy/os-odl_l2-fdio-ha.yaml
index 75d79ce7..f1297e82 100644
--- a/config/deploy/os-odl_l2-fdio-ha.yaml
+++ b/config/deploy/os-odl_l2-fdio-ha.yaml
@@ -17,8 +17,6 @@ deploy_options:
hugepages: 1024
hugepagesz: 2M
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
diff --git a/config/deploy/os-odl_l2-fdio-noha.yaml b/config/deploy/os-odl_l2-fdio-noha.yaml
index ad54fbdc..207c6f34 100644
--- a/config/deploy/os-odl_l2-fdio-noha.yaml
+++ b/config/deploy/os-odl_l2-fdio-noha.yaml
@@ -19,8 +19,6 @@ deploy_options:
intel_iommu: 'on'
iommu: pt
Compute:
- nova:
- libvirtpin: 1
kernel:
hugepagesz: 2M
hugepages: 2048
diff --git a/docs/installationprocedure/baremetal.rst b/docs/installationprocedure/baremetal.rst
index 1b22a74c..8507b445 100644
--- a/docs/installationprocedure/baremetal.rst
+++ b/docs/installationprocedure/baremetal.rst
@@ -124,6 +124,7 @@ Install Bare Metal Jumphost
- opnfv-apex-common - (reqed) Supporting config files and scripts
- python34-markupsafe - (reqed) Dependency of opnfv-apex-common **
- python3-jinja2 - (reqed) Dependency of opnfv-apex-common **
+ - python3-ipmi - (reqed) Dependency of opnfv-apex-common **
\* One or more of these RPMs is required
Only one of opnfv-apex, opnfv-apex-onos and opnfv-apex-opendaylight-sfc is
@@ -139,7 +140,7 @@ Install Bare Metal Jumphost
To install these RPMs download them to the local disk on your CentOS 7
install and pass the file names directly to yum:
``sudo yum install python34-markupsafe-<version>.rpm
- python3-jinja2-<version>.rpm``
+ python3-jinja2-<version>.rpm python3-ipmi-<version>.rpm``
``sudo yum install opnfv-apex-<version>.rpm
opnfv-apex-undercloud-<version>.rpm opnfv-apex-common-<version>.rpm``
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index 1127f049..53c7eef2 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -97,6 +97,9 @@ EOF
if [ "${deploy_options_array['dataplane']}" == 'fdio' ]; then
sudo sed -i '/FdioEnabled:/c\ FdioEnabled: true' /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+ LIBGUESTFS_BACKEND=direct virt-customize --run-command "cp -f /root/fdio_neutron_l3/namespaces.py /usr/lib/python2.7/site-packages/neutron/agent/l3/" \
+ --run-command "cp -f /root/fdio_neutron_l3/router_info.py /usr/lib/python2.7/site-packages/neutron/agent/l3/" \
+ -a overcloud-full.qcow2
if [ "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]; then
LIBGUESTFS_BACKEND=direct virt-customize --run-command "cd /root/ && tar zxvf networking-odl.tar.gz" \
--run-command "cd /root/networking-odl && git init && pip install -r requirements.txt" \
@@ -137,7 +140,7 @@ EOI
# Push performance options to subscript to modify per-role images as needed
for option in "${performance_options[@]}" ; do
echo -e "${blue}Setting performance option $option${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "dataplane=${deploy_options_array['dataplane']} bash build_perf_image.sh $option"
done
# Build IPA kernel option ramdisks
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
index 9c2ebff5..40cdb826 100755
--- a/lib/parse-functions.sh
+++ b/lib/parse-functions.sh
@@ -65,7 +65,7 @@ parse_network_settings() {
done
fi
- if output=$(python3.4 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS $net_isolation_arg -e $CONFIG/network-environment.yaml $parse_ext); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS $net_isolation_arg -e $CONFIG/network-environment.yaml $parse_ext); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
@@ -88,7 +88,7 @@ parse_network_settings() {
##parses deploy settings yaml into globals
parse_deploy_settings() {
local output
- if output=$(python3.4 -B $LIB/python/apex_python_utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
@@ -99,85 +99,15 @@ parse_deploy_settings() {
}
##parses baremetal yaml settings into compatible json
-##writes the json to $CONFIG/instackenv_tmp.json
+##writes the json to undercloud:instackenv.json
##params: none
##usage: parse_inventory_file
parse_inventory_file() {
- local inventory=$(parse_yaml $INVENTORY_FILE)
- local node_list
- local node_prefix="node"
- local node_count=0
- local node_total
- local inventory_list
-
- # detect number of nodes
- for entry in $inventory; do
- if echo $entry | grep -Eo "^nodes_node[0-9]+_" > /dev/null; then
- this_node=$(echo $entry | grep -Eo "^nodes_node[0-9]+_")
- if [[ "$inventory_list" != *"$this_node"* ]]; then
- inventory_list+="$this_node "
- fi
- fi
- done
-
- inventory_list=$(echo $inventory_list | sed 's/ $//')
-
- for node in $inventory_list; do
- ((node_count+=1))
- done
-
- node_total=$node_count
-
- if [[ "$node_total" -lt 5 && "$ha_enabled" == "True" ]]; then
- echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
- exit 1
- elif [[ "$node_total" -lt 2 ]]; then
- echo -e "${red}ERROR: You must provide at least 2 nodes for non-HA baremetal deployment${reset}"
- exit 1
- fi
-
- eval $(parse_yaml $INVENTORY_FILE) || {
- echo "${red}Failed to parse inventory.yaml. Aborting.${reset}"
- exit 1
- }
-
- instackenv_output="
-{
- \"nodes\" : [
-
-"
- node_count=0
- for node in $inventory_list; do
- ((node_count+=1))
- node_output="
- {
- \"pm_password\": \"$(eval echo \${${node}ipmi_pass})\",
- \"pm_type\": \"$(eval echo \${${node}pm_type})\",
- \"mac\": [
- \"$(eval echo \${${node}mac_address})\"
- ],
- \"cpu\": \"$(eval echo \${${node}cpus})\",
- \"memory\": \"$(eval echo \${${node}memory})\",
- \"disk\": \"$(eval echo \${${node}disk})\",
- \"arch\": \"$(eval echo \${${node}arch})\",
- \"pm_user\": \"$(eval echo \${${node}ipmi_user})\",
- \"pm_addr\": \"$(eval echo \${${node}ipmi_ip})\",
- \"capabilities\": \"$(eval echo \${${node}capabilities})\"
-"
- instackenv_output+=${node_output}
- if [ $node_count -lt $node_total ]; then
- instackenv_output+=" },"
- else
- instackenv_output+=" }"
- fi
- done
-
- instackenv_output+='
- ]
-}
-'
- #Copy instackenv.json to undercloud for baremetal
- echo -e "{blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
+ if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
+ if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
+ instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
+ #Copy instackenv.json to undercloud
+ echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
cat > instackenv.json << EOF
$instackenv_output
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index 6e9859a8..a23e0877 100755
--- a/lib/post-install-functions.sh
+++ b/lib/post-install-functions.sh
@@ -209,5 +209,7 @@ if [[ "$ha_enabled" == 'True' ]]; then
echo "${blue}\nChecking pacemaker service status\n${reset}"
fi
overcloud_connect "controller0" "for i in \$(sudo pcs status | grep '^* ' | cut -d ' ' -f 2 | cut -d '_' -f 1 | uniq); do echo \"WARNING: Service: \$i not running\"; done"
+ # trozet: disable congress in HA until congress bugs are fixed
+ overcloud_connect "controller0" "sudo pcs resource ban openstack-congress overcloud-controller-1; sudo pcs resource ban openstack-congress overcloud-controller-2; sudo systemctl restart openstack-congress"
fi
}
diff --git a/lib/python/apex/__init__.py b/lib/python/apex/__init__.py
index 9993dc97..b2a45f7d 100644
--- a/lib/python/apex/__init__.py
+++ b/lib/python/apex/__init__.py
@@ -12,3 +12,4 @@ from .network_settings import NetworkSettings
from .deploy_settings import DeploySettings
from .network_environment import NetworkEnvironment
from .clean import clean_nodes
+from .inventory import Inventory
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
new file mode 100644
index 00000000..f4a33b28
--- /dev/null
+++ b/lib/python/apex/inventory.py
@@ -0,0 +1,76 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (dradez@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import json
+
+
+class Inventory(dict):
+ """
+ This class parses an APEX inventory yaml file into an object. It
+ generates or detects all missing fields for deployment.
+
+ It then collapses one level of identification from the object to
+ convert it to a structure that can be dumped into a json file formatted
+ such that TripleO can read the resulting json as an instackenv.json file.
+ """
+ def __init__(self, source, ha=True, virtual=False):
+ init_dict = {}
+ if type(source) is str:
+ with open(source, 'r') as inventory_file:
+ yaml_dict = yaml.load(inventory_file)
+ # collapse node identifiers from the structure
+ init_dict['nodes'] = list(map(lambda n: n[1],
+ yaml_dict['nodes'].items()))
+ else:
+ # assume input is a dict to build from
+ init_dict = source
+
+ # move ipmi_* to pm_* and make mac a list; nodes that were already
+ # munged (e.g. an existing Inventory passed back in) return unchanged
+ def munge_nodes(node):
+ if 'ipmi_ip' in node:
+ node['pm_addr'] = node.pop('ipmi_ip')
+ node['pm_password'] = node.pop('ipmi_pass')
+ node['pm_user'] = node.pop('ipmi_user')
+ node['mac'] = [node.pop('mac_address')]
+ return node
+
+ super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
+
+ # verify number of nodes
+ if ha and len(self['nodes']) < 5:
+ raise InventoryException('You must provide at least 5 '
+ 'nodes for HA baremetal deployment')
+ elif len(self['nodes']) < 2:
+ raise InventoryException('You must provide at least 2 nodes '
+ 'for non-HA baremetal deployment')
+
+ if virtual:
+ self['arch'] = 'x86_64'
+ self['host-ip'] = '192.168.122.1'
+ self['power_manager'] = \
+ 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager'
+ self['seed-ip'] = ''
+ self['ssh-key'] = 'INSERT_STACK_USER_PRIV_KEY'
+ self['ssh-user'] = 'root'
+
+ def dump_instackenv_json(self):
+ print(json.dumps(dict(self), sort_keys=True, indent=4))
+
+
+class InventoryException(Exception):
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
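
A minimal usage sketch of the new class, assuming the pod_example_settings.yaml inventory shipped under config/inventory; the --ha and --virtual flags of the CLI wrapper map directly onto the ha and virtual keyword arguments:

from apex.inventory import Inventory, InventoryException

try:
    # ha=True enforces the 5-node minimum; virtual=True would add the
    # VirtualPowerManager/ssh fields set in __init__ above
    inv = Inventory('config/inventory/pod_example_settings.yaml',
                    ha=True, virtual=False)
    inv.dump_instackenv_json()  # prints instackenv.json-formatted JSON
except InventoryException as e:
    print('Invalid inventory: {}'.format(e))
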
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
index 1f5e407c..ebc49dc5 100755
--- a/lib/python/apex_python_utils.py
+++ b/lib/python/apex_python_utils.py
@@ -22,6 +22,7 @@ from jinja2 import FileSystemLoader
from apex import NetworkSettings
from apex import NetworkEnvironment
from apex import DeploySettings
+from apex import Inventory
from apex import ip_utils
from apex.common.constants import ADMIN_NETWORK
@@ -66,6 +67,11 @@ def run_clean(args):
apex.clean_nodes(args.file)
+def parse_inventory(args):
+ inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
+ inventory.dump_instackenv_json()
+
+
def find_ip(args):
"""
Get and print the IP from a specific interface
@@ -128,7 +134,7 @@ def get_parser():
parser.add_argument('-l', '--log-file', default='/var/log/apex/apex.log',
dest='log_file', help="Log file to log to")
subparsers = parser.add_subparsers()
-
+ # parse-net-settings
net_settings = subparsers.add_parser('parse-net-settings',
help='Parse network settings file')
net_settings.add_argument('-s', '--net-settings-file',
@@ -154,7 +160,7 @@ def get_parser():
help='Boolean to enable Controller Pre Config')
net_settings.set_defaults(func=parse_net_settings)
-
+ # find-ip
get_int_ip = subparsers.add_parser('find-ip',
help='Find interface ip')
get_int_ip.add_argument('-i', '--interface', required=True,
@@ -163,7 +169,7 @@ def get_parser():
choices=[4, 6], dest='address_family',
help='IP Address family')
get_int_ip.set_defaults(func=find_ip)
-
+ # nic-template
nic_template = subparsers.add_parser('nic-template',
help='Build NIC templates')
nic_template.add_argument('-r', '--role', required=True,
@@ -189,13 +195,28 @@ def get_parser():
default=None, dest='ovs_dpdk_bridge',
help='OVS DPDK Bridge Name')
nic_template.set_defaults(func=build_nic_template)
-
+ # parse-deploy-settings
deploy_settings = subparsers.add_parser('parse-deploy-settings',
help='Parse deploy settings file')
deploy_settings.add_argument('-f', '--file',
default='deploy_settings.yaml',
help='path to deploy settings file')
deploy_settings.set_defaults(func=parse_deploy_settings)
+ # parse-inventory
+ inventory = subparsers.add_parser('parse-inventory',
+ help='Parse inventory file')
+ inventory.add_argument('-f', '--file',
+ default='inventory.yaml',
+ help='path to inventory file')
+ inventory.add_argument('--ha',
+ default=False,
+ action='store_true',
+ help='Indicate if deployment is HA or not')
+ inventory.add_argument('--virtual',
+ default=False,
+ action='store_true',
+ help='Indicate if deployment inventory is virtual')
+ inventory.set_defaults(func=parse_inventory)
clean = subparsers.add_parser('clean',
help='Parse deploy settings file')
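
A minimal sketch of driving the new sub-command programmatically, the same way the tests below exercise it, assuming the example inventory under config/inventory:

from apex_python_utils import get_parser, parse_inventory

args = get_parser().parse_args(['parse-inventory', '--ha',
                                '-f', '../config/inventory/pod_example_settings.yaml'])
parse_inventory(args)  # equivalent to args.func(args); prints the JSON
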
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 5e9a5caa..177fe443 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -165,11 +165,6 @@ EOI
# root's auth keys so that Undercloud can control
# vm power on the hypervisor
ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys
-
- INSTACKENV=$CONFIG/instackenv-virt.json
-
- # upload instackenv file to Undercloud for virtual deployment
- scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
fi
# allow stack to control power management on the hypervisor via sshkey
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
index 903e3bcd..61dc6799 100755
--- a/lib/virtual-setup-functions.sh
+++ b/lib/virtual-setup-functions.sh
@@ -22,10 +22,9 @@ function setup_virtual_baremetal {
vcpus=$1
ramsize=$(($2*1024))
fi
- #start by generating the opening json for instackenv.json
- cat > $CONFIG/instackenv-virt.json << EOF
-{
- "nodes": [
+ #start by generating the opening yaml for the inventory-virt.yaml file
+ cat > /tmp/inventory-virt.yaml << EOF
+nodes:
EOF
# next create the virtual machines and add their definitions to the file
@@ -60,44 +59,26 @@ EOF
fi
done
else
- echo "Found Baremetal ${i} VM, using existing VM"
+ echo "Found baremetal${i} VM, using existing VM"
fi
#virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
- cat >> $CONFIG/instackenv-virt.json << EOF
- {
- "pm_addr": "192.168.122.1",
- "pm_user": "root",
- "pm_password": "INSERT_STACK_USER_PRIV_KEY",
- "pm_type": "pxe_ssh",
- "mac": [
- "$mac"
- ],
- "cpu": "$vcpus",
- "memory": "$ramsize",
- "disk": "41",
- "arch": "x86_64",
- "capabilities": "$capability"
- },
+ cat >> /tmp/inventory-virt.yaml << EOF
+ node${i}:
+ mac_address: "$mac"
+ ipmi_ip: 192.168.122.1
+ ipmi_user: root
+ ipmi_pass: "INSERT_STACK_USER_PRIV_KEY"
+ pm_type: "pxe_ssh"
+ cpus: $vcpus
+ memory: $ramsize
+ disk: 41
+ arch: "x86_64"
+ capabilities: "$capability"
EOF
done
- #truncate the last line to remove the comma behind the bracket
- tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{}
-
- #finally reclose the bracket and close the instackenv.json file
- cat >> $CONFIG/instackenv-virt.json << EOF
- }
- ],
- "arch": "x86_64",
- "host-ip": "192.168.122.1",
- "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager",
- "seed-ip": "",
- "ssh-key": "INSERT_STACK_USER_PRIV_KEY",
- "ssh-user": "root"
-}
-EOF
#Overwrite the tripleo-incubator domain.xml with our own, keeping a backup.
if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
/usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak
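
A minimal sketch of what the virtual flow now amounts to, assuming deploy points the inventory parsing at the generated /tmp/inventory-virt.yaml; virtual=True re-adds the VirtualPowerManager/ssh fields that the removed JSON footer above used to hard-code:

from apex.inventory import Inventory

# ha=False for a noha run; an HA run would need at least five generated VMs
Inventory('/tmp/inventory-virt.yaml', ha=False, virtual=True).dump_instackenv_json()
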
diff --git a/tests/test_apex_inventory.py b/tests/test_apex_inventory.py
new file mode 100644
index 00000000..08a34152
--- /dev/null
+++ b/tests/test_apex_inventory.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from apex.inventory import Inventory
+from apex.inventory import InventoryException
+
+from nose.tools import assert_is_instance
+from nose.tools import assert_raises
+from nose.tools import assert_equal
+
+inventory_files = ('intel_pod2_settings.yaml',
+ 'nokia_pod1_settings.yaml',
+ 'pod_example_settings.yaml')
+
+
+class TestInventory(object):
+ @classmethod
+ def setup_class(klass):
+ """This method is run once for each class before any tests are run"""
+
+ @classmethod
+ def teardown_class(klass):
+ """This method is run once for each class _after_ all tests are run"""
+
+ def setUp(self):
+ """This method is run once before _each_ test method is executed"""
+
+ def teardown(self):
+ """This method is run once after _each_ test method is executed"""
+
+ def test_init(self):
+ for f in inventory_files:
+ i = Inventory('../config/inventory/{}'.format(f))
+ assert_equal(i.dump_instackenv_json(), None)
+
+ # test virtual
+ i = Inventory(i, virtual=True)
+ assert_equal(i.dump_instackenv_json(), None)
+
+ # Remove nodes to violate HA node count
+ while len(i['nodes']) >= 5:
+ i['nodes'].pop()
+ assert_raises(InventoryException,
+ Inventory, i)
+
+ # Remove nodes to violate non-HA node count
+ while len(i['nodes']) >= 2:
+ i['nodes'].pop()
+ assert_raises(InventoryException,
+ Inventory, i, ha=False)
+
+ def test_exception(self):
+ e = InventoryException("test")
+ print(e)
+ assert_is_instance(e, InventoryException)
diff --git a/tests/test_apex_python_utils_py.py b/tests/test_apex_python_utils_py.py
index 1f280356..237c5589 100644
--- a/tests/test_apex_python_utils_py.py
+++ b/tests/test_apex_python_utils_py.py
@@ -16,6 +16,7 @@ from apex_python_utils import parse_net_settings
from apex_python_utils import parse_deploy_settings
from apex_python_utils import find_ip
from apex_python_utils import build_nic_template
+from apex_python_utils import parse_inventory
from nose.tools import assert_equal
from nose.tools import assert_raises
@@ -25,6 +26,7 @@ net_sets = '../config/network/network_settings.yaml'
net_env = '../build/network-environment.yaml'
deploy_sets = '../config/deploy/deploy_settings.yaml'
nic_template = '../build/nics-template.yaml.jinja2'
+inventory = '../config/inventory/pod_example_settings.yaml'
class TestCommonUtils(object):
@@ -77,3 +79,8 @@ class TestCommonUtils(object):
'-r', 'compute',
'-t', nic_template])
assert_equal(build_nic_template(args), None)
+
+ def test_parse_inventory(self):
+ args = self.parser.parse_args(['parse-inventory',
+ '-f', inventory])
+ assert_equal(parse_inventory(args), None)