22 files changed, 582 insertions, 2066 deletions
diff --git a/build/build_perf_image.sh b/build/build_perf_image.sh
deleted file mode 100644
index 68f74ea2..00000000
--- a/build/build_perf_image.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2016 Red Hat Inc.
-# Michael Chapman <michapma@redhat.com>
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-ROLE=$1
-shift
-CATEGORY=$1
-shift
-KEY=$1
-shift
-VALUE=$1
-shift
-
-IMAGE=$ROLE-overcloud-full.qcow2
-
-# Create image copy for this role
-if [ ! -f $IMAGE ] ; then
-  cp overcloud-full.qcow2 $IMAGE
-fi
-
-if [ "$CATEGORY" == "nova" ]; then
-  if [ "$KEY" == "libvirtpin" ]; then
-    sudo sed -i "s/#LibvirtCPUPinSet:.*/LibvirtCPUPinSet: '${VALUE}'/" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
-  fi
-fi
-
-if [ "$CATEGORY" == "kernel" ]; then
-  echo "${KEY}=${VALUE}" >> $ROLE-kernel_params.txt
-  if [[ "$dataplane" == 'fdio' && "$KEY" == 'hugepages' ]]; then
-    # set kernel hugepages params for fdio
-    LIBGUESTFS_BACKEND=direct virt-customize --run-command "echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf" \
-    --run-command "echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$VALUE | bc)) >> /usr/lib/sysctl.d/00-system.conf" \
-    --run-command "echo kernel.shmmax==$((VALUE * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf" \
-    -a ${IMAGE}
-  fi
-fi
-
-if [ "$CATEGORY" == "vpp" ]; then
-  if [ "$KEY" == "main-core" ]; then
-    sudo sed -i "/${ROLE}VPPMainCore:/c\ ${ROLE}VPPMainCore: '${VALUE}'" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
-  fi
-  if [ "$KEY" == "corelist-workers" ]; then
-    sudo sed -i "/${ROLE}VPPCorelistWorkers:/c\ ${ROLE}VPPCorelistWorkers: '${VALUE}'" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
-  fi
-  if [ "$KEY" == "uio-driver" ]; then
-    sudo sed -i "/${ROLE}UIODriver:/c\ ${ROLE}UIODriver: '${VALUE}'" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
-  fi
-fi
\ No newline at end of file
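Note: the hugepage arithmetic removed here reappears in the new build/first-boot.yaml below (vm.max_map_count is roughly 2.2 map entries per hugepage, kernel.shmmax is the hugepage count times 2 MB). A minimal, self-contained Python sketch of that calculation; the helper name hugepage_sysctls is illustrative and not part of this change:

def hugepage_sysctls(hugepages, page_size_mb=2):
    """Sysctl values derived from a 2 MB hugepage count (illustrative only)."""
    return {
        "vm.hugetlb_shm_group": 0,
        # matches: printf "%.0f" $(echo 2.2*$hugepages | bc)
        "vm.max_map_count": round(2.2 * hugepages),
        # total hugepage memory in bytes: count * 2 MB
        "kernel.shmmax": hugepages * page_size_mb * 1024 * 1024,
    }

for key, value in hugepage_sysctls(2048).items():
    print(f"{key}={value}")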
diff --git a/build/first-boot.yaml b/build/first-boot.yaml
new file mode 100644
index 00000000..6cd874cc
--- /dev/null
+++ b/build/first-boot.yaml
@@ -0,0 +1,53 @@
+heat_template_version: 2014-10-16
+
+description: >
+  This is an example showing how you can do firstboot configuration
+  of the nodes via cloud-init. To enable this, replace the default
+  mapping of OS::TripleO::NodeUserData in ../overcloud_resource_registry*
+
+parameters:
+  ComputeKernelArgs:
+    description: >
+      Space seprated list of Kernel args to be update to grub.
+      The given args will be appended to existing args of GRUB_CMDLINE_LINUX in file /etc/default/grub
+      Example: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+    type: string
+    default: ""
+
+resources:
+  userdata:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: {get_resource: compute_kernel_args}
+
+  # Verify the logs on /var/log/cloud-init.log on the overcloud node
+  compute_kernel_args:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+            set -x
+            sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS"/g' -i /etc/default/grub ;
+            grub2-mkconfig -o /etc/grub2.cfg
+            hugepage_count=`echo $KERNEL_ARGS | grep -oP ' ?hugepages=\K[0-9]+'`
+            if [ -z "$hugepage_count" ]; then
+              hugepage_count=1024
+            fi
+            echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf
+            echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$hugepage_count | bc)) >> /usr/lib/sysctl.d/00-system.conf
+            echo kernel.shmmax=$(($hugepage_count * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf
+
+            reboot
+          params:
+            $KERNEL_ARGS: {get_param: ComputeKernelArgs}
+
+outputs:
+  # This means get_resource from the parent template will get the userdata, see:
+  # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
+  # Note this is new-for-kilo, an alternative is returning a value then using
+  # get_attr in the parent template instead.
+ OS::stack_id: + value: {get_resource: userdata} diff --git a/build/neutron-patch-NSDriver.patch b/build/neutron-patch-NSDriver.patch new file mode 100644 index 00000000..e015064c --- /dev/null +++ b/build/neutron-patch-NSDriver.patch @@ -0,0 +1,208 @@ +From ff4e918d21970a81604a0aaa2af888141f93cdac Mon Sep 17 00:00:00 2001 +From: Feng Pan <fpan@redhat.com> +Date: Sun, 5 Feb 2017 21:34:19 -0500 +Subject: [PATCH] Add NSDriver + +--- + neutron/agent/l3/namespaces.py | 6 ++-- + neutron/agent/l3/router_info.py | 14 ++++---- + neutron/agent/linux/interface.py | 76 +++++++++++++++++++++++++++++++++++++++- + 3 files changed, 87 insertions(+), 9 deletions(-) + +diff --git a/neutron/agent/l3/namespaces.py b/neutron/agent/l3/namespaces.py +index e70d7bb..3c932a8 100644 +--- a/neutron/agent/l3/namespaces.py ++++ b/neutron/agent/l3/namespaces.py +@@ -18,6 +18,7 @@ import functools + from oslo_log import log as logging + from oslo_utils import excutils + ++from neutron.agent.linux.interface import OVSInterfaceDriver + from neutron._i18n import _LE, _LW + from neutron.agent.linux import ip_lib + +@@ -110,8 +111,9 @@ class Namespace(object): + + class RouterNamespace(Namespace): + +- def __init__(self, router_id, agent_conf, driver, use_ipv6): ++ def __init__(self, router_id, agent_conf, driver, use_ipv6, ovs_driver): + self.router_id = router_id ++ self.ovs_driver = ovs_driver + name = self._get_ns_name(router_id) + super(RouterNamespace, self).__init__( + name, agent_conf, driver, use_ipv6) +@@ -131,7 +133,7 @@ class RouterNamespace(Namespace): + elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX): + ns_ip.del_veth(d.name) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): +- self.driver.unplug( ++ self.ovs_driver.unplug( + d.name, + bridge=self.agent_conf.external_network_bridge, + namespace=self.name, +diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py +index 3fd3934..b33fb7e 100644 +--- a/neutron/agent/l3/router_info.py ++++ b/neutron/agent/l3/router_info.py +@@ -27,6 +27,7 @@ from neutron.common import exceptions as n_exc + from neutron.common import ipv6_utils + from neutron.common import utils as common_utils + from neutron.ipam import utils as ipam_utils ++from neutron.agent.linux.interface import OVSInterfaceDriver + + LOG = logging.getLogger(__name__) + INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX +@@ -47,6 +48,7 @@ class RouterInfo(object): + agent_conf, + interface_driver, + use_ipv6=False): ++ self.ovs_driver = OVSInterfaceDriver(agent_conf) + self.router_id = router_id + self.ex_gw_port = None + self._snat_enabled = None +@@ -57,7 +59,7 @@ class RouterInfo(object): + self.router = router + self.use_ipv6 = use_ipv6 + ns = self.create_router_namespace_object( +- router_id, agent_conf, interface_driver, use_ipv6) ++ router_id, agent_conf, interface_driver, use_ipv6, self.ovs_driver) + self.router_namespace = ns + self.ns_name = ns.name + self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN, +@@ -94,9 +96,9 @@ class RouterInfo(object): + self.router_namespace.create() + + def create_router_namespace_object( +- self, router_id, agent_conf, iface_driver, use_ipv6): ++ self, router_id, agent_conf, iface_driver, use_ipv6, ovs_driver): + return namespaces.RouterNamespace( +- router_id, agent_conf, iface_driver, use_ipv6) ++ router_id, agent_conf, iface_driver, use_ipv6, ovs_driver) + + @property + def router(self): +@@ -583,7 +585,7 @@ class RouterInfo(object): + for ip in floating_ips] + + def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name): 
+- self.driver.plug(ex_gw_port['network_id'], ++ self.ovs_driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], + interface_name, + ex_gw_port['mac_address'], +@@ -641,7 +643,7 @@ class RouterInfo(object): + + self._add_route_to_gw(ex_gw_port, device_name=interface_name, + namespace=ns_name, preserve_ips=preserve_ips) +- self.driver.init_router_port( ++ self.ovs_driver.init_router_port( + interface_name, + ip_cidrs, + namespace=ns_name, +@@ -735,7 +737,7 @@ class RouterInfo(object): + for stale_dev in stale_devs: + LOG.debug('Deleting stale external router device: %s', stale_dev) + pd.remove_gw_interface(self.router['id']) +- self.driver.unplug(stale_dev, ++ self.ovs_driver.unplug(stale_dev, + bridge=self.agent_conf.external_network_bridge, + namespace=self.ns_name, + prefix=EXTERNAL_DEV_PREFIX) +diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py +index c2eb06e..80da16f 100644 +--- a/neutron/agent/linux/interface.py ++++ b/neutron/agent/linux/interface.py +@@ -15,7 +15,7 @@ + + import abc + import time +- ++import eventlet + import netaddr + from neutron_lib import constants + from oslo_config import cfg +@@ -288,6 +288,80 @@ class NullDriver(LinuxInterfaceDriver): + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + pass + ++class NSDriver(LinuxInterfaceDriver): ++ """Device independent driver enabling creation of a non device specific ++ interface in network spaces. Attachment to the device is not performed. ++ """ ++ MAX_TIME_FOR_DEVICE_EXISTENCE = 30 ++ ++ @classmethod ++ def _device_is_created_in_time(cls, device_name): ++ """See if device is created, within time limit.""" ++ attempt = 0 ++ while attempt < NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE: ++ if ip_lib.device_exists(device_name): ++ return True ++ attempt += 1 ++ eventlet.sleep(1) ++ LOG.error(_LE("Device %(dev)s was not created in %(time)d seconds"), ++ {'dev': device_name, ++ 'time': NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE}) ++ return False ++ ++ def _configure_mtu(self, ns_dev, mtu=None): ++ # Need to set MTU, after added to namespace. See review ++ # https://review.openstack.org/327651 ++ try: ++ # Note: network_device_mtu will be deprecated in future ++ mtu_override = self.conf.network_device_mtu ++ except cfg.NoSuchOptError: ++ LOG.warning(_LW("Config setting for MTU deprecated - any " ++ "override will be ignored.")) ++ mtu_override = None ++ if mtu_override: ++ mtu = mtu_override ++ LOG.debug("Overriding MTU to %d", mtu) ++ if mtu: ++ ns_dev.link.set_mtu(mtu) ++ else: ++ LOG.debug("No MTU provided - skipping setting value") ++ ++ def plug(self, network_id, port_id, device_name, mac_address, ++ bridge=None, namespace=None, prefix=None, mtu=None): ++ ++ # Overriding this, we still want to add an existing device into the ++ # namespace. 
++ self.plug_new(network_id, port_id, device_name, mac_address, ++ bridge, namespace, prefix, mtu) ++ ++ def plug_new(self, network_id, port_id, device_name, mac_address, ++ bridge=None, namespace=None, prefix=None, mtu=None): ++ ++ ip = ip_lib.IPWrapper() ++ ns_dev = ip.device(device_name) ++ ++ LOG.debug("Plugging dev: '%s' into namespace: '%s' ", ++ device_name, namespace) ++ ++ # Wait for device creation ++ if not self._device_is_created_in_time(device_name): ++ return ++ ++ ns_dev.link.set_address(mac_address) ++ ++ if namespace: ++ namespace_obj = ip.ensure_namespace(namespace) ++ namespace_obj.add_device_to_namespace(ns_dev) ++ ++ self._configure_mtu(ns_dev, mtu) ++ ++ ns_dev.link.set_up() ++ ++ def unplug(self, device_name, bridge=None, namespace=None, prefix=None): ++ # Device removal is done externally. Just remove the namespace ++ LOG.debug("Removing namespace: '%s'", namespace) ++ ip_lib.IPWrapper(namespace).garbage_collect_namespace() ++ + + class OVSInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating an internal interface on an OVS bridge.""" +-- +2.9.3 + diff --git a/build/neutron/agent/interface/interface.py b/build/neutron/agent/interface/interface.py deleted file mode 100644 index 709fd677..00000000 --- a/build/neutron/agent/interface/interface.py +++ /dev/null @@ -1,552 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import eventlet -import netaddr -from oslo_config import cfg -from oslo_log import log as logging -import six - -from neutron._i18n import _, _LE, _LI, _LW -from neutron.agent.common import ovs_lib -from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils -from neutron.common import constants as n_const -from neutron.common import exceptions -from neutron.common import ipv6_utils - - -LOG = logging.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('ovs_integration_bridge', - default='br-int', - help=_('Name of Open vSwitch bridge to use')), - cfg.BoolOpt('ovs_use_veth', - default=False, - help=_('Uses veth for an OVS interface or not. ' - 'Support kernels with limited namespace support ' - '(e.g. RHEL 6.5) so long as ovs_use_veth is set to ' - 'True.')), - cfg.IntOpt('network_device_mtu', - deprecated_for_removal=True, - help=_('MTU setting for device. This option will be removed in ' - 'Newton. 
Please use the system-wide segment_mtu setting ' - 'which the agents will take into account when wiring ' - 'VIFs.')), -] - - -@six.add_metaclass(abc.ABCMeta) -class LinuxInterfaceDriver(object): - - # from linux IF_NAMESIZE - DEV_NAME_LEN = 14 - DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX - - def __init__(self, conf): - self.conf = conf - if self.conf.network_device_mtu: - self._validate_network_device_mtu() - - def _validate_network_device_mtu(self): - if (ipv6_utils.is_enabled() and - self.conf.network_device_mtu < n_const.IPV6_MIN_MTU): - LOG.error(_LE("IPv6 protocol requires a minimum MTU of " - "%(min_mtu)s, while the configured value is " - "%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU, - 'current_mtu': self.conf.network_device_mtu}) - raise SystemExit(1) - - @property - def use_gateway_ips(self): - """Whether to use gateway IPs instead of unique IP allocations. - - In each place where the DHCP agent runs, and for each subnet for - which DHCP is handling out IP addresses, the DHCP port needs - - at the Linux level - to have an IP address within that subnet. - Generally this needs to be a unique Neutron-allocated IP - address, because the subnet's underlying L2 domain is bridged - across multiple compute hosts and network nodes, and for HA - there may be multiple DHCP agents running on that same bridged - L2 domain. - - However, if the DHCP ports - on multiple compute/network nodes - but for the same network - are _not_ bridged to each other, - they do not need each to have a unique IP address. Instead - they can all share the same address from the relevant subnet. - This works, without creating any ambiguity, because those - ports are not all present on the same L2 domain, and because - no data within the network is ever sent to that address. - (DHCP requests are broadcast, and it is the network's job to - ensure that such a broadcast will reach at least one of the - available DHCP servers. DHCP responses will be sent _from_ - the DHCP port address.) - - Specifically, for networking backends where it makes sense, - the DHCP agent allows all DHCP ports to use the subnet's - gateway IP address, and thereby to completely avoid any unique - IP address allocation. This behaviour is selected by running - the DHCP agent with a configured interface driver whose - 'use_gateway_ips' property is True. - - When an operator deploys Neutron with an interface driver that - makes use_gateway_ips True, they should also ensure that a - gateway IP address is defined for each DHCP-enabled subnet, - and that the gateway IP address doesn't change during the - subnet's lifetime. - """ - return False - - def init_l3(self, device_name, ip_cidrs, namespace=None, - preserve_ips=None, clean_connections=False): - """Set the L3 settings for the interface using data from the port. 
- - ip_cidrs: list of 'X.X.X.X/YY' strings - preserve_ips: list of ip cidrs that should not be removed from device - clean_connections: Boolean to indicate if we should cleanup connections - associated to removed ips - """ - preserve_ips = preserve_ips or [] - device = ip_lib.IPDevice(device_name, namespace=namespace) - - # The LLA generated by the operating system is not known to - # Neutron, so it would be deleted if we added it to the 'previous' - # list here - default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address) - previous = {addr['cidr'] for addr in device.addr.list( - filters=['permanent'])} - {default_ipv6_lla} - - # add new addresses - for ip_cidr in ip_cidrs: - - net = netaddr.IPNetwork(ip_cidr) - # Convert to compact IPv6 address because the return values of - # "ip addr list" are compact. - if net.version == 6: - ip_cidr = str(net) - if ip_cidr in previous: - previous.remove(ip_cidr) - continue - - device.addr.add(ip_cidr) - - # clean up any old addresses - for ip_cidr in previous: - if ip_cidr not in preserve_ips: - if clean_connections: - device.delete_addr_and_conntrack_state(ip_cidr) - else: - device.addr.delete(ip_cidr) - - def init_router_port(self, - device_name, - ip_cidrs, - namespace, - preserve_ips=None, - extra_subnets=None, - clean_connections=False): - """Set the L3 settings for a router interface using data from the port. - - ip_cidrs: list of 'X.X.X.X/YY' strings - preserve_ips: list of ip cidrs that should not be removed from device - clean_connections: Boolean to indicate if we should cleanup connections - associated to removed ips - extra_subnets: An iterable of cidrs to add as routes without address - """ - LOG.debug("init_router_port: device_name(%s), namespace(%s)", - device_name, namespace) - self.init_l3(device_name=device_name, - ip_cidrs=ip_cidrs, - namespace=namespace, - preserve_ips=preserve_ips or [], - clean_connections=clean_connections) - - device = ip_lib.IPDevice(device_name, namespace=namespace) - - # Manage on-link routes (routes without an associated address) - new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or []) - - v4_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_4) - v6_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_6) - existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink) - - for route in new_onlink_cidrs - existing_onlink_cidrs: - LOG.debug("adding onlink route(%s)", route) - device.route.add_onlink_route(route) - for route in (existing_onlink_cidrs - new_onlink_cidrs - - set(preserve_ips or [])): - LOG.debug("deleting onlink route(%s)", route) - device.route.delete_onlink_route(route) - - def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'): - device = ip_lib.IPDevice(device_name, - namespace=namespace) - net = netaddr.IPNetwork(v6addr) - device.addr.add(str(net), scope) - - def delete_ipv6_addr(self, device_name, v6addr, namespace): - device = ip_lib.IPDevice(device_name, - namespace=namespace) - device.delete_addr_and_conntrack_state(v6addr) - - def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace): - """Delete the first listed IPv6 address that falls within a given - prefix. 
- """ - device = ip_lib.IPDevice(device_name, namespace=namespace) - net = netaddr.IPNetwork(prefix) - for address in device.addr.list(scope='global', filters=['permanent']): - ip_address = netaddr.IPNetwork(address['cidr']) - if ip_address in net: - device.delete_addr_and_conntrack_state(address['cidr']) - break - - def get_ipv6_llas(self, device_name, namespace): - device = ip_lib.IPDevice(device_name, - namespace=namespace) - - return device.addr.list(scope='link', ip_version=6) - - def check_bridge_exists(self, bridge): - if not ip_lib.device_exists(bridge): - raise exceptions.BridgeDoesNotExist(bridge=bridge) - - def get_device_name(self, port): - return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] - - @staticmethod - def configure_ipv6_ra(namespace, dev_name): - """Configure acceptance of IPv6 route advertisements on an intf.""" - # Learn the default router's IP address via RAs - ip_lib.IPWrapper(namespace=namespace).netns.execute( - ['sysctl', '-w', 'net.ipv6.conf.%s.accept_ra=2' % dev_name]) - - @abc.abstractmethod - def plug_new(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - """Plug in the interface only for new devices that don't exist yet.""" - - def plug(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - if not ip_lib.device_exists(device_name, - namespace=namespace): - try: - self.plug_new(network_id, port_id, device_name, mac_address, - bridge, namespace, prefix, mtu) - except TypeError: - self.plug_new(network_id, port_id, device_name, mac_address, - bridge, namespace, prefix) - else: - LOG.info(_LI("Device %s already exists"), device_name) - - @abc.abstractmethod - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - - @property - def bridged(self): - """Whether the DHCP port is bridged to the VM TAP interfaces. - - When the DHCP port is bridged to the TAP interfaces for the - VMs for which it is providing DHCP service - as is the case - for most Neutron network implementations - the DHCP server - only needs to listen on the DHCP port, and will still receive - DHCP requests from all the relevant VMs. - - If the DHCP port is not bridged to the relevant VM TAP - interfaces, the DHCP server needs to listen explicitly on - those TAP interfaces, and to treat those as aliases of the - DHCP port where the IP subnet is defined. - """ - return True - - -class NullDriver(LinuxInterfaceDriver): - def plug_new(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - pass - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - pass - -class NSDriver(LinuxInterfaceDriver): - """Device independent driver enabling creation of a non device specific - interface in network spaces. Attachment to the device is not performed. - """ - MAX_TIME_FOR_DEVICE_EXISTENCE = 30 - - @classmethod - def _device_is_created_in_time(cls, device_name): - """See if device is created, within time limit.""" - attempt = 0 - while attempt < NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE: - if ip_lib.device_exists(device_name): - return True - attempt += 1 - eventlet.sleep(1) - LOG.error(_LE("Device %(dev)s was not created in %(time)d seconds"), - {'dev': device_name, - 'time': NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE}) - return False - - def _configure_mtu(self, ns_dev, mtu=None): - # Need to set MTU, after added to namespace. 
See review - # https://review.openstack.org/327651 - try: - # Note: network_device_mtu will be deprecated in future - mtu_override = self.conf.network_device_mtu - except cfg.NoSuchOptError: - LOG.warning(_LW("Config setting for MTU deprecated - any " - "override will be ignored.")) - mtu_override = None - if mtu_override: - mtu = mtu_override - LOG.debug("Overriding MTU to %d", mtu) - if mtu: - ns_dev.link.set_mtu(mtu) - else: - LOG.debug("No MTU provided - skipping setting value") - - def plug(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - - # Overriding this, we still want to add an existing device into the - # namespace. - self.plug_new(network_id, port_id, device_name, mac_address, - bridge, namespace, prefix, mtu) - - def plug_new(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - - ip = ip_lib.IPWrapper() - ns_dev = ip.device(device_name) - - LOG.debug("Plugging dev: '%s' into namespace: '%s' ", - device_name, namespace) - - # Wait for device creation - if not self._device_is_created_in_time(device_name): - return - - ns_dev.link.set_address(mac_address) - - if namespace: - namespace_obj = ip.ensure_namespace(namespace) - namespace_obj.add_device_to_namespace(ns_dev) - - self._configure_mtu(ns_dev, mtu) - - ns_dev.link.set_up() - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - # Device removal is done externally. Just remove the namespace - LOG.debug("Removing namespace: '%s'", namespace) - ip_lib.IPWrapper(namespace).garbage_collect_namespace() - - -class OVSInterfaceDriver(LinuxInterfaceDriver): - """Driver for creating an internal interface on an OVS bridge.""" - - DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX - - def __init__(self, conf): - super(OVSInterfaceDriver, self).__init__(conf) - if self.conf.ovs_use_veth: - self.DEV_NAME_PREFIX = 'ns-' - - def _get_tap_name(self, dev_name, prefix=None): - if self.conf.ovs_use_veth: - dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, - n_const.TAP_DEVICE_PREFIX) - return dev_name - - def _ovs_add_port(self, bridge, device_name, port_id, mac_address, - internal=True): - attrs = [('external_ids', {'iface-id': port_id, - 'iface-status': 'active', - 'attached-mac': mac_address})] - if internal: - attrs.insert(0, ('type', 'internal')) - - ovs = ovs_lib.OVSBridge(bridge) - ovs.replace_port(device_name, *attrs) - - def plug_new(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - """Plug in the interface.""" - if not bridge: - bridge = self.conf.ovs_integration_bridge - - self.check_bridge_exists(bridge) - - ip = ip_lib.IPWrapper() - tap_name = self._get_tap_name(device_name, prefix) - - if self.conf.ovs_use_veth: - # Create ns_dev in a namespace if one is configured. - root_dev, ns_dev = ip.add_veth(tap_name, - device_name, - namespace2=namespace) - root_dev.disable_ipv6() - else: - ns_dev = ip.device(device_name) - - internal = not self.conf.ovs_use_veth - self._ovs_add_port(bridge, tap_name, port_id, mac_address, - internal=internal) - - ns_dev.link.set_address(mac_address) - - # Add an interface created by ovs to the namespace. 
- if not self.conf.ovs_use_veth and namespace: - namespace_obj = ip.ensure_namespace(namespace) - namespace_obj.add_device_to_namespace(ns_dev) - - # NOTE(ihrachys): the order here is significant: we must set MTU after - # the device is moved into a namespace, otherwise OVS bridge does not - # allow to set MTU that is higher than the least of all device MTUs on - # the bridge - mtu = self.conf.network_device_mtu or mtu - if mtu: - ns_dev.link.set_mtu(mtu) - if self.conf.ovs_use_veth: - root_dev.link.set_mtu(mtu) - else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) - - ns_dev.link.set_up() - if self.conf.ovs_use_veth: - root_dev.link.set_up() - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - if not bridge: - bridge = self.conf.ovs_integration_bridge - - tap_name = self._get_tap_name(device_name, prefix) - self.check_bridge_exists(bridge) - ovs = ovs_lib.OVSBridge(bridge) - - try: - ovs.delete_port(tap_name) - if self.conf.ovs_use_veth: - device = ip_lib.IPDevice(device_name, namespace=namespace) - device.link.delete() - LOG.debug("Unplugged interface '%s'", device_name) - except RuntimeError: - LOG.error(_LE("Failed unplugging interface '%s'"), - device_name) - - -class IVSInterfaceDriver(LinuxInterfaceDriver): - """Driver for creating an internal interface on an IVS bridge.""" - - DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX - - def __init__(self, conf): - super(IVSInterfaceDriver, self).__init__(conf) - self.DEV_NAME_PREFIX = 'ns-' - - def _get_tap_name(self, dev_name, prefix=None): - dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, - n_const.TAP_DEVICE_PREFIX) - return dev_name - - def _ivs_add_port(self, device_name, port_id, mac_address): - cmd = ['ivs-ctl', 'add-port', device_name] - utils.execute(cmd, run_as_root=True) - - def plug_new(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - """Plug in the interface.""" - ip = ip_lib.IPWrapper() - tap_name = self._get_tap_name(device_name, prefix) - - root_dev, ns_dev = ip.add_veth(tap_name, device_name) - root_dev.disable_ipv6() - - self._ivs_add_port(tap_name, port_id, mac_address) - - ns_dev = ip.device(device_name) - ns_dev.link.set_address(mac_address) - - mtu = self.conf.network_device_mtu or mtu - if mtu: - ns_dev.link.set_mtu(mtu) - root_dev.link.set_mtu(mtu) - else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) - - if namespace: - namespace_obj = ip.ensure_namespace(namespace) - namespace_obj.add_device_to_namespace(ns_dev) - - ns_dev.link.set_up() - root_dev.link.set_up() - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - tap_name = self._get_tap_name(device_name, prefix) - try: - cmd = ['ivs-ctl', 'del-port', tap_name] - utils.execute(cmd, run_as_root=True) - device = ip_lib.IPDevice(device_name, namespace=namespace) - device.link.delete() - LOG.debug("Unplugged interface '%s'", device_name) - except RuntimeError: - LOG.error(_LE("Failed unplugging interface '%s'"), - device_name) - - -class BridgeInterfaceDriver(LinuxInterfaceDriver): - """Driver for creating bridge interfaces.""" - - DEV_NAME_PREFIX = 'ns-' - - def plug_new(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None, mtu=None): - """Plugin the interface.""" - ip = ip_lib.IPWrapper() - - # Enable agent to define the prefix - tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX, - 
n_const.TAP_DEVICE_PREFIX) - # Create ns_veth in a namespace if one is configured. - root_veth, ns_veth = ip.add_veth(tap_name, device_name, - namespace2=namespace) - root_veth.disable_ipv6() - ns_veth.link.set_address(mac_address) - - mtu = self.conf.network_device_mtu or mtu - if mtu: - root_veth.link.set_mtu(mtu) - ns_veth.link.set_mtu(mtu) - else: - LOG.warning(_LW("No MTU configured for port %s"), port_id) - - root_veth.link.set_up() - ns_veth.link.set_up() - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - device = ip_lib.IPDevice(device_name, namespace=namespace) - try: - device.link.delete() - LOG.debug("Unplugged interface '%s'", device_name) - except RuntimeError: - LOG.error(_LE("Failed unplugging interface '%s'"), - device_name) diff --git a/build/neutron/agent/l3/namespaces.py b/build/neutron/agent/l3/namespaces.py deleted file mode 100644 index aa282052..00000000 --- a/build/neutron/agent/l3/namespaces.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import functools - -from oslo_log import log as logging -from oslo_utils import excutils - -from neutron.agent.linux.interface import OVSInterfaceDriver -from neutron._i18n import _LE, _LW -from neutron.agent.linux import ip_lib - -LOG = logging.getLogger(__name__) - -NS_PREFIX = 'qrouter-' -INTERNAL_DEV_PREFIX = 'qr-' -EXTERNAL_DEV_PREFIX = 'qg-' -# TODO(Carl) It is odd that this file needs this. It is a dvr detail. -ROUTER_2_FIP_DEV_PREFIX = 'rfp-' - - -def build_ns_name(prefix, identifier): - """Builds a namespace name from the given prefix and identifier - - :param prefix: The prefix which must end with '-' for legacy reasons - :param identifier: The id associated with the namespace - """ - return prefix + identifier - - -def get_prefix_from_ns_name(ns_name): - """Parses prefix from prefix-identifier - - :param ns_name: The name of a namespace - :returns: The prefix ending with a '-' or None if there is no '-' - """ - dash_index = ns_name.find('-') - if 0 <= dash_index: - return ns_name[:dash_index + 1] - - -def get_id_from_ns_name(ns_name): - """Parses identifier from prefix-identifier - - :param ns_name: The name of a namespace - :returns: Identifier or None if there is no - to end the prefix - """ - dash_index = ns_name.find('-') - if 0 <= dash_index: - return ns_name[dash_index + 1:] - - -def check_ns_existence(f): - @functools.wraps(f) - def wrapped(self, *args, **kwargs): - if not self.exists(): - LOG.warning(_LW('Namespace %(name)s does not exists. 
Skipping ' - '%(func)s'), - {'name': self.name, 'func': f.__name__}) - return - try: - return f(self, *args, **kwargs) - except RuntimeError: - with excutils.save_and_reraise_exception() as ctx: - if not self.exists(): - LOG.debug('Namespace %(name)s was concurrently deleted', - self.name) - ctx.reraise = False - return wrapped - - -class Namespace(object): - - def __init__(self, name, agent_conf, driver, use_ipv6): - self.name = name - self.ip_wrapper_root = ip_lib.IPWrapper() - self.agent_conf = agent_conf - self.driver = driver - self.use_ipv6 = use_ipv6 - - def create(self): - ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name) - cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1'] - ip_wrapper.netns.execute(cmd) - if self.use_ipv6: - cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1'] - ip_wrapper.netns.execute(cmd) - - def delete(self): - try: - self.ip_wrapper_root.netns.delete(self.name) - except RuntimeError: - msg = _LE('Failed trying to delete namespace: %s') - LOG.exception(msg, self.name) - - def exists(self): - return self.ip_wrapper_root.netns.exists(self.name) - - -class RouterNamespace(Namespace): - - def __init__(self, router_id, agent_conf, driver, use_ipv6, ovs_driver): - self.router_id = router_id - self.ovs_driver = ovs_driver - name = self._get_ns_name(router_id) - super(RouterNamespace, self).__init__( - name, agent_conf, driver, use_ipv6) - - @classmethod - def _get_ns_name(cls, router_id): - return build_ns_name(NS_PREFIX, router_id) - - @check_ns_existence - def delete(self): - ns_ip = ip_lib.IPWrapper(namespace=self.name) - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(INTERNAL_DEV_PREFIX): - # device is on default bridge - self.driver.unplug(d.name, namespace=self.name, - prefix=INTERNAL_DEV_PREFIX) - elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX): - ns_ip.del_veth(d.name) - elif d.name.startswith(EXTERNAL_DEV_PREFIX): - self.ovs_driver.unplug( - d.name, - bridge=self.agent_conf.external_network_bridge, - namespace=self.name, - prefix=EXTERNAL_DEV_PREFIX) - - super(RouterNamespace, self).delete() diff --git a/build/neutron/agent/l3/router_info.py b/build/neutron/agent/l3/router_info.py deleted file mode 100644 index 0ddd1db5..00000000 --- a/build/neutron/agent/l3/router_info.py +++ /dev/null @@ -1,996 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import netaddr -from oslo_log import log as logging - -from neutron._i18n import _, _LE, _LW -from neutron.agent.l3 import namespaces -from neutron.agent.linux import ip_lib -from neutron.agent.linux import iptables_manager -from neutron.agent.linux import ra -from neutron.common import constants as l3_constants -from neutron.common import exceptions as n_exc -from neutron.common import ipv6_utils -from neutron.common import utils as common_utils -from neutron.ipam import utils as ipam_utils -from neutron.agent.linux.interface import OVSInterfaceDriver - -LOG = logging.getLogger(__name__) -INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX -EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX - -FLOATINGIP_STATUS_NOCHANGE = object() -ADDRESS_SCOPE_MARK_MASK = "0xffff0000" -ADDRESS_SCOPE_MARK_ID_MIN = 1024 -ADDRESS_SCOPE_MARK_ID_MAX = 2048 -DEFAULT_ADDRESS_SCOPE = "noscope" - - -class RouterInfo(object): - - def __init__(self, - router_id, - router, - agent_conf, - interface_driver, - use_ipv6=False): - self.ovs_driver = OVSInterfaceDriver(agent_conf) - self.router_id = router_id - self.ex_gw_port = None - self._snat_enabled = None - self.fip_map = {} - self.internal_ports = [] - self.floating_ips = set() - # Invoke the setter for establishing initial SNAT action - self.router = router - self.use_ipv6 = use_ipv6 - ns = namespaces.RouterNamespace( - router_id, agent_conf, interface_driver, use_ipv6, self.ovs_driver) - self.router_namespace = ns - self.ns_name = ns.name - self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN, - ADDRESS_SCOPE_MARK_ID_MAX)) - self._address_scope_to_mark_id = { - DEFAULT_ADDRESS_SCOPE: self.available_mark_ids.pop()} - self.iptables_manager = iptables_manager.IptablesManager( - use_ipv6=use_ipv6, - namespace=self.ns_name) - self.routes = [] - self.agent_conf = agent_conf - self.driver = interface_driver - # radvd is a neutron.agent.linux.ra.DaemonMonitor - self.radvd = None - - def initialize(self, process_monitor): - """Initialize the router on the system. - - This differs from __init__ in that this method actually affects the - system creating namespaces, starting processes, etc. The other merely - initializes the python object. This separates in-memory object - initialization from methods that actually go do stuff to the system. - - :param process_monitor: The agent's process monitor instance. 
- """ - self.process_monitor = process_monitor - self.radvd = ra.DaemonMonitor(self.router_id, - self.ns_name, - process_monitor, - self.get_internal_device_name, - self.agent_conf) - - self.router_namespace.create() - - @property - def router(self): - return self._router - - @router.setter - def router(self, value): - self._router = value - if not self._router: - return - # enable_snat by default if it wasn't specified by plugin - self._snat_enabled = self._router.get('enable_snat', True) - - def get_internal_device_name(self, port_id): - return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_external_device_name(self, port_id): - return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_external_device_interface_name(self, ex_gw_port): - return self.get_external_device_name(ex_gw_port['id']) - - def _update_routing_table(self, operation, route, namespace): - cmd = ['ip', 'route', operation, 'to', route['destination'], - 'via', route['nexthop']] - ip_wrapper = ip_lib.IPWrapper(namespace=namespace) - ip_wrapper.netns.execute(cmd, check_exit_code=False) - - def update_routing_table(self, operation, route): - self._update_routing_table(operation, route, self.ns_name) - - def routes_updated(self, old_routes, new_routes): - adds, removes = common_utils.diff_list_of_dict(old_routes, - new_routes) - for route in adds: - LOG.debug("Added route entry is '%s'", route) - # remove replaced route from deleted route - for del_route in removes: - if route['destination'] == del_route['destination']: - removes.remove(del_route) - #replace success even if there is no existing route - self.update_routing_table('replace', route) - for route in removes: - LOG.debug("Removed route entry is '%s'", route) - self.update_routing_table('delete', route) - - def get_ex_gw_port(self): - return self.router.get('gw_port') - - def get_floating_ips(self): - """Filter Floating IPs to be hosted on this agent.""" - return self.router.get(l3_constants.FLOATINGIP_KEY, []) - - def floating_forward_rules(self, floating_ip, fixed_ip): - return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % - (floating_ip, fixed_ip)), - ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' % - (floating_ip, fixed_ip)), - ('float-snat', '-s %s/32 -j SNAT --to-source %s' % - (fixed_ip, floating_ip))] - - def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): - mark_traffic_to_floating_ip = ( - 'floatingip', '-d %s -j MARK --set-xmark %s' % ( - floating_ip, internal_mark)) - mark_traffic_from_fixed_ip = ( - 'FORWARD', '-s %s -j $float-snat' % fixed_ip) - return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] - - def get_address_scope_mark_mask(self, address_scope=None): - if not address_scope: - address_scope = DEFAULT_ADDRESS_SCOPE - - if address_scope not in self._address_scope_to_mark_id: - self._address_scope_to_mark_id[address_scope] = ( - self.available_mark_ids.pop()) - - mark_id = self._address_scope_to_mark_id[address_scope] - # NOTE: Address scopes use only the upper 16 bits of the 32 fwmark - return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK) - - def get_port_address_scope_mark(self, port): - """Get the IP version 4 and 6 address scope mark for the port - - :param port: A port dict from the RPC call - :returns: A dict mapping the address family to the address scope mark - """ - port_scopes = port.get('address_scopes', {}) - - address_scope_mark_masks = ( - (int(k), self.get_address_scope_mark_mask(v)) - for k, v in port_scopes.items()) - return 
collections.defaultdict(self.get_address_scope_mark_mask, - address_scope_mark_masks) - - def process_floating_ip_nat_rules(self): - """Configure NAT rules for the router's floating IPs. - - Configures iptables rules for the floating ips of the given router - """ - # Clear out all iptables rules for floating ips - self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') - - floating_ips = self.get_floating_ips() - # Loop once to ensure that floating ips are configured. - for fip in floating_ips: - # Rebuild iptables rules for the floating ip. - fixed = fip['fixed_ip_address'] - fip_ip = fip['floating_ip_address'] - for chain, rule in self.floating_forward_rules(fip_ip, fixed): - self.iptables_manager.ipv4['nat'].add_rule(chain, rule, - tag='floating_ip') - - self.iptables_manager.apply() - - def process_floating_ip_address_scope_rules(self): - """Configure address scope related iptables rules for the router's - floating IPs. - """ - - # Clear out all iptables rules for floating ips - self.iptables_manager.ipv4['mangle'].clear_rules_by_tag('floating_ip') - all_floating_ips = self.get_floating_ips() - ext_scope = self._get_external_address_scope() - # Filter out the floating ips that have fixed ip in the same address - # scope. Because the packets for them will always be in one address - # scope, no need to manipulate MARK/CONNMARK for them. - floating_ips = [fip for fip in all_floating_ips - if fip.get('fixed_ip_address_scope') != ext_scope] - if floating_ips: - ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) - ports_scopemark = self._get_address_scope_mark() - devices_in_ext_scope = { - device for device, mark - in ports_scopemark[l3_constants.IP_VERSION_4].items() - if mark == ext_scope_mark} - # Add address scope for floatingip egress - for device in devices_in_ext_scope: - self.iptables_manager.ipv4['mangle'].add_rule( - 'float-snat', - '-o %s -j MARK --set-xmark %s' - % (device, ext_scope_mark), - tag='floating_ip') - - # Loop once to ensure that floating ips are configured. - for fip in floating_ips: - # Rebuild iptables rules for the floating ip. - fip_ip = fip['floating_ip_address'] - # Send the floating ip traffic to the right address scope - fixed_ip = fip['fixed_ip_address'] - fixed_scope = fip.get('fixed_ip_address_scope') - internal_mark = self.get_address_scope_mark_mask(fixed_scope) - mangle_rules = self.floating_mangle_rules( - fip_ip, fixed_ip, internal_mark) - for chain, rule in mangle_rules: - self.iptables_manager.ipv4['mangle'].add_rule( - chain, rule, tag='floating_ip') - - def process_snat_dnat_for_fip(self): - try: - self.process_floating_ip_nat_rules() - except Exception: - # TODO(salv-orlando): Less broad catching - msg = _('L3 agent failure to setup NAT for floating IPs') - LOG.exception(msg) - raise n_exc.FloatingIpSetupException(msg) - - def _add_fip_addr_to_device(self, fip, device): - """Configures the floating ip address on the device. 
- """ - try: - ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) - device.addr.add(ip_cidr) - return True - except RuntimeError: - # any exception occurred here should cause the floating IP - # to be set in error state - LOG.warning(_LW("Unable to configure IP address for " - "floating IP: %s"), fip['id']) - - def add_floating_ip(self, fip, interface_name, device): - raise NotImplementedError() - - def remove_floating_ip(self, device, ip_cidr): - device.delete_addr_and_conntrack_state(ip_cidr) - - def move_floating_ip(self, fip): - return l3_constants.FLOATINGIP_STATUS_ACTIVE - - def remove_external_gateway_ip(self, device, ip_cidr): - device.delete_addr_and_conntrack_state(ip_cidr) - - def get_router_cidrs(self, device): - return set([addr['cidr'] for addr in device.addr.list()]) - - def process_floating_ip_addresses(self, interface_name): - """Configure IP addresses on router's external gateway interface. - - Ensures addresses for existing floating IPs and cleans up - those that should not longer be configured. - """ - - fip_statuses = {} - if interface_name is None: - LOG.debug('No Interface for floating IPs router: %s', - self.router['id']) - return fip_statuses - - device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) - existing_cidrs = self.get_router_cidrs(device) - new_cidrs = set() - - floating_ips = self.get_floating_ips() - # Loop once to ensure that floating ips are configured. - for fip in floating_ips: - fip_ip = fip['floating_ip_address'] - ip_cidr = common_utils.ip_to_cidr(fip_ip) - new_cidrs.add(ip_cidr) - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE - if ip_cidr not in existing_cidrs: - fip_statuses[fip['id']] = self.add_floating_ip( - fip, interface_name, device) - LOG.debug('Floating ip %(id)s added, status %(status)s', - {'id': fip['id'], - 'status': fip_statuses.get(fip['id'])}) - elif (fip_ip in self.fip_map and - self.fip_map[fip_ip] != fip['fixed_ip_address']): - LOG.debug("Floating IP was moved from fixed IP " - "%(old)s to %(new)s", - {'old': self.fip_map[fip_ip], - 'new': fip['fixed_ip_address']}) - fip_statuses[fip['id']] = self.move_floating_ip(fip) - elif fip_statuses[fip['id']] == fip['status']: - # mark the status as not changed. 
we can't remove it because - # that's how the caller determines that it was removed - fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE - fips_to_remove = ( - ip_cidr for ip_cidr in existing_cidrs - new_cidrs - if common_utils.is_cidr_host(ip_cidr)) - for ip_cidr in fips_to_remove: - LOG.debug("Removing floating ip %s from interface %s in " - "namespace %s", ip_cidr, interface_name, self.ns_name) - self.remove_floating_ip(device, ip_cidr) - - return fip_statuses - - def configure_fip_addresses(self, interface_name): - try: - return self.process_floating_ip_addresses(interface_name) - except Exception: - # TODO(salv-orlando): Less broad catching - msg = _('L3 agent failure to setup floating IPs') - LOG.exception(msg) - raise n_exc.FloatingIpSetupException(msg) - - def put_fips_in_error_state(self): - fip_statuses = {} - for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []): - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR - return fip_statuses - - def delete(self, agent): - self.router['gw_port'] = None - self.router[l3_constants.INTERFACE_KEY] = [] - self.router[l3_constants.FLOATINGIP_KEY] = [] - self.process_delete(agent) - self.disable_radvd() - self.router_namespace.delete() - - def _internal_network_updated(self, port, subnet_id, prefix, old_prefix, - updated_cidrs): - interface_name = self.get_internal_device_name(port['id']) - if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX: - fixed_ips = port['fixed_ips'] - for fixed_ip in fixed_ips: - if fixed_ip['subnet_id'] == subnet_id: - v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'], - fixed_ip.get('prefixlen')) - if v6addr not in updated_cidrs: - self.driver.add_ipv6_addr(interface_name, v6addr, - self.ns_name) - else: - self.driver.delete_ipv6_addr_with_prefix(interface_name, - old_prefix, - self.ns_name) - - def _internal_network_added(self, ns_name, network_id, port_id, - fixed_ips, mac_address, - interface_name, prefix, mtu=None): - LOG.debug("adding internal network: prefix(%s), port(%s)", - prefix, port_id) - self.driver.plug(network_id, port_id, interface_name, mac_address, - namespace=ns_name, - prefix=prefix, mtu=mtu) - - ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips) - self.driver.init_router_port( - interface_name, ip_cidrs, namespace=ns_name) - for fixed_ip in fixed_ips: - ip_lib.send_ip_addr_adv_notif(ns_name, - interface_name, - fixed_ip['ip_address'], - self.agent_conf) - - def internal_network_added(self, port): - network_id = port['network_id'] - port_id = port['id'] - fixed_ips = port['fixed_ips'] - mac_address = port['mac_address'] - - interface_name = self.get_internal_device_name(port_id) - - self._internal_network_added(self.ns_name, - network_id, - port_id, - fixed_ips, - mac_address, - interface_name, - INTERNAL_DEV_PREFIX, - mtu=port.get('mtu')) - - def internal_network_removed(self, port): - interface_name = self.get_internal_device_name(port['id']) - LOG.debug("removing internal network: port(%s) interface(%s)", - port['id'], interface_name) - if ip_lib.device_exists(interface_name, namespace=self.ns_name): - self.driver.unplug(interface_name, namespace=self.ns_name, - prefix=INTERNAL_DEV_PREFIX) - - def _get_existing_devices(self): - ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name) - ip_devs = ip_wrapper.get_devices(exclude_loopback=True) - return [ip_dev.name for ip_dev in ip_devs] - - @staticmethod - def _get_updated_ports(existing_ports, current_ports): - updated_ports = dict() - current_ports_dict = {p['id']: p for p in current_ports} - for existing_port in 
existing_ports: - current_port = current_ports_dict.get(existing_port['id']) - if current_port: - if (sorted(existing_port['fixed_ips'], - key=common_utils.safe_sort_key) != - sorted(current_port['fixed_ips'], - key=common_utils.safe_sort_key)): - updated_ports[current_port['id']] = current_port - return updated_ports - - @staticmethod - def _port_has_ipv6_subnet(port): - if 'subnets' in port: - for subnet in port['subnets']: - if (netaddr.IPNetwork(subnet['cidr']).version == 6 and - subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX): - return True - - def enable_radvd(self, internal_ports=None): - LOG.debug('Spawning radvd daemon in router device: %s', self.router_id) - if not internal_ports: - internal_ports = self.internal_ports - self.radvd.enable(internal_ports) - - def disable_radvd(self): - LOG.debug('Terminating radvd daemon in router device: %s', - self.router_id) - self.radvd.disable() - - def internal_network_updated(self, interface_name, ip_cidrs): - self.driver.init_router_port( - interface_name, - ip_cidrs=ip_cidrs, - namespace=self.ns_name) - - def address_scope_mangle_rule(self, device_name, mark_mask): - return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask) - - def address_scope_filter_rule(self, device_name, mark_mask): - return '-o %s -m mark ! --mark %s -j DROP' % ( - device_name, mark_mask) - - def _process_internal_ports(self, pd): - existing_port_ids = set(p['id'] for p in self.internal_ports) - - internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) - current_port_ids = set(p['id'] for p in internal_ports - if p['admin_state_up']) - - new_port_ids = current_port_ids - existing_port_ids - new_ports = [p for p in internal_ports if p['id'] in new_port_ids] - old_ports = [p for p in self.internal_ports - if p['id'] not in current_port_ids] - updated_ports = self._get_updated_ports(self.internal_ports, - internal_ports) - - enable_ra = False - for p in new_ports: - self.internal_network_added(p) - LOG.debug("appending port %s to internal_ports cache", p) - self.internal_ports.append(p) - enable_ra = enable_ra or self._port_has_ipv6_subnet(p) - for subnet in p['subnets']: - if ipv6_utils.is_ipv6_pd_enabled(subnet): - interface_name = self.get_internal_device_name(p['id']) - pd.enable_subnet(self.router_id, subnet['id'], - subnet['cidr'], - interface_name, p['mac_address']) - - for p in old_ports: - self.internal_network_removed(p) - LOG.debug("removing port %s from internal_ports cache", p) - self.internal_ports.remove(p) - enable_ra = enable_ra or self._port_has_ipv6_subnet(p) - for subnet in p['subnets']: - if ipv6_utils.is_ipv6_pd_enabled(subnet): - pd.disable_subnet(self.router_id, subnet['id']) - - updated_cidrs = [] - if updated_ports: - for index, p in enumerate(internal_ports): - if not updated_ports.get(p['id']): - continue - self.internal_ports[index] = updated_ports[p['id']] - interface_name = self.get_internal_device_name(p['id']) - ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) - LOG.debug("updating internal network for port %s", p) - updated_cidrs += ip_cidrs - self.internal_network_updated(interface_name, ip_cidrs) - enable_ra = enable_ra or self._port_has_ipv6_subnet(p) - - # Check if there is any pd prefix update - for p in internal_ports: - if p['id'] in (set(current_port_ids) & set(existing_port_ids)): - for subnet in p.get('subnets', []): - if ipv6_utils.is_ipv6_pd_enabled(subnet): - old_prefix = pd.update_subnet(self.router_id, - subnet['id'], - subnet['cidr']) - if old_prefix: - self._internal_network_updated(p, 
subnet['id'], - subnet['cidr'], - old_prefix, - updated_cidrs) - enable_ra = True - - # Enable RA - if enable_ra: - self.enable_radvd(internal_ports) - - existing_devices = self._get_existing_devices() - current_internal_devs = set(n for n in existing_devices - if n.startswith(INTERNAL_DEV_PREFIX)) - current_port_devs = set(self.get_internal_device_name(port_id) - for port_id in current_port_ids) - stale_devs = current_internal_devs - current_port_devs - for stale_dev in stale_devs: - LOG.debug('Deleting stale internal router device: %s', - stale_dev) - pd.remove_stale_ri_ifname(self.router_id, stale_dev) - self.driver.unplug(stale_dev, - namespace=self.ns_name, - prefix=INTERNAL_DEV_PREFIX) - - def _list_floating_ip_cidrs(self): - # Compute a list of addresses this router is supposed to have. - # This avoids unnecessarily removing those addresses and - # causing a momentarily network outage. - floating_ips = self.get_floating_ips() - return [common_utils.ip_to_cidr(ip['floating_ip_address']) - for ip in floating_ips] - - def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name): - self.ovs_driver.plug(ex_gw_port['network_id'], - ex_gw_port['id'], - interface_name, - ex_gw_port['mac_address'], - bridge=self.agent_conf.external_network_bridge, - namespace=ns_name, - prefix=EXTERNAL_DEV_PREFIX, - mtu=ex_gw_port.get('mtu')) - - def _get_external_gw_ips(self, ex_gw_port): - gateway_ips = [] - if 'subnets' in ex_gw_port: - gateway_ips = [subnet['gateway_ip'] - for subnet in ex_gw_port['subnets'] - if subnet['gateway_ip']] - if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips): - # No IPv6 gateway is available, but IPv6 is enabled. - if self.agent_conf.ipv6_gateway: - # ipv6_gateway configured, use address for default route. - gateway_ips.append(self.agent_conf.ipv6_gateway) - return gateway_ips - - def _add_route_to_gw(self, ex_gw_port, device_name, - namespace, preserve_ips): - # Note: ipv6_gateway is an ipv6 LLA - # and so doesn't need a special route - for subnet in ex_gw_port.get('subnets', []): - is_gateway_not_in_subnet = (subnet['gateway_ip'] and - not ipam_utils.check_subnet_ip( - subnet['cidr'], - subnet['gateway_ip'])) - if is_gateway_not_in_subnet: - preserve_ips.append(subnet['gateway_ip']) - device = ip_lib.IPDevice(device_name, namespace=namespace) - device.route.add_route(subnet['gateway_ip'], scope='link') - - def _external_gateway_added(self, ex_gw_port, interface_name, - ns_name, preserve_ips): - LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)", - ex_gw_port, interface_name, ns_name) - self._plug_external_gateway(ex_gw_port, interface_name, ns_name) - - # Build up the interface and gateway IP addresses that - # will be added to the interface. - ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) - - gateway_ips = self._get_external_gw_ips(ex_gw_port) - enable_ra_on_gw = False - if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips): - # There is no IPv6 gw_ip, use RouterAdvt for default route. 
- enable_ra_on_gw = True - - self._add_route_to_gw(ex_gw_port, device_name=interface_name, - namespace=ns_name, preserve_ips=preserve_ips) - self.ovs_driver.init_router_port( - interface_name, - ip_cidrs, - namespace=ns_name, - extra_subnets=ex_gw_port.get('extra_subnets', []), - preserve_ips=preserve_ips, - clean_connections=True) - - device = ip_lib.IPDevice(interface_name, namespace=ns_name) - for ip in gateway_ips or []: - device.route.add_gateway(ip) - - if enable_ra_on_gw: - self.driver.configure_ipv6_ra(ns_name, interface_name) - - for fixed_ip in ex_gw_port['fixed_ips']: - ip_lib.send_ip_addr_adv_notif(ns_name, - interface_name, - fixed_ip['ip_address'], - self.agent_conf) - - def is_v6_gateway_set(self, gateway_ips): - """Check to see if list of gateway_ips has an IPv6 gateway. - """ - # Note - don't require a try-except here as all - # gateway_ips elements are valid addresses, if they exist. - return any(netaddr.IPAddress(gw_ip).version == 6 - for gw_ip in gateway_ips) - - def external_gateway_added(self, ex_gw_port, interface_name): - preserve_ips = self._list_floating_ip_cidrs() - self._external_gateway_added( - ex_gw_port, interface_name, self.ns_name, preserve_ips) - - def external_gateway_updated(self, ex_gw_port, interface_name): - preserve_ips = self._list_floating_ip_cidrs() - self._external_gateway_added( - ex_gw_port, interface_name, self.ns_name, preserve_ips) - - def external_gateway_removed(self, ex_gw_port, interface_name): - LOG.debug("External gateway removed: port(%s), interface(%s)", - ex_gw_port, interface_name) - device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) - for ip_addr in ex_gw_port['fixed_ips']: - self.remove_external_gateway_ip(device, - common_utils.ip_to_cidr( - ip_addr['ip_address'], - ip_addr['prefixlen'])) - self.ovs_driver.unplug(interface_name, - bridge=self.agent_conf.external_network_bridge, - namespace=self.ns_name, - prefix=EXTERNAL_DEV_PREFIX) - - @staticmethod - def _gateway_ports_equal(port1, port2): - return port1 == port2 - - def _process_external_gateway(self, ex_gw_port, pd): - # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port - ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or - self.ex_gw_port and self.ex_gw_port['id']) - - interface_name = None - if ex_gw_port_id: - interface_name = self.get_external_device_name(ex_gw_port_id) - if ex_gw_port: - if not self.ex_gw_port: - self.external_gateway_added(ex_gw_port, interface_name) - pd.add_gw_interface(self.router['id'], interface_name) - elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port): - self.external_gateway_updated(ex_gw_port, interface_name) - elif not ex_gw_port and self.ex_gw_port: - self.external_gateway_removed(self.ex_gw_port, interface_name) - pd.remove_gw_interface(self.router['id']) - - existing_devices = self._get_existing_devices() - stale_devs = [dev for dev in existing_devices - if dev.startswith(EXTERNAL_DEV_PREFIX) - and dev != interface_name] - for stale_dev in stale_devs: - LOG.debug('Deleting stale external router device: %s', stale_dev) - pd.remove_gw_interface(self.router['id']) - self.ovs_driver.unplug(stale_dev, - bridge=self.agent_conf.external_network_bridge, - namespace=self.ns_name, - prefix=EXTERNAL_DEV_PREFIX) - - # Process SNAT rules for external gateway - gw_port = self._router.get('gw_port') - self._handle_router_snat_rules(gw_port, interface_name) - - def _prevent_snat_for_internal_traffic_rule(self, interface_name): - return ( - 'POSTROUTING', '! -i %(interface_name)s ' - '! 
-o %(interface_name)s -m conntrack ! ' - '--ctstate DNAT -j ACCEPT' % - {'interface_name': interface_name}) - - def external_gateway_nat_fip_rules(self, ex_gw_ip, interface_name): - dont_snat_traffic_to_internal_ports_if_not_to_floating_ip = ( - self._prevent_snat_for_internal_traffic_rule(interface_name)) - # Makes replies come back through the router to reverse DNAT - ext_in_mark = self.agent_conf.external_ingress_mark - snat_internal_traffic_to_floating_ip = ( - 'snat', '-m mark ! --mark %s/%s ' - '-m conntrack --ctstate DNAT ' - '-j SNAT --to-source %s' - % (ext_in_mark, l3_constants.ROUTER_MARK_MASK, ex_gw_ip)) - return [dont_snat_traffic_to_internal_ports_if_not_to_floating_ip, - snat_internal_traffic_to_floating_ip] - - def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name): - snat_normal_external_traffic = ( - 'snat', '-o %s -j SNAT --to-source %s' % - (interface_name, ex_gw_ip)) - return [snat_normal_external_traffic] - - def external_gateway_mangle_rules(self, interface_name): - mark = self.agent_conf.external_ingress_mark - mark_packets_entering_external_gateway_port = ( - 'mark', '-i %s -j MARK --set-xmark %s/%s' % - (interface_name, mark, l3_constants.ROUTER_MARK_MASK)) - return [mark_packets_entering_external_gateway_port] - - def _empty_snat_chains(self, iptables_manager): - iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') - iptables_manager.ipv4['nat'].empty_chain('snat') - iptables_manager.ipv4['mangle'].empty_chain('mark') - iptables_manager.ipv4['mangle'].empty_chain('POSTROUTING') - - def _add_snat_rules(self, ex_gw_port, iptables_manager, - interface_name): - self.process_external_port_address_scope_routing(iptables_manager) - - if ex_gw_port: - # ex_gw_port should not be None in this case - # NAT rules are added only if ex_gw_port has an IPv4 address - for ip_addr in ex_gw_port['fixed_ips']: - ex_gw_ip = ip_addr['ip_address'] - if netaddr.IPAddress(ex_gw_ip).version == 4: - if self._snat_enabled: - rules = self.external_gateway_nat_snat_rules( - ex_gw_ip, interface_name) - for rule in rules: - iptables_manager.ipv4['nat'].add_rule(*rule) - - rules = self.external_gateway_nat_fip_rules( - ex_gw_ip, interface_name) - for rule in rules: - iptables_manager.ipv4['nat'].add_rule(*rule) - rules = self.external_gateway_mangle_rules(interface_name) - for rule in rules: - iptables_manager.ipv4['mangle'].add_rule(*rule) - - break - - def _handle_router_snat_rules(self, ex_gw_port, interface_name): - self._empty_snat_chains(self.iptables_manager) - - self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') - - self._add_snat_rules(ex_gw_port, - self.iptables_manager, - interface_name) - - def _process_external_on_delete(self, agent): - fip_statuses = {} - try: - ex_gw_port = self.get_ex_gw_port() - self._process_external_gateway(ex_gw_port, agent.pd) - if not ex_gw_port: - return - - interface_name = self.get_external_device_interface_name( - ex_gw_port) - fip_statuses = self.configure_fip_addresses(interface_name) - - except (n_exc.FloatingIpSetupException): - # All floating IPs must be put in error state - LOG.exception(_LE("Failed to process floating IPs.")) - fip_statuses = self.put_fips_in_error_state() - finally: - self.update_fip_statuses(agent, fip_statuses) - - def process_external(self, agent): - fip_statuses = {} - try: - with self.iptables_manager.defer_apply(): - ex_gw_port = self.get_ex_gw_port() - self._process_external_gateway(ex_gw_port, agent.pd) - if not ex_gw_port: - return - - # Process SNAT/DNAT rules and addresses for 
floating IPs - self.process_snat_dnat_for_fip() - - # Once NAT rules for floating IPs are safely in place - # configure their addresses on the external gateway port - interface_name = self.get_external_device_interface_name( - ex_gw_port) - fip_statuses = self.configure_fip_addresses(interface_name) - - except (n_exc.FloatingIpSetupException, - n_exc.IpTablesApplyException): - # All floating IPs must be put in error state - LOG.exception(_LE("Failed to process floating IPs.")) - fip_statuses = self.put_fips_in_error_state() - finally: - self.update_fip_statuses(agent, fip_statuses) - - def update_fip_statuses(self, agent, fip_statuses): - # Identify floating IPs which were disabled - existing_floating_ips = self.floating_ips - self.floating_ips = set(fip_statuses.keys()) - for fip_id in existing_floating_ips - self.floating_ips: - fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN - # filter out statuses that didn't change - fip_statuses = {f: stat for f, stat in fip_statuses.items() - if stat != FLOATINGIP_STATUS_NOCHANGE} - if not fip_statuses: - return - LOG.debug('Sending floating ip statuses: %s', fip_statuses) - # Update floating IP status on the neutron server - agent.plugin_rpc.update_floatingip_statuses( - agent.context, self.router_id, fip_statuses) - - def _get_port_devicename_scopemark(self, ports, name_generator): - devicename_scopemark = {l3_constants.IP_VERSION_4: dict(), - l3_constants.IP_VERSION_6: dict()} - for p in ports: - device_name = name_generator(p['id']) - ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) - port_as_marks = self.get_port_address_scope_mark(p) - for ip_version in {ip_lib.get_ip_version(cidr) - for cidr in ip_cidrs}: - devicename_scopemark[ip_version][device_name] = ( - port_as_marks[ip_version]) - - return devicename_scopemark - - def _get_address_scope_mark(self): - # Prepare address scope iptables rule for internal ports - internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) - ports_scopemark = self._get_port_devicename_scopemark( - internal_ports, self.get_internal_device_name) - - # Prepare address scope iptables rule for external port - external_port = self.get_ex_gw_port() - if external_port: - external_port_scopemark = self._get_port_devicename_scopemark( - [external_port], self.get_external_device_name) - for ip_version in (l3_constants.IP_VERSION_4, - l3_constants.IP_VERSION_6): - ports_scopemark[ip_version].update( - external_port_scopemark[ip_version]) - return ports_scopemark - - def _add_address_scope_mark(self, iptables_manager, ports_scopemark): - external_device_name = None - external_port = self.get_ex_gw_port() - if external_port: - external_device_name = self.get_external_device_name( - external_port['id']) - - # Process address scope iptables rules - for ip_version in (l3_constants.IP_VERSION_4, - l3_constants.IP_VERSION_6): - scopemarks = ports_scopemark[ip_version] - iptables = iptables_manager.get_tables(ip_version) - iptables['mangle'].empty_chain('scope') - iptables['filter'].empty_chain('scope') - dont_block_external = (ip_version == l3_constants.IP_VERSION_4 - and self._snat_enabled and external_port) - for device_name, mark in scopemarks.items(): - # Add address scope iptables rule - iptables['mangle'].add_rule( - 'scope', - self.address_scope_mangle_rule(device_name, mark)) - if dont_block_external and device_name == external_device_name: - continue - iptables['filter'].add_rule( - 'scope', - self.address_scope_filter_rule(device_name, mark)) - - def process_ports_address_scope_iptables(self): - 
ports_scopemark = self._get_address_scope_mark() - self._add_address_scope_mark(self.iptables_manager, ports_scopemark) - - def _get_external_address_scope(self): - external_port = self.get_ex_gw_port() - if not external_port: - return - - scopes = external_port.get('address_scopes', {}) - return scopes.get(str(l3_constants.IP_VERSION_4)) - - def process_external_port_address_scope_routing(self, iptables_manager): - if not self._snat_enabled: - return - - external_port = self.get_ex_gw_port() - if not external_port: - return - - external_devicename = self.get_external_device_name( - external_port['id']) - - # Saves the originating address scope by saving the packet MARK to - # the CONNMARK for new connections so that returning traffic can be - # match to it. - rule = ('-o %s -m connmark --mark 0x0/0xffff0000 ' - '-j CONNMARK --save-mark ' - '--nfmask 0xffff0000 --ctmask 0xffff0000' % - external_devicename) - - iptables_manager.ipv4['mangle'].add_rule('POSTROUTING', rule) - - address_scope = self._get_external_address_scope() - if not address_scope: - return - - # Prevents snat within the same address scope - rule = '-o %s -m connmark --mark %s -j ACCEPT' % ( - external_devicename, - self.get_address_scope_mark_mask(address_scope)) - iptables_manager.ipv4['nat'].add_rule('snat', rule) - - def process_address_scope(self): - with self.iptables_manager.defer_apply(): - self.process_ports_address_scope_iptables() - self.process_floating_ip_address_scope_rules() - - @common_utils.exception_logger() - def process_delete(self, agent): - """Process the delete of this router - - This method is the point where the agent requests that this router - be deleted. This is a separate code path from process in that it - avoids any changes to the qrouter namespace that will be removed - at the end of the operation. - - :param agent: Passes the agent in order to send RPC messages. - """ - LOG.debug("process router delete") - if self.router_namespace.exists(): - self._process_internal_ports(agent.pd) - agent.pd.sync_router(self.router['id']) - self._process_external_on_delete(agent) - else: - LOG.warning(_LW("Can't gracefully delete the router %s: " - "no router namespace found."), self.router['id']) - - @common_utils.exception_logger() - def process(self, agent): - """Process updates to this router - - This method is the point where the agent requests that updates be - applied to this router. - - :param agent: Passes the agent in order to send RPC messages. - """ - LOG.debug("process router updates") - self._process_internal_ports(agent.pd) - agent.pd.sync_router(self.router['id']) - self.process_external(agent) - self.process_address_scope() - # Process static routes for router - self.routes_updated(self.routes, self.router['routes']) - self.routes = self.router['routes'] - - # Update ex_gw_port and enable_snat on the router info cache - self.ex_gw_port = self.get_ex_gw_port() - self.fip_map = dict([(fip['floating_ip_address'], - fip['fixed_ip_address']) - for fip in self.get_floating_ips()]) - # TODO(Carl) FWaaS uses this. Why is it set after processing is done? 
- self.enable_snat = self.router.get('enable_snat') diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2 index c7d0a1b8..920e9a1b 100644 --- a/build/nics-template.yaml.jinja2 +++ b/build/nics-template.yaml.jinja2 @@ -174,8 +174,11 @@ resources: use_dhcp: false {%- else %} - - type: interface + type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }} name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }} + {%- if 'uio_driver' in nets['tenant']['nic_mapping'][role] %} + uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }} + {%- endif %} use_dhcp: false addresses: - diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml index 7e0f8017..3781e791 100644 --- a/build/opnfv-environment.yaml +++ b/build/opnfv-environment.yaml @@ -2,17 +2,16 @@ #types parameters: -# CloudDomain: + #CloudDomain: parameter_defaults: CeilometerStoreEvents: true NeutronEnableForceMetadata: true NeutronEnableDHCPMetadata: true NeutronEnableIsolatedMetadata: true - OvercloudControlFlavor: control - OvercloudComputeFlavor: compute - controllerImage: overcloud-full - + #NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter" + # Kernel arguments, this value will be set to kernel arguments specified for compute nodes in deploy setting file. + #ComputeKernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=2MB hugepagesz=2MB hugepages=2048" ExtraConfig: tripleo::ringbuilder::build_ring: False nova::nova_public_key: @@ -26,6 +25,8 @@ parameter_defaults: key: 'os_compute_api:servers:show:host_status' value: 'rule:admin_or_owner' nova::api::default_floating_pool: 'external' + #neutron::agents::dhcp::interface_driver: "neutron.agent.linux.interface.NSDriver" + #neutron::agents::l3::interface_driver: "neutron.agent.linux.interface.NSDriver" ControllerServices: - OS::TripleO::Services::CACerts # - OS::TripleO::Services::CephClient @@ -106,6 +107,7 @@ parameter_defaults: - OS::TripleO::Services::Etcd - OS::TripleO::Services::Gluon - OS::TripleO::Services::Tacker + - OS::TripleO::Services::NeutronHoneycombAgent ComputeServices: - OS::TripleO::Services::CACerts - OS::TripleO::Services::CephClient @@ -129,3 +131,4 @@ parameter_defaults: - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient - OS::TripleO::Services::VipHosts + - OS::TripleO::Services::NeutronHoneycombAgent diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh index db695daf..a360689b 100755 --- a/build/overcloud-full.sh +++ b/build/overcloud-full.sh @@ -31,8 +31,7 @@ git archive --format=tar.gz --prefix=tripleo/ HEAD > ${BUILD_DIR}/opnfv-puppet-t popd > /dev/null # download customized os-net-config -rm -fr os-net-config -git clone https://github.com/trozet/os-net-config.git -b stable/danube +clone_fork os-net-config pushd os-net-config/os_net_config > /dev/null git archive --format=tar.gz --prefix=os_net_config/ HEAD > ${BUILD_DIR}/os-net-config.tar.gz popd > /dev/null @@ -135,16 +134,11 @@ LIBGUESTFS_BACKEND=direct virt-customize \ --upload ${BUILD_DIR}/noarch/$tackerclient_pkg:/root/ \ --install /root/$tackerclient_pkg \ --run-command "pip install python-senlinclient" \ - --upload ${BUILD_ROOT}/neutron/agent/interface/interface.py:/usr/lib/python2.7/site-packages/neutron/agent/linux/ \ - --run-command "mkdir /root/fdio_neutron_l3" \ - --upload ${BUILD_ROOT}/neutron/agent/l3/namespaces.py:/root/fdio_neutron_l3/ \ - --upload 
${BUILD_ROOT}/neutron/agent/l3/router_info.py:/root/fdio_neutron_l3/ \ - --upload ${BUILD_ROOT}/puppet-neutron/manifests/agents/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/agents/ml2/ \ - --upload ${BUILD_ROOT}/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/plugins/ml2/ \ - --upload ${BUILD_ROOT}/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb:/etc/puppet/modules/neutron/lib/puppet/type/ \ - --mkdir /etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp \ - --upload ${BUILD_ROOT}/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb:/etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp/ \ --run-command "sed -i -E 's/timeout=[0-9]+/timeout=60/g' /usr/share/openstack-puppet/modules/rabbitmq/lib/puppet/provider/rabbitmqctl.rb" \ + --upload ${BUILD_ROOT}/neutron-patch-NSDriver.patch:/usr/lib/python2.7/site-packages/ \ + --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron-patch-NSDriver.patch" \ + --upload ${BUILD_ROOT}/puppet-neutron-add-odl-settings.patch:/usr/share/openstack-puppet/modules/neutron/ \ + --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-add-odl-settings.patch" \ -a overcloud-full_build.qcow2 mv -f overcloud-full_build.qcow2 overcloud-full.qcow2 diff --git a/build/puppet-neutron-add-odl-settings.patch b/build/puppet-neutron-add-odl-settings.patch new file mode 100644 index 00000000..aa0b35a1 --- /dev/null +++ b/build/puppet-neutron-add-odl-settings.patch @@ -0,0 +1,47 @@ +diff --git a/manifests/plugins/ml2/opendaylight.pp b/manifests/plugins/ml2/opendaylight.pp +index a27c4d6..13b56c4 100644 +--- a/manifests/plugins/ml2/opendaylight.pp ++++ b/manifests/plugins/ml2/opendaylight.pp +@@ -29,12 +29,22 @@ + # (optional) The URI used to connect to the local OVSDB server + # Defaults to 'tcp:127.0.0.1:6639' + # ++# [*port_binding_controller*] ++# (optional) Name of the controller to be used for port binding. ++# Defaults to $::os_service_default ++# ++# [*odl_hostconf_uri*] ++# (optional) Path for ODL host configuration REST interface. 
++# Defaults to $::os_service_default ++# + class neutron::plugins::ml2::opendaylight ( +- $package_ensure = 'present', +- $odl_username = $::os_service_default, +- $odl_password = $::os_service_default, +- $odl_url = $::os_service_default, +- $ovsdb_connection = 'tcp:127.0.0.1:6639', ++ $package_ensure = 'present', ++ $odl_username = $::os_service_default, ++ $odl_password = $::os_service_default, ++ $odl_url = $::os_service_default, ++ $ovsdb_connection = 'tcp:127.0.0.1:6639', ++ $port_binding_controller = $::os_service_default, ++ $odl_hostconf_uri = $::os_service_default, + ) { + + include ::neutron::deps +@@ -48,9 +58,11 @@ class neutron::plugins::ml2::opendaylight ( + ) + + neutron_plugin_ml2 { +- 'ml2_odl/username': value => $odl_username; +- 'ml2_odl/password': value => $odl_password; +- 'ml2_odl/url': value => $odl_url; ++ 'ml2_odl/username': value => $odl_username; ++ 'ml2_odl/password': value => $odl_password; ++ 'ml2_odl/url': value => $odl_url; ++ 'ml2_odl/port_binding_controller': value => $port_binding_controller; ++ 'ml2_odl/odl_hostconf_uri': value => $odl_hostconf_uri; + } + + neutron_config { diff --git a/build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb b/build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb deleted file mode 100644 index 595904ce..00000000 --- a/build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb +++ /dev/null @@ -1,15 +0,0 @@ -Puppet::Type.type(:neutron_agent_vpp).provide( - :ini_setting, - :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting) -) do - - def self.file_path - '/etc/neutron/plugins/ml2/vpp_agent.ini' - end - - # added for backwards compatibility with older versions of inifile - def file_path - self.class.file_path - end - -end
\ No newline at end of file diff --git a/build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb b/build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb deleted file mode 100644 index f43a8b41..00000000 --- a/build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb +++ /dev/null @@ -1,28 +0,0 @@ -Puppet::Type.newtype(:neutron_agent_vpp) do - - ensurable - - newparam(:name, :namevar => true) do - desc 'Section/setting name to manage from vpp agent config.' - newvalues(/\S+\/\S+/) - end - - newproperty(:value) do - desc 'The value of the setting to be defined.' - munge do |value| - value = value.to_s.strip - value.capitalize! if value =~ /^(true|false)$/i - value - end - end - - newparam(:ensure_absent_val) do - desc 'A value that is specified as the value property will behave as if ensure => absent was specified' - defaultto('<SERVICE DEFAULT>') - end - - autorequire(:package) do - 'networking-vpp' - end - -end
\ No newline at end of file diff --git a/build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp b/build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp deleted file mode 100644 index 6184e006..00000000 --- a/build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp +++ /dev/null @@ -1,65 +0,0 @@ -# == Class: neutron::agents::ml2::networking-vpp -# -# Setups networking-vpp Neutron agent for ML2 plugin. -# -# === Parameters -# -# [*package_ensure*] -# (optional) Package ensure state. -# Defaults to 'present'. -# -# [*enabled*] -# (required) Whether or not to enable the agent. -# Defaults to true. -# -# [*manage_service*] -# (optional) Whether to start/stop the service -# Defaults to true -# -# [*physnets*] -# List of <physical_network>:<physical_interface> -# tuples mapping physical network names to agent's node-specific physical -# network interfaces. Defaults to empty list. -# -# [*etcd_host*] -# etcd server host name/ip -# Defaults to 127.0.0.1. -# -# [*etcd_port*] -# etcd server listening port. -# Defaults to 4001. -# -class neutron::agents::ml2::networking-vpp ( - $package_ensure = 'present', - $enabled = true, - $manage_service = true, - $physnets = '', - $etcd_host = '127.0.0.1', - $etcd_port = 4001, -) { - - include ::neutron::params - - Neutron_agent_vpp<||> ~> Service['networking-vpp-agent'] - - neutron_agent_vpp { - 'ml2_vpp/physnets': value => $physnets; - 'ml2_vpp/etcd_host': value => $etcd_host; - 'ml2_vpp/etcd_port': value => $etcd_port; - 'DEFAULT/host': value => $::fqdn; - } - - if $manage_service { - if $enabled { - $service_ensure = 'running' - } else { - $service_ensure = 'stopped' - } - } - - service { 'networking-vpp-agent': - ensure => $service_ensure, - name => 'networking-vpp-agent', - enable => $enabled, - } -}
\ No newline at end of file diff --git a/build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp b/build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp deleted file mode 100644 index cf8fe178..00000000 --- a/build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp +++ /dev/null @@ -1,51 +0,0 @@ -# -# Install the networking-vpp ML2 mechanism driver and generate config file -# from parameters in the other classes. -# -# === Parameters -# -# [*package_ensure*] -# (optional) The intended state of the networking-vpp -# package, i.e. any of the possible values of the 'ensure' -# property for a package resource type. -# Defaults to 'present' -# -# [*etcd_host*] -# (required) etcd server host name or IP. -# Defaults to '127.0.0.1' -# -# [*etcd_port*] -# (optional) etcd server listening port. -# Defaults to 4001. -# -# [*etcd_user*] -# (optional) User name for etcd authentication -# Defaults to ''. -# -# [*etcd_pass*] -# (optional) Password for etcd authentication -# Defaults to ''. -# -class neutron::plugins::ml2::networking-vpp ( - $package_ensure = 'present', - $etcd_host = '127.0.0.1', - $etcd_port = 4001, - $etcd_user = '', - $etcd_pass = '', -) { - require ::neutron::plugins::ml2 - - ensure_resource('package', 'networking-vpp', - { - ensure => $package_ensure, - tag => 'openstack', - } - ) - - neutron_plugin_ml2 { - 'ml2_vpp/etcd_host': value => $etcd_host; - 'ml2_vpp/etcd_port': value => $etcd_port; - 'ml2_vpp/etcd_user': value => $etcd_user; - 'ml2_vpp/etcd_pass': value => $etcd_pass; - } -} diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec index be181c0a..ec0735fc 100644 --- a/build/rpm_specs/opnfv-apex-common.spec +++ b/build/rpm_specs/opnfv-apex-common.spec @@ -56,6 +56,7 @@ install config/deploy/os-onos-sfc-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/ install config/deploy/os-ocl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml install config/network/network_settings.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings.yaml install config/network/network_settings_v6.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml +install config/network/network_settings_vpp.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/python/apex @@ -92,6 +93,7 @@ install docs/release/release-notes/release-notes.html %{buildroot}%{_docdir}/opn install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example install config/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example install config/network/network_settings_v6.yaml %{buildroot}%{_docdir}/opnfv/network_settings_v6.yaml.example +install config/network/network_settings_vpp.yaml %{buildroot}%{_docdir}/opnfv/network_settings_vpp.yaml.example install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/inventory.yaml.example %files @@ -131,6 +133,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/ %{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml %{_sysconfdir}/opnfv-apex/network_settings.yaml %{_sysconfdir}/opnfv-apex/network_settings_v6.yaml +%{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml %doc %{_docdir}/opnfv/LICENSE.rst %doc %{_docdir}/opnfv/installation-instructions.html %doc %{_docdir}/opnfv/release-notes.rst @@ -138,9 +141,12 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/ %doc 
%{_docdir}/opnfv/deploy_settings.yaml.example %doc %{_docdir}/opnfv/network_settings.yaml.example %doc %{_docdir}/opnfv/network_settings_v6.yaml.example +%doc %{_docdir}/opnfv/network_settings_vpp.yaml.example %doc %{_docdir}/opnfv/inventory.yaml.example %changelog +* Tue Feb 14 2017 Feng Pan <fpan@redhat.com> - 4.0-4 +- Add network_settings_vpp.yaml * Fri Feb 3 2017 Nikolas Hermanns <nikolas.hermanns@ericsson.com> - 4.0-3 - change odl_l3-gluon-noha to odl-gluon-noha * Thu Feb 2 2017 Feng Pan <fpan@redhat.com> - 4.0-2 diff --git a/build/set_perf_images.sh b/build/set_perf_images.sh deleted file mode 100644 index d91c20ec..00000000 --- a/build/set_perf_images.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -############################################################################## -# Copyright (c) 2016 Red Hat Inc. -# Michael Chapman <michapma@redhat.com>, Tim Rozet <trozet@redhat.com> -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -for ROLE in $@; do - RAMDISK=${ROLE}-bm-deploy-ramdisk - - if [ -f $ROLE-overcloud-full.qcow2 ]; then - echo "Uploading ${RAMDISK}" - glance image-create --name ${RAMDISK} --disk-format ari --container-format ari --file ${ROLE}-ironic-python-agent.initramfs --is-public True - echo "Uploading $ROLE-overcloud-full.qcow2 " - KERNEL=$(glance image-show overcloud-full | grep 'kernel_id' | cut -d '|' -f 3 | xargs) - RAMDISK_ID=$(glance image-show ${RAMDISK} | grep id | awk {'print $4'}) - glance image-create --name $ROLE-overcloud-full --disk-format qcow2 --file $ROLE-overcloud-full.qcow2 --container-format bare --property ramdisk_id=$RAMDISK_ID --property kernel_id=$KERNEL --is-public True - rm -f $ROLE-overcloud-full.qcow2 - fi - - if [ "$ROLE" == "Controller" ]; then - sed -i "s/overcloud-full/Controller-overcloud-full/" opnfv-environment.yaml - sed -i '/OvercloudControlFlavor:/c\ OvercloudControlFlavor: control' opnfv-environment.yaml - fi - - if [ "$ROLE" == "Compute" ]; then - sudo sed -i "s/NovaImage: .*/NovaImage: Compute-overcloud-full/" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml - sudo sed -i '/OvercloudComputeFlavor:/c\ OvercloudComputeFlavor: compute' /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml - fi - - if [ "$ROLE" == "BlockStorage" ]; then - sudo sed -i "s/BlockStorageImage: .*/BlockStorageImage: BlockStorage-overcloud-full/" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml - fi - - RAMDISK_ID=$(glance image-show ${RAMDISK} | grep id | awk {'print $4'}) - nodes=$(ironic node-list | awk {'print $2'} | grep -Eo [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}) - role=$(echo $ROLE | awk '{print tolower($0)}') - if [ "$role" == "controller" ]; then - role="control" - fi - for node in $nodes; do - if ironic node-show $node | grep profile:${role}; then - ironic node-update $node replace driver_info/deploy_ramdisk=${RAMDISK_ID} - fi - done -done diff --git a/build/undercloud.sh b/build/undercloud.sh index b27b9108..dbe7d2f6 100755 --- a/build/undercloud.sh +++ b/build/undercloud.sh @@ -24,10 +24,6 @@ pushd opnfv-tht > /dev/null git archive --format=tar.gz --prefix=openstack-tripleo-heat-templates/ HEAD > ${BUILD_DIR}/opnfv-tht.tar.gz popd > /dev/null -# Add custom IPA to allow kernel params -curl -fO 
https://raw.githubusercontent.com/trozet/ironic-python-agent/opnfv_kernel/ironic_python_agent/extensions/image.py -python3 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")' - # installing forked opnfv-tht # enabling ceph OSDs to live on the controller # OpenWSMan package update supports the AMT Ironic driver for the TealBox @@ -51,6 +47,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \ --run-command "yum update -y openwsman*" \ --run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \ --upload ${BUILD_ROOT}/opnfv-environment.yaml:/home/stack/ \ + --upload ${BUILD_ROOT}/first-boot.yaml:/home/stack/ \ --upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \ --upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \ --install "python2-congressclient" \ @@ -65,10 +62,6 @@ LIBGUESTFS_BACKEND=direct virt-customize \ --install "openstack-heat-engine" \ --install "openstack-heat-api-cfn" \ --install "openstack-heat-api" \ - --upload ${BUILD_ROOT}/build_perf_image.sh:/home/stack \ - --upload ${BUILD_ROOT}/set_perf_images.sh:/home/stack \ - --upload ${BUILD_DIR}/image.py:/root \ - --upload ${BUILD_DIR}/image.pyc:/root \ --upload ${BUILD_ROOT}/0001-Removes-doing-yum-update.patch:/usr/lib/python2.7/site-packages/ \ --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < 0001-Removes-doing-yum-update.patch" \ --root-password password:stack \ diff --git a/config/network/network_settings_vpp.yaml b/config/network/network_settings_vpp.yaml new file mode 100644 index 00000000..4b50745f --- /dev/null +++ b/config/network/network_settings_vpp.yaml @@ -0,0 +1,221 @@ +# This configuration file defines Network Environment for a +# Baremetal Deployment of OPNFV. It contains default values +# for 5 following networks: +# +# - admin +# - tenant* +# - external* +# - storage* +# - api* +# *) optional networks +# +# Optional networks will be consolidated with the admin network +# if not explicitly configured. +# +# See short description of the networks in the comments below. +# +# "admin" is the short name for Control Plane Network. +# This network should be IPv4 even it is an IPv6 deployment +# IPv6 does not have PXE boot support. +# During OPNFV deployment it is used for node provisioning which will require +# PXE booting as well as running a DHCP server on this network. Be sure to +# disable any other DHCP/TFTP server on this network. +# +# "tenant" is the network used for tenant traffic. +# +# "external" is the network which should have internet or external +# connectivity. External OpenStack networks will be configured to egress this +# network. There can be multiple external networks, but only one assigned as +# "public" which OpenStack public API's will register. +# +# "storage" is the network for storage I/O. +# +# "api" is an optional network for splitting out OpenStack service API +# communication. This should be used for IPv6 deployments. 
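To make the consolidation rule above concrete, here is a deliberately abbreviated, hypothetical sketch (illustrative values only, not a deployable settings file): only the admin network is given full settings, and the optional networks are simply not configured, so per the note above their traffic would be carried on the admin network.

networks:
  admin:
    enabled: true
    installer_vm:
      nic_type: interface
      vlan: native
      members:
        - em1
      ip: 192.0.2.1
    cidr: 192.0.2.0/24
    dhcp_range:
      - 192.0.2.2
      - 192.0.2.10
    nic_mapping:
      compute:
        phys_type: interface
        members:
          - eth0
      controller:
        phys_type: interface
        members:
          - eth0
  # tenant, external, storage and api intentionally omitted in this sketch:
  # networks that are not explicitly configured are consolidated with admin.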
+ + +#Meta data for the network configuration +network-config-metadata: + title: LF-POD-1 Network config + version: 0.1 + created: Mon Dec 28 2015 + comment: None + +# DNS Settings +dns-domain: opnfvlf.org +dns-search: opnfvlf.org +dns_nameservers: + - 8.8.8.8 + - 8.8.4.4 +# NTP servers +ntp: + - 0.se.pool.ntp.org + - 1.se.pool.ntp.org +# Syslog server +syslog: + server: 10.128.1.24 + transport: 'tcp' + +# Common network settings +networks: # Network configurations + admin: # Admin configuration (pxe and jumpstart), + enabled: true + installer_vm: # Network settings for the Installer VM on admin network + nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond + members: + - em1 # Member Interface to bridge to for installer VM (use multiple values for bond) + vlan: native # VLAN tag to use for this network on Installer VM, native means none + ip: 192.0.2.1 # IP to assign to Installer VM on this network + overcloud_ip_range: + - 192.0.2.11 + - 192.0.2.99 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be + # used for host bridge (i.e. br-admin). If empty entire range is usable. + # Cannot overlap with dhcp_range or introspection_range. + gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled) + cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24 + dhcp_range: + - 192.0.2.2 + - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned + nic_mapping: # Mapping of network configuration for Overcloud Nodes + compute: # Mapping for compute profile (nodes that will be used as Compute nodes) + phys_type: interface # Physical interface type (interface or bond) + members: # Physical NIC members of this mapping (Single value allowed for interface phys_type) + - eth0 + controller: # Mapping for controller profile (nodes that will be used as Controller nodes) + phys_type: interface + members: + - eth0 + # + tenant: # Tenant network configuration + enabled: true + cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24 + mtu: 1500 # Tenant network MTU + overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range: + # VNI, VLAN-ID, etc. + segmentation_type: vxlan # Tenant network segmentation type: + # vlan, vxlan, gre + nic_mapping: # Mapping of network configuration for Overcloud Nodes + compute: # Mapping for compute profile (nodes that will be used as Compute nodes) + phys_type: vpp_interface # Physical interface type (interface/vpp_interface) + uio_driver: uio_pci_generic # uio driver, for vpp interfaces only + vlan: native # VLAN tag to use with this NIC + members: # Physical NIC members of this mapping (Single value allowed for interface phys_type) + - eth1 # Note that logic nic name like nic1 cannot be used for fdio deployment yet. + controller: # Mapping for controller profile (nodes that will be used as Controller nodes) + phys_type: vpp_interface # Physical interface type (interface/vpp_interface) + uio_driver: uio_pci_generic # uio driver, for vpp interfaces only + vlan: native + members: + - eth1 # Note that logic nic name like nic1 cannot be used for fdio deployment yet. 
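The tenant mapping above is what feeds the phys_type/uio_driver handling added to build/nics-template.yaml.jinja2 earlier in this change. As a rough sketch of the rendered result for the compute role, the generated tenant NIC entry in the nic config would look something like the following; the addresses entry is an assumption here, reusing the template's usual ip_netmask/get_param pattern with a TenantIpSubnet parameter name.

-
  type: vpp_interface
  name: eth1
  uio_driver: uio_pci_generic
  use_dhcp: false
  addresses:
    -
      ip_netmask: {get_param: TenantIpSubnet}

Note that the template only emits uio_driver when the key is present in the mapping, so plain interface-type NICs are rendered as before.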
+ # + external: # Can contain 1 or more external networks + - public: # "public" network will be the network the installer VM attaches to + enabled: true + mtu: 1500 # Public network MTU + installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network) + nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond + vlan: native + members: + - em1 # Member Interface to bridge to for installer VM (use multiple values for bond) + ip: 192.168.37.1 # IP to assign to Installer VM on this network + cidr: 192.168.37.0/24 + gateway: 192.168.37.1 + floating_ip_range: + - 192.168.37.200 + - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron + overcloud_ip_range: + - 192.168.37.10 + - 192.168.37.199 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be used for host + # bridge (i.e. br-public). If empty entire range is usable. Cannot overlap with dhcp_range or introspection_range. + nic_mapping: # Mapping of network configuration for Overcloud Nodes + compute: # Mapping for compute profile (nodes that will be used as Compute nodes) + phys_type: interface # Physical interface type (interface or bond) + vlan: native # VLAN tag to use with this NIC + members: # Physical NIC members of this mapping (Single value allowed for interface phys_type) + - eth2 + controller: # Mapping for controller profile (nodes that will be used as Controller nodes) + phys_type: interface + vlan: native + members: + - eth2 + external_overlay: # External network to be created in OpenStack by Services tenant + name: Public_internet + type: flat + gateway: 192.168.37.1 + - private_cloud: # another external network + enabled: false + mtu: 1500 + installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network) + nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond + vlan: 101 + members: + - em1 # Member Interface to bridge to for installer VM (use multiple values for bond) + ip: 192.168.38.1 # IP to assign to Installer VM on this network + cidr: 192.168.38.0/24 + gateway: 192.168.38.1 + floating_ip_range: + - 192.168.38.200 + - 192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron + overcloud_ip_range: + - 192.168.38.10 + - 192.168.38.199 # Usable IP range for overcloud nodes (including VIPs), usually this is a shared subnet. + # Cannot overlap with dhcp_range or introspection_range. + nic_mapping: # Mapping of network configuration for Overcloud Nodes + compute: # Mapping for compute profile (nodes that will be used as Compute nodes) + phys_type: interface # Physical interface type (interface or bond) + vlan: 101 # VLAN tag to use with this NIC + members: # Physical NIC members of this mapping (Single value allowed for interface phys_type) + - eth3 # Note that logic nic name like nic1 cannot be used for fdio deployment yet. 
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes) + phys_type: interface + vlan: 101 + members: + - eth3 + external_overlay: # External network to be created in OpenStack by Services tenant + name: private_cloud + type: vlan + segmentation_id: 101 + gateway: 192.168.38.1 + # + storage: # Storage network configuration + enabled: true + cidr: 12.0.0.0/24 # Subnet in CIDR format + mtu: 1500 # Storage network MTU + nic_mapping: # Mapping of network configuration for Overcloud Nodes + compute: # Mapping for compute profile (nodes that will be used as Compute nodes) + phys_type: interface # Physical interface type (interface or bond) + vlan: native # VLAN tag to use with this NIC + members: # Physical NIC members of this mapping (Single value allowed for interface phys_type) + - eth3 # Note that logic nic name like nic1 cannot be used for fdio deployment yet. + controller: # Mapping for controller profile (nodes that will be used as Controller nodes) + phys_type: interface + vlan: native + members: + - eth3 + # + api: # API network configuration + enabled: false + cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format + vlan: 13 # VLAN tag to use for Overcloud hosts on this network + mtu: 1500 # Api network MTU + nic_mapping: # Mapping of network configuration for Overcloud Nodes + compute: # Mapping for compute profile (nodes that will be used as Compute nodes) + phys_type: interface # Physical interface type (interface or bond) + vlan: native # VLAN tag to use with this NIC + members: # Physical NIC members of this mapping (Single value allowed for interface phys_type) + - eth4 # Note that logic nic name like nic1 cannot be used for fdio deployment yet. + controller: # Mapping for controller profile (nodes that will be used as Controller nodes) + phys_type: interface + vlan: native + members: + - eth4 + +# Apex specific settings +apex: + networks: + admin: + introspection_range: + - 192.0.2.100 + - 192.0.2.120 # Range used for introspection phase (examining nodes). This cannot overlap with dhcp_range or overcloud_ip_range. 
+ # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range + # for the overcloud default external network diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst index a5d52f0c..da5aac92 100644 --- a/docs/release/release-notes/release-notes.rst +++ b/docs/release/release-notes/release-notes.rst @@ -279,6 +279,9 @@ Known Issues | JIRA: APEX-138 | Unclear error message when interface | | | set to dhcp | +--------------------------------------+--------------------------------------+ +| JIRA: APEX-389 (Danube) | Compute kernel parameters are used | +| | for all nodes | ++--------------------------------------+--------------------------------------+ Workarounds diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh index f6522b8a..2df5fb63 100755 --- a/lib/overcloud-deploy-functions.sh +++ b/lib/overcloud-deploy-functions.sh @@ -23,7 +23,7 @@ function overcloud_deploy { DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/services/gluon.yaml" fi elif [ "${deploy_options_array['vpp']}" == 'True' ]; then - DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_fdio.yaml" + DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-honeycomb.yaml" else DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-l3.yaml" fi @@ -92,7 +92,6 @@ EOF -a overcloud-full.qcow2 if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then - sudo sed -i '/NeutronOVSDataPathType:/c\ NeutronOVSDataPathType: netdev' /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/dpdk_rpms/*" \ --run-command "sed -i '/RuntimeDirectoryMode=.*/d' /usr/lib/systemd/system/openvswitch-nonetwork.service" \ --run-command "printf \"%s\\n\" RuntimeDirectoryMode=0775 Group=qemu UMask=0002 >> /usr/lib/systemd/system/openvswitch-nonetwork.service" \ @@ -120,6 +119,29 @@ EOI EOI fi + if [ -n "${deploy_options_array['performance']}" ]; then + for option in "${performance_options[@]}" ; do + arr=($option) + # use compute's kernel settings for all nodes for now. 
+ if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "kernel" ]; then + kernel_args+=" ${arr[2]}=${arr[3]}" + fi + done + + ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI + sed -i "/ComputeKernelArgs:/c\ ComputeKernelArgs: '$kernel_args'" ${ENV_FILE} + sed -i "$ a\resource_registry:\n OS::TripleO::NodeUserData: first-boot.yaml" ${ENV_FILE} + sed -i "/NovaSchedulerDefaultFilters:/c\ NovaSchedulerDefaultFilters: 'RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter'" ${ENV_FILE} +EOI + fi + + if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['dataplane']}" == 'fdio' ]]; then + ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI + sed -i "/neutron::agents::dhcp::interface_driver:/c\ neutron::agents::dhcp::interface_driver: neutron.agent.linux.interface.NSDriver" ${ENV_FILE} + sed -i "/neutron::agents::l3::interface_driver:/c\ neutron::agents::l3::interface_driver: neutron.agent.linux.interface.NSDriver" ${ENV_FILE} +EOI + fi + # Set ODL version accordingly if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then case "${deploy_options_array['odl_version']}" in @@ -141,73 +163,6 @@ EOI EOI fi - # Add performance deploy options if they have been set - if [ ! -z "${deploy_options_array['performance']}" ]; then - - # Remove previous kernel args files per role - ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Compute-kernel_params.txt" - ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Controller-kernel_params.txt" - - # Push performance options to subscript to modify per-role images as needed - for option in "${performance_options[@]}" ; do - echo -e "${blue}Setting performance option $option${reset}" - ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "dataplane=${deploy_options_array['dataplane']} bash build_perf_image.sh $option" - done - - # Build IPA kernel option ramdisks - ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI -/bin/cp -f /home/stack/ironic-python-agent.initramfs /root/ -mkdir -p ipa/ -pushd ipa -gunzip -c ../ironic-python-agent.initramfs | cpio -i -if [ ! -f /home/stack/Compute-kernel_params.txt ]; then - touch /home/stack/Compute-kernel_params.txt - chown stack /home/stack/Compute-kernel_params.txt -fi -/bin/cp -f /home/stack/Compute-kernel_params.txt tmp/kernel_params.txt -echo "Compute params set: " -cat tmp/kernel_params.txt -/bin/cp -f /root/image.py usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.py -/bin/cp -f /root/image.pyc usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.pyc -find . | cpio -o -H newc | gzip > /home/stack/Compute-ironic-python-agent.initramfs -chown stack /home/stack/Compute-ironic-python-agent.initramfs -if [ ! -f /home/stack/Controller-kernel_params.txt ]; then - touch /home/stack/Controller-kernel_params.txt - chown stack /home/stack/Controller-kernel_params.txt -fi -/bin/cp -f /home/stack/Controller-kernel_params.txt tmp/kernel_params.txt -echo "Controller params set: " -cat tmp/kernel_params.txt -find . 
| cpio -o -H newc | gzip > /home/stack/Controller-ironic-python-agent.initramfs -chown stack /home/stack/Controller-ironic-python-agent.initramfs -popd -/bin/rm -rf ipa/ -EOI - - # set NIC heat params and resource registry - ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI -if [ -n "${private_network_compute_interface}" ]; then - sudo sed -i '/ComputeTenantNIC:/c\ ComputeTenantNIC: '${private_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml -fi -if [ -n "${private_network_controller_interface}" ]; then - sudo sed -i '/ControllerTenantNIC:/c\ ControllerTenantNIC: '${private_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml -fi -# TODO: PublicNIC is not used today, however, in the future, we'll bind public nic to DPDK as well for certain scenarios. At that time, -# we'll need to make sure public network is enabled. -if [ -n "${public_network_compute_interface}" ]; then - sudo sed -i '/ComputePublicNIC:/c\ ComputePublicNIC: '${public_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml -fi -if [ -n "${public_network_controller_interface}" ]; then - sudo sed -i '/ControllerPublicNIC:/c\ ControllerPublicNIC: '${public_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml -fi -EOI - - echo -e "${blue}INFO: Including /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml ${reset}" - if [ "$debug" == 'TRUE' ]; then - ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml" - fi - DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml" - fi # check if ceph should be enabled if [ "${deploy_options_array['ceph']}" == 'True' ]; then @@ -279,7 +234,6 @@ openstack overcloud image upload echo "Configuring undercloud and discovering nodes" openstack baremetal import --json instackenv.json -bash -x set_perf_images.sh ${performance_roles[@]} if [[ -z "$virtual" ]]; then openstack baremetal introspection bulk start if [[ -n "$root_disk_list" ]]; then diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh index 94eac01a..2114c0b7 100755 --- a/lib/parse-functions.sh +++ b/lib/parse-functions.sh @@ -12,20 +12,9 @@ ##parses network settings yaml into globals parse_network_settings() { - local output parse_ext - parse_ext='' - - if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then - for val in ${performance_roles[@]}; do - if [ "$val" == "Compute" ]; then - parse_ext="${parse_ext} --compute-pre-config " - elif [ "$val" == "Controller" ]; then - parse_ext="${parse_ext} --controller-pre-config " - fi - done - fi + local output - if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml $parse_ext); then + if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml); then echo -e "${blue}${output}${reset}" eval "$output" else diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py index 4fc6f583..dbe89b21 100644 --- a/lib/python/apex/network_environment.py +++ b/lib/python/apex/network_environment.py @@ -172,11 +172,6 @@ class NetworkEnvironment(dict): # apply resource registry update for API_RESOURCES self._config_resource_reg(API_RESOURCES, postfix) - if 
compute_pre_config: - self[reg][COMPUTE_PRE] = PRE_CONFIG_DIR + "compute/numa.yaml" - if controller_pre_config: - self[reg][CONTROLLER_PRE] = PRE_CONFIG_DIR + "controller/numa.yaml" - # Set IPv6 related flags to True. Not that we do not set those to False # when IPv4 is configured, we'll use the default or whatever the user # may have set. |
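For context on how these settings come together at deploy time: after the performance and ODL/fdio sed commands added to lib/overcloud-deploy-functions.sh (shown earlier in this change) run against ${ENV_FILE} on the undercloud, the environment file, assumed here to be the opnfv-environment.yaml copied to /home/stack, would carry roughly the following intended additions (indentation simplified). This is a sketch only; the kernel argument string below is the example value from the commented ComputeKernelArgs line, and the real value is assembled from the Compute kernel options in the deploy settings file.

parameter_defaults:
  ComputeKernelArgs: 'intel_iommu=on iommu=pt default_hugepagesz=2MB hugepagesz=2MB hugepages=2048'
  NovaSchedulerDefaultFilters: 'RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter'
  ExtraConfig:
    neutron::agents::dhcp::interface_driver: neutron.agent.linux.interface.NSDriver
    neutron::agents::l3::interface_driver: neutron.agent.linux.interface.NSDriver

resource_registry:
  OS::TripleO::NodeUserData: first-boot.yaml

The resource_registry block is appended by the sed "$ a\..." call and is what wires the first-boot.yaml template (uploaded to the undercloud by build/undercloud.sh above) into the overcloud nodes, so the compute kernel arguments are applied via cloud-init on first boot instead of the removed per-role perf image scripts.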