summaryrefslogtreecommitdiffstats
path: root/networking-odl/networking_odl/ml2
diff options
context:
space:
mode:
Diffstat (limited to 'networking-odl/networking_odl/ml2')
-rw-r--r--networking-odl/networking_odl/ml2/legacy_port_binding.py9
-rw-r--r--networking-odl/networking_odl/ml2/mech_driver.py106
-rw-r--r--networking-odl/networking_odl/ml2/mech_driver_v2.py117
-rw-r--r--networking-odl/networking_odl/ml2/network_topology.py23
-rw-r--r--networking-odl/networking_odl/ml2/ovsdb_topology.py6
-rw-r--r--networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py263
-rw-r--r--networking-odl/networking_odl/ml2/vpp_ml2.tarbin0 -> 24064 bytes
-rw-r--r--networking-odl/networking_odl/ml2/vpp_topology.py194
8 files changed, 322 insertions, 396 deletions
diff --git a/networking-odl/networking_odl/ml2/legacy_port_binding.py b/networking-odl/networking_odl/ml2/legacy_port_binding.py
index 7b9b918..18cf95f 100644
--- a/networking-odl/networking_odl/ml2/legacy_port_binding.py
+++ b/networking-odl/networking_odl/ml2/legacy_port_binding.py
@@ -16,10 +16,10 @@
from oslo_log import log
+from neutron.common import constants as n_const
from neutron.extensions import portbindings
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api
-from neutron_lib import constants as n_const
from networking_odl.ml2 import port_binding
@@ -31,18 +31,11 @@ class LegacyPortBindingManager(port_binding.PortBindingController):
def __init__(self):
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
- self.supported_vnic_types = [portbindings.VNIC_NORMAL]
def bind_port(self, port_context):
"""Set binding for all valid segments
"""
- vnic_type = port_context.current.get(portbindings.VNIC_TYPE,
- portbindings.VNIC_NORMAL)
- if vnic_type not in self.supported_vnic_types:
- LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
- vnic_type)
- return
valid_segment = None
for segment in port_context.segments_to_bind:
diff --git a/networking-odl/networking_odl/ml2/mech_driver.py b/networking-odl/networking_odl/ml2/mech_driver.py
index adde8d9..2d60e7a 100644
--- a/networking-odl/networking_odl/ml2/mech_driver.py
+++ b/networking-odl/networking_odl/ml2/mech_driver.py
@@ -23,13 +23,13 @@ from oslo_log import log as logging
from oslo_utils import excutils
import requests
+from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron import context as neutron_context
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import securitygroup as sg
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
-from neutron_lib import exceptions as n_exc
from networking_odl._i18n import _LE
from networking_odl.common import callback as odl_call
@@ -67,46 +67,17 @@ class ResourceFilterBase(object):
def filter_create_attributes_with_plugin(resource, plugin, dbcontext):
pass
- @staticmethod
- def _filter_unmapped_null(resource_dict, unmapped_keys):
- # NOTE(yamahata): bug work around
- # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
- # Null-value for an unmapped element causes next mapped
- # collection to contain a null value
- # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
- #
- # Java Object:
- # class Root {
- # Collection<String> mappedCollection = new ArrayList<String>;
- # }
- #
- # Result:
- # Field B contains one element; null
- #
- # TODO(yamahata): update along side with neutron and ODL
- # add when neutron adds more extensions
- # delete when ODL neutron northbound supports it
- # TODO(yamahata): do same thing for other resources
- keys_to_del = [key for key in unmapped_keys
- if resource_dict.get(key) is None]
- if keys_to_del:
- odl_utils.try_del(resource_dict, keys_to_del)
-
class NetworkFilter(ResourceFilterBase):
- _UNMAPPED_KEYS = ['qos_policy_id']
-
- @classmethod
- def filter_create_attributes(cls, network, context):
+ @staticmethod
+ def filter_create_attributes(network, context):
"""Filter out network attributes not required for a create."""
odl_utils.try_del(network, ['status', 'subnets'])
- cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
- @classmethod
- def filter_update_attributes(cls, network, context):
+ @staticmethod
+ def filter_update_attributes(network, context):
"""Filter out network attributes for an update operation."""
odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
- cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
@classmethod
def filter_create_attributes_with_plugin(cls, network, plugin, dbcontext):
@@ -135,9 +106,6 @@ class SubnetFilter(ResourceFilterBase):
class PortFilter(ResourceFilterBase):
- _UNMAPPED_KEYS = ['binding:profile', 'dns_name',
- 'port_security_enabled', 'qos_policy_id']
-
@staticmethod
def _add_security_groups(port, context):
"""Populate the 'security_groups' field with entire records."""
@@ -154,12 +122,38 @@ class PortFilter(ResourceFilterBase):
network_address = str(netaddr.IPNetwork(ip_address))
address_pair['ip_address'] = network_address
+ @staticmethod
+ def _filter_unmapped_null(port):
+ # NOTE(yamahata): bug work around
+ # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
+ # Null-value for an unmapped element causes next mapped
+ # collection to contain a null value
+ # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
+ #
+ # Java Object:
+ # class Root {
+ # Collection<String> mappedCollection = new ArrayList<String>;
+ # }
+ #
+ # Result:
+ # Field B contains one element; null
+ #
+ # TODO(yamahata): update along side with neutron and ODL
+ # add when neutron adds more extensions
+ # delete when ODL neutron northbound supports it
+ # TODO(yamahata): do same thing for other resources
+ unmapped_keys = ['dns_name', 'port_security_enabled',
+ 'binding:profile']
+ keys_to_del = [key for key in unmapped_keys if port.get(key) is None]
+ if keys_to_del:
+ odl_utils.try_del(port, keys_to_del)
+
@classmethod
def filter_create_attributes(cls, port, context):
"""Filter out port attributes not required for a create."""
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS])
- cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
+ cls._filter_unmapped_null(port)
odl_utils.try_del(port, ['status'])
# NOTE(yamahata): work around for port creation for router
@@ -181,7 +175,7 @@ class PortFilter(ResourceFilterBase):
"""Filter out port attributes for an update operation."""
cls._add_security_groups(port, context)
cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS])
- cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
+ cls._filter_unmapped_null(port)
odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id'])
@classmethod
@@ -363,8 +357,8 @@ class OpenDaylightDriver(object):
'object_id': obj_id})
self.out_of_sync = True
- def sync_from_callback(self, operation, res_type, res_id, resource_dict):
- object_type = res_type.plural.replace('_', '-')
+ def sync_from_callback(self, operation, object_type, res_id,
+ resource_dict):
try:
if operation == odl_const.ODL_DELETE:
self.out_of_sync |= not self.client.try_delete(
@@ -380,8 +374,7 @@ class OpenDaylightDriver(object):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to perform %(operation)s on "
- "%(object_type)s %(res_id)s "
- "%(resource_dict)s"),
+ "%(object_type)s %(res_id)s %(resource_dict)s"),
{'operation': operation,
'object_type': object_type,
'res_id': res_id,
@@ -419,40 +412,31 @@ class OpenDaylightMechanismDriver(driver_api.MechanismDriver):
# Postcommit hooks are used to trigger synchronization.
def create_network_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_NETWORKS,
- context)
+ self.odl_drv.synchronize('create', odl_const.ODL_NETWORKS, context)
def update_network_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_NETWORKS,
- context)
+ self.odl_drv.synchronize('update', odl_const.ODL_NETWORKS, context)
def delete_network_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_NETWORKS,
- context)
+ self.odl_drv.synchronize('delete', odl_const.ODL_NETWORKS, context)
def create_subnet_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_SUBNETS,
- context)
+ self.odl_drv.synchronize('create', odl_const.ODL_SUBNETS, context)
def update_subnet_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_SUBNETS,
- context)
+ self.odl_drv.synchronize('update', odl_const.ODL_SUBNETS, context)
def delete_subnet_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_SUBNETS,
- context)
+ self.odl_drv.synchronize('delete', odl_const.ODL_SUBNETS, context)
def create_port_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_PORTS,
- context)
+ self.odl_drv.synchronize('create', odl_const.ODL_PORTS, context)
def update_port_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_PORTS,
- context)
+ self.odl_drv.synchronize('update', odl_const.ODL_PORTS, context)
def delete_port_postcommit(self, context):
- self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_PORTS,
- context)
+ self.odl_drv.synchronize('delete', odl_const.ODL_PORTS, context)
def bind_port(self, context):
self.odl_drv.bind_port(context)
diff --git a/networking-odl/networking_odl/ml2/mech_driver_v2.py b/networking-odl/networking_odl/ml2/mech_driver_v2.py
index dfc8df1..6fc199b 100644
--- a/networking-odl/networking_odl/ml2/mech_driver_v2.py
+++ b/networking-odl/networking_odl/ml2/mech_driver_v2.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import copy
from oslo_config import cfg
from oslo_log import log as logging
@@ -21,11 +22,8 @@ from neutron.plugins.ml2 import driver_api as api
from networking_odl.common import callback
from networking_odl.common import config as odl_conf
-from networking_odl.common import constants as odl_const
-from networking_odl.journal import cleanup
-from networking_odl.journal import full_sync
+from networking_odl.db import db
from networking_odl.journal import journal
-from networking_odl.journal import maintenance
from networking_odl.ml2 import port_binding
LOG = logging.getLogger(__name__)
@@ -44,66 +42,83 @@ class OpenDaylightMechanismDriver(api.MechanismDriver):
self.sg_handler = callback.OdlSecurityGroupsHandler(self)
self.journal = journal.OpendaylightJournalThread()
self.port_binding_controller = port_binding.PortBindingManager.create()
- self._start_maintenance_thread()
-
- def _start_maintenance_thread(self):
- # start the maintenance thread and register all the maintenance
- # operations :
- # (1) JournalCleanup - Delete completed rows from journal
- # (2) CleanupProcessing - Mark orphaned processing rows to pending
- # (3) Full sync - Re-sync when detecting an ODL "cold reboot"
- cleanup_obj = cleanup.JournalCleanup()
- self._maintenance_thread = maintenance.MaintenanceThread()
- self._maintenance_thread.register_operation(
- cleanup_obj.delete_completed_rows)
- self._maintenance_thread.register_operation(
- cleanup_obj.cleanup_processing_rows)
- self._maintenance_thread.register_operation(full_sync.full_sync)
- self._maintenance_thread.start()
-
- @staticmethod
- def _record_in_journal(context, object_type, operation, data=None):
- if data is None:
- data = context.current
- journal.record(context._plugin_context.session, object_type,
- context.current['id'], operation, data)
def create_network_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE)
+ db.create_pending_row(context._plugin_context.session, 'network',
+ context.current['id'], 'create', context.current)
def create_subnet_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
+ db.create_pending_row(context._plugin_context.session, 'subnet',
+ context.current['id'], 'create', context.current)
def create_port_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_PORT, odl_const.ODL_CREATE)
+ dbcontext = context._plugin_context
+ groups = [context._plugin.get_security_group(dbcontext, sg)
+ for sg in context.current['security_groups']]
+ new_context = copy.deepcopy(context.current)
+ new_context['security_groups'] = groups
+ # NOTE(yamahata): work around for port creation for router
+ # tenant_id=''(empty string) is passed when port is created
+ # by l3 plugin internally for router.
+ # On the other hand, ODL doesn't accept empty string for tenant_id.
+ # In that case, deduce tenant_id from network_id for now.
+ # Right fix: modify Neutron so that don't allow empty string
+ # for tenant_id even for port for internal use.
+ # TODO(yamahata): eliminate this work around when neutron side
+ # is fixed
+ # assert port['tenant_id'] != ''
+ if ('tenant_id' not in context.current or
+ context.current['tenant_id'] == ''):
+ tenant_id = context._network_context._network['tenant_id']
+ new_context['tenant_id'] = tenant_id
+ db.create_pending_row(context._plugin_context.session, 'port',
+ context.current['id'], 'create', new_context)
def update_network_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE)
+ db.create_pending_row(context._plugin_context.session, 'network',
+ context.current['id'], 'update', context.current)
def update_subnet_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE)
+ db.create_pending_row(context._plugin_context.session, 'subnet',
+ context.current['id'], 'update', context.current)
def update_port_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_PORT, odl_const.ODL_UPDATE)
+ port = context._plugin.get_port(context._plugin_context,
+ context.current['id'])
+ dbcontext = context._plugin_context
+ new_context = copy.deepcopy(context.current)
+ groups = [context._plugin.get_security_group(dbcontext, sg)
+ for sg in port['security_groups']]
+ new_context['security_groups'] = groups
+ # Add the network_id in for validation
+ new_context['network_id'] = port['network_id']
+ # NOTE(yamahata): work around for port creation for router
+ # tenant_id=''(empty string) is passed when port is created
+ # by l3 plugin internally for router.
+ # On the other hand, ODL doesn't accept empty string for tenant_id.
+ # In that case, deduce tenant_id from network_id for now.
+ # Right fix: modify Neutron so that don't allow empty string
+ # for tenant_id even for port for internal use.
+ # TODO(yamahata): eliminate this work around when neutron side
+ # is fixed
+ # assert port['tenant_id'] != ''
+ if ('tenant_id' not in context.current or
+ context.current['tenant_id'] == ''):
+ port['tenant_id'] = context._network_context._network['tenant_id']
+ db.create_pending_row(context._plugin_context.session, 'port',
+ context.current['id'], 'update', new_context)
def delete_network_precommit(self, context):
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[])
+ db.create_pending_row(context._plugin_context.session, 'network',
+ context.current['id'], 'delete', None)
def delete_subnet_precommit(self, context):
# Use the journal row's data field to store parent object
# uuids. This information is required for validation checking
# when deleting parent objects.
new_context = [context.current['network_id']]
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE,
- data=new_context)
+ db.create_pending_row(context._plugin_context.session, 'subnet',
+ context.current['id'], 'delete', new_context)
def delete_port_precommit(self, context):
# Use the journal row's data field to store parent object
@@ -112,19 +127,19 @@ class OpenDaylightMechanismDriver(api.MechanismDriver):
new_context = [context.current['network_id']]
for subnet in context.current['fixed_ips']:
new_context.append(subnet['subnet_id'])
- OpenDaylightMechanismDriver._record_in_journal(
- context, odl_const.ODL_PORT, odl_const.ODL_DELETE,
- data=new_context)
+ db.create_pending_row(context._plugin_context.session, 'port',
+ context.current['id'], 'delete', new_context)
@journal.call_thread_on_end
- def sync_from_callback(self, operation, res_type, res_id, resource_dict):
- object_type = res_type.singular
+ def sync_from_callback(self, operation, res_type_uri, res_id,
+ resource_dict):
+ object_type = res_type_uri.replace('-', '_')[:-1]
object_uuid = (resource_dict[object_type]['id']
if operation == 'create' else res_id)
if resource_dict is not None:
resource_dict = resource_dict[object_type]
- journal.record(db_api.get_session(), object_type, object_uuid,
- operation, resource_dict)
+ db.create_pending_row(db_api.get_session(), object_type, object_uuid,
+ operation, resource_dict)
def _postcommit(self, context):
self.journal.set_sync_event()
diff --git a/networking-odl/networking_odl/ml2/network_topology.py b/networking-odl/networking_odl/ml2/network_topology.py
index b0bfae1..99137a8 100644
--- a/networking-odl/networking_odl/ml2/network_topology.py
+++ b/networking-odl/networking_odl/ml2/network_topology.py
@@ -27,7 +27,7 @@ from oslo_serialization import jsonutils
from networking_odl.common import cache
from networking_odl.common import client
from networking_odl.common import utils
-from networking_odl._i18n import _, _LI, _LW, _LE
+from networking_odl._i18n import _LI, _LW, _LE
from networking_odl.ml2 import port_binding
@@ -44,7 +44,8 @@ class NetworkTopologyManager(port_binding.PortBindingController):
# List of class names of registered implementations of interface
# NetworkTopologyParser
network_topology_parsers = [
- 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser']
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser',
+ 'networking_odl.ml2.vpp_topology.VppNetworkTopologyParser']
def __init__(self, vif_details=None, client=None):
# Details for binding port
@@ -65,6 +66,7 @@ class NetworkTopologyManager(port_binding.PortBindingController):
"""
host_name = port_context.host
+ LOG.debug('Processing port for host: %s', host_name)
elements = list()
try:
# Append to empty list to add as much elements as possible
@@ -85,6 +87,7 @@ class NetworkTopologyManager(port_binding.PortBindingController):
{'host_name': host_name})
# Imported here to avoid cyclic module dependencies
+ # TODO(wdec): Add vpp topology import
from networking_odl.ml2 import ovsdb_topology
elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]
@@ -100,7 +103,8 @@ class NetworkTopologyManager(port_binding.PortBindingController):
# it is invalid for at least one element: discard it
vif_type_is_valid_for_all = False
break
-
+ # TODO(wdec): This needs to deal with not all network elements
+ # supporting all binding types.
if vif_type_is_valid_for_all:
# This is the best VIF type valid for all elements
LOG.debug(
@@ -206,13 +210,14 @@ class NetworkTopologyManager(port_binding.PortBindingController):
try:
for element in parser.parse_network_topology(network_topology):
if not isinstance(element, NetworkTopologyElement):
- raise TypeError(_(
+ raise TypeError(
"Yield element doesn't implement interface "
- "'NetworkTopologyElement': {!r}").format(element))
+ "'NetworkTopologyElement': {!r}".format(element))
# the same element can be known by more host addresses
for host_address in element.host_addresses:
if host_address in addresses:
at_least_one_element_for_asked_addresses = True
+ LOG.debug("Found cached Host: %s \n", host_address)
yield host_address, element
except Exception:
LOG.exception(
@@ -224,8 +229,8 @@ class NetworkTopologyManager(port_binding.PortBindingController):
# calling this method again as soon it is requested and avoid
# waiting for cache expiration
raise ValueError(
- _('No such topology element for given host addresses: {}')
- .format(', '.join(addresses)))
+ 'No such topology element for given host addresses: {}'.format(
+ ', '.join(addresses)))
@six.add_metaclass(abc.ABCMeta)
@@ -240,9 +245,9 @@ class NetworkTopologyParser(object):
module = importlib.import_module(module_name)
clss = getattr(module, class_name)
if not issubclass(clss, cls):
- raise TypeError(_(
+ raise TypeError(
"Class {class_name!r} of module {module_name!r} doesn't "
- "implement 'NetworkTopologyParser' interface.").format(
+ "implement 'NetworkTopologyParser' interface.".format(
class_name=class_name, module_name=module_name))
return clss()
diff --git a/networking-odl/networking_odl/ml2/ovsdb_topology.py b/networking-odl/networking_odl/ml2/ovsdb_topology.py
index f2c8ad8..ed82032 100644
--- a/networking-odl/networking_odl/ml2/ovsdb_topology.py
+++ b/networking-odl/networking_odl/ml2/ovsdb_topology.py
@@ -21,12 +21,11 @@ from oslo_log import log
import six
from six.moves.urllib import parse
+from neutron.common import constants as n_const
from neutron.extensions import portbindings
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api
-from neutron_lib import constants as n_const
-from networking_odl._i18n import _
from networking_odl.ml2 import network_topology
@@ -171,8 +170,7 @@ class OvsdbNetworkTopologyElement(network_topology.NetworkTopologyElement):
status=n_const.PORT_STATUS_ACTIVE)
return
- raise ValueError(
- _('Unable to find any valid segment in given context.'))
+ raise ValueError('Unable to find any valid segment in given context.')
def to_dict(self):
data = super(OvsdbNetworkTopologyElement, self).to_dict()
diff --git a/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py b/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py
deleted file mode 100644
index d24bd55..0000000
--- a/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright (c) 2016 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-from neutron_lib import constants as nl_const
-from requests import exceptions
-import six.moves.urllib.parse as urlparse
-from string import Template
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_serialization import jsonutils
-
-from neutron import context
-from neutron.extensions import portbindings
-from neutron import manager
-from neutron.plugins.ml2 import driver_api
-
-from networking_odl._i18n import _LE, _LI, _LW
-from networking_odl.common import client as odl_client
-from networking_odl.journal import maintenance as mt
-from networking_odl.ml2 import port_binding
-
-cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
-LOG = log.getLogger(__name__)
-
-
-class PseudoAgentDBBindingController(port_binding.PortBindingController):
- """Switch agnostic Port binding controller for OpenDayLight."""
-
- AGENTDB_BINARY = 'neutron-odlagent-portbinding'
- L2_TYPE = "ODL L2"
-
- # TODO(mzmalick): binary, topic and resource_versions to be provided
- # by ODL, Pending ODL NB patches.
- agentdb_row = {
- 'binary': AGENTDB_BINARY,
- 'host': '',
- 'topic': nl_const.L2_AGENT_TOPIC,
- 'configurations': {},
- 'resource_versions': '',
- 'agent_type': L2_TYPE,
- 'start_flag': True}
- # We are not running host agents, so above start_flag is redundant
-
- def __init__(self, hostconf_uri=None, db_plugin=None):
- """Initialization."""
- LOG.debug("Initializing ODL Port Binding Controller")
-
- if not hostconf_uri:
- # extract host/port from ODL URL and append hostconf_uri path
- hostconf_uri = self._make_hostconf_uri(
- cfg.CONF.ml2_odl.url, cfg.CONF.ml2_odl.odl_hostconf_uri)
-
- LOG.debug("ODLPORTBINDING hostconfigs URI: %s", hostconf_uri)
-
- # TODO(mzmalick): disable port-binding for ODL lightweight testing
- self.odl_rest_client = odl_client.OpenDaylightRestClient.create_client(
- url=hostconf_uri)
-
- # Neutron DB plugin instance
- self.agents_db = db_plugin
-
- # Start polling ODL restconf using maintenance thread.
- # default: 30s (should be <= agent keep-alive poll interval)
- self._start_maintenance_thread(cfg.CONF.ml2_odl.restconf_poll_interval)
-
- def _make_hostconf_uri(self, odl_url=None, path=''):
- """Make ODL hostconfigs URI with host/port extraced from ODL_URL."""
- # NOTE(yamahata): for unit test.
- odl_url = odl_url or 'http://localhost:8080/'
-
- # extract ODL_IP and ODL_PORT from ODL_ENDPOINT and append path
- # urlsplit and urlunparse don't throw exceptions
- purl = urlparse.urlsplit(odl_url)
- return urlparse.urlunparse((purl.scheme, purl.netloc,
- path, '', '', ''))
- #
- # TODO(mzmalick):
- # 1. implement websockets for ODL hostconfig events
- #
-
- def _start_maintenance_thread(self, poll_interval):
- self._mainth = mt.MaintenanceThread()
- self._mainth.maintenance_interval = poll_interval
- self._mainth.register_operation(self._get_and_update_hostconfigs)
- self._mainth.start()
-
- def _rest_get_hostconfigs(self):
- try:
- response = self.odl_rest_client.get()
- response.raise_for_status()
- hostconfigs = response.json()['hostconfigs']['hostconfig']
- except exceptions.ConnectionError:
- LOG.error(_LE("Cannot connect to the Opendaylight Controller"),
- exc_info=True)
- return None
- except KeyError:
- LOG.error(_LE("got invalid hostconfigs"),
- exc_info=True)
- return None
- except Exception:
- LOG.warning(_LW("REST/GET odl hostconfig failed, "),
- exc_info=True)
- return None
- else:
- if LOG.isEnabledFor(logging.DEBUG):
- _hconfig_str = jsonutils.dumps(
- response, sort_keys=True, indent=4, separators=(',', ': '))
- LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str)
-
- return hostconfigs
-
- def _get_and_update_hostconfigs(self, session=None):
- LOG.info(_LI("REST/GET hostconfigs from ODL"))
-
- hostconfigs = self._rest_get_hostconfigs()
-
- if not hostconfigs:
- LOG.warning(_LW("ODL hostconfigs REST/GET failed, "
- "will retry on next poll"))
- return # retry on next poll
-
- self._update_agents_db(hostconfigs=hostconfigs)
-
- def _get_neutron_db_plugin(self):
- if (not self.agents_db) and manager.NeutronManager.has_instance():
- self.agents_db = manager.NeutronManager.get_plugin()
- return self.agents_db
-
- def _update_agents_db(self, hostconfigs):
- LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs")
-
- agents_db = self._get_neutron_db_plugin()
-
- if not agents_db: # if ML2 is still initializing
- LOG.warning(_LW("ML2 still initializing, Will retry agentdb"
- " update on next poll"))
- return # Retry on next poll
-
- for host_config in hostconfigs:
- try:
- self.agentdb_row['host'] = host_config['host-id']
- self.agentdb_row['agent_type'] = host_config['host-type']
- self.agentdb_row['configurations'] = host_config['config']
-
- agents_db.create_or_update_agent(
- context.get_admin_context(), self.agentdb_row)
- except Exception:
- LOG.exception(_LE("Unable to update agentdb."))
- continue # try next hostcofig
-
- def _substitute_hconfig_tmpl(self, port_context, hconfig):
- # TODO(mzmalick): Explore options for inlines string splicing of
- # port-id to 14 bytes as required by vhostuser types
- subs_ids = {
- # $IDENTIFER string substitution in hostconfigs JSON string
- 'PORT_ID': port_context.current['id'][:14]
- }
-
- # Substitute identifiers and Convert JSON string to dict
- hconfig_conf_json = Template(hconfig['configurations'])
- substituted_str = hconfig_conf_json.safe_substitute(subs_ids)
- hconfig['configurations'] = jsonutils.loads(substituted_str)
-
- return hconfig
-
- def bind_port(self, port_context):
- """bind port using ODL host configuration."""
- # Get all ODL hostconfigs for this host and type
- agentdb = port_context.host_agents(self.L2_TYPE)
-
- if not agentdb:
- LOG.warning(_LW("No valid hostconfigs in agentsdb for host %s"),
- port_context.host)
- return
-
- for raw_hconfig in agentdb:
- # do any $identifier substitution
- hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig)
-
- # Found ODL hostconfig for this host in agentdb
- LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig)
-
- if self._hconfig_bind_port(port_context, hconfig):
- break # Port binding suceeded!
- else: # Port binding failed!
- LOG.warning(_LW("Failed to bind Port %(pid)s for host "
- "%(host)s on network %(network)s."), {
- 'pid': port_context.current['id'],
- 'host': port_context.host,
- 'network': port_context.network.current['id']})
- else: # No hostconfig found for host in agentdb.
- LOG.warning(_LW("No ODL hostconfigs for host %s found in agentdb"),
- port_context.host)
-
- def _hconfig_bind_port(self, port_context, hconfig):
- """bind port after validating odl host configuration."""
- valid_segment = None
-
- for segment in port_context.segments_to_bind:
- if self._is_valid_segment(segment, hconfig['configurations']):
- valid_segment = segment
- break
- else:
- LOG.debug("No valid segments found!")
- return False
-
- confs = hconfig['configurations']['supported_vnic_types']
-
- # nova provides vnic_type in port_context to neutron.
- # neutron provides supported vif_type for binding based on vnic_type
- # in this case ODL hostconfigs has the vif_type to bind for vnic_type
- vnic_type = port_context.current.get(portbindings.VNIC_TYPE)
-
- if vnic_type != portbindings.VNIC_NORMAL:
- LOG.error(_LE("Binding failed: unsupported VNIC %s"), vnic_type)
- return False
-
- for conf in confs:
- if conf["vnic_type"] == vnic_type:
- vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS)
- LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type)
- break
- else:
- vif_type = portbindings.VIF_TYPE_OVS # default: OVS
- LOG.warning(_LW("No supported vif type found for host %s!, "
- "defaulting to OVS"), port_context.host)
-
- vif_details = conf.get('vif_details', {})
-
- if not vif_details: # empty vif_details could be trouble, warn.
- LOG.warning(_LW("hostconfig:vif_details was empty!"))
-
- LOG.debug("Bind port %(port)s on network %(network)s with valid "
- "segment %(segment)s and VIF type %(vif_type)r "
- "VIF details %(vif_details)r.",
- {'port': port_context.current['id'],
- 'network': port_context.network.current['id'],
- 'segment': valid_segment, 'vif_type': vif_type,
- 'vif_details': vif_details})
-
- port_context.set_binding(valid_segment[driver_api.ID], vif_type,
- vif_details,
- status=nl_const.PORT_STATUS_ACTIVE)
- return True
-
- def _is_valid_segment(self, segment, conf):
- """Verify a segment is supported by ODL."""
- network_type = segment[driver_api.NETWORK_TYPE]
- return network_type in conf['allowed_network_types']
diff --git a/networking-odl/networking_odl/ml2/vpp_ml2.tar b/networking-odl/networking_odl/ml2/vpp_ml2.tar
new file mode 100644
index 0000000..e181208
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/vpp_ml2.tar
Binary files differ
diff --git a/networking-odl/networking_odl/ml2/vpp_topology.py b/networking-odl/networking_odl/ml2/vpp_topology.py
new file mode 100644
index 0000000..c16399d
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/vpp_topology.py
@@ -0,0 +1,194 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import collections
+import os
+
+from oslo_log import log
+import six
+
+from neutron.common import constants as n_const
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+
+from networking_odl.ml2 import network_topology
+
+LOG = log.getLogger(__name__)
+HC_VPP_CAPABILITY = 'urn:opendaylight:params:xml:ns:yang:v3po'
+
+
+class VppNetworkTopologyParser(network_topology.NetworkTopologyParser):
+    """Extract VPP/Honeycomb elements from an ODL network topology.
+
+    Walks the OpenDaylight ``network-topology`` RESTCONF JSON looking for
+    netconf nodes that advertise the v3po (VPP) yang-model capability and
+    turns each one into a :class:`VppNetworkTopologyElement`.
+    """
+
+    def new_element(self, devname):
+        # Factory hook: one topology element per discovered VPP node name.
+        return VppNetworkTopologyElement(devname=devname)
+
+    def parse_network_topology(self, network_topologies):
+        """Yield a VppNetworkTopologyElement for every VPP-capable node.
+
+        :param network_topologies: decoded JSON dict as returned by the
+            ODL ``network-topology`` endpoint.
+        """
+        LOG.debug("Parsing Topology using VPP Topology Parser")
+        # Keyed by node-id so a node seen twice reuses the same element;
+        # OrderedDict keeps discovery order for deterministic iteration.
+        elements_by_name = collections.OrderedDict()
+        for topology in network_topologies['network-topology']['topology']:
+            if topology['topology-id'].startswith('topology-netconf'):
+                for node in topology['node']:
+                    # expected :
+                    # "node-id": "name",
+                    # "netconf-node-topology:host": "172.21.174.41"
+                    # "netconf-node-topology:available-capabilities": {
+                    # "available-capability" : contains the v3po model
+                    node_name = node['node-id']
+                    LOG.debug("Examining capabilities for node: %s\n",
+                              node_name)
+                    try:
+                        capabilities = node[
+                            'netconf-node-topology:available-capabilities']
+                        LOG.debug("Node's capabilities: %s\n",
+                                  capabilities)
+                        # A node is VPP-capable when any advertised
+                        # capability URN contains the v3po namespace.
+                        for item in capabilities['available-capability']:
+                            if HC_VPP_CAPABILITY in item:
+                                LOG.debug("Found VPP matching capability for "
+                                          "node: %s\n", node_name)
+                                element = elements_by_name.get(node_name)
+                                if element is None:
+                                    elements_by_name[node_name] = element = \
+                                        self.new_element(node_name)
+
+                                self._update_elmnt_from_json_netconf_topo_node(
+                                    node, element, node_name)
+                    except KeyError:
+                        # Node exposes no netconf capability list at all.
+                        LOG.debug("No netconf available capabilities found for"
+                                  ": %s\n", node_name)
+
+        # NOTE(review): can there be more than one VPP instance behind the
+        # same IP address? If so, the cache would yield several instances
+        # for the same key -- confirm.
+        for __, element in six.iteritems(elements_by_name):
+            yield element
+
+    def _update_elmnt_from_json_netconf_topo_node(
+            self, node, element, node_name):
+        """Copy connection details from a netconf topology node JSON dict.
+
+        :param node: raw JSON dict for one netconf node.
+        :param element: the VppNetworkTopologyElement to update in place.
+        :param node_name: node-id string, used only for logging.
+        """
+        # fetch remote IP address
+        element.remote_ip = node["netconf-node-topology:host"]
+        # Assume Honeycomb-VPP supports vhost_user
+        element.support_vhost_user = True
+
+        LOG.debug(
+            'Topology element updated:\n'
+            ' - VPP node name: %(node_name)r\n'
+            ' - remote_ip: %(remote_ip)r\n'
+            ' - support_vhost_user: %(support_vhost_user)r',
+            {'node_name': node_name,
+             'remote_ip': element.remote_ip,
+             'support_vhost_user': element.support_vhost_user})
+
+
+class VppNetworkTopologyElement(network_topology.NetworkTopologyElement):
+    """One VPP/Honeycomb host discovered in the ODL topology.
+
+    Knows how to bind a Neutron port on that host using the vhost-user
+    VIF type.  Instances are produced by VppNetworkTopologyParser.
+    """
+
+    devname = None  # Filled in by parser
+    remote_ip = None  # Filled in by parser
+    has_datapath_type_netdev = False  # Placeholder for future capability
+    support_vhost_user = False  # VPP supports it by default actually.
+
+    # location for vhostuser sockets.
+    # TODO(wdec): This should be configurable in the ML2 config.
+    vhostuser_socket_dir = '/tmp/'
+
+    # TODO(wdec): And also this should be configurable in ML2...
+    # prefix for port
+    port_prefix = 'socket_'
+
+    def __init__(self, **kwargs):
+        # Accept only attributes declared on the class; __setattr__ below
+        # rejects anything else.
+        for name, value in six.iteritems(kwargs):
+            setattr(self, name, value)
+
+    @property
+    def host_addresses(self):
+        # For now only the remote IP found in the connection info is
+        # supported; returned as a 1-tuple.
+        return self.remote_ip,
+
+    @property
+    def valid_vif_types(self):
+        # VPP ports are always bound as vhost-user.
+        return [portbindings.VIF_TYPE_VHOST_USER]
+
+    def bind_port(self, port_context, vif_type, vif_details):
+        """Bind the port to the first segment this element supports.
+
+        :param port_context: ML2 PortContext being bound.
+        :param vif_type: VIF type to bind with (expected vhost-user).
+        :param vif_details: base VIF details dict; extended with the
+            vhost-user socket info before binding.
+        :raises ValueError: if no segment in the context is supported.
+        """
+        port_context_id = port_context.current['id']
+        network_context_id = port_context.network.current['id']
+
+        # Bind port to the first valid segment
+        for segment in port_context.segments_to_bind:
+            if self._is_valid_segment(segment):
+                # Guess best VIF details for given host
+                vif_details = self._get_vif_details(
+                    vif_details=vif_details, port_context_id=port_context_id,
+                    vif_type=vif_type)
+                LOG.debug(
+                    'Bind port with valid segment:\n'
+                    '\tport: %(port)r\n'
+                    '\tnetwork: %(network)r\n'
+                    '\tsegment: %(segment)r\n'
+                    '\tVIF type: %(vif_type)r\n'
+                    '\tVIF details: %(vif_details)r',
+                    {'port': port_context_id,
+                     'network': network_context_id,
+                     'segment': segment, 'vif_type': vif_type,
+                     'vif_details': vif_details})
+                port_context.set_binding(
+                    segment[driver_api.ID], vif_type, vif_details,
+                    status=n_const.PORT_STATUS_ACTIVE)
+                return
+
+        raise ValueError('Unable to find any valid segment in given context.')
+
+    def to_dict(self):
+        """Serialize this element, including vhost-user socket settings."""
+        data = super(VppNetworkTopologyElement, self).to_dict()
+        data.update(
+            {'uuid': self.devname,
+             'has_datapath_type_netdev': self.has_datapath_type_netdev,
+             'support_vhost_user': self.support_vhost_user,
+             'valid_vif_types': self.valid_vif_types})
+        if portbindings.VIF_TYPE_VHOST_USER in self.valid_vif_types:
+            data.update({'port_prefix': self.port_prefix,
+                         'vhostuser_socket_dir': self.vhostuser_socket_dir})
+        return data
+
+    def _is_valid_segment(self, segment):
+        """Verify a segment is valid for the OpenDaylight MechanismDriver.
+
+        Verify the requested segment is supported by ODL and return True
+        or False to indicate this to callers.
+        """
+
+        network_type = segment[driver_api.NETWORK_TYPE]
+        return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
+                                constants.TYPE_VXLAN, constants.TYPE_VLAN,
+                                constants.TYPE_FLAT]
+
+    def _get_vif_details(self, vif_details, port_context_id, vif_type):
+        """Return a copy of vif_details extended with vhost-user socket info.
+
+        The original dict is not mutated; for vhost-user bindings the
+        socket path is derived from the configured socket dir, the port
+        prefix and the Neutron port id.
+        """
+        vif_details = dict(vif_details)
+        if vif_type == portbindings.VIF_TYPE_VHOST_USER:
+            socket_path = os.path.join(
+                self.vhostuser_socket_dir,
+                (self.port_prefix + port_context_id))
+
+            vif_details.update({
+                portbindings.VHOST_USER_MODE:
+                    portbindings.VHOST_USER_MODE_SERVER,
+                portbindings.VHOST_USER_SOCKET: socket_path
+            })
+        return vif_details
+
+    def __setattr__(self, name, value):
+        # Raises AttributeError if the class does not declare this
+        # attribute, so unknown kwargs cannot silently create new fields.
+        getattr(type(self), name)
+        super(VppNetworkTopologyElement, self).__setattr__(name, value)