summaryrefslogtreecommitdiffstats
path: root/networking-odl/networking_odl
diff options
context:
space:
mode:
Diffstat (limited to 'networking-odl/networking_odl')
-rw-r--r--networking-odl/networking_odl/__init__.py23
-rw-r--r--networking-odl/networking_odl/_i18n.py50
-rw-r--r--networking-odl/networking_odl/cmd/__init__.py0
-rw-r--r--networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py123
-rwxr-xr-xnetworking-odl/networking_odl/cmd/test_setup_hostconfig.sh3
-rw-r--r--networking-odl/networking_odl/common/__init__.py0
-rw-r--r--networking-odl/networking_odl/common/cache.py197
-rw-r--r--networking-odl/networking_odl/common/callback.py73
-rw-r--r--networking-odl/networking_odl/common/client.py94
-rw-r--r--networking-odl/networking_odl/common/config.py67
-rw-r--r--networking-odl/networking_odl/common/constants.py55
-rw-r--r--networking-odl/networking_odl/common/exceptions.py20
-rw-r--r--networking-odl/networking_odl/common/filters.py96
-rw-r--r--networking-odl/networking_odl/common/lightweight_testing.py177
-rw-r--r--networking-odl/networking_odl/common/utils.py60
-rw-r--r--networking-odl/networking_odl/db/__init__.py0
-rw-r--r--networking-odl/networking_odl/db/db.py234
-rw-r--r--networking-odl/networking_odl/db/migration/__init__.py0
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/README1
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py0
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/env.py99
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako36
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD1
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD1
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py28
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py36
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py32
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py54
-rw-r--r--networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py52
-rw-r--r--networking-odl/networking_odl/db/models.py47
-rw-r--r--networking-odl/networking_odl/fwaas/__init__.py0
-rw-r--r--networking-odl/networking_odl/fwaas/driver.py69
-rw-r--r--networking-odl/networking_odl/journal/__init__.py0
-rw-r--r--networking-odl/networking_odl/journal/cleanup.py46
-rw-r--r--networking-odl/networking_odl/journal/dependency_validations.py267
-rw-r--r--networking-odl/networking_odl/journal/full_sync.py114
-rw-r--r--networking-odl/networking_odl/journal/journal.py220
-rw-r--r--networking-odl/networking_odl/journal/maintenance.py73
-rw-r--r--networking-odl/networking_odl/l2gateway/__init__.py0
-rw-r--r--networking-odl/networking_odl/l2gateway/driver.py121
-rw-r--r--networking-odl/networking_odl/l3/__init__.py0
-rw-r--r--networking-odl/networking_odl/l3/l3_odl.py189
-rw-r--r--networking-odl/networking_odl/l3/l3_odl_v2.py206
-rw-r--r--networking-odl/networking_odl/lbaas/__init__.py0
-rw-r--r--networking-odl/networking_odl/lbaas/driver_v1.py125
-rw-r--r--networking-odl/networking_odl/lbaas/driver_v2.py126
-rw-r--r--networking-odl/networking_odl/ml2/README.odl41
-rw-r--r--networking-odl/networking_odl/ml2/__init__.py0
-rw-r--r--networking-odl/networking_odl/ml2/legacy_port_binding.py84
-rw-r--r--networking-odl/networking_odl/ml2/mech_driver.py458
-rw-r--r--networking-odl/networking_odl/ml2/mech_driver_v2.py146
-rw-r--r--networking-odl/networking_odl/ml2/network_topology.py313
-rw-r--r--networking-odl/networking_odl/ml2/ovsdb_topology.py218
-rw-r--r--networking-odl/networking_odl/ml2/port_binding.py121
-rw-r--r--networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py263
-rw-r--r--networking-odl/networking_odl/tests/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/base.py28
-rw-r--r--networking-odl/networking_odl/tests/unit/__init__.py19
-rw-r--r--networking-odl/networking_odl/tests/unit/common/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/common/test_cache.py242
-rw-r--r--networking-odl/networking_odl/tests/unit/common/test_callback.py83
-rw-r--r--networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py174
-rw-r--r--networking-odl/networking_odl/tests/unit/common/test_utils.py156
-rw-r--r--networking-odl/networking_odl/tests/unit/db/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/db/test_db.py243
-rw-r--r--networking-odl/networking_odl/tests/unit/fwaas/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py29
-rw-r--r--networking-odl/networking_odl/tests/unit/journal/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py44
-rw-r--r--networking-odl/networking_odl/tests/unit/journal/test_full_sync.py152
-rw-r--r--networking-odl/networking_odl/tests/unit/journal/test_maintenance.py93
-rw-r--r--networking-odl/networking_odl/tests/unit/l2gateway/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py127
-rw-r--r--networking-odl/networking_odl/tests/unit/l3/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py310
-rw-r--r--networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py526
-rw-r--r--networking-odl/networking_odl/tests/unit/lbaas/__init__.py0
-rw-r--r--networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py32
-rw-r--r--networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py32
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/__init__.py0
-rwxr-xr-xnetworking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh37
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js62
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json171
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_driver.py99
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py89
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py596
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py577
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py475
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py248
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py44
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py334
-rw-r--r--networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json182
92 files changed, 10063 insertions, 0 deletions
diff --git a/networking-odl/networking_odl/__init__.py b/networking-odl/networking_odl/__init__.py
new file mode 100644
index 0000000..f2b8357
--- /dev/null
+++ b/networking-odl/networking_odl/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import gettext
+import six
+
+
+if six.PY2:
+ gettext.install('networking_odl', unicode=1)
+else:
+ gettext.install('networking_odl')
diff --git a/networking-odl/networking_odl/_i18n.py b/networking-odl/networking_odl/_i18n.py
new file mode 100644
index 0000000..d338871
--- /dev/null
+++ b/networking-odl/networking_odl/_i18n.py
@@ -0,0 +1,50 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html .
+
+"""
+
+import oslo_i18n
+
+DOMAIN = "networking_odl"
+
+_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# The contextual translation function using the name "_C"
+# requires oslo.i18n >=2.1.0
+_C = _translators.contextual_form
+
+# The plural translation function using the name "_P"
+# requires oslo.i18n >=2.1.0
+_P = _translators.plural_form
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+
+def get_available_languages():
+ return oslo_i18n.get_available_languages(DOMAIN)
diff --git a/networking-odl/networking_odl/cmd/__init__.py b/networking-odl/networking_odl/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/cmd/__init__.py
diff --git a/networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py b/networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py
new file mode 100644
index 0000000..8b8b1d3
--- /dev/null
+++ b/networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from neutron._i18n import _
+from neutron._i18n import _LE
+from neutron._i18n import _LI
+from neutron.agent.common import utils
+from neutron.common import config
+
+LOG = log.getLogger(__name__)
+
+
+class SetOvsHostconfigs(object):
+
+ # Refer below for ovs ext-id strings
+ # https://review.openstack.org/#/c/309630/
+ extid_str = 'external_ids:{}={}'
+ odl_os_hconf_str = 'odl_os_hostconfig_config_{}'
+ odl_os_hostid_str = 'odl_os_hostconfig_hostid'
+ odl_os_hosttype_str = 'odl_os_hostconfig_hosttype'
+
+ # TODO(mzmalick): use neutron.agent.ovsdb instead of subprocess.Popen
+ ovs_cmd_get_uuid = ['ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid']
+ ovs_cmd_set_extid = ['ovs-vsctl', 'set', 'Open_vSwitch', '', '']
+
+ UUID = 3
+ EXTID = 4
+
+ def __init__(self):
+ self.ovs_uuid = self.get_ovs_uuid()
+
+ def ovs_exec_cmd(self, cmd):
+ LOG.info(_LI("SET-HOSTCONFIGS: Executing cmd: %s"), ' '.join(cmd))
+ return utils.execute(cmd, return_stderr=True, run_as_root=True)
+
+ def get_ovs_uuid(self):
+ return self.ovs_exec_cmd(self.ovs_cmd_get_uuid)[0].strip()
+
+ def set_extid_hostname(self, hname):
+ self.ovs_cmd_set_extid[self.UUID] = self.ovs_uuid
+ self.ovs_cmd_set_extid[self.EXTID] = self.extid_str.format(
+ self.odl_os_hostid_str, hname)
+ return self.ovs_exec_cmd(self.ovs_cmd_set_extid)
+
+ def set_extid_hosttype(self, htype):
+ self.ovs_cmd_set_extid[self.UUID] = self.ovs_uuid
+ self.ovs_cmd_set_extid[self.EXTID] = self.extid_str.format(
+ self.odl_os_hosttype_str, htype)
+ return self.ovs_exec_cmd(self.ovs_cmd_set_extid)
+
+ def set_extid_hostconfig(self, htype, hconfig):
+ ext_htype = self.odl_os_hconf_str.format(
+ htype.lower().replace(' ', '_'))
+ self.ovs_cmd_set_extid[self.UUID] = self.ovs_uuid
+ self.ovs_cmd_set_extid[self.EXTID] = self.extid_str.format(
+ ext_htype, jsonutils.dumps(hconfig))
+ return self.ovs_exec_cmd(self.ovs_cmd_set_extid)
+
+ def set_ovs_extid_hostconfigs(self, conf):
+ if not conf.ovs_hostconfigs:
+ LOG.error(_LE("ovs_hostconfigs argument needed!"))
+ return
+
+ json_str = cfg.CONF.ovs_hostconfigs
+ json_str.replace("\'", "\"")
+ LOG.debug("SET-HOSTCONFIGS: JSON String %s", json_str)
+
+ self.set_extid_hostname(cfg.CONF.host)
+ htype_config = jsonutils.loads(json_str)
+
+ for htype in htype_config.keys():
+ self.set_extid_hostconfig(htype, htype_config[htype])
+
+
+def setup_conf():
+ """setup cmdline options."""
+ cli_opts = [
+ cfg.StrOpt('ovs_hostconfigs', help=_(
+ "OVS hostconfiguration for OpenDaylight "
+ "as a JSON string"))
+ ]
+
+ conf = cfg.CONF
+ conf.register_cli_opts(cli_opts)
+ conf.import_opt('host', 'neutron.common.config')
+ conf()
+ return conf
+
+
+def main():
+
+ conf = setup_conf()
+ config.setup_logging()
+ SetOvsHostconfigs().set_ovs_extid_hostconfigs(conf)
+
+#
+# command line example (run without line breaks):
+#
+# set_ovs_hostconfigs.py --ovs_hostconfigs='{"ODL L2": {
+# "supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs",
+# "vif_details":{}}], "allowed_network_types":["local","vlan",
+# "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}},
+# "ODL L3": {}}' --debug
+#
+
+if __name__ == '__main__':
+ main()
diff --git a/networking-odl/networking_odl/cmd/test_setup_hostconfig.sh b/networking-odl/networking_odl/cmd/test_setup_hostconfig.sh
new file mode 100755
index 0000000..1651d0e
--- /dev/null
+++ b/networking-odl/networking_odl/cmd/test_setup_hostconfig.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+python set_ovs_hostconfigs.py --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}'
diff --git a/networking-odl/networking_odl/common/__init__.py b/networking-odl/networking_odl/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/common/__init__.py
diff --git a/networking-odl/networking_odl/common/cache.py b/networking-odl/networking_odl/common/cache.py
new file mode 100644
index 0000000..6c44cc3
--- /dev/null
+++ b/networking-odl/networking_odl/common/cache.py
@@ -0,0 +1,197 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import six
+import sys
+import time
+
+from oslo_log import log
+
+from networking_odl._i18n import _LW
+
+
+LOG = log.getLogger(__name__)
+
+
+class CacheEntry(collections.namedtuple('CacheEntry', ['timeout', 'values'])):
+
+ error = None
+
+ @classmethod
+ def create(cls, timeout, *values):
+ return CacheEntry(timeout, list(values))
+
+ def add_value(self, value):
+ self.values.append(value)
+
+ def is_expired(self, current_clock):
+ return self.timeout <= current_clock
+
+ def __hash__(self):
+ return id(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class Cache(object):
+ '''Generic mapping class used to cache mapping
+
+ Example of uses:
+ - host name to IP addresses mapping
+ - IP addresses to ODL networking topology elements mapping
+ '''
+
+ # TODO(Federico Ressi) after Mitaka: this class should store cached data
+ # in a place shared between more hosts using a caching mechanism coherent
+ # with other OpenStack libraries. This is specially interesting in the
+ # context of reliability when there are more Neutron instances and direct
+ # connection to ODL is broken.
+
+ create_new_entry = CacheEntry.create
+
+ def __init__(self, fetch_all_func):
+ if not callable(fetch_all_func):
+ message = 'Expected callable as parameter, got {!r}.'.format(
+ fetch_all_func)
+ raise TypeError(message)
+ self._fetch_all = fetch_all_func
+ self.clear()
+
+ def clear(self):
+ self._entries = collections.OrderedDict()
+
+ def fetch(self, key, timeout):
+ __, value = self.fetch_any([key], timeout=timeout)
+ return value
+
+ def fetch_any(self, keys, timeout):
+ return next(self.fetch_all(keys=keys, timeout=timeout))
+
+ def fetch_all(self, keys, timeout):
+ # the current clock reading ("now") as a number
+ current_clock = time.clock()
+ # this is the moment in the future at which new entries will expire
+ new_entries_timeout = current_clock + timeout
+ # entries to be fetched because missing or expired
+ new_entries = collections.OrderedDict()
+ # all entries missing or expired
+ missing = collections.OrderedDict()
+ # captured error for the case a problem has to be reported
+ cause_exc_info = None
+
+ for key in keys:
+ entry = self._entries.get(key)
+ if entry is None or entry.is_expired(current_clock) or entry.error:
+ # this entry has to be fetched
+ new_entries[key] = missing[key] =\
+ self.create_new_entry(new_entries_timeout)
+ elif entry.values:
+ # Yield existing entry
+ for value in entry.values:
+ yield key, value
+ else:
+ # This entry has not expired and no error occurred when it was
+ # fetched. Therefore we accept that there are no values for the
+ # given key until it expires. This is going to produce a
+ # KeyError if it is still missing at the end of this function.
+ missing[key] = entry
+
+ if missing:
+ if new_entries:
+ # Fetch some entries and update the cache
+ try:
+ new_entry_keys = tuple(new_entries)
+ for key, value in self._fetch_all(new_entry_keys):
+ entry = new_entries.get(key)
+ if entry:
+ # Add fresh new value
+ entry.add_value(value)
+ else:
+ # This key was not asked for, but we take it
+ # anyway. "Noli equi dentes inspicere donati."
+ new_entries[key] = entry = self.create_new_entry(
+ new_entries_timeout, value)
+
+ # pylint: disable=broad-except
+ except Exception:
+ # Something has gone wrong: update and yield what we got so
+ # far before raising any error
+ cause_exc_info = sys.exc_info()
+ LOG.warning(
+ _LW('Error fetching values for keys: %r'),
+ ', '.join(repr(k) for k in new_entry_keys),
+ exc_info=cause_exc_info)
+
+ # update the cache with new fresh entries
+ self._entries.update(new_entries)
+
+ missing_keys = []
+ for key, entry in six.iteritems(missing):
+ if entry.values:
+ # yield entries that were missing before
+ for value in entry.values:
+ # Yield just fetched entry
+ yield key, value
+ else:
+ if cause_exc_info:
+ # mark this entry as failed
+ entry.error = cause_exc_info
+ # after all this entry is still without any value
+ missing_keys.append(key)
+
+ if missing_keys:
+ # Some entries are still missing, probably because their
+ # keys were invalid. It's time to raise an error.
+ missing_keys = tuple(missing_keys)
+ if not cause_exc_info:
+ # Search for the error cause in missing entries
+ for key in missing_keys:
+ error = self._entries[key].error
+ if error:
+ # A cached entry for which the fetch method produced an
+ # error will keep reporting that same error if a later
+ # fetch yields neither values nor a new error.
+ # Is this what we want?
+ break
+
+ else:
+ # If the cause of the problem is not known, then
+ # the keys were probably wrong
+ message = 'Invalid keys: {!r}'.format(
+ ', '.join(missing_keys))
+ error = KeyError(message)
+
+ try:
+ raise error
+ except KeyError:
+ cause_exc_info = sys.exc_info()
+
+ raise CacheFetchError(
+ missing_keys=missing_keys, cause_exc_info=cause_exc_info)
+
+
+class CacheFetchError(KeyError):
+
+ def __init__(self, missing_keys, cause_exc_info):
+ super(CacheFetchError, self).__init__(str(cause_exc_info[1]))
+ self.cause_exc_info = cause_exc_info
+ self.missing_keys = missing_keys
+
+ def reraise_cause(self):
+ six.reraise(*self.cause_exc_info)
diff --git a/networking-odl/networking_odl/common/callback.py b/networking-odl/networking_odl/common/callback.py
new file mode 100644
index 0000000..d9d168b
--- /dev/null
+++ b/networking-odl/networking_odl/common/callback.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+from oslo_log import log as logging
+
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
+
+from networking_odl.common import constants as odl_const
+
+LOG = logging.getLogger(__name__)
+
+ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural'))
+_RESOURCE_MAPPING = {
+ resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS),
+ resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE,
+ odl_const.ODL_SG_RULES),
+}
+_OPERATION_MAPPING = {
+ events.AFTER_CREATE: odl_const.ODL_CREATE,
+ events.AFTER_UPDATE: odl_const.ODL_UPDATE,
+ events.AFTER_DELETE: odl_const.ODL_DELETE,
+}
+
+
+class OdlSecurityGroupsHandler(object):
+
+ def __init__(self, odl_driver):
+ self.odl_driver = odl_driver
+ self._subscribe()
+
+ def _subscribe(self):
+ for event in (events.AFTER_CREATE, events.AFTER_DELETE):
+ registry.subscribe(self.sg_callback, resources.SECURITY_GROUP,
+ event)
+ registry.subscribe(self.sg_callback, resources.SECURITY_GROUP_RULE,
+ event)
+
+ registry.subscribe(self.sg_callback, resources.SECURITY_GROUP,
+ events.AFTER_UPDATE)
+
+ def sg_callback(self, resource, event, trigger, **kwargs):
+ res = kwargs.get(resource)
+ res_id = kwargs.get("%s_id" % resource)
+ odl_res_type = _RESOURCE_MAPPING[resource]
+
+ odl_ops = _OPERATION_MAPPING[event]
+ odl_res_dict = None if res is None else {odl_res_type.singular: res}
+
+ LOG.debug("Calling sync_from_callback with ODL_OPS (%(odl_ops)s) "
+ "ODL_RES_TYPE (%(odl_res_type)s) RES_ID (%(res_id)s) "
+ "ODL_RES_DICT (%(odl_res_dict)s) KWARGS (%(kwargs)s)",
+ {'odl_ops': odl_ops, 'odl_res_type': odl_res_type,
+ 'res_id': res_id, 'odl_res_dict': odl_res_dict,
+ 'kwargs': kwargs})
+
+ self.odl_driver.sync_from_callback(odl_ops, odl_res_type,
+ res_id, odl_res_dict)
diff --git a/networking-odl/networking_odl/common/client.py b/networking-odl/networking_odl/common/client.py
new file mode 100644
index 0000000..45349e9
--- /dev/null
+++ b/networking-odl/networking_odl/common/client.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import excutils
+import requests
+
+
+LOG = log.getLogger(__name__)
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+
+class OpenDaylightRestClient(object):
+
+ @classmethod
+ def create_client(cls, url=None):
+ if cfg.CONF.ml2_odl.enable_lightweight_testing:
+ LOG.debug("ODL lightweight testing is enabled, "
+ "returning a OpenDaylightLwtClient instance")
+
+ """Have to import at here, otherwise we create a dependency loop"""
+ from networking_odl.common import lightweight_testing as lwt
+ cls = lwt.OpenDaylightLwtClient
+
+ return cls(
+ url or cfg.CONF.ml2_odl.url,
+ cfg.CONF.ml2_odl.username,
+ cfg.CONF.ml2_odl.password,
+ cfg.CONF.ml2_odl.timeout)
+
+ def __init__(self, url, username, password, timeout):
+ self.url = url
+ self.timeout = timeout
+ self.auth = (username, password)
+
+ def get(self, urlpath='', data=None):
+ return self.request('get', urlpath, data)
+
+ def put(self, urlpath='', data=None):
+ return self.request('put', urlpath, data)
+
+ def delete(self, urlpath='', data=None):
+ return self.request('delete', urlpath, data)
+
+ def request(self, method, urlpath='', data=None):
+ headers = {'Content-Type': 'application/json'}
+ url = '/'.join([self.url, urlpath])
+ LOG.debug(
+ "Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)",
+ {'method': method, 'url': url, 'data': data})
+ return requests.request(
+ method, url=url, headers=headers, data=data, auth=self.auth,
+ timeout=self.timeout)
+
+ def sendjson(self, method, urlpath, obj):
+ """Send json to the OpenDaylight controller."""
+ data = jsonutils.dumps(obj, indent=2) if obj else None
+ return self._check_rensponse(self.request(method, urlpath, data))
+
+ def try_delete(self, urlpath):
+ rensponse = self.delete(urlpath)
+ if rensponse.status_code == requests.codes.not_found:
+ # The resource is already removed. ignore 404 gracefully
+ LOG.debug("%(urlpath)s doesn't exist", {'urlpath': urlpath})
+ return False
+ else:
+ self._check_rensponse(rensponse)
+ return True
+
+ def _check_rensponse(self, rensponse):
+ try:
+ rensponse.raise_for_status()
+ except requests.HTTPError as error:
+ with excutils.save_and_reraise_exception():
+ LOG.debug("Exception from ODL: %(e)s %(text)s",
+ {'e': error, 'text': rensponse.text}, exc_info=1)
+ else:
+ LOG.debug("Got response:\n"
+ "(%(response)s)", {'response': rensponse.text})
+ return rensponse
diff --git a/networking-odl/networking_odl/common/config.py b/networking-odl/networking_odl/common/config.py
new file mode 100644
index 0000000..c921242
--- /dev/null
+++ b/networking-odl/networking_odl/common/config.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from networking_odl._i18n import _
+
+
+odl_opts = [
+ cfg.StrOpt('url',
+ help=_("HTTP URL of OpenDaylight REST interface.")),
+ cfg.StrOpt('username',
+ help=_("HTTP username for authentication.")),
+ cfg.StrOpt('password', secret=True,
+ help=_("HTTP password for authentication.")),
+ cfg.IntOpt('timeout', default=10,
+ help=_("HTTP timeout in seconds.")),
+ cfg.IntOpt('session_timeout', default=30,
+ help=_("Tomcat session timeout in minutes.")),
+ cfg.IntOpt('sync_timeout', default=10,
+ help=_("(V2 driver) Sync thread timeout in seconds.")),
+ cfg.IntOpt('retry_count', default=5,
+ help=_("(V2 driver) Number of times to retry a row "
+ "before failing.")),
+ cfg.IntOpt('maintenance_interval', default=300,
+ help=_("(V2 driver) Journal maintenance operations interval "
+ "in seconds.")),
+ cfg.IntOpt('completed_rows_retention', default=600,
+ help=_("(V2 driver) Time to keep completed rows in seconds."
+ "Completed rows retention will be checked every "
+ "maintenance_interval by the cleanup thread."
+ "To disable completed rows deletion "
+ "value should be -1")),
+ cfg.BoolOpt('enable_lightweight_testing',
+ default=False,
+ help=_('Test without real ODL.')),
+ cfg.StrOpt('port_binding_controller',
+ default='network-topology',
+ help=_('Name of the controller to be used for port binding.')),
+ cfg.IntOpt('processing_timeout', default='100',
+ help=_("(V2 driver) Time in seconds to wait before a "
+ "processing row is marked back to pending.")),
+ cfg.StrOpt('odl_hostconf_uri',
+ help=_("Path for ODL host configuration REST interface"),
+ default="/restconf/operational/neutron:neutron/hostconfigs"),
+ cfg.IntOpt('restconf_poll_interval', default=30,
+ help=_("Poll interval in seconds for getting ODL hostconfig")),
+
+]
+
+cfg.CONF.register_opts(odl_opts, "ml2_odl")
+
+
+def list_opts():
+ return [('ml2_odl', odl_opts)]
diff --git a/networking-odl/networking_odl/common/constants.py b/networking-odl/networking_odl/common/constants.py
new file mode 100644
index 0000000..50c0117
--- /dev/null
+++ b/networking-odl/networking_odl/common/constants.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+ODL_NETWORK = 'network'
+ODL_NETWORKS = 'networks'
+ODL_SUBNET = 'subnet'
+ODL_SUBNETS = 'subnets'
+ODL_PORT = 'port'
+ODL_PORTS = 'ports'
+ODL_SG = 'security_group'
+ODL_SGS = 'security_groups'
+ODL_SG_RULE = 'security_group_rule'
+ODL_SG_RULES = 'security_group_rules'
+ODL_ROUTER = 'router'
+ODL_ROUTERS = 'routers'
+ODL_ROUTER_INTF = 'router_interface'
+ODL_FLOATINGIP = 'floatingip'
+ODL_FLOATINGIPS = 'floatingips'
+
+ODL_LOADBALANCER = 'loadbalancer'
+ODL_LOADBALANCERS = 'loadbalancers'
+ODL_LISTENER = 'listener'
+ODL_LISTENERS = 'listeners'
+ODL_POOL = 'pool'
+ODL_POOLS = 'pools'
+ODL_MEMBER = 'member'
+ODL_MEMBERS = 'members'
+ODL_HEALTHMONITOR = 'healthmonitor'
+ODL_HEALTHMONITORS = 'healthmonitors'
+
+ODL_CREATE = 'create'
+ODL_UPDATE = 'update'
+ODL_DELETE = 'delete'
+ODL_ADD = 'add'
+ODL_REMOVE = 'remove'
+
+ODL_UUID_NOT_USED = '0'
+
+# Constants for journal operation states
+PENDING = 'pending'
+PROCESSING = 'processing'
+FAILED = 'failed'
+COMPLETED = 'completed'
diff --git a/networking-odl/networking_odl/common/exceptions.py b/networking-odl/networking_odl/common/exceptions.py
new file mode 100644
index 0000000..f174c10
--- /dev/null
+++ b/networking-odl/networking_odl/common/exceptions.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib import exceptions as exc
+
+
+class OpendaylightAuthError(exc.NeutronException):
+ message = '%(msg)s'
diff --git a/networking-odl/networking_odl/common/filters.py b/networking-odl/networking_odl/common/filters.py
new file mode 100644
index 0000000..fb42a0e
--- /dev/null
+++ b/networking-odl/networking_odl/common/filters.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from networking_odl.common import constants as odl_const
+from networking_odl.common import utils as odl_utils
+
+
+def _filter_unmapped_null(resource_dict, unmapped_keys):
+ # NOTE(yamahata): bug work around
+ # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
+ # Null-value for an unmapped element causes next mapped
+ # collection to contain a null value
+ # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
+ #
+ # Java Object:
+ # class Root {
+ # Collection<String> mappedCollection = new ArrayList<String>;
+ # }
+ #
+    # Result:
+    #   mappedCollection contains one element; null
+ #
+ # TODO(yamahata): update along side with neutron and ODL
+ # add when neutron adds more extensions
+ # delete when ODL neutron northbound supports it
+ # TODO(yamahata): do same thing for other resources
+ keys_to_del = [key for key in unmapped_keys
+ if resource_dict.get(key) is None]
+ if keys_to_del:
+ odl_utils.try_del(resource_dict, keys_to_del)
+
+
+_NETWORK_UNMAPPED_KEYS = ['qos_policy_id']
+_PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name',
+ 'port_security_enabled', 'qos_policy_id']
+
+
+def _filter_network_create(network):
+ odl_utils.try_del(network, ['status', 'subnets'])
+ _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
+
+
+def _filter_network_update(network):
+ odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
+ _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
+
+
+def _filter_subnet_update(subnet):
+ odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
+ 'allocation_pools', 'tenant_id'])
+
+
+def _filter_port_create(port):
+ """Filter out port attributes not required for a create."""
+ odl_utils.try_del(port, ['status'])
+ _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
+
+
+def _filter_port_update(port):
+ """Filter out port attributes for an update operation."""
+ odl_utils.try_del(port, ['network_id', 'id', 'status', 'mac_address',
+ 'tenant_id', 'fixed_ips'])
+ _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
+
+
+def _filter_router_update(router):
+ """Filter out attributes for an update operation."""
+ odl_utils.try_del(router, ['id', 'tenant_id', 'status'])
+
+
+_FILTER_MAP = {
+ (odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create,
+ (odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update,
+ (odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update,
+ (odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create,
+ (odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update,
+ (odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update,
+}
+
+
+def filter_for_odl(object_type, operation, data):
+ """Filter out the attributed before sending the data to ODL"""
+ filter_key = (object_type, operation)
+ if filter_key in _FILTER_MAP:
+ _FILTER_MAP[filter_key](data)
diff --git a/networking-odl/networking_odl/common/lightweight_testing.py b/networking-odl/networking_odl/common/lightweight_testing.py
new file mode 100644
index 0000000..3d0cf2e
--- /dev/null
+++ b/networking-odl/networking_odl/common/lightweight_testing.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2015 Intel Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from copy import deepcopy
+import requests
+import six
+
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from networking_odl._i18n import _
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+
+
+LOG = logging.getLogger(__name__)
+
+OK = requests.codes.ok
+NO_CONTENT = requests.codes.no_content
+NOT_ALLOWED = requests.codes.not_allowed
+NOT_FOUND = requests.codes.not_found
+BAD_REQUEST = requests.codes.bad_request
+
+
+class OpenDaylightLwtClient(client.OpenDaylightRestClient):
+ """Lightweight testing client"""
+
+ lwt_dict = {odl_const.ODL_NETWORKS: {},
+ odl_const.ODL_SUBNETS: {},
+ odl_const.ODL_PORTS: {},
+ odl_const.ODL_SGS: {},
+ odl_const.ODL_SG_RULES: {},
+ odl_const.ODL_LOADBALANCERS: {},
+ odl_const.ODL_LISTENERS: {},
+ odl_const.ODL_POOLS: {},
+ odl_const.ODL_MEMBERS: {},
+ odl_const.ODL_HEALTHMONITORS: {}}
+
+ @classmethod
+ def _make_response(cls, status_code=OK, content=None):
+ """Only supports 'content-type': 'application/json'"""
+ response = requests.models.Response()
+ response.status_code = status_code
+ if content:
+ response.raw = six.BytesIO(
+ jsonutils.dumps(content).encode('utf-8'))
+
+ return response
+
+ @classmethod
+ def _get_resource_id(cls, urlpath):
+        # resource ID is the last element of urlpath
+ return str(urlpath).rsplit('/', 1)[-1]
+
+ @classmethod
+ def post(cls, resource_type, resource_dict, urlpath, resource_list):
+ """No ID in URL, elements in resource_list must have ID"""
+
+ if resource_list is None:
+ raise ValueError(_("resource_list can not be None"))
+
+ for resource in resource_list:
+ if resource['id'] in resource_dict:
+ LOG.debug("%s %s already exists", resource_type,
+ resource['id'])
+ response = cls._make_response(NOT_ALLOWED)
+ raise requests.exceptions.HTTPError(response=response)
+
+ resource_dict[resource['id']] = deepcopy(resource)
+
+ return cls._make_response(NO_CONTENT)
+
+ @classmethod
+ def put(cls, resource_type, resource_dict, urlpath, resource_list):
+
+ resource_id = cls._get_resource_id(urlpath)
+
+ if resource_list is None:
+ raise ValueError(_("resource_list can not be None"))
+
+ if resource_id and len(resource_list) != 1:
+ LOG.debug("Updating %s with multiple resources", urlpath)
+ response = cls._make_response(BAD_REQUEST)
+ raise requests.exceptions.HTTPError(response=response)
+
+ for resource in resource_list:
+ res_id = resource_id or resource['id']
+ if res_id in resource_dict:
+ resource_dict[res_id].update(deepcopy(resource))
+ else:
+ LOG.debug("%s %s does not exist", resource_type, res_id)
+ response = cls._make_response(NOT_FOUND)
+ raise requests.exceptions.HTTPError(response=response)
+
+ return cls._make_response(NO_CONTENT)
+
+ @classmethod
+ def delete(cls, resource_type, resource_dict, urlpath, resource_list):
+
+ if resource_list is None:
+ resource_id = cls._get_resource_id(urlpath)
+ id_list = [resource_id]
+ else:
+ id_list = [res['id'] for res in resource_list]
+
+ for res_id in id_list:
+ removed = resource_dict.pop(res_id, None)
+ if removed is None:
+ LOG.debug("%s %s does not exist", resource_type, res_id)
+ response = cls._make_response(NOT_FOUND)
+ raise requests.exceptions.HTTPError(response=response)
+
+ return cls._make_response(NO_CONTENT)
+
+ @classmethod
+ def get(cls, resource_type, resource_dict, urlpath, resource_list=None):
+
+ resource_id = cls._get_resource_id(urlpath)
+
+ if resource_id:
+ resource = resource_dict.get(resource_id)
+ if resource is None:
+ LOG.debug("%s %s does not exist", resource_type, resource_id)
+ response = cls._make_response(NOT_FOUND)
+ raise requests.exceptions.HTTPError(response=response)
+ else:
+ # When getting single resource, return value is a dict
+ r_list = {resource_type[:-1]: deepcopy(resource)}
+ return cls._make_response(OK, r_list)
+
+ r_list = [{resource_type[:-1]: deepcopy(res)}
+ for res in six.itervalues(resource_dict)]
+
+ return cls._make_response(OK, r_list)
+
+ def sendjson(self, method, urlpath, obj=None):
+ """Lightweight testing without ODL"""
+
+ if '/' not in urlpath:
+ urlpath += '/'
+
+ resource_type = str(urlpath).split('/', 1)[0]
+ resource_type = resource_type.replace('-', '_')
+
+ resource_dict = self.lwt_dict.get(resource_type)
+
+ if resource_dict is None:
+ LOG.debug("Resource type %s is not supported", resource_type)
+ response = self._make_response(NOT_FOUND)
+ raise requests.exceptions.HTTPError(response=response)
+
+ func = getattr(self, str(method).lower())
+
+ resource_list = None
+ if obj:
+ """If obj is not None, it can only have one entry"""
+ assert len(obj) == 1, "Obj can only have one entry"
+
+ key, resource_list = list(obj.items())[0]
+
+ if not isinstance(resource_list, list):
+ # Need to transform resource_list to a real list, i.e. [res]
+ resource_list = [resource_list]
+
+ return func(resource_type, resource_dict, urlpath, resource_list)
diff --git a/networking-odl/networking_odl/common/utils.py b/networking-odl/networking_odl/common/utils.py
new file mode 100644
index 0000000..a01a14a
--- /dev/null
+++ b/networking-odl/networking_odl/common/utils.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import socket
+
+from oslo_log import log
+
+from networking_odl.common import cache
+
+LOG = log.getLogger(__name__)
+
+
+def try_del(d, keys):
+ """Ignore key errors when deleting from a dictionary."""
+ for key in keys:
+ try:
+ del d[key]
+ except KeyError:
+ pass
+
+
+def _fetch_all_addresses_by_hostnames(hostnames):
+ for name in hostnames:
+ # it uses an ordered dict to avoid duplicates and keep order
+ entries = collections.OrderedDict(
+ (info[4][0], None) for info in socket.getaddrinfo(name, None))
+ for entry in entries:
+ yield name, entry
+
+
+_addresses_by_name_cache = cache.Cache(_fetch_all_addresses_by_hostnames)
+
+
+def get_addresses_by_name(name, time_to_live=60.0):
+ """Gets and caches addresses for given name.
+
+ This is a cached wrapper for function 'socket.getaddrinfo'.
+
+    :returns: a sequence of unique addresses bound to the given hostname.
+ """
+
+ try:
+ results = _addresses_by_name_cache.fetch_all(
+ [name], timeout=time_to_live)
+ return tuple(address for name, address in results)
+ except cache.CacheFetchError as error:
+ error.reraise_cause()
diff --git a/networking-odl/networking_odl/db/__init__.py b/networking-odl/networking_odl/db/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/db/__init__.py
diff --git a/networking-odl/networking_odl/db/db.py b/networking-odl/networking_odl/db/db.py
new file mode 100644
index 0000000..31f4ce2
--- /dev/null
+++ b/networking-odl/networking_odl/db/db.py
@@ -0,0 +1,234 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+
+from sqlalchemy import asc
+from sqlalchemy import func
+from sqlalchemy import or_
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import models
+
+from neutron.db import api as db_api
+
+from oslo_db import api as oslo_db_api
+
+
+def check_for_pending_or_processing_ops(session, object_uuid, operation=None):
+ q = session.query(models.OpendaylightJournal).filter(
+ or_(models.OpendaylightJournal.state == odl_const.PENDING,
+ models.OpendaylightJournal.state == odl_const.PROCESSING),
+ models.OpendaylightJournal.object_uuid == object_uuid)
+ if operation:
+ if isinstance(operation, (list, tuple)):
+ q = q.filter(models.OpendaylightJournal.operation.in_(operation))
+ else:
+ q = q.filter(models.OpendaylightJournal.operation == operation)
+ return session.query(q.exists()).scalar()
+
+
+def check_for_pending_delete_ops_with_parent(session, object_type, parent_id):
+ rows = session.query(models.OpendaylightJournal).filter(
+ or_(models.OpendaylightJournal.state == odl_const.PENDING,
+ models.OpendaylightJournal.state == odl_const.PROCESSING),
+ models.OpendaylightJournal.object_type == object_type,
+ models.OpendaylightJournal.operation == odl_const.ODL_DELETE
+ ).all()
+
+ for row in rows:
+ if parent_id in row.data:
+ return True
+
+ return False
+
+
+def check_for_pending_or_processing_add(session, router_id, subnet_id):
+ rows = session.query(models.OpendaylightJournal).filter(
+ or_(models.OpendaylightJournal.state == odl_const.PENDING,
+ models.OpendaylightJournal.state == odl_const.PROCESSING),
+ models.OpendaylightJournal.object_type == odl_const.ODL_ROUTER_INTF,
+ models.OpendaylightJournal.operation == odl_const.ODL_ADD
+ ).all()
+
+ for row in rows:
+ if router_id in row.data.values() and subnet_id in row.data.values():
+ return True
+
+ return False
+
+
+def check_for_pending_remove_ops_with_parent(session, parent_id):
+ rows = session.query(models.OpendaylightJournal).filter(
+ or_(models.OpendaylightJournal.state == odl_const.PENDING,
+ models.OpendaylightJournal.state == odl_const.PROCESSING),
+ models.OpendaylightJournal.object_type == odl_const.ODL_ROUTER_INTF,
+ models.OpendaylightJournal.operation == odl_const.ODL_REMOVE
+ ).all()
+
+ for row in rows:
+ if parent_id in row.data.values():
+ return True
+
+ return False
+
+
+def check_for_older_ops(session, row):
+ q = session.query(models.OpendaylightJournal).filter(
+ or_(models.OpendaylightJournal.state == odl_const.PENDING,
+ models.OpendaylightJournal.state == odl_const.PROCESSING),
+ models.OpendaylightJournal.operation == row.operation,
+ models.OpendaylightJournal.object_uuid == row.object_uuid,
+ models.OpendaylightJournal.created_at < row.created_at,
+ models.OpendaylightJournal.id != row.id)
+ return session.query(q.exists()).scalar()
+
+
+def get_all_db_rows(session):
+ return session.query(models.OpendaylightJournal).all()
+
+
+def get_all_db_rows_by_state(session, state):
+ return session.query(models.OpendaylightJournal).filter_by(
+ state=state).all()
+
+
+# Retry deadlock exception for Galera DB.
+# If two (or more) different threads call this method at the same time, they
+# might both succeed in changing the same row to pending, but at least one
+# of them will get a deadlock from Galera and will have to retry the operation.
+@db_api.retry_db_errors
+def get_oldest_pending_db_row_with_lock(session):
+ with session.begin():
+ row = session.query(models.OpendaylightJournal).filter_by(
+ state=odl_const.PENDING).order_by(
+ asc(models.OpendaylightJournal.last_retried)).with_for_update(
+ ).first()
+ if row:
+ update_db_row_state(session, row, odl_const.PROCESSING)
+
+ return row
+
+
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
+ retry_on_request=True)
+def update_db_row_state(session, row, state):
+ row.state = state
+ session.merge(row)
+ session.flush()
+
+
+def update_pending_db_row_retry(session, row, retry_count):
+ if row.retry_count >= retry_count:
+ update_db_row_state(session, row, odl_const.FAILED)
+ else:
+ row.retry_count += 1
+ update_db_row_state(session, row, odl_const.PENDING)
+
+
+# This function is currently not used.
+# Deleted resources are marked as 'deleted' in the database.
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
+ retry_on_request=True)
+def delete_row(session, row=None, row_id=None):
+ if row_id:
+ row = session.query(models.OpendaylightJournal).filter_by(
+ id=row_id).one()
+ if row:
+ session.delete(row)
+ session.flush()
+
+
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
+ retry_on_request=True)
+def create_pending_row(session, object_type, object_uuid,
+ operation, data):
+ row = models.OpendaylightJournal(object_type=object_type,
+ object_uuid=object_uuid,
+ operation=operation, data=data,
+ created_at=func.now(),
+ state=odl_const.PENDING)
+ session.add(row)
+ # Keep session flush for unit tests. NOOP for L2/L3 events since calls are
+ # made inside database session transaction with subtransactions=True.
+ session.flush()
+
+
+@db_api.retry_db_errors
+def delete_pending_rows(session, operations_to_delete):
+ with session.begin():
+ session.query(models.OpendaylightJournal).filter(
+ models.OpendaylightJournal.operation.in_(operations_to_delete),
+ models.OpendaylightJournal.state == odl_const.PENDING).delete(
+ synchronize_session=False)
+ session.expire_all()
+
+
+@db_api.retry_db_errors
+def _update_maintenance_state(session, expected_state, state):
+ with session.begin():
+ row = session.query(models.OpendaylightMaintenance).filter_by(
+ state=expected_state).with_for_update().one_or_none()
+ if row is None:
+ return False
+
+ row.state = state
+ return True
+
+
+def lock_maintenance(session):
+ return _update_maintenance_state(session, odl_const.PENDING,
+ odl_const.PROCESSING)
+
+
+def unlock_maintenance(session):
+ return _update_maintenance_state(session, odl_const.PROCESSING,
+ odl_const.PENDING)
+
+
+def update_maintenance_operation(session, operation=None):
+ """Update the current maintenance operation details.
+
+ The function assumes the lock is held, so it mustn't be run outside of a
+ locked context.
+ """
+ op_text = None
+ if operation:
+ op_text = operation.__name__
+
+ with session.begin():
+ row = session.query(models.OpendaylightMaintenance).one_or_none()
+ row.processing_operation = op_text
+
+
+def delete_rows_by_state_and_time(session, state, time_delta):
+ with session.begin():
+ now = session.execute(func.now()).scalar()
+ session.query(models.OpendaylightJournal).filter(
+ models.OpendaylightJournal.state == state,
+ models.OpendaylightJournal.last_retried < now - time_delta).delete(
+ synchronize_session=False)
+ session.expire_all()
+
+
+def reset_processing_rows(session, max_timedelta):
+ with session.begin():
+ now = session.execute(func.now()).scalar()
+ max_timedelta = datetime.timedelta(seconds=max_timedelta)
+ rows = session.query(models.OpendaylightJournal).filter(
+ models.OpendaylightJournal.last_retried < now - max_timedelta,
+ models.OpendaylightJournal.state == odl_const.PROCESSING,
+ ).update({'state': odl_const.PENDING})
+
+ return rows
diff --git a/networking-odl/networking_odl/db/migration/__init__.py b/networking-odl/networking_odl/db/migration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/__init__.py
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/README b/networking-odl/networking_odl/db/migration/alembic_migrations/README
new file mode 100644
index 0000000..5d89e57
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/README
@@ -0,0 +1 @@
+This directory contains the migration scripts for the networking_odl project.
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py b/networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/env.py b/networking-odl/networking_odl/db/migration/alembic_migrations/env.py
new file mode 100644
index 0000000..9405ae0
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/env.py
@@ -0,0 +1,99 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from logging import config as logging_config
+
+from alembic import context
+from oslo_config import cfg
+from oslo_db.sqlalchemy import session
+import sqlalchemy as sa
+from sqlalchemy import event
+
+from neutron.db.migration.alembic_migrations import external
+from neutron.db.migration.models import head # noqa
+from neutron.db import model_base
+
+MYSQL_ENGINE = None
+ODL_VERSION_TABLE = 'odl_alembic_version'
+config = context.config
+neutron_config = config.neutron_config
+logging_config.fileConfig(config.config_file_name)
+target_metadata = model_base.BASEV2.metadata
+
+
+def set_mysql_engine():
+ try:
+ mysql_engine = neutron_config.command.mysql_engine
+ except cfg.NoSuchOptError:
+ mysql_engine = None
+
+ global MYSQL_ENGINE
+ MYSQL_ENGINE = (mysql_engine or
+ model_base.BASEV2.__table_args__['mysql_engine'])
+
+
+def include_object(object, name, type_, reflected, compare_to):
+ if type_ == 'table' and name in external.TABLES:
+ return False
+ else:
+ return True
+
+
+def run_migrations_offline():
+ set_mysql_engine()
+
+ kwargs = dict()
+ if neutron_config.database.connection:
+ kwargs['url'] = neutron_config.database.connection
+ else:
+ kwargs['dialect_name'] = neutron_config.database.engine
+ kwargs['include_object'] = include_object
+ kwargs['version_table'] = ODL_VERSION_TABLE
+ context.configure(**kwargs)
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+@event.listens_for(sa.Table, 'after_parent_attach')
+def set_storage_engine(target, parent):
+ if MYSQL_ENGINE:
+ target.kwargs['mysql_engine'] = MYSQL_ENGINE
+
+
+def run_migrations_online():
+ set_mysql_engine()
+ engine = session.create_engine(neutron_config.database.connection)
+
+ connection = engine.connect()
+ context.configure(
+ connection=connection,
+ target_metadata=target_metadata,
+ include_object=include_object,
+ version_table=ODL_VERSION_TABLE
+ )
+
+ try:
+ with context.begin_transaction():
+ context.run_migrations()
+ finally:
+ connection.close()
+ engine.dispose()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako b/networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako
new file mode 100644
index 0000000..9e0b2ce
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako
@@ -0,0 +1,36 @@
+# Copyright ${create_date.year} <PUT YOUR NAME/COMPANY HERE>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision}
+Create Date: ${create_date}
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+% if branch_labels:
+branch_labels = ${repr(branch_labels)}
+%endif
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+def upgrade():
+ ${upgrades if upgrades else "pass"}
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD
new file mode 100644
index 0000000..b7dbc31
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD
@@ -0,0 +1 @@
+383acb0d38a0
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD
new file mode 100644
index 0000000..34912ba
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD
@@ -0,0 +1 @@
+703dbf02afde
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py
new file mode 100644
index 0000000..d80815d
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Initial odl db, branchpoint
+
+Revision ID: b89a299e19f9
+Revises: None
+Create Date: 2015-09-03 22:22:22.222222
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b89a299e19f9'
+down_revision = None
+
+
+def upgrade():
+ pass
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py
new file mode 100644
index 0000000..43959c0
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Start of odl contract branch
+
+Revision ID: 383acb0d38a0
+Revises: b89a299e19f9
+Create Date: 2015-09-03 22:27:49.306394
+
+"""
+
+from neutron.db import migration
+from neutron.db.migration import cli
+
+
+# revision identifiers, used by Alembic.
+revision = '383acb0d38a0'
+down_revision = 'b89a299e19f9'
+branch_labels = (cli.CONTRACT_BRANCH,)
+
+# milestone identifier, used by neutron-db-manage
+neutron_milestone = [migration.MITAKA]
+
+
+def upgrade():
+ pass
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py
new file mode 100644
index 0000000..71d24b3
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Start of odl expand branch
+
+Revision ID: 247501328046
+Revises: b89a299e19f9
+Create Date: 2015-09-03 22:27:49.292238
+
+"""
+
+from neutron.db.migration import cli
+
+
+# revision identifiers, used by Alembic.
+revision = '247501328046'
+down_revision = 'b89a299e19f9'
+branch_labels = (cli.EXPAND_BRANCH,)
+
+
+def upgrade():
+ pass
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py
new file mode 100644
index 0000000..71d8273
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Opendaylight Neutron mechanism driver refactor
+
+Revision ID: 37e242787ae5
+Revises: 247501328046
+Create Date: 2015-10-30 22:09:27.221767
+
+"""
+from neutron.db import migration
+
+
+# revision identifiers, used by Alembic.
+revision = '37e242787ae5'
+down_revision = '247501328046'
+
+# milestone identifier, used by neutron-db-manage
+neutron_milestone = [migration.MITAKA]
+
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.create_table(
+ 'opendaylightjournal',
+ sa.Column('id', sa.String(36), primary_key=True),
+ sa.Column('object_type', sa.String(36), nullable=False),
+ sa.Column('object_uuid', sa.String(36), nullable=False),
+ sa.Column('operation', sa.String(36), nullable=False),
+ sa.Column('data', sa.PickleType, nullable=True),
+ sa.Column('state',
+ sa.Enum('pending', 'processing', 'failed', 'completed',
+ name='state'),
+ nullable=False, default='pending'),
+ sa.Column('retry_count', sa.Integer, default=0),
+ sa.Column('created_at', sa.DateTime, default=sa.func.now()),
+ sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(),
+ onupdate=sa.func.now())
+ )
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py
new file mode 100644
index 0000000..bbe0c46
--- /dev/null
+++ b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py
@@ -0,0 +1,52 @@
+# Copyright 2016 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Add journal maintenance table
+
+Revision ID: 703dbf02afde
+Revises: 37e242787ae5
+Create Date: 2016-04-12 10:49:31.802663
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '703dbf02afde'
+down_revision = '37e242787ae5'
+
+from alembic import op
+from oslo_utils import uuidutils
+import sqlalchemy as sa
+
+from networking_odl.common import constants as odl_const
+
+
+def upgrade():
+    """Create the 'opendaylight_maintenance' table and seed its lock row."""
+    maint_table = op.create_table(
+        'opendaylight_maintenance',
+        sa.Column('id', sa.String(36), primary_key=True),
+        sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING,
+                                   name='state'),
+                  nullable=False),
+        sa.Column('processing_operation', sa.String(70)),
+        # Refreshed by the DB on every update; used to detect stale locks.
+        sa.Column('lock_updated', sa.TIMESTAMP, nullable=False,
+                  server_default=sa.func.now(),
+                  onupdate=sa.func.now())
+    )
+
+    # Insert the only row here that is used to synchronize the lock between
+    # different Neutron processes.
+    op.bulk_insert(maint_table,
+                   [{'id': uuidutils.generate_uuid(),
+                     'state': odl_const.PENDING}])
diff --git a/networking-odl/networking_odl/db/models.py b/networking-odl/networking_odl/db/models.py
new file mode 100644
index 0000000..0416ed1
--- /dev/null
+++ b/networking-odl/networking_odl/db/models.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sa
+
+from networking_odl.common import constants as odl_const
+from neutron.db import model_base
+from neutron.db.models_v2 import HasId
+
+
+class OpendaylightJournal(model_base.BASEV2, HasId):
+    """Journal of operations queued for sync to the OpenDaylight controller."""
+    __tablename__ = 'opendaylightjournal'
+
+    # Neutron resource type (e.g. network/subnet/port).
+    object_type = sa.Column(sa.String(36), nullable=False)
+    object_uuid = sa.Column(sa.String(36), nullable=False)
+    # Journal operation (create/update/delete/...).
+    operation = sa.Column(sa.String(36), nullable=False)
+    # Pickled resource payload sent to ODL.
+    data = sa.Column(sa.PickleType, nullable=True)
+    state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.FAILED,
+                              odl_const.PROCESSING, odl_const.COMPLETED),
+                      nullable=False, default=odl_const.PENDING)
+    retry_count = sa.Column(sa.Integer, default=0)
+    created_at = sa.Column(sa.DateTime, server_default=sa.func.now())
+    # Refreshed by the DB whenever the row is updated.
+    last_retried = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(),
+                             onupdate=sa.func.now())
+
+
+class OpendaylightMaintenance(model_base.BASEV2, HasId):
+    """Singleton lock row coordinating maintenance across Neutron processes."""
+    __tablename__ = 'opendaylight_maintenance'
+
+    state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.PROCESSING),
+                      nullable=False)
+    # Name of the maintenance operation currently running (if any).
+    processing_operation = sa.Column(sa.String(70))
+    # Refreshed by the DB on every update; used to detect stale locks.
+    lock_updated = sa.Column(sa.TIMESTAMP, nullable=False,
+                             server_default=sa.func.now(),
+                             onupdate=sa.func.now())
diff --git a/networking-odl/networking_odl/fwaas/__init__.py b/networking-odl/networking_odl/fwaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/fwaas/__init__.py
diff --git a/networking-odl/networking_odl/fwaas/driver.py b/networking-odl/networking_odl/fwaas/driver.py
new file mode 100644
index 0000000..a9de4f2
--- /dev/null
+++ b/networking-odl/networking_odl/fwaas/driver.py
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_log import log as logging
+
+from neutron_fwaas.services.firewall.drivers import fwaas_base
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import config # noqa
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenDaylightFwaasDriver(fwaas_base.FwaasDriverBase):
+
+    """OpenDaylight FWaaS Driver
+
+    This code is the backend implementation for the OpenDaylight FWaaS
+    driver for OpenStack Neutron.
+
+    NOTE: all firewall operations below are currently no-op placeholders;
+    only the REST client is initialized.
+    """
+
+    def __init__(self):
+        LOG.debug("Initializing OpenDaylight FWaaS driver")
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+
+    def create_firewall(self, apply_list, firewall):
+        """Create the Firewall with default (drop all) policy.
+
+        The default policy will be applied on all the interfaces of
+        trusted zone.
+        """
+        pass
+
+    def delete_firewall(self, apply_list, firewall):
+        """Delete firewall.
+
+        Removes all policies created by this instance and frees up
+        all the resources.
+        """
+        pass
+
+    def update_firewall(self, apply_list, firewall):
+        """Apply the policy on all trusted interfaces.
+
+        Remove previous policy and apply the new policy on all trusted
+        interfaces.
+        """
+        pass
+
+    def apply_default_policy(self, apply_list, firewall):
+        """Apply the default policy on all trusted interfaces.
+
+        Remove current policy and apply the default policy on all trusted
+        interfaces.
+        """
+        pass
diff --git a/networking-odl/networking_odl/journal/__init__.py b/networking-odl/networking_odl/journal/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/journal/__init__.py
diff --git a/networking-odl/networking_odl/journal/cleanup.py b/networking-odl/networking_odl/journal/cleanup.py
new file mode 100644
index 0000000..994fb82
--- /dev/null
+++ b/networking-odl/networking_odl/journal/cleanup.py
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from datetime import timedelta
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from networking_odl._i18n import _LI
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+
+LOG = logging.getLogger(__name__)
+
+
+class JournalCleanup(object):
+ """Journal maintenance operation for deleting completed rows."""
+ def __init__(self):
+ self._rows_retention = cfg.CONF.ml2_odl.completed_rows_retention
+ self._processing_timeout = cfg.CONF.ml2_odl.processing_timeout
+
+ def delete_completed_rows(self, session):
+ if self._rows_retention is not -1:
+ LOG.debug("Deleting completed rows")
+ db.delete_rows_by_state_and_time(
+ session, odl_const.COMPLETED,
+ timedelta(seconds=self._rows_retention))
+
+ def cleanup_processing_rows(self, session):
+ row_count = db.reset_processing_rows(session, self._processing_timeout)
+ if row_count:
+ LOG.info(_LI("Reset %(num)s orphaned rows back to pending"),
+ {"num": row_count})
diff --git a/networking-odl/networking_odl/journal/dependency_validations.py b/networking-odl/networking_odl/journal/dependency_validations.py
new file mode 100644
index 0000000..a6f5f96
--- /dev/null
+++ b/networking-odl/networking_odl/journal/dependency_validations.py
@@ -0,0 +1,267 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+
+
+def _is_valid_update_operation(session, row):
+    """Return False if this update must wait for earlier ops on the object."""
+    # Check if there are older updates in the queue
+    if db.check_for_older_ops(session, row):
+        return False
+
+    # Check for a pending or processing create operation on this uuid
+    if db.check_for_pending_or_processing_ops(
+        session, row.object_uuid, odl_const.ODL_CREATE):
+        return False
+    return True
+
+
+def validate_network_operation(session, row):
+    """Validate the network operation based on dependencies.
+
+    A network delete is held back while there are pending/processing
+    create/update operations on the network itself, or pending deletes of
+    dependent subnets, ports or routers. A network update is held back by
+    older operations on the same object.
+    """
+    if row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, [odl_const.ODL_UPDATE,
+                                       odl_const.ODL_CREATE]):
+            return False
+        # Check for dependent operations
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_SUBNET, row.object_uuid):
+            return False
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_PORT, row.object_uuid):
+            return False
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_ROUTER, row.object_uuid):
+            return False
+    elif (row.operation == odl_const.ODL_UPDATE and
+          not _is_valid_update_operation(session, row)):
+        return False
+    return True
+
+
+def validate_subnet_operation(session, row):
+    """Validate the subnet operation based on dependencies.
+
+    A subnet create/update waits for its parent network's operations to
+    finish; a subnet delete waits for create/update ops on the subnet
+    itself and for deletes of dependent ports.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        network_id = row.data['network_id']
+        # Check for pending or processing network operations
+        if db.check_for_pending_or_processing_ops(session, network_id):
+            return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, [odl_const.ODL_UPDATE,
+                                       odl_const.ODL_CREATE]):
+            return False
+        # Check for dependent operations
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_PORT, row.object_uuid):
+            return False
+
+    return True
+
+
+def validate_port_operation(session, row):
+    """Validate port operation based on dependencies.
+
+    A port create/update waits for its network and for every subnet that
+    one of its fixed IPs references; a port delete waits for create/update
+    ops on the port itself.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        network_id = row.data['network_id']
+        # Check for pending or processing network operations
+        ops = db.check_for_pending_or_processing_ops(session, network_id)
+        # Check for pending subnet operations.
+        for fixed_ip in row.data['fixed_ips']:
+            ip_ops = db.check_for_pending_or_processing_ops(
+                session, fixed_ip['subnet_id'])
+            ops = ops or ip_ops
+
+        if ops:
+            return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, [odl_const.ODL_UPDATE,
+                                       odl_const.ODL_CREATE]):
+            return False
+
+    return True
+
+
+def validate_router_operation(session, row):
+    """Validate router operation based on dependencies.
+
+    A router create/update waits for pending operations on its gateway
+    port (if set); a router delete waits for create/update ops on the
+    router itself and for dependent port, floating IP and router-interface
+    removals to complete.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        if row.data['gw_port_id'] is not None:
+            if db.check_for_pending_or_processing_ops(session,
+                                                      row.data['gw_port_id']):
+                return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # operations on this uuid.
+        if db.check_for_pending_or_processing_ops(session, row.object_uuid,
+                                                  [odl_const.ODL_UPDATE,
+                                                   odl_const.ODL_CREATE]):
+            return False
+
+        # Check that dependent port delete operation has completed.
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_PORT, row.object_uuid):
+            return False
+
+        # Check that dependent floatingip delete operation has completed.
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_FLOATINGIP, row.object_uuid):
+            return False
+
+        # Check that dependent router interface remove operation has completed.
+        if db.check_for_pending_remove_ops_with_parent(
+            session, row.object_uuid):
+            return False
+
+    return True
+
+
+def validate_floatingip_operation(session, row):
+    """Validate floatingip operation based on dependencies.
+
+    A floating IP create/update waits until its floating network, its
+    associated port (if any) and its router (if any) have no pending
+    operations; a delete waits for create/update ops on the floating IP
+    itself.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        network_id = row.data.get('floating_network_id')
+        if network_id is not None:
+            if not db.check_for_pending_or_processing_ops(session, network_id):
+                # Network is ready; the port (if set) must also be ready.
+                port_id = row.data.get('port_id')
+                if port_id is not None:
+                    if db.check_for_pending_or_processing_ops(session,
+                                                              port_id):
+                        return False
+            else:
+                # Network still has pending operations.
+                return False
+
+        router_id = row.data.get('router_id')
+        if router_id is not None:
+            if db.check_for_pending_or_processing_ops(session, router_id):
+                return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(session, row.object_uuid,
+                                                  [odl_const.ODL_UPDATE,
+                                                   odl_const.ODL_CREATE]):
+            return False
+
+    return True
+
+
+def validate_router_interface_operation(session, row):
+    """Validate router_interface operation based on dependencies.
+
+    An interface add waits for its router and subnet operations; an
+    interface remove waits for the matching add to finish first.
+    """
+    if row.operation == odl_const.ODL_ADD:
+        # Verify that router event has been completed.
+        if db.check_for_pending_or_processing_ops(session, row.data['id']):
+            return False
+
+        # TODO(rcurran): Check for port_id?
+        if db.check_for_pending_or_processing_ops(session,
+                                                  row.data['subnet_id']):
+            return False
+    elif row.operation == odl_const.ODL_REMOVE:
+        if db.check_for_pending_or_processing_add(session, row.data['id'],
+                                                  row.data['subnet_id']):
+            return False
+
+    return True
+
+
+def validate_security_group_operation(session, row):
+    """Validate security_group operation based on dependencies.
+
+    Security groups currently have no journal dependencies, so every
+    operation is immediately valid.
+    """
+    return True
+
+
+def validate_security_group_rule_operation(session, row):
+    """Validate security_group_rule operation based on dependencies.
+
+    Security group rules currently have no journal dependencies, so every
+    operation is immediately valid.
+    """
+    return True
+
+# Maps a Neutron resource type to its dependency-validation function.
+# Extendable at runtime through register_validator().
+_VALIDATION_MAP = {
+    odl_const.ODL_NETWORK: validate_network_operation,
+    odl_const.ODL_SUBNET: validate_subnet_operation,
+    odl_const.ODL_PORT: validate_port_operation,
+    odl_const.ODL_ROUTER: validate_router_operation,
+    odl_const.ODL_ROUTER_INTF: validate_router_interface_operation,
+    odl_const.ODL_FLOATINGIP: validate_floatingip_operation,
+    odl_const.ODL_SG: validate_security_group_operation,
+    odl_const.ODL_SG_RULE: validate_security_group_rule_operation,
+}
+
+
+def validate(session, row):
+    """Validate resource dependency in journaled operations.
+
+    :param session: db session
+    :param row: entry in journal entry to be validated
+    :returns: True if the row may be processed now, False if it must wait
+    :raises KeyError: if no validator is registered for row.object_type
+    """
+    return _VALIDATION_MAP[row.object_type](session, row)
+
+
+def register_validator(object_type, validator):
+    """Register validator function for given resource.
+
+    Each resource type may only be registered once; re-registering an
+    existing type trips the assertion below.
+
+    :param object_type: neutron resource type
+    :param validator: function to be registered which validates resource
+                      dependencies
+    """
+    assert object_type not in _VALIDATION_MAP
+    _VALIDATION_MAP[object_type] = validator
diff --git a/networking-odl/networking_odl/journal/full_sync.py b/networking-odl/networking_odl/journal/full_sync.py
new file mode 100644
index 0000000..dad7215
--- /dev/null
+++ b/networking-odl/networking_odl/journal/full_sync.py
@@ -0,0 +1,114 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import requests
+
+from neutron import context as neutron_context
+from neutron import manager
+from neutron.plugins.common import constants
+from neutron_lib import constants as l3_constants
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+
+# Define which pending operation types should be deleted
+_CANARY_NETWORK_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
+_CANARY_TENANT_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
+_CANARY_NETWORK_DATA = {'id': _CANARY_NETWORK_ID,
+ 'tenant_id': _CANARY_TENANT_ID,
+ 'name': 'Sync Canary Network',
+ 'admin_state_up': False}
+_OPS_TO_DELETE_ON_SYNC = (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)
+_L2_RESOURCES_TO_SYNC = [(odl_const.ODL_SG, odl_const.ODL_SGS),
+ (odl_const.ODL_SG_RULE, odl_const.ODL_SG_RULES),
+ (odl_const.ODL_NETWORK, odl_const.ODL_NETWORKS),
+ (odl_const.ODL_SUBNET, odl_const.ODL_SUBNETS),
+ (odl_const.ODL_PORT, odl_const.ODL_PORTS)]
+_L3_RESOURCES_TO_SYNC = [(odl_const.ODL_ROUTER, odl_const.ODL_ROUTERS),
+ (odl_const.ODL_FLOATINGIP, odl_const.ODL_FLOATINGIPS)]
+_CLIENT = client.OpenDaylightRestClient.create_client()
+
+
+def full_sync(session):
+    """Resync all L2/L3 resources to ODL by re-queueing them in the journal.
+
+    Runs only when the canary network is missing from ODL and not already
+    queued. Drops still-pending create/update rows first, then queues a
+    create for every existing resource, and finally queues the canary
+    network so the next run can tell the sync completed.
+    """
+    if not _full_sync_needed(session):
+        return
+
+    db.delete_pending_rows(session, _OPS_TO_DELETE_ON_SYNC)
+
+    dbcontext = neutron_context.get_admin_context()
+    plugin = manager.NeutronManager.get_plugin()
+    for resource_type, collection_name in _L2_RESOURCES_TO_SYNC:
+        _sync_resources(session, plugin, dbcontext, resource_type,
+                        collection_name)
+
+    l3plugin = manager.NeutronManager.get_service_plugins().get(
+        constants.L3_ROUTER_NAT)
+    for resource_type, collection_name in _L3_RESOURCES_TO_SYNC:
+        _sync_resources(session, l3plugin, dbcontext, resource_type,
+                        collection_name)
+    _sync_router_ports(session, plugin, dbcontext)
+
+    db.create_pending_row(session, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID,
+                          odl_const.ODL_CREATE, _CANARY_NETWORK_DATA)
+
+
+def _full_sync_needed(session):
+    """True when the canary network is absent from both ODL and the journal."""
+    return (_canary_network_missing_on_odl() and
+            _canary_network_not_in_journal(session))
+
+
+def _canary_network_missing_on_odl():
+    """Check whether ODL lacks the canary network.
+
+    :returns: True if ODL answers 404 for the canary network
+    :raises requests.HTTPError: if ODL is unreachable/inoperative or
+        returns any other error status
+    """
+    # Try to reach the ODL server, sometimes it might be up & responding to
+    # HTTP calls but inoperative..
+    response = _CLIENT.get(odl_const.ODL_NETWORKS)
+    response.raise_for_status()
+
+    response = _CLIENT.get(odl_const.ODL_NETWORKS + "/" + _CANARY_NETWORK_ID)
+    if response.status_code == requests.codes.not_found:
+        return True
+
+    # In case there was an error raise it up because we don't know how to deal
+    # with it..
+    response.raise_for_status()
+    return False
+
+
+def _canary_network_not_in_journal(session):
+    """True when no pending/processing create exists for the canary network."""
+    return not db.check_for_pending_or_processing_ops(session,
+                                                      _CANARY_NETWORK_ID,
+                                                      odl_const.ODL_CREATE)
+
+
+def _sync_resources(session, plugin, dbcontext, object_type, collection_name):
+    """Queue a journal 'create' row for every existing resource of a type."""
+    # Resolve the plugin getter by collection name, e.g. get_networks.
+    obj_getter = getattr(plugin, 'get_%s' % collection_name)
+    resources = obj_getter(dbcontext)
+
+    for resource in resources:
+        db.create_pending_row(session, object_type, resource['id'],
+                              odl_const.ODL_CREATE, resource)
+
+
+def _sync_router_ports(session, plugin, dbcontext):
+    """Queue a router-interface 'add' row for every router interface port."""
+    filters = {'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_INTF]}
+    router_ports = plugin.get_ports(dbcontext, filters=filters)
+    for port in router_ports:
+        # 'id' is the router id (the port's device), matching the payload
+        # shape used for add_router_interface journal entries.
+        resource = {'subnet_id': port['fixed_ips'][0]['subnet_id'],
+                    'port_id': port['id'],
+                    'id': port['device_id'],
+                    'tenant_id': port['tenant_id']}
+        db.create_pending_row(session, odl_const.ODL_ROUTER_INTF, port['id'],
+                              odl_const.ODL_ADD, resource)
diff --git a/networking-odl/networking_odl/journal/journal.py b/networking-odl/networking_odl/journal/journal.py
new file mode 100644
index 0000000..ca0d2c2
--- /dev/null
+++ b/networking-odl/networking_odl/journal/journal.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import threading
+
+from requests import exceptions
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron import context as neutron_context
+from neutron.db import api as neutron_db_api
+from neutron import manager
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import filters
+from networking_odl._i18n import _LI, _LE
+from networking_odl.db import db
+from networking_odl.journal import dependency_validations
+
+
+LOG = logging.getLogger(__name__)
+
+
+def call_thread_on_end(func):
+    """Decorator: after *func* returns, wake the journal sync thread.
+
+    Assumes the decorated method's instance exposes a ``journal``
+    attribute with a ``set_sync_event()`` method.
+    """
+    def new_func(obj, *args, **kwargs):
+        return_value = func(obj, *args, **kwargs)
+        obj.journal.set_sync_event()
+        return return_value
+    return new_func
+
+
+def _enrich_port(db_session, context, object_type, operation, data):
+    """Enrich the port with additional information needed by ODL.
+
+    Expands security group ids into full group dicts and fills in an
+    empty tenant_id from the port's network. Returns a deep copy; *data*
+    is not mutated.
+
+    NOTE(review): db_session, object_type and operation are currently
+    unused here -- presumably kept for signature symmetry with record();
+    confirm before removing.
+    """
+    if context:
+        plugin = context._plugin
+        dbcontext = context._plugin_context
+    else:
+        dbcontext = neutron_context.get_admin_context()
+        plugin = manager.NeutronManager.get_plugin()
+
+    groups = [plugin.get_security_group(dbcontext, sg)
+              for sg in data['security_groups']]
+    new_data = copy.deepcopy(data)
+    new_data['security_groups'] = groups
+
+    # NOTE(yamahata): work around for port creation for router
+    # tenant_id=''(empty string) is passed when port is created
+    # by l3 plugin internally for router.
+    # On the other hand, ODL doesn't accept empty string for tenant_id.
+    # In that case, deduce tenant_id from network_id for now.
+    # Right fix: modify Neutron so that don't allow empty string
+    # for tenant_id even for port for internal use.
+    # TODO(yamahata): eliminate this work around when neutron side
+    # is fixed
+    # assert port['tenant_id'] != ''
+    if ('tenant_id' not in new_data or new_data['tenant_id'] == ''):
+        if context:
+            tenant_id = context._network_context._network['tenant_id']
+        else:
+            network = plugin.get_network(dbcontext, new_data['network_id'])
+            tenant_id = network['tenant_id']
+        new_data['tenant_id'] = tenant_id
+
+    return new_data
+
+
+def record(db_session, object_type, object_uuid, operation, data,
+           context=None):
+    """Record an operation in the journal, enriching port payloads first."""
+    if (object_type == odl_const.ODL_PORT and
+            operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
+        data = _enrich_port(db_session, context, object_type, operation, data)
+
+    db.create_pending_row(db_session, object_type, object_uuid, operation,
+                          data)
+
+
+class OpendaylightJournalThread(object):
+ """Thread worker for the Opendaylight Journal Database."""
+ def __init__(self):
+ self.client = client.OpenDaylightRestClient.create_client()
+ self._odl_sync_timeout = cfg.CONF.ml2_odl.sync_timeout
+ self._row_retry_count = cfg.CONF.ml2_odl.retry_count
+ self.event = threading.Event()
+ self.lock = threading.Lock()
+ self._odl_sync_thread = self.start_odl_sync_thread()
+ self._start_sync_timer()
+
+ def start_odl_sync_thread(self):
+ # Start the sync thread
+ LOG.debug("Starting a new sync thread")
+ odl_sync_thread = threading.Thread(
+ name='sync',
+ target=self.run_sync_thread)
+ odl_sync_thread.start()
+ return odl_sync_thread
+
+ def set_sync_event(self):
+ # Prevent race when starting the timer
+ with self.lock:
+ LOG.debug("Resetting thread timer")
+ self._timer.cancel()
+ self._start_sync_timer()
+ self.event.set()
+
+ def _start_sync_timer(self):
+ self._timer = threading.Timer(self._odl_sync_timeout,
+ self.set_sync_event)
+ self._timer.start()
+
+ def _json_data(self, row):
+ data = copy.deepcopy(row.data)
+ filters.filter_for_odl(row.object_type, row.operation, data)
+ url_object = row.object_type.replace('_', '-')
+
+ if row.operation == odl_const.ODL_CREATE:
+ method = 'post'
+ urlpath = url_object + 's'
+ to_send = {row.object_type: data}
+ elif row.operation == odl_const.ODL_UPDATE:
+ method = 'put'
+ urlpath = url_object + 's/' + row.object_uuid
+ to_send = {row.object_type: data}
+ elif row.operation == odl_const.ODL_DELETE:
+ method = 'delete'
+ urlpath = url_object + 's/' + row.object_uuid
+ to_send = None
+ elif row.operation == odl_const.ODL_ADD:
+ method = 'put'
+ urlpath = 'routers/' + data['id'] + '/add_router_interface'
+ to_send = data
+ elif row.operation == odl_const.ODL_REMOVE:
+ method = 'put'
+ urlpath = 'routers/' + data['id'] + '/remove_router_interface'
+ to_send = data
+
+ return method, urlpath, to_send
+
+ def run_sync_thread(self, exit_after_run=False):
+ while True:
+ try:
+ self.event.wait()
+ self.event.clear()
+
+ session = neutron_db_api.get_session()
+ self._sync_pending_rows(session, exit_after_run)
+
+ LOG.debug("Clearing sync thread event")
+ if exit_after_run:
+ # Permanently waiting thread model breaks unit tests
+ # Adding this arg to exit here only for unit tests
+ break
+ except Exception:
+ # Catch exceptions to protect the thread while running
+ LOG.exception(_LE("Error on run_sync_thread"))
+
+ def _sync_pending_rows(self, session, exit_after_run):
+ while True:
+ LOG.debug("Thread walking database")
+ row = db.get_oldest_pending_db_row_with_lock(session)
+ if not row:
+ LOG.debug("No rows to sync")
+ break
+
+ # Validate the operation
+ valid = dependency_validations.validate(session, row)
+ if not valid:
+ LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
+ "valid operation yet, skipping for now"),
+ {'operation': row.operation,
+ 'type': row.object_type,
+ 'uuid': row.object_uuid})
+
+ # Set row back to pending.
+ db.update_db_row_state(session, row, odl_const.PENDING)
+ if exit_after_run:
+ break
+ continue
+
+ LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
+ {'operation': row.operation, 'type': row.object_type,
+ 'uuid': row.object_uuid})
+
+ # Add code to sync this to ODL
+ method, urlpath, to_send = self._json_data(row)
+
+ try:
+ self.client.sendjson(method, urlpath, to_send)
+ db.update_db_row_state(session, row, odl_const.COMPLETED)
+ except exceptions.ConnectionError as e:
+ # Don't raise the retry count, just log an error
+ LOG.error(_LE("Cannot connect to the Opendaylight Controller"))
+ # Set row back to pending
+ db.update_db_row_state(session, row, odl_const.PENDING)
+ # Break our of the loop and retry with the next
+ # timer interval
+ break
+ except Exception as e:
+ LOG.error(_LE("Error syncing %(type)s %(operation)s,"
+ " id %(uuid)s Error: %(error)s"),
+ {'type': row.object_type,
+ 'uuid': row.object_uuid,
+ 'operation': row.operation,
+ 'error': e.message})
+ db.update_pending_db_row_retry(session, row,
+ self._row_retry_count)
diff --git a/networking-odl/networking_odl/journal/maintenance.py b/networking-odl/networking_odl/journal/maintenance.py
new file mode 100644
index 0000000..7fb82a0
--- /dev/null
+++ b/networking-odl/networking_odl/journal/maintenance.py
@@ -0,0 +1,73 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from neutron.db import api as neutron_db_api
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_service import loopingcall
+
+from networking_odl._i18n import _LI, _LE
+from networking_odl.db import db
+
+
+LOG = logging.getLogger(__name__)
+
+
+class MaintenanceThread(object):
+ def __init__(self):
+ self.timer = loopingcall.FixedIntervalLoopingCall(self.execute_ops)
+ self.maintenance_interval = cfg.CONF.ml2_odl.maintenance_interval
+ self.maintenance_ops = []
+
+ def start(self):
+ self.timer.start(self.maintenance_interval, stop_on_exception=False)
+
+ def _execute_op(self, operation, session):
+ op_details = operation.__name__
+ if operation.__doc__:
+ op_details += " (%s)" % operation.func_doc
+
+ try:
+ LOG.info(_LI("Starting maintenance operation %s."), op_details)
+ db.update_maintenance_operation(session, operation=operation)
+ operation(session=session)
+ LOG.info(_LI("Finished maintenance operation %s."), op_details)
+ except Exception:
+ LOG.exception(_LE("Failed during maintenance operation %s."),
+ op_details)
+
+ def execute_ops(self):
+ LOG.info(_LI("Starting journal maintenance run."))
+ session = neutron_db_api.get_session()
+ if not db.lock_maintenance(session):
+ LOG.info(_LI("Maintenance already running, aborting."))
+ return
+
+ try:
+ for operation in self.maintenance_ops:
+ self._execute_op(operation, session)
+ finally:
+ db.update_maintenance_operation(session, operation=None)
+ db.unlock_maintenance(session)
+ LOG.info(_LI("Finished journal maintenance run."))
+
+ def register_operation(self, f):
+ """Register a function to be run by the maintenance thread.
+
+ :param f: Function to call when the thread runs. The function will
+ receive a DB session to use for DB operations.
+ """
+ self.maintenance_ops.append(f)
diff --git a/networking-odl/networking_odl/l2gateway/__init__.py b/networking-odl/networking_odl/l2gateway/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/l2gateway/__init__.py
diff --git a/networking-odl/networking_odl/l2gateway/driver.py b/networking-odl/networking_odl/l2gateway/driver.py
new file mode 100644
index 0000000..d1fd5bb
--- /dev/null
+++ b/networking-odl/networking_odl/l2gateway/driver.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2016 Ericsson India Global Service Pvt Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import copy
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import six
+
+from networking_l2gw.services.l2gateway.common import constants
+from networking_l2gw.services.l2gateway import service_drivers
+from networking_odl._i18n import _LE, _LI
+from networking_odl.common import client as odl_client
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+LOG = logging.getLogger(__name__)
+
+L2GATEWAYS = 'l2-gateways'
+L2GATEWAY_CONNECTIONS = 'l2gateway-connections'
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpenDaylightL2gwDriver(service_drivers.L2gwDriver):
+    """OpenDaylight L2Gateway Service Driver.
+
+    This is the OpenStack service driver that proxies L2 gateway and
+    L2 gateway connection API calls to the OpenDaylight controller
+    over REST. On failure each handler logs and re-raises so the
+    caller sees the original exception.
+    """
+
+    def __init__(self, service_plugin, validator=None):
+        super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator)
+        self.service_plugin = service_plugin
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+        LOG.info(_LI("ODL: Started OpenDaylight L2Gateway driver"))
+
+    @property
+    def service_type(self):
+        # Service type handled by this driver (L2 gateway).
+        return constants.L2GW
+
+    def create_l2_gateway_postcommit(self, context, l2_gateway):
+        """POST the newly created gateway to the ODL controller."""
+        LOG.info(_LI("ODL: Create L2Gateway %(l2gateway)s"),
+                 {'l2gateway': l2_gateway})
+        request = {'l2_gateway': l2_gateway}
+        try:
+            self.client.sendjson('post', L2GATEWAYS, request)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway create"
+                                  " failed for gateway %(l2gatewayid)s"),
+                              {'l2gatewayid': l2_gateway['id']})
+
+    def delete_l2_gateway_postcommit(self, context, l2_gateway_id):
+        """DELETE the gateway on the ODL controller (404 is tolerated)."""
+        LOG.info(_LI("ODL: Delete L2Gateway %(l2gatewayid)s"),
+                 {'l2gatewayid': l2_gateway_id})
+        url = L2GATEWAYS + '/' + l2_gateway_id
+        try:
+            self.client.try_delete(url)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway delete"
+                                  " failed for gateway_id %(l2gatewayid)s"),
+                              {'l2gatewayid': l2_gateway_id})
+
+    def update_l2_gateway_postcommit(self, context, l2_gateway):
+        """PUT the updated gateway to the ODL controller."""
+        LOG.info(_LI("ODL: Update L2Gateway %(l2gateway)s"),
+                 {'l2gateway': l2_gateway})
+        request = {'l2_gateway': l2_gateway}
+        url = L2GATEWAYS + '/' + l2_gateway['id']
+        try:
+            self.client.sendjson('put', url, request)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway update"
+                                  " failed for gateway %(l2gatewayid)s"),
+                              {'l2gatewayid': l2_gateway['id']})
+
+    def create_l2_gateway_connection_postcommit(self, context,
+                                                l2_gateway_connection):
+        """POST the new gateway connection to the ODL controller.
+
+        ODL expects the key 'gateway_id' instead of Neutron's
+        'l2_gateway_id', so the payload is rewritten on a deep copy to
+        avoid mutating the caller's dict.
+        """
+        LOG.info(_LI("ODL: Create L2Gateway connection %(l2gwconn)s"),
+                 {'l2gwconn': l2_gateway_connection})
+        odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection)
+        odl_l2_gateway_connection['gateway_id'] = (
+            l2_gateway_connection['l2_gateway_id'])
+        odl_l2_gateway_connection.pop('l2_gateway_id')
+        request = {'l2gateway_connection': odl_l2_gateway_connection}
+        try:
+            self.client.sendjson('post', L2GATEWAY_CONNECTIONS, request)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway connection create"
+                                  " failed for gateway %(l2gwconnid)s"),
+                              {'l2gwconnid':
+                                  l2_gateway_connection['l2_gateway_id']})
+
+    def delete_l2_gateway_connection_postcommit(self, context,
+                                                l2_gateway_connection_id):
+        """DELETE the gateway connection on the ODL controller."""
+        LOG.info(_LI("ODL: Delete L2Gateway connection %(l2gwconnid)s"),
+                 {'l2gwconnid': l2_gateway_connection_id})
+        url = L2GATEWAY_CONNECTIONS + '/' + l2_gateway_connection_id
+        try:
+            self.client.try_delete(url)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway connection delete"
+                                  " failed for connection %(l2gwconnid)s"),
+                              {'l2gwconnid': l2_gateway_connection_id})
diff --git a/networking-odl/networking_odl/l3/__init__.py b/networking-odl/networking_odl/l3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/l3/__init__.py
diff --git a/networking-odl/networking_odl/l3/l3_odl.py b/networking-odl/networking_odl/l3/l3_odl.py
new file mode 100644
index 0000000..e06e335
--- /dev/null
+++ b/networking-odl/networking_odl/l3/l3_odl.py
@@ -0,0 +1,189 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
+from neutron.api.rpc.handlers import l3_rpc
+from neutron.common import rpc as n_rpc
+from neutron.common import topics
+from neutron.db import extraroute_db
+from neutron.db import l3_agentschedulers_db
+from neutron.db import l3_dvr_db
+from neutron.db import l3_gwmode_db
+from neutron.plugins.common import constants
+from neutron_lib import constants as q_const
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import utils as odl_utils
+
+try:
+    from neutron.db.db_base_plugin_v2 import common_db_mixin
+except ImportError as e:
+    # Compatibility shim: as of change-set
+    # ece8cc2e9aae1610a325d0c206e38da3da9a0a1a
+    # (Change-Id: I1eac61c258541bca80e14be4b7c75519a014ffae)
+    # db_base_plugin_v2.common_db_mixin was removed from Neutron, so
+    # fall back to its new location.
+    from neutron.db import common_db_mixin
+
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+ROUTERS = 'routers'
+FLOATINGIPS = 'floatingips'
+
+
+class OpenDaylightL3RouterPlugin(
+    common_db_mixin.CommonDbMixin,
+    extraroute_db.ExtraRoute_db_mixin,
+    l3_dvr_db.L3_NAT_with_dvr_db_mixin,
+    l3_gwmode_db.L3_NAT_db_mixin,
+    l3_agentschedulers_db.L3AgentSchedulerDbMixin):
+
+    """Implementation of the OpenDaylight L3 Router Service Plugin.
+
+    This class implements a L3 service plugin that provides
+    router and floatingip resources and manages associated
+    request/response. Each operation first runs the standard Neutron
+    DB operation (via the inherited mixins) and then mirrors the
+    result to the OpenDaylight controller over REST.
+    """
+    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
+                                   "extraroute"]
+
+    def __init__(self):
+        self.setup_rpc()
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+
+    def setup_rpc(self):
+        """Set up the L3 plugin RPC topic, agent notifier and consumers."""
+        self.topic = topics.L3PLUGIN
+        self.conn = n_rpc.create_connection()
+        self.agent_notifiers.update(
+            {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
+        self.endpoints = [l3_rpc.L3RpcCallback()]
+        self.conn.create_consumer(self.topic, self.endpoints,
+                                  fanout=False)
+        self.conn.consume_in_threads()
+
+    def get_plugin_type(self):
+        """Return the plugin type constant (L3 router NAT)."""
+        return constants.L3_ROUTER_NAT
+
+    def get_plugin_description(self):
+        """Return a string description of the plugin."""
+        return ("L3 Router Service Plugin for basic L3 forwarding"
+                " using OpenDaylight")
+
+    def filter_update_router_attributes(self, router):
+        """Filter out router attributes for an update operation."""
+        # These keys are immutable or server-managed and must not be
+        # sent to ODL in a PUT body.
+        odl_utils.try_del(router, ['id', 'tenant_id', 'status'])
+
+    def create_router(self, context, router):
+        """Create the router in the Neutron DB, then POST it to ODL."""
+        router_dict = super(OpenDaylightL3RouterPlugin, self).create_router(
+            context, router)
+        url = ROUTERS
+        self.client.sendjson('post', url, {ROUTERS[:-1]: router_dict})
+        return router_dict
+
+    def update_router(self, context, id, router):
+        """Update the router in the Neutron DB, then PUT it to ODL."""
+        router_dict = super(OpenDaylightL3RouterPlugin, self).update_router(
+            context, id, router)
+        url = ROUTERS + "/" + id
+        # Work on a copy so the dict returned to the caller keeps all
+        # attributes that are stripped for the ODL request.
+        resource = router_dict.copy()
+        self.filter_update_router_attributes(resource)
+        self.client.sendjson('put', url, {ROUTERS[:-1]: resource})
+        return router_dict
+
+    def delete_router(self, context, id):
+        """Delete the router from the Neutron DB and from ODL."""
+        super(OpenDaylightL3RouterPlugin, self).delete_router(context, id)
+        url = ROUTERS + "/" + id
+        self.client.sendjson('delete', url, None)
+
+    def create_floatingip(self, context, floatingip,
+                          initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
+        """Create the floating IP in the Neutron DB, then POST it to ODL."""
+        fip_dict = super(OpenDaylightL3RouterPlugin, self).create_floatingip(
+            context, floatingip, initial_status)
+        url = FLOATINGIPS
+        self.client.sendjson('post', url, {FLOATINGIPS[:-1]: fip_dict})
+        return fip_dict
+
+    def update_floatingip(self, context, id, floatingip):
+        """Update the floating IP and its status, then PUT it to ODL."""
+        with context.session.begin(subtransactions=True):
+            fip_dict = super(OpenDaylightL3RouterPlugin,
+                             self).update_floatingip(context, id, floatingip)
+            # Update status based on association: a floating IP with no
+            # port is DOWN, an associated one is ACTIVE.
+            if fip_dict.get('port_id') is None:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN
+            else:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE
+            self.update_floatingip_status(context, id, fip_dict['status'])
+
+        url = FLOATINGIPS + "/" + id
+        self.client.sendjson('put', url, {FLOATINGIPS[:-1]: fip_dict})
+        return fip_dict
+
+    def delete_floatingip(self, context, id):
+        """Delete the floating IP from the Neutron DB and from ODL."""
+        super(OpenDaylightL3RouterPlugin, self).delete_floatingip(context, id)
+        url = FLOATINGIPS + "/" + id
+        self.client.sendjson('delete', url, None)
+
+    def add_router_interface(self, context, router_id, interface_info):
+        """Add a router interface and mirror the operation to ODL."""
+        new_router = super(
+            OpenDaylightL3RouterPlugin, self).add_router_interface(
+                context, router_id, interface_info)
+        url = ROUTERS + "/" + router_id + "/add_router_interface"
+        router_dict = self._generate_router_dict(router_id, interface_info,
+                                                 new_router)
+        self.client.sendjson('put', url, router_dict)
+        return new_router
+
+    def remove_router_interface(self, context, router_id, interface_info):
+        """Remove a router interface and mirror the operation to ODL."""
+        new_router = super(
+            OpenDaylightL3RouterPlugin, self).remove_router_interface(
+                context, router_id, interface_info)
+        url = ROUTERS + "/" + router_id + "/remove_router_interface"
+        router_dict = self._generate_router_dict(router_id, interface_info,
+                                                 new_router)
+        self.client.sendjson('put', url, router_dict)
+        return new_router
+
+    def _generate_router_dict(self, router_id, interface_info, new_router):
+        """Build the ODL request body for a router-interface operation.
+
+        interface_info identifies the interface either by 'port_id' or
+        by 'subnet_id'; the missing half is taken from the result of
+        the Neutron operation (new_router).
+        """
+        # Get network info for the subnet that is being added to the router.
+        # Check if the interface information is by port-id or subnet-id
+        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
+        if add_by_sub:
+            _port_id = new_router['port_id']
+            _subnet_id = interface_info['subnet_id']
+        elif add_by_port:
+            _port_id = interface_info['port_id']
+            _subnet_id = new_router['subnet_id']
+
+        router_dict = {'subnet_id': _subnet_id,
+                       'port_id': _port_id,
+                       'id': router_id,
+                       'tenant_id': new_router['tenant_id']}
+
+        return router_dict
+
+    # Guard so the "not supported" debug message is emitted only once.
+    dvr_deletens_if_no_port_warned = False
+
+    def dvr_deletens_if_no_port(self, context, port_id):
+        # TODO(yamahata): implement this method or delete this logging
+        # For now, this is defined to avoid attribute exception
+        # Since ODL L3 does not create namespaces, this is always going to
+        # be a noop. When it is confirmed, delete this comment and logging
+        if not self.dvr_deletens_if_no_port_warned:
+            LOG.debug('dvr is not suported yet. '
+                      'this method needs to be implemented')
+            self.dvr_deletens_if_no_port_warned = True
+        return []
diff --git a/networking-odl/networking_odl/l3/l3_odl_v2.py b/networking-odl/networking_odl/l3/l3_odl_v2.py
new file mode 100644
index 0000000..2732ea6
--- /dev/null
+++ b/networking-odl/networking_odl/l3/l3_odl_v2.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_log import log as logging
+
+from neutron.db import api as db_api
+from neutron.db import common_db_mixin
+from neutron.db import extraroute_db
+from neutron.db import l3_agentschedulers_db
+from neutron.db import l3_dvr_db
+from neutron.db import l3_gwmode_db
+from neutron.plugins.common import constants
+from neutron_lib import constants as q_const
+
+from networking_odl.common import config # noqa
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+from networking_odl.journal import journal
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenDaylightL3RouterPlugin(
+    common_db_mixin.CommonDbMixin,
+    extraroute_db.ExtraRoute_db_mixin,
+    l3_dvr_db.L3_NAT_with_dvr_db_mixin,
+    l3_gwmode_db.L3_NAT_db_mixin,
+    l3_agentschedulers_db.L3AgentSchedulerDbMixin):
+
+    """Implementation of the OpenDaylight L3 Router Service Plugin.
+
+    This class implements a L3 service plugin that provides
+    router and floatingip resources and manages associated
+    request/response. Unlike the V1 plugin, each operation records a
+    pending row in the journal table inside the same transaction as
+    the Neutron DB change; a journal thread later syncs it to ODL.
+    """
+    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
+                                   "extraroute"]
+
+    def __init__(self):
+        super(OpenDaylightL3RouterPlugin, self).__init__()
+
+        # TODO(rcurran): Continue investigation into how many journal threads
+        # to run per neutron controller deployment.
+        self.journal = journal.OpendaylightJournalThread()
+
+    def get_plugin_type(self):
+        """Return the plugin type constant (L3 router NAT)."""
+        return constants.L3_ROUTER_NAT
+
+    def get_plugin_description(self):
+        """Returns string description of the plugin."""
+        return ("L3 Router Service Plugin for basic L3 forwarding "
+                "using OpenDaylight.")
+
+    @journal.call_thread_on_end
+    def create_router(self, context, router):
+        """Create the router and journal a CREATE entry for ODL.
+
+        NOTE(review): the transaction is opened on db_api.get_session()
+        while the journal row is added via context.session — presumably
+        these resolve to the same session; verify against the journal
+        implementation.
+        """
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            router_dict = super(
+                OpenDaylightL3RouterPlugin, self).create_router(context,
+                                                                router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER,
+                                  router_dict['id'], odl_const.ODL_CREATE,
+                                  router_dict)
+        return router_dict
+
+    @journal.call_thread_on_end
+    def update_router(self, context, router_id, router):
+        """Update the router and journal an UPDATE entry for ODL."""
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            router_dict = super(
+                OpenDaylightL3RouterPlugin, self).update_router(
+                    context, router_id, router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER,
+                                  router_id, odl_const.ODL_UPDATE, router_dict)
+        return router_dict
+
+    @journal.call_thread_on_end
+    def delete_router(self, context, router_id):
+        """Delete the router and journal a DELETE entry for ODL.
+
+        The gateway port id is recorded as a dependency so the journal
+        orders the delete after operations on that port.
+        """
+        session = db_api.get_session()
+        router_dict = self.get_router(context, router_id)
+        dependency_list = [router_dict['gw_port_id']]
+        with session.begin(subtransactions=True):
+            super(OpenDaylightL3RouterPlugin, self).delete_router(context,
+                                                                  router_id)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER,
+                                  router_id, odl_const.ODL_DELETE,
+                                  dependency_list)
+
+    @journal.call_thread_on_end
+    def create_floatingip(self, context, floatingip,
+                          initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
+        """Create the floating IP and journal a CREATE entry for ODL."""
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            fip_dict = super(
+                OpenDaylightL3RouterPlugin, self).create_floatingip(
+                    context, floatingip, initial_status)
+            db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
+                                  fip_dict['id'], odl_const.ODL_CREATE,
+                                  fip_dict)
+        return fip_dict
+
+    @journal.call_thread_on_end
+    def update_floatingip(self, context, floatingip_id, floatingip):
+        """Update the floating IP, refresh its status, and journal it."""
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            fip_dict = super(
+                OpenDaylightL3RouterPlugin, self).update_floatingip(
+                    context, floatingip_id, floatingip)
+
+            # Update status based on association: no port means DOWN,
+            # an associated port means ACTIVE.
+            if fip_dict.get('port_id') is None:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN
+            else:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE
+            self.update_floatingip_status(context, floatingip_id,
+                                          fip_dict['status'])
+
+            db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
+                                  floatingip_id, odl_const.ODL_UPDATE,
+                                  fip_dict)
+        return fip_dict
+
+    @journal.call_thread_on_end
+    def delete_floatingip(self, context, floatingip_id):
+        """Delete the floating IP and journal a DELETE entry for ODL.
+
+        The associated router and external network ids are recorded as
+        dependencies for journal ordering.
+        """
+        session = db_api.get_session()
+        floatingip_dict = self.get_floatingip(context, floatingip_id)
+        dependency_list = [floatingip_dict['router_id']]
+        dependency_list.append(floatingip_dict['floating_network_id'])
+        with session.begin(subtransactions=True):
+            super(OpenDaylightL3RouterPlugin, self).delete_floatingip(
+                context, floatingip_id)
+            db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
+                                  floatingip_id, odl_const.ODL_DELETE,
+                                  dependency_list)
+
+    @journal.call_thread_on_end
+    def add_router_interface(self, context, router_id, interface_info):
+        """Add a router interface and journal an ADD entry for ODL."""
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            new_router = super(
+                OpenDaylightL3RouterPlugin, self).add_router_interface(
+                    context, router_id, interface_info)
+            router_dict = self._generate_router_dict(router_id, interface_info,
+                                                     new_router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER_INTF,
+                                  odl_const.ODL_UUID_NOT_USED,
+                                  odl_const.ODL_ADD, router_dict)
+        return new_router
+
+    @journal.call_thread_on_end
+    def remove_router_interface(self, context, router_id, interface_info):
+        """Remove a router interface and journal a REMOVE entry for ODL."""
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            new_router = super(
+                OpenDaylightL3RouterPlugin, self).remove_router_interface(
+                    context, router_id, interface_info)
+            router_dict = self._generate_router_dict(router_id, interface_info,
+                                                     new_router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER_INTF,
+                                  odl_const.ODL_UUID_NOT_USED,
+                                  odl_const.ODL_REMOVE, router_dict)
+        return new_router
+
+    def _generate_router_dict(self, router_id, interface_info, new_router):
+        """Build the journal payload for a router-interface operation.
+
+        interface_info identifies the interface either by 'port_id' or
+        by 'subnet_id'; the missing half is taken from the result of
+        the Neutron operation (new_router).
+        """
+        # Get network info for the subnet that is being added to the router.
+        # Check if the interface information is by port-id or subnet-id.
+        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
+        if add_by_sub:
+            _port_id = new_router['port_id']
+            _subnet_id = interface_info['subnet_id']
+        elif add_by_port:
+            _port_id = interface_info['port_id']
+            _subnet_id = new_router['subnet_id']
+
+        router_dict = {'subnet_id': _subnet_id,
+                       'port_id': _port_id,
+                       'id': router_id,
+                       'tenant_id': new_router['tenant_id']}
+
+        return router_dict
+
+    # Guard so the "not supported" debug message is emitted only once.
+    dvr_deletens_if_no_port_warned = False
+
+    def dvr_deletens_if_no_port(self, context, port_id):
+        # TODO(yamahata): implement this method or delete this logging
+        # For now, this is defined to avoid attribute exception
+        # Since ODL L3 does not create namespaces, this is always going to
+        # be a noop. When it is confirmed, delete this comment and logging
+        if not self.dvr_deletens_if_no_port_warned:
+            LOG.debug('dvr is not suported yet. '
+                      'this method needs to be implemented')
+            self.dvr_deletens_if_no_port_warned = True
+        return []
diff --git a/networking-odl/networking_odl/lbaas/__init__.py b/networking-odl/networking_odl/lbaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/lbaas/__init__.py
diff --git a/networking-odl/networking_odl/lbaas/driver_v1.py b/networking-odl/networking_odl/lbaas/driver_v1.py
new file mode 100644
index 0000000..aaf3dcf
--- /dev/null
+++ b/networking-odl/networking_odl/lbaas/driver_v1.py
@@ -0,0 +1,125 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import constants as odl_const
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+LBAAS = "lbaas"
+POOLS_URL_PATH = LBAAS + '/' + odl_const.ODL_POOLS
+HEALTHMONITORS_URL_PATH = LBAAS + '/' + odl_const.ODL_HEALTHMONITORS
+
+
+class OpenDaylightLbaasDriverV1(abstract_driver.LoadBalancerAbstractDriver):
+
+ """OpenDaylight LBaaS Driver for the V1 API
+
+ This code is the backend implementation for the OpenDaylight
+ LBaaS V1 driver for OpenStack Neutron.
+ """
+
+ def __init__(self, plugin):
+ LOG.debug("Initializing OpenDaylight LBaaS driver")
+ self.plugin = plugin
+ self.client = odl_client.OpenDaylightRestClient.create_client()
+
+ def create_vip(self, context, vip):
+ """Create a vip on the OpenDaylight Controller.
+
+ No code related to vip in the OpenDayLight neutronNorthbound,
+ so pass this method.
+ """
+ pass
+
+ def update_vip(self, context, old_vip, vip):
+ """Update a vip on the OpenDaylight Controller.
+
+ No code related to vip in the OpenDayLight neutronNorthbound,
+ so pass this method.
+ """
+ pass
+
+ def delete_vip(self, context, vip):
+ """Delete a vip on the OpenDaylight Controller.
+
+ No code related to vip in the OpenDayLight neutronNorthbound,
+ so pass this method.
+ """
+ pass
+
+ def create_pool(self, context, pool):
+ """Create a pool on the OpenDaylight Controller."""
+ url = POOLS_URL_PATH
+ self.client.sendjson('post', url, {odl_const.ODL_POOL: pool})
+
+ def update_pool(self, context, old_pool, pool):
+ """Update a pool on the OpenDaylight Controller."""
+ url = POOLS_URL_PATH + "/" + old_pool['id']
+ self.client.sendjson('put', url, {odl_const.ODL_POOL: pool})
+
+ def delete_pool(self, context, pool):
+ """Delete a pool on the OpenDaylight Controller."""
+ url = POOLS_URL_PATH + "/" + pool['id']
+ self.client.sendjson('delete', url, None)
+
+ def create_member(self, context, member):
+ """Create a pool member on the OpenDaylight Controller."""
+ url = (
+ POOLS_URL_PATH + '/' + member['pool_id'] +
+ '/' + odl_const.ODL_MEMBERS)
+ self.client.sendjson('post', url, {odl_const.ODL_MEMBER: member})
+
+ def update_member(self, context, old_member, member):
+ """Update a pool member on the OpenDaylight Controller."""
+ url = (
+ POOLS_URL_PATH + '/' + member['pool_id'] +
+ '/' + odl_const.ODL_MEMBERS + "/" + old_member['id'])
+ self.client.sendjson('put', url, {odl_const.ODL_MEMBER: member})
+
+ def delete_member(self, context, member):
+ """Delete a pool member on the OpenDaylight Controller."""
+ url = (
+ POOLS_URL_PATH + '/' + member['pool_id'] +
+ '/' + odl_const.ODL_MEMBERS + "/" + member['id'])
+ self.client.sendjson('delete', url, None)
+
+ def create_pool_health_monitor(self, context, health_monitor, pool_id):
+ """Create a pool health monitor on the OpenDaylight Controller."""
+ url = HEALTHMONITORS_URL_PATH
+ self.client.sendjson(
+ 'post', url, {odl_const.ODL_HEALTHMONITOR: health_monitor})
+
+ def update_pool_health_monitor(self, context, old_health_monitor,
+ health_monitor, pool_id):
+ """Update a pool health monitor on the OpenDaylight Controller."""
+ url = HEALTHMONITORS_URL_PATH + "/" + old_health_monitor['id']
+ self.client.sendjson(
+ 'put', url, {odl_const.ODL_HEALTHMONITOR: health_monitor})
+
+ def delete_pool_health_monitor(self, context, health_monitor, pool_id):
+ """Delete a pool health monitor on the OpenDaylight Controller."""
+ url = HEALTHMONITORS_URL_PATH + "/" + health_monitor['id']
+ self.client.sendjson('delete', url, None)
+
+ def stats(self, context, pool_id):
+ """Retrieve pool statistics from the OpenDaylight Controller."""
+ pass
diff --git a/networking-odl/networking_odl/lbaas/driver_v2.py b/networking-odl/networking_odl/lbaas/driver_v2.py
new file mode 100644
index 0000000..720a5c2
--- /dev/null
+++ b/networking-odl/networking_odl/lbaas/driver_v2.py
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_config import cfg
+from oslo_log import helpers as log_helpers
+from oslo_log import log as logging
+
+from neutron_lbaas.drivers import driver_base
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import constants as odl_const
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+
+LBAAS = "lbaas"
+
+
+class OpenDaylightLbaasDriverV2(driver_base.LoadBalancerBaseDriver):
+
+ @log_helpers.log_method_call
+ def __init__(self, plugin):
+ LOG.debug("Initializing OpenDaylight LBaaS driver")
+ self.plugin = plugin
+ self.client = odl_client.OpenDaylightRestClient.create_client()
+ self._loadbalancer = ODLLoadBalancerManager(self.client)
+ self._listener = ODLListenerManager(self.client)
+ self._pool = ODLPoolManager(self.client)
+ self._member = ODLMemberManager(self.client)
+ self._healthmonitor = ODLHealthMonitorManager(self.client)
+
+
+class OpenDaylightManager(object):
+
+ out_of_sync = True
+ url_path = ""
+ obj_type = ""
+
+ """OpenDaylight LBaaS Driver for the V2 API
+
+ This code is the backend implementation for the OpenDaylight
+ LBaaS V2 driver for OpenStack Neutron.
+ """
+
+ @log_helpers.log_method_call
+ def __init__(self, client):
+ self.client = client
+ self.url_path = LBAAS + self.obj_type
+
+ @log_helpers.log_method_call
+ def create(self, context, obj):
+ self.client.sendjson('post', self.url_path, None)
+
+ @log_helpers.log_method_call
+ def update(self, context, obj):
+ self.client.sendjson('put', self.url_path + '/' + obj.id, None)
+
+ @log_helpers.log_method_call
+ def delete(self, context, obj):
+ self.client.sendjson('delete', self.url_path + '/' + obj.id, None)
+
+
+class ODLLoadBalancerManager(OpenDaylightManager,
+                             driver_base.BaseLoadBalancerManager):
+    """Load balancer manager; CRUD is inherited from OpenDaylightManager."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        # obj_type must be assigned before super().__init__, which
+        # derives url_path from it.
+        self.obj_type = odl_const.ODL_LOADBALANCERS
+        super(ODLLoadBalancerManager, self).__init__(client)
+
+    @log_helpers.log_method_call
+    def refresh(self, context, lb):
+        # Not implemented: no refresh operation is proxied to ODL.
+        pass
+
+    @log_helpers.log_method_call
+    def stats(self, context, lb):
+        # Not implemented: statistics are not proxied to ODL.
+        pass
+
+
+class ODLListenerManager(OpenDaylightManager,
+                         driver_base.BaseListenerManager):
+    """Listener manager; CRUD is inherited from OpenDaylightManager."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        # obj_type must be assigned before super().__init__, which
+        # derives url_path from it.
+        self.obj_type = odl_const.ODL_LISTENERS
+        super(ODLListenerManager, self).__init__(client)
+
+
+class ODLPoolManager(OpenDaylightManager,
+                     driver_base.BasePoolManager):
+    """Pool manager; CRUD is inherited from OpenDaylightManager."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        # obj_type must be assigned before super().__init__, which
+        # derives url_path from it.
+        self.obj_type = odl_const.ODL_POOLS
+        super(ODLPoolManager, self).__init__(client)
+
+
+class ODLMemberManager(OpenDaylightManager,
+                       driver_base.BaseMemberManager):
+    """Member manager; CRUD is inherited from OpenDaylightManager."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        # obj_type must be assigned before super().__init__, which
+        # derives url_path from it.
+        self.obj_type = odl_const.ODL_MEMBERS
+        super(ODLMemberManager, self).__init__(client)
+
+
+class ODLHealthMonitorManager(OpenDaylightManager,
+                              driver_base.BaseHealthMonitorManager):
+    """Health monitor manager; CRUD is inherited from OpenDaylightManager."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        # obj_type must be assigned before super().__init__, which
+        # derives url_path from it.
+        self.obj_type = odl_const.ODL_HEALTHMONITORS
+        super(ODLHealthMonitorManager, self).__init__(client)
diff --git a/networking-odl/networking_odl/ml2/README.odl b/networking-odl/networking_odl/ml2/README.odl
new file mode 100644
index 0000000..eef8d44
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/README.odl
@@ -0,0 +1,41 @@
+OpenDaylight ML2 MechanismDriver
+================================
+OpenDaylight is an Open Source SDN Controller developed by a plethora of
+companies and hosted by the Linux Foundation. The OpenDaylight website
+contains more information on the capabilities OpenDaylight provides:
+
+ http://www.opendaylight.org
+
+Theory of operation
+===================
+The OpenStack Neutron integration with OpenDaylight consists of the ML2
+MechanismDriver which acts as a REST proxy and passes all Neutron API
+calls into OpenDaylight. OpenDaylight contains a NB REST service (called
+the NeutronAPIService) which caches data from these proxied API calls and
+makes it available to other services inside of OpenDaylight. One current
+user of the SB side of the NeutronAPIService is the OVSDB code in
+OpenDaylight. OVSDB uses the neutron information to isolate tenant networks
+using GRE or VXLAN tunnels.
+
+How to use the OpenDaylight ML2 MechanismDriver
+===============================================
+To use the ML2 MechanismDriver, you need to ensure you have it configured
+as one of the "mechanism_drivers" in ML2:
+
+ mechanism_drivers=opendaylight
+
+The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini
+file or in a separate ml2_conf_odl.ini file. An example is shown below:
+
+ [ml2_odl]
+ password = admin
+ username = admin
+ url = http://192.168.100.1:8080/controller/nb/v2/neutron
+
+When starting OpenDaylight, ensure you have the SimpleForwarding application
+disabled or remove the .jar file from the plugins directory. Also ensure you
+start OpenDaylight before you start OpenStack Neutron.
+
+There is devstack support for this which will automatically pull down OpenDaylight
+and start it as part of devstack as well. The patch for this will likely merge
+around the same time as this patch merges.
diff --git a/networking-odl/networking_odl/ml2/__init__.py b/networking-odl/networking_odl/ml2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/__init__.py
diff --git a/networking-odl/networking_odl/ml2/legacy_port_binding.py b/networking-odl/networking_odl/ml2/legacy_port_binding.py
new file mode 100644
index 0000000..7b9b918
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/legacy_port_binding.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo_log import log
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron_lib import constants as n_const
+
+from networking_odl.ml2 import port_binding
+
+
+LOG = log.getLogger(__name__)
+
+
+class LegacyPortBindingManager(port_binding.PortBindingController):
+
+ def __init__(self):
+ self.vif_details = {portbindings.CAP_PORT_FILTER: True}
+ self.supported_vnic_types = [portbindings.VNIC_NORMAL]
+
+ def bind_port(self, port_context):
+ """Set binding for all valid segments
+
+ """
+ vnic_type = port_context.current.get(portbindings.VNIC_TYPE,
+ portbindings.VNIC_NORMAL)
+ if vnic_type not in self.supported_vnic_types:
+ LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
+ vnic_type)
+ return
+
+ valid_segment = None
+ for segment in port_context.segments_to_bind:
+ if self._check_segment(segment):
+ valid_segment = segment
+ break
+
+ if valid_segment:
+ vif_type = self._get_vif_type(port_context)
+ LOG.debug("Bind port %(port)s on network %(network)s with valid "
+ "segment %(segment)s and VIF type %(vif_type)r.",
+ {'port': port_context.current['id'],
+ 'network': port_context.network.current['id'],
+ 'segment': valid_segment, 'vif_type': vif_type})
+
+ port_context.set_binding(
+ segment[driver_api.ID], vif_type,
+ self.vif_details,
+ status=n_const.PORT_STATUS_ACTIVE)
+
+ def _check_segment(self, segment):
+ """Verify a segment is valid for the OpenDaylight MechanismDriver.
+
+ Verify the requested segment is supported by ODL and return True or
+ False to indicate this to callers.
+ """
+
+ network_type = segment[driver_api.NETWORK_TYPE]
+ return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
+ constants.TYPE_VXLAN, constants.TYPE_VLAN]
+
+ def _get_vif_type(self, port_context):
+ """Get VIF type string for given PortContext
+
+ Dummy implementation: it always returns following constant.
+ neutron.extensions.portbindings.VIF_TYPE_OVS
+ """
+
+ return portbindings.VIF_TYPE_OVS
diff --git a/networking-odl/networking_odl/ml2/mech_driver.py b/networking-odl/networking_odl/ml2/mech_driver.py
new file mode 100644
index 0000000..adde8d9
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/mech_driver.py
@@ -0,0 +1,458 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import copy
+import six
+
+import netaddr
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import requests
+
+from neutron.common import utils
+from neutron import context as neutron_context
+from neutron.extensions import allowedaddresspairs as addr_pair
+from neutron.extensions import securitygroup as sg
+from neutron.plugins.ml2 import driver_api
+from neutron.plugins.ml2 import driver_context
+from neutron_lib import exceptions as n_exc
+
+from networking_odl._i18n import _LE
+from networking_odl.common import callback as odl_call
+from networking_odl.common import client as odl_client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import utils as odl_utils
+from networking_odl.ml2 import port_binding
+
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+
+not_found_exception_map = {odl_const.ODL_NETWORKS: n_exc.NetworkNotFound,
+ odl_const.ODL_SUBNETS: n_exc.SubnetNotFound,
+ odl_const.ODL_PORTS: n_exc.PortNotFound,
+ odl_const.ODL_SGS: sg.SecurityGroupNotFound,
+ odl_const.ODL_SG_RULES:
+ sg.SecurityGroupRuleNotFound}
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ResourceFilterBase(object):
+    """Base class for stripping Neutron-only attributes from resources.
+
+    Subclasses remove attributes that the OpenDaylight northbound API does
+    not accept before the resource dict is proxied to ODL.
+    """
+
+    @staticmethod
+    @abc.abstractmethod
+    def filter_create_attributes(resource, context):
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def filter_update_attributes(resource, context):
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def filter_create_attributes_with_plugin(resource, plugin, dbcontext):
+        pass
+
+    @staticmethod
+    def _filter_unmapped_null(resource_dict, unmapped_keys):
+        # Delete the given keys from resource_dict when their value is None.
+        # NOTE(yamahata): bug work around
+        # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
+        # Null-value for an unmapped element causes next mapped
+        # collection to contain a null value
+        # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
+        #
+        # Java Object:
+        # class Root {
+        # Collection<String> mappedCollection = new ArrayList<String>;
+        # }
+        #
+        # Result:
+        # mappedCollection contains one element; null
+        #
+        # TODO(yamahata): update along side with neutron and ODL
+        # add when neutron adds more extensions
+        # delete when ODL neutron northbound supports it
+        # TODO(yamahata): do same thing for other resources
+        keys_to_del = [key for key in unmapped_keys
+                       if resource_dict.get(key) is None]
+        if keys_to_del:
+            odl_utils.try_del(resource_dict, keys_to_del)
+
+
+class NetworkFilter(ResourceFilterBase):
+    """Attribute filter for network resources sent to ODL."""
+
+    # Keys the ODL northbound cannot map; dropped when their value is None.
+    _UNMAPPED_KEYS = ['qos_policy_id']
+
+    @classmethod
+    def filter_create_attributes(cls, network, context):
+        """Filter out network attributes not required for a create."""
+        odl_utils.try_del(network, ['status', 'subnets'])
+        cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
+
+    @classmethod
+    def filter_update_attributes(cls, network, context):
+        """Filter out network attributes for an update operation."""
+        odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
+        cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
+
+    @classmethod
+    def filter_create_attributes_with_plugin(cls, network, plugin, dbcontext):
+        """Build a NetworkContext from the plugin, then filter for create."""
+        context = driver_context.NetworkContext(plugin, dbcontext, network)
+        cls.filter_create_attributes(network, context)
+
+
+class SubnetFilter(ResourceFilterBase):
+    """Attribute filter for subnet resources sent to ODL."""
+
+    @staticmethod
+    def filter_create_attributes(subnet, context):
+        """Filter out subnet attributes not required for a create."""
+        pass
+
+    @staticmethod
+    def filter_update_attributes(subnet, context):
+        """Filter out subnet attributes for an update operation."""
+        odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
+                          'allocation_pools', 'tenant_id'])
+
+    @classmethod
+    def filter_create_attributes_with_plugin(cls, subnet, plugin, dbcontext):
+        """Build a SubnetContext from the plugin, then filter for create."""
+        network = plugin.get_network(dbcontext, subnet['network_id'])
+        context = driver_context.SubnetContext(plugin, dbcontext, subnet,
+                                               network)
+        cls.filter_create_attributes(subnet, context)
+
+
+class PortFilter(ResourceFilterBase):
+    """Attribute filter for port resources sent to ODL."""
+
+    # Keys the ODL northbound cannot map; dropped when their value is None.
+    _UNMAPPED_KEYS = ['binding:profile', 'dns_name',
+                      'port_security_enabled', 'qos_policy_id']
+
+    @staticmethod
+    def _add_security_groups(port, context):
+        """Populate the 'security_groups' field with entire records."""
+        dbcontext = context._plugin_context
+        groups = [context._plugin.get_security_group(dbcontext, sg)
+                  for sg in port['security_groups']]
+        port['security_groups'] = groups
+
+    @classmethod
+    def _fixup_allowed_ipaddress_pairs(cls, allowed_address_pairs):
+        """unify (ip address or network address) into network address"""
+        for address_pair in allowed_address_pairs:
+            ip_address = address_pair['ip_address']
+            network_address = str(netaddr.IPNetwork(ip_address))
+            address_pair['ip_address'] = network_address
+
+    @classmethod
+    def filter_create_attributes(cls, port, context):
+        """Filter out port attributes not required for a create."""
+        cls._add_security_groups(port, context)
+        cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS])
+        cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
+        odl_utils.try_del(port, ['status'])
+
+        # NOTE(yamahata): work around for port creation for router
+        # tenant_id=''(empty string) is passed when port is created
+        # by l3 plugin internally for router.
+        # On the other hand, ODL doesn't accept empty string for tenant_id.
+        # In that case, deduce tenant_id from network_id for now.
+        # Right fix: modify Neutron so that don't allow empty string
+        # for tenant_id even for port for internal use.
+        # TODO(yamahata): eliminate this work around when neutron side
+        # is fixed
+        # assert port['tenant_id'] != ''
+        if port['tenant_id'] == '':
+            LOG.debug('empty string was passed for tenant_id: %s(port)', port)
+            port['tenant_id'] = context._network_context._network['tenant_id']
+
+    @classmethod
+    def filter_update_attributes(cls, port, context):
+        """Filter out port attributes for an update operation."""
+        cls._add_security_groups(port, context)
+        cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS])
+        cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
+        odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id'])
+
+    @classmethod
+    def filter_create_attributes_with_plugin(cls, port, plugin, dbcontext):
+        """Build a PortContext from the plugin, then filter for create."""
+        network = plugin.get_network(dbcontext, port['network_id'])
+        # TODO(yamahata): port binding
+        binding = {}
+        context = driver_context.PortContext(
+            plugin, dbcontext, port, network, binding, None)
+        cls.filter_create_attributes(port, context)
+
+
+class SecurityGroupFilter(ResourceFilterBase):
+    """Attribute filter for security groups; all operations are no-ops."""
+
+    @staticmethod
+    def filter_create_attributes(sg, context):
+        """Filter out security-group attributes not required for a create."""
+        pass
+
+    @staticmethod
+    def filter_update_attributes(sg, context):
+        """Filter out security-group attributes for an update operation."""
+        pass
+
+    @staticmethod
+    def filter_create_attributes_with_plugin(sg, plugin, dbcontext):
+        """No filtering needed for security groups during full sync."""
+        pass
+
+
+class SecurityGroupRuleFilter(ResourceFilterBase):
+    """Attribute filter for security-group rules; all operations are no-ops."""
+
+    @staticmethod
+    def filter_create_attributes(sg_rule, context):
+        """Filter out sg-rule attributes not required for a create."""
+        pass
+
+    @staticmethod
+    def filter_update_attributes(sg_rule, context):
+        """Filter out sg-rule attributes for an update operation."""
+        pass
+
+    @staticmethod
+    def filter_create_attributes_with_plugin(sg_rule, plugin, dbcontext):
+        """No filtering needed for sg rules during full sync."""
+        pass
+
+
+class OpenDaylightDriver(object):
+
+    """OpenDaylight Python Driver for Neutron.
+
+    This code is the backend implementation for the OpenDaylight ML2
+    MechanismDriver for OpenStack Neutron.
+    """
+    FILTER_MAP = {
+        odl_const.ODL_NETWORKS: NetworkFilter,
+        odl_const.ODL_SUBNETS: SubnetFilter,
+        odl_const.ODL_PORTS: PortFilter,
+        odl_const.ODL_SGS: SecurityGroupFilter,
+        odl_const.ODL_SG_RULES: SecurityGroupRuleFilter,
+    }
+    # Start out-of-sync so the first configuration change triggers a full
+    # resync of the Neutron DB to ODL.
+    out_of_sync = True
+
+    def __init__(self):
+        LOG.debug("Initializing OpenDaylight ML2 driver")
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+        self.sec_handler = odl_call.OdlSecurityGroupsHandler(self)
+        self.port_binding_controller = port_binding.PortBindingManager.create()
+        # TODO(rzang): Each port binding controller should have any necessary
+        # parameter passed in from configuration files.
+        # BTW, CAP_PORT_FILTER seems being obsoleted.
+        # Leave the code commented out for now for future reference.
+        #
+        # self.vif_details = {portbindings.CAP_PORT_FILTER: True}
+        # self._network_topology = network_topology.NetworkTopologyManager(
+        #     vif_details=self.vif_details)
+
+    def synchronize(self, operation, object_type, context):
+        """Synchronize ODL with Neutron following a configuration change."""
+        if self.out_of_sync:
+            self.sync_full(context._plugin)
+        else:
+            self.sync_single_resource(operation, object_type, context)
+
+    def sync_resources(self, plugin, dbcontext, collection_name):
+        """Sync objects from Neutron over to OpenDaylight.
+
+        This will handle syncing networks, subnets, and ports from Neutron to
+        OpenDaylight. It also filters out the requisite items which are not
+        valid for create API operations.
+        """
+        filter_cls = self.FILTER_MAP[collection_name]
+        to_be_synced = []
+        obj_getter = getattr(plugin, 'get_%s' % collection_name)
+        if collection_name == odl_const.ODL_SGS:
+            resources = obj_getter(dbcontext, default_sg=True)
+        else:
+            resources = obj_getter(dbcontext)
+        for resource in resources:
+            try:
+                # Convert underscores to dashes in the URL for ODL
+                collection_name_url = collection_name.replace('_', '-')
+                urlpath = collection_name_url + '/' + resource['id']
+                self.client.sendjson('get', urlpath, None)
+            except requests.exceptions.HTTPError as e:
+                with excutils.save_and_reraise_exception() as ctx:
+                    # 404 means ODL doesn't know the resource yet: queue it
+                    # for creation and swallow the exception.
+                    if e.response.status_code == requests.codes.not_found:
+                        filter_cls.filter_create_attributes_with_plugin(
+                            resource, plugin, dbcontext)
+                        to_be_synced.append(resource)
+                        ctx.reraise = False
+            else:
+                # TODO(yamahata): compare result with resource.
+                # If they don't match, update it below
+                pass
+
+        if to_be_synced:
+            # ODL expects a singular key for one item, plural for a list.
+            key = collection_name[:-1] if len(to_be_synced) == 1 else (
+                collection_name)
+            # Convert underscores to dashes in the URL for ODL
+            collection_name_url = collection_name.replace('_', '-')
+            self.client.sendjson('post', collection_name_url,
+                                 {key: to_be_synced})
+
+        # https://bugs.launchpad.net/networking-odl/+bug/1371115
+        # TODO(yamahata): update resources with unsynced attributes
+        # TODO(yamahata): find dangling ODL resource that was deleted in
+        # neutron db
+
+    @utils.synchronized('odl-sync-full')
+    def sync_full(self, plugin):
+        """Resync the entire database to ODL.
+
+        Transition to the in-sync state on success.
+        Note: we only allow a single thread in here at a time.
+        """
+        if not self.out_of_sync:
+            return
+        dbcontext = neutron_context.get_admin_context()
+        for collection_name in [odl_const.ODL_NETWORKS,
+                                odl_const.ODL_SUBNETS,
+                                odl_const.ODL_PORTS,
+                                odl_const.ODL_SGS,
+                                odl_const.ODL_SG_RULES]:
+            self.sync_resources(plugin, dbcontext, collection_name)
+        self.out_of_sync = False
+
+    def sync_single_resource(self, operation, object_type, context):
+        """Sync over a single resource from Neutron to OpenDaylight.
+
+        Handle syncing a single operation over to OpenDaylight, and correctly
+        filter attributes out which are not required for the requisite
+        operation (create or update) being handled.
+        """
+        # Convert underscores to dashes in the URL for ODL
+        object_type_url = object_type.replace('_', '-')
+        try:
+            obj_id = context.current['id']
+            if operation == odl_const.ODL_DELETE:
+                self.out_of_sync |= not self.client.try_delete(
+                    object_type_url + '/' + obj_id)
+            else:
+                filter_cls = self.FILTER_MAP[object_type]
+                if operation == odl_const.ODL_CREATE:
+                    urlpath = object_type_url
+                    method = 'post'
+                    attr_filter = filter_cls.filter_create_attributes
+                elif operation == odl_const.ODL_UPDATE:
+                    urlpath = object_type_url + '/' + obj_id
+                    method = 'put'
+                    attr_filter = filter_cls.filter_update_attributes
+                # Deep copy so attribute filtering never mutates the
+                # context's own resource dict.
+                resource = copy.deepcopy(context.current)
+                attr_filter(resource, context)
+                self.client.sendjson(method, urlpath,
+                                     {object_type_url[:-1]: resource})
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Unable to perform %(operation)s on "
+                              "%(object_type)s %(object_id)s"),
+                          {'operation': operation,
+                           'object_type': object_type,
+                           'object_id': obj_id})
+                # Any failure flips us out-of-sync; next change full-resyncs.
+                self.out_of_sync = True
+
+    def sync_from_callback(self, operation, res_type, res_id, resource_dict):
+        """Sync a resource change delivered via the callback mechanism."""
+        object_type = res_type.plural.replace('_', '-')
+        try:
+            if operation == odl_const.ODL_DELETE:
+                self.out_of_sync |= not self.client.try_delete(
+                    object_type + '/' + res_id)
+            else:
+                if operation == odl_const.ODL_CREATE:
+                    urlpath = object_type
+                    method = 'post'
+                elif operation == odl_const.ODL_UPDATE:
+                    urlpath = object_type + '/' + res_id
+                    method = 'put'
+                self.client.sendjson(method, urlpath, resource_dict)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Unable to perform %(operation)s on "
+                              "%(object_type)s %(res_id)s "
+                              "%(resource_dict)s"),
+                          {'operation': operation,
+                           'object_type': object_type,
+                           'res_id': res_id,
+                           'resource_dict': resource_dict})
+                self.out_of_sync = True
+
+    def bind_port(self, port_context):
+        """Set binding for a valid segments
+
+        """
+        self.port_binding_controller.bind_port(port_context)
+
+
+class OpenDaylightMechanismDriver(driver_api.MechanismDriver):
+
+    """Mechanism Driver for OpenDaylight.
+
+    This driver was a port from the NCS MechanismDriver. The API
+    exposed by ODL is slightly different from the API exposed by NCS,
+    but the general concepts are the same.
+    """
+
+    def initialize(self):
+        """Read ml2_odl config and create the backend driver.
+
+        Raises:
+            cfg.RequiredOptError: if url, username or password is unset.
+        """
+        self.url = cfg.CONF.ml2_odl.url
+        self.timeout = cfg.CONF.ml2_odl.timeout
+        self.username = cfg.CONF.ml2_odl.username
+        self.password = cfg.CONF.ml2_odl.password
+        required_opts = ('url', 'username', 'password')
+        for opt in required_opts:
+            if not getattr(self, opt):
+                raise cfg.RequiredOptError(opt, 'ml2_odl')
+
+        self.odl_drv = OpenDaylightDriver()
+
+    # Postcommit hooks are used to trigger synchronization.
+
+    def create_network_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_NETWORKS,
+                                 context)
+
+    def update_network_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_NETWORKS,
+                                 context)
+
+    def delete_network_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_NETWORKS,
+                                 context)
+
+    def create_subnet_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_SUBNETS,
+                                 context)
+
+    def update_subnet_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_SUBNETS,
+                                 context)
+
+    def delete_subnet_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_SUBNETS,
+                                 context)
+
+    def create_port_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_PORTS,
+                                 context)
+
+    def update_port_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_PORTS,
+                                 context)
+
+    def delete_port_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_PORTS,
+                                 context)
+
+    def bind_port(self, context):
+        """Delegate port binding to the backend driver."""
+        self.odl_drv.bind_port(context)
diff --git a/networking-odl/networking_odl/ml2/mech_driver_v2.py b/networking-odl/networking_odl/ml2/mech_driver_v2.py
new file mode 100644
index 0000000..dfc8df1
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/mech_driver_v2.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron.db import api as db_api
+from neutron.plugins.ml2 import driver_api as api
+
+from networking_odl.common import callback
+from networking_odl.common import config as odl_conf
+from networking_odl.common import constants as odl_const
+from networking_odl.journal import cleanup
+from networking_odl.journal import full_sync
+from networking_odl.journal import journal
+from networking_odl.journal import maintenance
+from networking_odl.ml2 import port_binding
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenDaylightMechanismDriver(api.MechanismDriver):
+    """OpenDaylight Python Driver for Neutron.
+
+    This code is the backend implementation for the OpenDaylight ML2
+    MechanismDriver for OpenStack Neutron.
+    """
+
+    def initialize(self):
+        LOG.debug("Initializing OpenDaylight ML2 driver")
+        cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl")
+        self.sg_handler = callback.OdlSecurityGroupsHandler(self)
+        self.journal = journal.OpendaylightJournalThread()
+        self.port_binding_controller = port_binding.PortBindingManager.create()
+        self._start_maintenance_thread()
+
+    def _start_maintenance_thread(self):
+        # start the maintenance thread and register all the maintenance
+        # operations :
+        # (1) JournalCleanup - Delete completed rows from journal
+        # (2) CleanupProcessing - Mark orphaned processing rows to pending
+        # (3) Full sync - Re-sync when detecting an ODL "cold reboot"
+        cleanup_obj = cleanup.JournalCleanup()
+        self._maintenance_thread = maintenance.MaintenanceThread()
+        self._maintenance_thread.register_operation(
+            cleanup_obj.delete_completed_rows)
+        self._maintenance_thread.register_operation(
+            cleanup_obj.cleanup_processing_rows)
+        self._maintenance_thread.register_operation(full_sync.full_sync)
+        self._maintenance_thread.start()
+
+    @staticmethod
+    def _record_in_journal(context, object_type, operation, data=None):
+        # Record the operation in the journal table inside the same DB
+        # session as the precommit, so it commits atomically with the
+        # Neutron resource change. Defaults data to the current resource.
+        if data is None:
+            data = context.current
+        journal.record(context._plugin_context.session, object_type,
+                       context.current['id'], operation, data)
+
+    def create_network_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE)
+
+    def create_subnet_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
+
+    def create_port_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_PORT, odl_const.ODL_CREATE)
+
+    def update_network_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE)
+
+    def update_subnet_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE)
+
+    def update_port_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_PORT, odl_const.ODL_UPDATE)
+
+    def delete_network_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[])
+
+    def delete_subnet_precommit(self, context):
+        # Use the journal row's data field to store parent object
+        # uuids. This information is required for validation checking
+        # when deleting parent objects.
+        new_context = [context.current['network_id']]
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE,
+            data=new_context)
+
+    def delete_port_precommit(self, context):
+        # Use the journal row's data field to store parent object
+        # uuids. This information is required for validation checking
+        # when deleting parent objects.
+        new_context = [context.current['network_id']]
+        for subnet in context.current['fixed_ips']:
+            new_context.append(subnet['subnet_id'])
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_PORT, odl_const.ODL_DELETE,
+            data=new_context)
+
+    @journal.call_thread_on_end
+    def sync_from_callback(self, operation, res_type, res_id, resource_dict):
+        """Record a callback-driven resource change in the journal."""
+        object_type = res_type.singular
+        # NOTE(review): resource_dict is indexed here before the
+        # 'is not None' check below — a 'create' with resource_dict=None
+        # would raise TypeError. Presumably create always supplies a dict;
+        # confirm against the callback callers.
+        object_uuid = (resource_dict[object_type]['id']
+                       if operation == 'create' else res_id)
+        if resource_dict is not None:
+            resource_dict = resource_dict[object_type]
+        journal.record(db_api.get_session(), object_type, object_uuid,
+                       operation, resource_dict)
+
+    def _postcommit(self, context):
+        # All postcommit hooks just wake the journal thread; the actual
+        # change was already recorded by the matching precommit.
+        self.journal.set_sync_event()
+
+    create_network_postcommit = _postcommit
+    create_subnet_postcommit = _postcommit
+    create_port_postcommit = _postcommit
+    update_network_postcommit = _postcommit
+    update_subnet_postcommit = _postcommit
+    update_port_postcommit = _postcommit
+    delete_network_postcommit = _postcommit
+    delete_subnet_postcommit = _postcommit
+    delete_port_postcommit = _postcommit
+
+    def bind_port(self, port_context):
+        """Set binding for a valid segments
+
+        """
+        return self.port_binding_controller.bind_port(port_context)
diff --git a/networking-odl/networking_odl/ml2/network_topology.py b/networking-odl/networking_odl/ml2/network_topology.py
new file mode 100644
index 0000000..b0bfae1
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/network_topology.py
@@ -0,0 +1,313 @@
+# Copyright (c) 2015-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import importlib
+import logging
+
+import six
+from six.moves.urllib import parse
+
+from neutron.extensions import portbindings
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from networking_odl.common import cache
+from networking_odl.common import client
+from networking_odl.common import utils
+from networking_odl._i18n import _, _LI, _LW, _LE
+from networking_odl.ml2 import port_binding
+
+
+LOG = log.getLogger(__name__)
+
+
+class NetworkTopologyManager(port_binding.PortBindingController):
+
+    # the first valid vif type will be chosen following the order
+    # on this list. This list can be modified to adapt to user preferences.
+    valid_vif_types = [
+        portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS]
+
+    # List of class names of registered implementations of interface
+    # NetworkTopologyParser
+    network_topology_parsers = [
+        'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser']
+
+    def __init__(self, vif_details=None, client=None):
+        # Details for binding port
+        self._vif_details = vif_details or {portbindings.CAP_PORT_FILTER: True}
+
+        # Rest client used for getting network topology from ODL
+        self._client = client or NetworkTopologyClient.create_client()
+
+        # Table of NetworkTopologyElement
+        self._elements_by_ip = cache.Cache(
+            self._fetch_and_parse_network_topology)
+
+        # Parsers used for processing network topology
+        self._parsers = list(self._create_parsers())
+
+    def bind_port(self, port_context):
+        """Set binding for a valid segment
+
+        """
+        host_name = port_context.host
+        elements = list()
+        try:
+            # Append to empty list to add as much elements as possible
+            # in the case it raises an exception
+            elements.extend(self._fetch_elements_by_host(host_name))
+        except Exception:
+            LOG.exception(
+                _LE('Error fetching elements for host %(host_name)r.'),
+                {'host_name': host_name}, exc_info=1)
+
+        if not elements:
+            # In case it wasn't able to find any network topology element
+            # for given host then it uses the legacy OVS one keeping the old
+            # behaviour
+            LOG.warning(
+                _LW('Using legacy OVS network topology element for port '
+                    'binding for host: %(host_name)r.'),
+                {'host_name': host_name})
+
+            # Imported here to avoid cyclic module dependencies
+            from networking_odl.ml2 import ovsdb_topology
+            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]
+
+        # TODO(Federico Ressi): in the case there are more candidate virtual
+        # switches instances for the same host it chooses one for binding
+        # port. As there isn't any known way to perform this selection it
+        # selects a VIF type that is valid for all switches that have
+        # been found and a VIF type valid for all them. This has to be improved
+        for vif_type in self.valid_vif_types:
+            vif_type_is_valid_for_all = True
+            for element in elements:
+                if vif_type not in element.valid_vif_types:
+                    # it is invalid for at least one element: discard it
+                    vif_type_is_valid_for_all = False
+                    break
+
+            if vif_type_is_valid_for_all:
+                # This is the best VIF type valid for all elements
+                LOG.debug(
+                    "Found VIF type %(vif_type)r valid for all network "
+                    "topology elements for host %(host_name)r.",
+                    {'vif_type': vif_type, 'host_name': host_name})
+
+                for element in elements:
+                    # It assumes that any element could be good for given host
+                    # In most of the cases I expect exactly one element for
+                    # every compute host
+                    try:
+                        return element.bind_port(
+                            port_context, vif_type, self._vif_details)
+
+                    except Exception:
+                        LOG.exception(
+                            _LE('Network topology element has failed binding '
+                                'port:\n%(element)s'),
+                            {'element': element.to_json()})
+
+        LOG.error(
+            _LE('Unable to bind port element for given host and valid VIF '
+                'types:\n'
+                '\thostname: %(host_name)s\n'
+                '\tvalid VIF types: %(valid_vif_types)s'),
+            {'host_name': host_name,
+             'valid_vif_types': ', '.join(self.valid_vif_types)})
+        # TODO(Federico Ressi): should I raise an exception here?
+
+    def _create_parsers(self):
+        # Instantiate every registered parser; failures are logged and
+        # skipped so one broken parser doesn't disable the others.
+        for parser_name in self.network_topology_parsers:
+            try:
+                yield NetworkTopologyParser.create_parser(parser_name)
+
+            except Exception:
+                LOG.exception(
+                    _LE('Error initializing topology parser: %(parser_name)r'),
+                    {'parser_name': parser_name})
+
+    def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
+        '''Yields all network topology elements referring to given host name
+
+        '''
+
+        host_addresses = [host_name]
+        try:
+            # It uses both compute host name and known IP addresses to
+            # recognize topology elements valid for given computed host
+            ip_addresses = utils.get_addresses_by_name(host_name)
+        except Exception:
+            ip_addresses = []
+            LOG.exception(
+                _LE('Unable to resolve IP addresses for host %(host_name)r'),
+                {'host_name': host_name})
+        else:
+            host_addresses.extend(ip_addresses)
+
+        yield_elements = set()
+        try:
+            for __, element in self._elements_by_ip.fetch_all(
+                    host_addresses, cache_timeout):
+                # yields every element only once
+                if element not in yield_elements:
+                    yield_elements.add(element)
+                    yield element
+
+        except cache.CacheFetchError as error:
+            # This error is expected on most of the cases because typically not
+            # all host_addresses maps to a network topology element.
+            if yield_elements:
+                # As we need only one element for every host we ignore the
+                # case in which other host addresses didn't map to any host
+                LOG.debug(
+                    'Host addresses not found in networking topology: %s',
+                    ', '.join(error.missing_keys))
+            else:
+                LOG.exception(
+                    _LE('No such network topology elements for given host '
+                        '%(host_name)r and given IPs: %(ip_addresses)s.'),
+                    {'host_name': host_name,
+                     'ip_addresses': ", ".join(ip_addresses)})
+                error.reraise_cause()
+
+    def _fetch_and_parse_network_topology(self, addresses):
+        # The cache calls this method to fetch new elements when at least one
+        # of the addresses is not in the cache or it has expired.
+
+        # pylint: disable=unused-argument
+        LOG.info(_LI('Fetch network topology from ODL.'))
+        response = self._client.get()
+        response.raise_for_status()
+
+        network_topology = response.json()
+        if LOG.isEnabledFor(logging.DEBUG):
+            topology_str = jsonutils.dumps(
+                network_topology, sort_keys=True, indent=4,
+                separators=(',', ': '))
+            LOG.debug("Got network topology:\n%s", topology_str)
+
+        at_least_one_element_for_asked_addresses = False
+        for parser in self._parsers:
+            try:
+                for element in parser.parse_network_topology(network_topology):
+                    if not isinstance(element, NetworkTopologyElement):
+                        raise TypeError(_(
+                            "Yield element doesn't implement interface "
+                            "'NetworkTopologyElement': {!r}").format(element))
+                    # the same element can be known by more host addresses
+                    for host_address in element.host_addresses:
+                        if host_address in addresses:
+                            at_least_one_element_for_asked_addresses = True
+                            yield host_address, element
+            except Exception:
+                LOG.exception(
+                    _LE("Parser %(parser)r failed to parse network topology."),
+                    {'parser': parser})
+
+        if not at_least_one_element_for_asked_addresses:
+            # this will mark entries for given addresses as failed to allow
+            # calling this method again as soon it is requested and avoid
+            # waiting for cache expiration
+            raise ValueError(
+                _('No such topology element for given host addresses: {}')
+                .format(', '.join(addresses)))
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NetworkTopologyParser(object):
+    """Abstract factory/interface for ODL network-topology parsers."""
+
+    @classmethod
+    def create_parser(cls, parser_class_name):
+        '''Creates a 'NetworkTopologyParser' of given class name.
+
+        Raises:
+            TypeError: if the named class is not a subclass of this one.
+        '''
+        module_name, class_name = parser_class_name.rsplit('.', 1)
+        module = importlib.import_module(module_name)
+        clss = getattr(module, class_name)
+        if not issubclass(clss, cls):
+            raise TypeError(_(
+                "Class {class_name!r} of module {module_name!r} doesn't "
+                "implement 'NetworkTopologyParser' interface.").format(
+                class_name=class_name, module_name=module_name))
+        return clss()
+
+    @abc.abstractmethod
+    def parse_network_topology(self, network_topology):
+        '''Parses OpenDaylight network topology
+
+        Yields all network topology elements implementing
+        'NetworkTopologyElement' interface found in given network topology.
+        '''
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NetworkTopologyElement(object):
+    """Interface for a single network-topology element (a virtual switch)."""
+
+    @abc.abstractproperty
+    def host_addresses(self):
+        '''List of known host addresses of a single compute host
+
+        Either host names and ip addresses are valid.
+        Neutron host controller must know at least one of these compute host
+        names or ip addresses to find this element.
+        '''
+
+    @abc.abstractproperty
+    def valid_vif_types(self):
+        '''Returns a tuple listing VIF types supported by the compute node
+
+        '''
+
+    @abc.abstractmethod
+    def bind_port(self, port_context, vif_type, vif_details):
+        '''Bind port context using given vif type and vif details
+
+        This method is expected to search for a valid segment and then
+        call port_context.set_binding()
+        '''
+
+    def to_dict(self):
+        """Return a plain-dict representation for logging/serialization."""
+        cls = type(self)
+        return {
+            'class': cls.__module__ + '.' + cls.__name__,
+            'host_addresses': list(self.host_addresses),
+            'valid_vif_types': list(self.valid_vif_types)}
+
+    def to_json(self):
+        """Return a pretty-printed JSON form of to_dict()."""
+        return jsonutils.dumps(
+            self.to_dict(), sort_keys=True, indent=4, separators=(',', ': '))
+
+
+class NetworkTopologyClient(client.OpenDaylightRestClient):
+    """REST client pointed at ODL's operational network-topology resource."""
+
+    _GET_ODL_NETWORK_TOPOLOGY_URL =\
+        'restconf/operational/network-topology:network-topology'
+
+    def __init__(self, url, username, password, timeout):
+        # Rebase the configured base URL (scheme/host/port only) onto the
+        # fixed network-topology restconf path.
+        if url:
+            url = parse.urlparse(url)
+            port = ''
+            if url.port:
+                port = ':' + str(url.port)
+            topology_url = '{}://{}{}/{}'.format(
+                url.scheme, url.hostname, port,
+                self._GET_ODL_NETWORK_TOPOLOGY_URL)
+        else:
+            topology_url = None
+        super(NetworkTopologyClient, self).__init__(
+            topology_url, username, password, timeout)
diff --git a/networking-odl/networking_odl/ml2/ovsdb_topology.py b/networking-odl/networking_odl/ml2/ovsdb_topology.py
new file mode 100644
index 0000000..f2c8ad8
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/ovsdb_topology.py
@@ -0,0 +1,218 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import collections
+import os
+
+from oslo_log import log
+import six
+from six.moves.urllib import parse
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron_lib import constants as n_const
+
+from networking_odl._i18n import _
+from networking_odl.ml2 import network_topology
+
+
+LOG = log.getLogger(__name__)
+
+
+class OvsdbNetworkTopologyParser(network_topology.NetworkTopologyParser):
+
+ def new_element(self, uuid):
+ return OvsdbNetworkTopologyElement(uuid=uuid)
+
+ def parse_network_topology(self, network_topologies):
+ elements_by_uuid = collections.OrderedDict()
+ for topology in network_topologies[
+ 'network-topology']['topology']:
+ if topology['topology-id'].startswith('ovsdb:'):
+ for node in topology['node']:
+                    # expected url format: ovsdb://uuid/<uuid>[/<path>]
+ node_url = parse.urlparse(node['node-id'])
+ if node_url.scheme == 'ovsdb'\
+ and node_url.netloc == 'uuid':
+ # split_res = ['', '<uuid>', '<path>']
+ split_res = node_url.path.split('/', 2)
+
+ # uuid is used to identify nodes referring to the same
+ # element
+ uuid = split_res[1]
+ element = elements_by_uuid.get(uuid)
+ if element is None:
+ elements_by_uuid[uuid] = element =\
+ self.new_element(uuid)
+
+ # inner_path can be [] or [<path>]
+ inner_path = split_res[2:]
+ self._update_element_from_json_ovsdb_topology_node(
+ node, element, uuid, *inner_path)
+
+        # There can be more OVS instances connected behind the same IP address
+        # Cache will yield more instances for the same key
+ for __, element in six.iteritems(elements_by_uuid):
+ yield element
+
+ def _update_element_from_json_ovsdb_topology_node(
+ self, node, element, uuid, path=None):
+
+ if not path:
+ # global element section (root path)
+
+ # fetch remote IP address
+ element.remote_ip = node["ovsdb:connection-info"]["remote-ip"]
+
+ for vif_type_entry in node.get(
+ "ovsdb:interface-type-entry", []):
+                # Is this a good place to add other OVS VIF types?
+ if vif_type_entry.get("interface-type") ==\
+ "ovsdb:interface-type-dpdkvhostuser":
+ element.support_vhost_user = True
+ break
+ else:
+ LOG.debug(
+ 'Interface type not found in network topology node %r.',
+ uuid)
+
+ LOG.debug(
+ 'Topology element updated:\n'
+ ' - uuid: %(uuid)r\n'
+ ' - remote_ip: %(remote_ip)r\n'
+ ' - support_vhost_user: %(support_vhost_user)r',
+ {'uuid': uuid,
+ 'remote_ip': element.remote_ip,
+ 'support_vhost_user': element.support_vhost_user})
+ elif path == 'bridge/br-int':
+ datapath_type = node.get("ovsdb:datapath-type")
+ if datapath_type == "ovsdb:datapath-type-netdev":
+ element.has_datapath_type_netdev = True
+ LOG.debug(
+ 'Topology element updated:\n'
+ ' - uuid: %(uuid)r\n'
+ ' - has_datapath_type_netdev: %('
+ 'has_datapath_type_netdev)r',
+ {'uuid': uuid,
+ 'has_datapath_type_netdev':
+ element.has_datapath_type_netdev})
+
+
+class OvsdbNetworkTopologyElement(network_topology.NetworkTopologyElement):
+
+ uuid = None
+ remote_ip = None # it can be None or a string
+ has_datapath_type_netdev = False # it can be False or True
+ support_vhost_user = False # it can be False or True
+
+ # location for vhostuser sockets
+ vhostuser_socket_dir = '/var/run/openvswitch'
+
+ # prefix for ovs port
+ port_prefix = 'vhu'
+
+ def __init__(self, **kwargs):
+ for name, value in six.iteritems(kwargs):
+ setattr(self, name, value)
+
+ @property
+ def host_addresses(self):
+        # For now it supports only the remote IP found in connection info
+ return self.remote_ip,
+
+ @property
+ def valid_vif_types(self):
+ if self.has_datapath_type_netdev and self.support_vhost_user:
+ return [
+ portbindings.VIF_TYPE_VHOST_USER,
+ portbindings.VIF_TYPE_OVS]
+ else:
+ return [portbindings.VIF_TYPE_OVS]
+
+ def bind_port(self, port_context, vif_type, vif_details):
+
+ port_context_id = port_context.current['id']
+ network_context_id = port_context.network.current['id']
+
+ # Bind port to the first valid segment
+ for segment in port_context.segments_to_bind:
+ if self._is_valid_segment(segment):
+ # Guest best VIF type for given host
+ vif_details = self._get_vif_details(
+ vif_details=vif_details, port_context_id=port_context_id,
+ vif_type=vif_type)
+ LOG.debug(
+ 'Bind port with valid segment:\n'
+ '\tport: %(port)r\n'
+ '\tnetwork: %(network)r\n'
+ '\tsegment: %(segment)r\n'
+ '\tVIF type: %(vif_type)r\n'
+ '\tVIF details: %(vif_details)r',
+ {'port': port_context_id,
+ 'network': network_context_id,
+ 'segment': segment, 'vif_type': vif_type,
+ 'vif_details': vif_details})
+ port_context.set_binding(
+ segment[driver_api.ID], vif_type, vif_details,
+ status=n_const.PORT_STATUS_ACTIVE)
+ return
+
+ raise ValueError(
+ _('Unable to find any valid segment in given context.'))
+
+ def to_dict(self):
+ data = super(OvsdbNetworkTopologyElement, self).to_dict()
+ data.update(
+ {'uuid': self.uuid,
+ 'has_datapath_type_netdev': self.has_datapath_type_netdev,
+ 'support_vhost_user': self.support_vhost_user,
+ 'valid_vif_types': self.valid_vif_types})
+ if portbindings.VIF_TYPE_VHOST_USER in self.valid_vif_types:
+ data.update({'port_prefix': self.port_prefix,
+ 'vhostuser_socket_dir': self.vhostuser_socket_dir})
+ return data
+
+ def _is_valid_segment(self, segment):
+ """Verify a segment is valid for the OpenDaylight MechanismDriver.
+
+ Verify the requested segment is supported by ODL and return True or
+ False to indicate this to callers.
+ """
+
+ network_type = segment[driver_api.NETWORK_TYPE]
+ return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
+ constants.TYPE_VXLAN, constants.TYPE_VLAN]
+
+ def _get_vif_details(self, vif_details, port_context_id, vif_type):
+ vif_details = dict(vif_details)
+ if vif_type == portbindings.VIF_TYPE_VHOST_USER:
+ socket_path = os.path.join(
+ self.vhostuser_socket_dir,
+ (self.port_prefix + port_context_id)[:14])
+
+ vif_details.update({
+ portbindings.VHOST_USER_MODE:
+ portbindings.VHOST_USER_MODE_CLIENT,
+ portbindings.VHOST_USER_OVS_PLUG: True,
+ portbindings.VHOST_USER_SOCKET: socket_path
+ })
+ return vif_details
+
+ def __setattr__(self, name, value):
+        # raises AttributeError if the class doesn't have this attribute
+ getattr(type(self), name)
+ super(OvsdbNetworkTopologyElement, self).__setattr__(name, value)
diff --git a/networking-odl/networking_odl/ml2/port_binding.py b/networking-odl/networking_odl/ml2/port_binding.py
new file mode 100644
index 0000000..d34dc01
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/port_binding.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import six
+import stevedore
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import excutils
+
+from networking_odl._i18n import _LI, _LE
+
+
+LOG = log.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class PortBindingController(object):
+
+ @abc.abstractmethod
+ def bind_port(self, port_context):
+ """Attempt to bind a port.
+
+ :param context: PortContext instance describing the port
+
+ This method is called outside any transaction to attempt to
+ establish a port binding using calling mechanism driver. Bindings
+ may be created at each of multiple levels of a hierarchical
+ network, and are established from the top level downward. At
+ each level, the mechanism driver determines whether it can
+ bind to any of the network segments in the
+ context.segments_to_bind property, based on the value of the
+ context.host property, any relevant port or network
+ attributes, and its own knowledge of the network topology. At
+ the top level, context.segments_to_bind contains the static
+ segments of the port's network. At each lower level of
+ binding, it contains static or dynamic segments supplied by
+ the driver that bound at the level above. If the driver is
+ able to complete the binding of the port to any segment in
+ context.segments_to_bind, it must call context.set_binding
+ with the binding details. If it can partially bind the port,
+ it must call context.continue_binding with the network
+ segments to be used to bind at the next lower level.
+ If the binding results are committed after bind_port returns,
+ they will be seen by all mechanism drivers as
+ update_port_precommit and update_port_postcommit calls. But if
+ some other thread or process concurrently binds or updates the
+ port, these binding results will not be committed, and
+ update_port_precommit and update_port_postcommit will not be
+ called on the mechanism drivers with these results. Because
+ binding results can be discarded rather than committed,
+ drivers should avoid making persistent state changes in
+ bind_port, or else must ensure that such state changes are
+ eventually cleaned up.
+ Implementing this method explicitly declares the mechanism
+ driver as having the intention to bind ports. This is inspected
+ by the QoS service to identify the available QoS rules you
+ can use with ports.
+ """
+
+
+class PortBindingManager(PortBindingController):
+ # At this point, there is no requirement to have multiple
+ # port binding controllers at the same time.
+ # Stay with single controller until there is a real requirement
+
+ def __init__(self, name, controller):
+ self.name = name
+ self.controller = controller
+
+ @classmethod
+ def create(
+ cls, namespace='networking_odl.ml2.port_binding_controllers',
+ name=cfg.CONF.ml2_odl.port_binding_controller):
+
+ ext_mgr = stevedore.named.NamedExtensionManager(
+ namespace, [name], invoke_on_load=True)
+
+ assert len(ext_mgr.extensions) == 1, (
+ "Wrong port binding controller is specified")
+
+ extension = ext_mgr.extensions[0]
+ if isinstance(extension.obj, PortBindingController):
+ return cls(extension.name, extension.obj)
+ else:
+ raise ValueError(
+ ("Port binding controller '%(name)s (%(controller)r)' "
+ "doesn't implement PortBindingController interface."),
+ {'name': extension.name, 'controller': extension.obj})
+
+ def bind_port(self, port_context):
+ controller_details = {'name': self.name, 'controller': self.controller}
+ try:
+ self.controller.bind_port(port_context)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(
+ _LE("Controller '%(name)s (%(controller)r)' had an error "
+ "when binding port."), controller_details)
+ else:
+ if port_context._new_bound_segment:
+ LOG.info(
+ _LI("Controller '%(name)s (%(controller)r)' has bound "
+ "port."), controller_details)
+ else:
+ LOG.debug(
+ "Controller %(name)s (%(controller)r) hasn't bound "
+ "port.", controller_details)
diff --git a/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py b/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py
new file mode 100644
index 0000000..d24bd55
--- /dev/null
+++ b/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py
@@ -0,0 +1,263 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+from neutron_lib import constants as nl_const
+from requests import exceptions
+import six.moves.urllib.parse as urlparse
+from string import Template
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from neutron import context
+from neutron.extensions import portbindings
+from neutron import manager
+from neutron.plugins.ml2 import driver_api
+
+from networking_odl._i18n import _LE, _LI, _LW
+from networking_odl.common import client as odl_client
+from networking_odl.journal import maintenance as mt
+from networking_odl.ml2 import port_binding
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = log.getLogger(__name__)
+
+
+class PseudoAgentDBBindingController(port_binding.PortBindingController):
+ """Switch agnostic Port binding controller for OpenDayLight."""
+
+ AGENTDB_BINARY = 'neutron-odlagent-portbinding'
+ L2_TYPE = "ODL L2"
+
+ # TODO(mzmalick): binary, topic and resource_versions to be provided
+ # by ODL, Pending ODL NB patches.
+ agentdb_row = {
+ 'binary': AGENTDB_BINARY,
+ 'host': '',
+ 'topic': nl_const.L2_AGENT_TOPIC,
+ 'configurations': {},
+ 'resource_versions': '',
+ 'agent_type': L2_TYPE,
+ 'start_flag': True}
+ # We are not running host agents, so above start_flag is redundant
+
+ def __init__(self, hostconf_uri=None, db_plugin=None):
+ """Initialization."""
+ LOG.debug("Initializing ODL Port Binding Controller")
+
+ if not hostconf_uri:
+ # extract host/port from ODL URL and append hostconf_uri path
+ hostconf_uri = self._make_hostconf_uri(
+ cfg.CONF.ml2_odl.url, cfg.CONF.ml2_odl.odl_hostconf_uri)
+
+ LOG.debug("ODLPORTBINDING hostconfigs URI: %s", hostconf_uri)
+
+ # TODO(mzmalick): disable port-binding for ODL lightweight testing
+ self.odl_rest_client = odl_client.OpenDaylightRestClient.create_client(
+ url=hostconf_uri)
+
+ # Neutron DB plugin instance
+ self.agents_db = db_plugin
+
+ # Start polling ODL restconf using maintenance thread.
+ # default: 30s (should be <= agent keep-alive poll interval)
+ self._start_maintenance_thread(cfg.CONF.ml2_odl.restconf_poll_interval)
+
+ def _make_hostconf_uri(self, odl_url=None, path=''):
+ """Make ODL hostconfigs URI with host/port extraced from ODL_URL."""
+ # NOTE(yamahata): for unit test.
+ odl_url = odl_url or 'http://localhost:8080/'
+
+ # extract ODL_IP and ODL_PORT from ODL_ENDPOINT and append path
+ # urlsplit and urlunparse don't throw exceptions
+ purl = urlparse.urlsplit(odl_url)
+ return urlparse.urlunparse((purl.scheme, purl.netloc,
+ path, '', '', ''))
+ #
+ # TODO(mzmalick):
+ # 1. implement websockets for ODL hostconfig events
+ #
+
+ def _start_maintenance_thread(self, poll_interval):
+ self._mainth = mt.MaintenanceThread()
+ self._mainth.maintenance_interval = poll_interval
+ self._mainth.register_operation(self._get_and_update_hostconfigs)
+ self._mainth.start()
+
+ def _rest_get_hostconfigs(self):
+ try:
+ response = self.odl_rest_client.get()
+ response.raise_for_status()
+ hostconfigs = response.json()['hostconfigs']['hostconfig']
+ except exceptions.ConnectionError:
+ LOG.error(_LE("Cannot connect to the Opendaylight Controller"),
+ exc_info=True)
+ return None
+ except KeyError:
+ LOG.error(_LE("got invalid hostconfigs"),
+ exc_info=True)
+ return None
+ except Exception:
+ LOG.warning(_LW("REST/GET odl hostconfig failed, "),
+ exc_info=True)
+ return None
+ else:
+ if LOG.isEnabledFor(logging.DEBUG):
+ _hconfig_str = jsonutils.dumps(
+ response, sort_keys=True, indent=4, separators=(',', ': '))
+ LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str)
+
+ return hostconfigs
+
+ def _get_and_update_hostconfigs(self, session=None):
+ LOG.info(_LI("REST/GET hostconfigs from ODL"))
+
+ hostconfigs = self._rest_get_hostconfigs()
+
+ if not hostconfigs:
+ LOG.warning(_LW("ODL hostconfigs REST/GET failed, "
+ "will retry on next poll"))
+ return # retry on next poll
+
+ self._update_agents_db(hostconfigs=hostconfigs)
+
+ def _get_neutron_db_plugin(self):
+ if (not self.agents_db) and manager.NeutronManager.has_instance():
+ self.agents_db = manager.NeutronManager.get_plugin()
+ return self.agents_db
+
+ def _update_agents_db(self, hostconfigs):
+ LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs")
+
+ agents_db = self._get_neutron_db_plugin()
+
+ if not agents_db: # if ML2 is still initializing
+ LOG.warning(_LW("ML2 still initializing, Will retry agentdb"
+ " update on next poll"))
+ return # Retry on next poll
+
+ for host_config in hostconfigs:
+ try:
+ self.agentdb_row['host'] = host_config['host-id']
+ self.agentdb_row['agent_type'] = host_config['host-type']
+ self.agentdb_row['configurations'] = host_config['config']
+
+ agents_db.create_or_update_agent(
+ context.get_admin_context(), self.agentdb_row)
+ except Exception:
+ LOG.exception(_LE("Unable to update agentdb."))
+                continue  # try next hostconfig
+
+ def _substitute_hconfig_tmpl(self, port_context, hconfig):
+        # TODO(mzmalick): Explore options for inline string splicing of
+ # port-id to 14 bytes as required by vhostuser types
+ subs_ids = {
+            # $IDENTIFIER string substitution in hostconfigs JSON string
+ 'PORT_ID': port_context.current['id'][:14]
+ }
+
+ # Substitute identifiers and Convert JSON string to dict
+ hconfig_conf_json = Template(hconfig['configurations'])
+ substituted_str = hconfig_conf_json.safe_substitute(subs_ids)
+ hconfig['configurations'] = jsonutils.loads(substituted_str)
+
+ return hconfig
+
+ def bind_port(self, port_context):
+ """bind port using ODL host configuration."""
+ # Get all ODL hostconfigs for this host and type
+ agentdb = port_context.host_agents(self.L2_TYPE)
+
+ if not agentdb:
+ LOG.warning(_LW("No valid hostconfigs in agentsdb for host %s"),
+ port_context.host)
+ return
+
+ for raw_hconfig in agentdb:
+ # do any $identifier substitution
+ hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig)
+
+ # Found ODL hostconfig for this host in agentdb
+ LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig)
+
+ if self._hconfig_bind_port(port_context, hconfig):
+                break  # Port binding succeeded!
+ else: # Port binding failed!
+ LOG.warning(_LW("Failed to bind Port %(pid)s for host "
+ "%(host)s on network %(network)s."), {
+ 'pid': port_context.current['id'],
+ 'host': port_context.host,
+ 'network': port_context.network.current['id']})
+ else: # No hostconfig found for host in agentdb.
+ LOG.warning(_LW("No ODL hostconfigs for host %s found in agentdb"),
+ port_context.host)
+
+ def _hconfig_bind_port(self, port_context, hconfig):
+ """bind port after validating odl host configuration."""
+ valid_segment = None
+
+ for segment in port_context.segments_to_bind:
+ if self._is_valid_segment(segment, hconfig['configurations']):
+ valid_segment = segment
+ break
+ else:
+ LOG.debug("No valid segments found!")
+ return False
+
+ confs = hconfig['configurations']['supported_vnic_types']
+
+ # nova provides vnic_type in port_context to neutron.
+ # neutron provides supported vif_type for binding based on vnic_type
+ # in this case ODL hostconfigs has the vif_type to bind for vnic_type
+ vnic_type = port_context.current.get(portbindings.VNIC_TYPE)
+
+ if vnic_type != portbindings.VNIC_NORMAL:
+ LOG.error(_LE("Binding failed: unsupported VNIC %s"), vnic_type)
+ return False
+
+ for conf in confs:
+ if conf["vnic_type"] == vnic_type:
+ vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS)
+ LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type)
+ break
+ else:
+ vif_type = portbindings.VIF_TYPE_OVS # default: OVS
+ LOG.warning(_LW("No supported vif type found for host %s!, "
+ "defaulting to OVS"), port_context.host)
+
+ vif_details = conf.get('vif_details', {})
+
+ if not vif_details: # empty vif_details could be trouble, warn.
+ LOG.warning(_LW("hostconfig:vif_details was empty!"))
+
+ LOG.debug("Bind port %(port)s on network %(network)s with valid "
+ "segment %(segment)s and VIF type %(vif_type)r "
+ "VIF details %(vif_details)r.",
+ {'port': port_context.current['id'],
+ 'network': port_context.network.current['id'],
+ 'segment': valid_segment, 'vif_type': vif_type,
+ 'vif_details': vif_details})
+
+ port_context.set_binding(valid_segment[driver_api.ID], vif_type,
+ vif_details,
+ status=nl_const.PORT_STATUS_ACTIVE)
+ return True
+
+ def _is_valid_segment(self, segment, conf):
+ """Verify a segment is supported by ODL."""
+ network_type = segment[driver_api.NETWORK_TYPE]
+ return network_type in conf['allowed_network_types']
diff --git a/networking-odl/networking_odl/tests/__init__.py b/networking-odl/networking_odl/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/__init__.py
diff --git a/networking-odl/networking_odl/tests/base.py b/networking-odl/networking_odl/tests/base.py
new file mode 100644
index 0000000..d28be71
--- /dev/null
+++ b/networking-odl/networking_odl/tests/base.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2015-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from neutron.tests import base
+
+
+class DietTestCase(base.DietTestCase):
+
+ def patch(self, target, name, *args, **kwargs):
+ context = mock.patch.object(target, name, *args, **kwargs)
+ patch = context.start()
+ self.addCleanup(context.stop)
+ return patch
diff --git a/networking-odl/networking_odl/tests/unit/__init__.py b/networking-odl/networking_odl/tests/unit/__init__.py
new file mode 100644
index 0000000..faed26a
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+
+cfg.CONF.use_stderr = False
diff --git a/networking-odl/networking_odl/tests/unit/common/__init__.py b/networking-odl/networking_odl/tests/unit/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/common/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/common/test_cache.py b/networking-odl/networking_odl/tests/unit/common/test_cache.py
new file mode 100644
index 0000000..b702455
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/common/test_cache.py
@@ -0,0 +1,242 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.tests import base
+
+from networking_odl.common import cache
+
+
+class TestCache(base.DietTestCase):
+
+ def test_init_with_callable(self):
+
+ def given_fetch_method():
+ pass
+
+ cache.Cache(given_fetch_method)
+
+ def test_init_without_callable(self):
+ self.assertRaises(TypeError, lambda: cache.Cache(object()))
+
+ def test_fecth_once(self):
+ value = 'value'
+
+ given_fetch_method = mock.Mock(return_value=iter([('key', value)]))
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ result = given_cache.fetch('key', 60.0)
+
+ # Result is returned
+ self.assertIs(value, result)
+
+ # Then fetch method is called once
+ given_fetch_method.assert_called_once_with(('key',))
+
+ def test_fecth_with_no_result(self):
+ given_fetch_method = mock.Mock(return_value=iter([]))
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ try:
+ given_cache.fetch('key', 60.0)
+ except cache.CacheFetchError as error:
+ given_fetch_method.assert_called_once_with(('key',))
+ self.assertRaises(KeyError, error.reraise_cause)
+ else:
+ self.fail('Expecting CacheFetchError to be raised.')
+
+ @mock.patch.object(cache, 'LOG')
+ def test_fecth_with_failure(self, logger):
+ # pylint: disable=unused-argument
+
+ given_error = RuntimeError("It doesn't work like this!")
+
+ def failing_function(keys):
+ raise given_error
+
+ given_fetch_method = mock.Mock(side_effect=failing_function)
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ try:
+ given_cache.fetch('key', 60.0)
+ except cache.CacheFetchError as error:
+ given_fetch_method.assert_called_once_with(('key',))
+ self.assertRaises(RuntimeError, error.reraise_cause)
+ else:
+ self.fail('Expecting CacheFetchError to be raised.')
+ logger.warning.assert_called_once_with(
+ 'Error fetching values for keys: %r', "'key'",
+ exc_info=(type(given_error), given_error, mock.ANY))
+
+ def test_fecth_again_after_clear(self):
+ value1 = 'value1'
+ value2 = 'value2'
+ given_fetch_method = mock.Mock(
+ side_effect=[iter([('key', value1)]),
+ iter([('key', value2)])])
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ result1 = given_cache.fetch('key', 60.0)
+
+ # When cache is cleared
+ given_cache.clear()
+
+ # When value with same key is fetched again
+ result2 = given_cache.fetch('key', 0.0)
+
+ # Then first result is returned
+ self.assertIs(value1, result1)
+
+ # Then fetch method is called twice
+ self.assertEqual(
+ [mock.call(('key',)), mock.call(('key',))],
+ given_fetch_method.mock_calls)
+
+ # Then second result is returned
+ self.assertIs(value2, result2)
+
+ def test_fecth_again_before_timeout(self):
+ value1 = 'value1'
+ value2 = 'value2'
+ given_fetch_method = mock.Mock(
+ side_effect=[iter([('key', value1)]),
+ iter([('key', value2)])])
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ result1 = given_cache.fetch('key', 1.0)
+
+ # When value with same key is fetched again and cached entry is not
+ # expired
+ result2 = given_cache.fetch('key', 0.0)
+
+ # First result is returned
+ self.assertIs(value1, result1)
+
+ # Then fetch method is called once
+ given_fetch_method.assert_called_once_with(('key',))
+
+ # Then first result is returned twice
+ self.assertIs(value1, result2)
+
+ def test_fecth_again_after_timeout(self):
+ value1 = 'value1'
+ value2 = 'value2'
+ given_fetch_method = mock.Mock(
+ side_effect=[iter([('key', value1)]),
+ iter([('key', value2)])])
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ result1 = given_cache.fetch('key', 0.0)
+
+ # When value with same key is fetched again and cached entry is
+ # expired
+ result2 = given_cache.fetch('key', 0.0)
+
+ # Then first result is returned
+ self.assertIs(value1, result1)
+
+ # Then fetch method is called twice
+ self.assertEqual(
+ [mock.call(('key',)), mock.call(('key',))],
+ given_fetch_method.mock_calls)
+
+ # Then second result is returned
+ self.assertIs(value2, result2)
+
+ def test_fecth_two_values_yielding_both_before_timeout(self):
+ value1 = 'value1'
+ value2 = 'value2'
+ given_fetch_method = mock.Mock(
+ return_value=iter([('key1', value1),
+ ('key2', value2)]))
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ result1 = given_cache.fetch('key1', 60.0)
+
+ # When value with another key is fetched and cached entry is not
+ # expired
+ result2 = given_cache.fetch('key2', 60.0)
+
+ # Then first result is returned
+ self.assertIs(value1, result1)
+
+ # Then fetch method is called once
+ given_fetch_method.assert_called_once_with(('key1',))
+
+ # Then second result is returned
+ self.assertIs(value2, result2)
+
+ def test_fecth_two_values_yielding_both_after_timeout(self):
+ value1 = 'value1'
+ value2 = 'value2'
+ given_fetch_method = mock.Mock(
+ return_value=[('key1', value1), ('key2', value2)])
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ result1 = given_cache.fetch('key1', 0.0)
+
+ # When value with another key is fetched and cached entry is
+ # expired
+ result2 = given_cache.fetch('key2', 0.0)
+
+ # Then first result is returned
+ self.assertIs(value1, result1)
+
+ # Then fetch method is called twice
+ self.assertEqual(
+ [mock.call(('key1',)), mock.call(('key2',))],
+ given_fetch_method.mock_calls)
+
+ # Then second result is returned
+ self.assertIs(value2, result2)
+
+ def test_fecth_all_with_multiple_entries(self):
+ given_fetch_method = mock.Mock(
+ return_value=iter([('key', 'value1'),
+ ('key', 'value2')]))
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ results = list(given_cache.fetch_all(['key'], 0.0))
+
+        # Then fetch method is called once
+ given_fetch_method.assert_called_once_with(('key',))
+
+ # Then both results are yield in the right order
+ self.assertEqual([('key', 'value1'), ('key', 'value2')], results)
+
+ def test_fecth_all_with_repeated_entries(self):
+ entry = ('key', 'value')
+ given_fetch_method = mock.Mock(
+ return_value=iter([entry, entry, entry]))
+ given_cache = cache.Cache(given_fetch_method)
+
+ # When value with key is fetched
+ results = list(given_cache.fetch_all(['key'], 0.0))
+
+        # Then fetch method is called once
+ given_fetch_method.assert_called_once_with(('key',))
+
+ # Then results are yield in the right order
+ self.assertEqual([entry, entry, entry], results)
diff --git a/networking-odl/networking_odl/tests/unit/common/test_callback.py b/networking-odl/networking_odl/tests/unit/common/test_callback.py
new file mode 100644
index 0000000..f5e2ee6
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/common/test_callback.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from networking_odl.common import callback
+from networking_odl.common import constants as odl_const
+from networking_odl.ml2.mech_driver import OpenDaylightDriver
+
+import mock
+import testtools
+
+from neutron.callbacks import events
+from neutron.callbacks import resources
+
+
+FAKE_ID = 'fakeid'
+
+
+class ODLCallbackTestCase(testtools.TestCase):
+ odl_driver = OpenDaylightDriver()
+ sgh = callback.OdlSecurityGroupsHandler(odl_driver)
+
+ def setUp(self):
+ super(ODLCallbackTestCase, self).setUp()
+
+ @mock.patch.object(OpenDaylightDriver, 'sync_from_callback')
+ def _test_callback_for_sg(self, event, op, sg, sg_id, sfc):
+ self.sgh.sg_callback(resources.SECURITY_GROUP,
+ event,
+ None,
+ security_group=sg,
+ security_group_id=sg_id)
+
+ expected_dict = ({resources.SECURITY_GROUP: sg}
+ if sg is not None else None)
+ sfc.assert_called_with(
+ op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id,
+ expected_dict)
+
+ def test_callback_sg_create(self):
+ self._test_callback_for_sg(events.AFTER_CREATE, odl_const.ODL_CREATE,
+ mock.Mock(), None)
+
+ def test_callback_sg_update(self):
+ self._test_callback_for_sg(events.AFTER_UPDATE, odl_const.ODL_UPDATE,
+ mock.Mock(), FAKE_ID)
+
+ def test_callback_sg_delete(self):
+ self._test_callback_for_sg(events.AFTER_DELETE, odl_const.ODL_DELETE,
+ None, FAKE_ID)
+
+ @mock.patch.object(OpenDaylightDriver, 'sync_from_callback')
+ def _test_callback_for_sg_rules(self, event, op, sg_rule, sg_rule_id, sfc):
+ self.sgh.sg_callback(resources.SECURITY_GROUP_RULE,
+ event,
+ None,
+ security_group_rule=sg_rule,
+ security_group_rule_id=sg_rule_id)
+
+ expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule}
+ if sg_rule is not None else None)
+ sfc.assert_called_with(
+ op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE],
+ sg_rule_id, expected_dict)
+
+ def test_callback_sg_rules_create(self):
+ self._test_callback_for_sg_rules(
+ events.AFTER_CREATE, odl_const.ODL_CREATE, mock.Mock(), None)
+
+ def test_callback_sg_rules_delete(self):
+ self._test_callback_for_sg_rules(
+ events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID)
diff --git a/networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py b/networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py
new file mode 100644
index 0000000..ea3b5a8
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2015 Intel Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from networking_odl.common import lightweight_testing as lwt
+
+from neutron.tests import base
+
+
+class LightweightTestingTestCase(base.DietTestCase):
+
+ def test_create_client_with_lwt_enabled(self):
+        """Import the client here, at test scope, to avoid a circular import."""
+ from networking_odl.common import client as odl_client
+ odl_client.cfg.CONF.set_override('enable_lightweight_testing',
+ True, 'ml2_odl')
+        # DietTestCase does not automatically clean up configuration overrides
+ self.addCleanup(odl_client.cfg.CONF.reset)
+
+ client = odl_client.OpenDaylightRestClient.create_client()
+ self.assertIsInstance(client, lwt.OpenDaylightLwtClient)
+
+ def test_create_client_with_lwt_disabled(self):
+        """Import the client here, at test scope, to avoid a circular import."""
+ from networking_odl.common import client as odl_client
+ odl_client.cfg.CONF.set_override('enable_lightweight_testing',
+ False, 'ml2_odl')
+        # DietTestCase does not automatically clean up configuration overrides
+ self.addCleanup(odl_client.cfg.CONF.reset)
+
+ client = odl_client.OpenDaylightRestClient.create_client()
+ self.assertIsInstance(client, odl_client.OpenDaylightRestClient)
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'networks': {}}, clear=True)
+ def test_post_single_resource(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'}
+ obj = {'networks': fake_network1}
+ response = client.sendjson('post', 'networks', obj)
+ self.assertEqual(lwt.NO_CONTENT, response.status_code)
+ lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+ self.assertEqual(lwt_dict['networks']['fakeid1'],
+ fake_network1)
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'networks': {}}, clear=True)
+ def test_post_multiple_resources(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'}
+ fake_network2 = {'id': 'fakeid2', 'name': 'fake_network2'}
+ obj = {'networks': [fake_network1, fake_network2]}
+ response = client.sendjson('post', 'networks', obj)
+ self.assertEqual(lwt.NO_CONTENT, response.status_code)
+ lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+ self.assertEqual(lwt_dict['networks']['fakeid1'],
+ fake_network1)
+ self.assertEqual(lwt_dict['networks']['fakeid2'],
+ fake_network2)
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'ports': {'fakeid1': {'id': 'fakeid1',
+ 'name': 'fake_port1'}}},
+ clear=True)
+ def test_get_single_resource(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ url_path = 'ports/fakeid1'
+ response = client.sendjson('get', url_path, None)
+ self.assertEqual(lwt.OK, response.status_code)
+ res = response.json()
+ # For single resource, the return value is a dict
+ self.assertEqual(res['port']['name'], 'fake_port1')
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'ports': {'fakeid1': {'id': 'fakeid1',
+ 'name': 'fake_port1'},
+ 'fakeid2': {'id': 'fakeid2',
+ 'name': 'fake_port2'}}},
+ clear=True)
+ def test_get_multiple_resources(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ url_path = 'ports/'
+ response = client.sendjson('get', url_path, None)
+ self.assertEqual(lwt.OK, response.status_code)
+ res = response.json()
+ for port in res:
+ self.assertIn(port['port']['name'],
+ ['fake_port1', 'fake_port2'])
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'subnets': {'fakeid1': {'id': 'fakeid1',
+ 'name': 'fake_subnet1'}}},
+ clear=True)
+ def test_put_single_resource(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ changed = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'}
+ obj = {'subnets': changed}
+
+ url_path = 'subnets/fakeid1'
+ response = client.sendjson('put', url_path, obj)
+ self.assertEqual(lwt.NO_CONTENT, response.status_code)
+ lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+ self.assertEqual('fake_subnet1_changed',
+ lwt_dict['subnets']['fakeid1']['name'])
+
+ """Check the client does not change the parameter"""
+ self.assertEqual('fakeid1', changed['id'])
+ self.assertEqual('fake_subnet1_changed', changed['name'])
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'subnets': {'fakeid1': {'id': 'fakeid1',
+ 'name': 'fake_subnet1'},
+ 'fakeid2': {'id': 'fakeid2',
+ 'name': 'fake_subnet2'}}},
+ clear=True)
+ def test_put_multiple_resources(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ changed1 = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'}
+ changed2 = {'id': 'fakeid2', 'name': 'fake_subnet2_changed'}
+ obj = {'subnets': [changed1, changed2]}
+
+ url_path = 'subnets/'
+ response = client.sendjson('put', url_path, obj)
+ self.assertEqual(lwt.NO_CONTENT, response.status_code)
+ lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+ self.assertEqual('fake_subnet1_changed',
+ lwt_dict['subnets']['fakeid1']['name'])
+ self.assertEqual('fake_subnet2_changed',
+ lwt_dict['subnets']['fakeid2']['name'])
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'networks': {'fakeid1': {'id': 'fakeid1',
+ 'name': 'fake_network1'}}},
+ clear=True)
+ def test_delete_single_resource(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ url_path = 'networks/fakeid1'
+ response = client.sendjson('delete', url_path, None)
+ self.assertEqual(lwt.NO_CONTENT, response.status_code)
+ lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+ network = lwt_dict['networks'].get('fakeid1')
+ self.assertIsNone(network)
+
+ @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+ {'networks': {'fakeid1': {'id': 'fakeid1',
+ 'name': 'fake_network1'},
+ 'fakeid2': {'id': 'fakeid2',
+ 'name': 'fake_network2'}}},
+ clear=True)
+ def test_delete_multiple_resources(self):
+ client = lwt.OpenDaylightLwtClient.create_client()
+ network1 = {'id': 'fakeid1'}
+ network2 = {'id': 'fakeid2'}
+ obj = {'networks': [network1, network2]}
+ response = client.sendjson('delete', 'networks/', obj)
+ self.assertEqual(lwt.NO_CONTENT, response.status_code)
+ lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+ network = lwt_dict['networks'].get('fakeid1')
+ self.assertIsNone(network)
+ network = lwt_dict['networks'].get('fakeid2')
+ self.assertIsNone(network)
diff --git a/networking-odl/networking_odl/tests/unit/common/test_utils.py b/networking-odl/networking_odl/tests/unit/common/test_utils.py
new file mode 100644
index 0000000..dcfb50e
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/common/test_utils.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.tests import base
+
+from networking_odl.common import cache
+from networking_odl.common import utils
+
+
+class TestGetAddressesByName(base.DietTestCase):
+
+ # pylint: disable=protected-access, unused-argument
+
+ def setUp(self):
+ super(TestGetAddressesByName, self).setUp()
+ self.clear_cache()
+ self.addCleanup(self.clear_cache)
+ time = self.patch(
+ utils.cache, 'time', clock=mock.Mock(return_value=0.0))
+ self.clock = time.clock
+ socket = self.patch(utils, 'socket')
+ self.getaddrinfo = socket.getaddrinfo
+
+ def patch(self, target, name, *args, **kwargs):
+ context = mock.patch.object(target, name, *args, **kwargs)
+ mocked = context.start()
+ self.addCleanup(context.stop)
+ return mocked
+
+ def clear_cache(self):
+ utils._addresses_by_name_cache.clear()
+
+ def test_get_addresses_by_valid_name(self):
+ self.getaddrinfo.return_value = [
+ (2, 1, 6, '', ('127.0.0.1', 0)),
+ (2, 2, 17, '', ('127.0.0.1', 0)),
+ (2, 3, 0, '', ('127.0.0.1', 0)),
+ (2, 1, 6, '', ('10.237.214.247', 0)),
+ (2, 2, 17, '', ('10.237.214.247', 0)),
+ (2, 3, 0, '', ('10.237.214.247', 0))]
+
+ # When valid host name is requested
+ result = utils.get_addresses_by_name('some_host_name')
+
+ # Then correct addresses are returned
+ self.assertEqual(('127.0.0.1', '10.237.214.247'), result)
+
+ # Then fetched addresses are cached
+ self.assertEqual(result, utils.get_addresses_by_name('some_host_name'))
+
+ # Then addresses are fetched only once
+ self.getaddrinfo.assert_called_once_with('some_host_name', None)
+
+ def test_get_addresses_by_valid_name_when_cache_expires(self):
+ self.getaddrinfo.return_value = [
+ (2, 1, 6, '', ('127.0.0.1', 0)),
+ (2, 2, 17, '', ('127.0.0.1', 0)),
+ (2, 3, 0, '', ('127.0.0.1', 0)),
+ (2, 1, 6, '', ('10.237.214.247', 0)),
+ (2, 2, 17, '', ('10.237.214.247', 0)),
+ (2, 3, 0, '', ('10.237.214.247', 0))]
+
+ # When valid host name is requested
+ result1 = utils.get_addresses_by_name('some_host_name')
+
+ # and after a long time
+ self.clock.return_value = 1.0e6
+
+ # When valid host name is requested
+ result2 = utils.get_addresses_by_name('some_host_name')
+
+ # Then correct addresses are returned
+ self.assertEqual(('127.0.0.1', '10.237.214.247'), result1)
+ self.assertEqual(('127.0.0.1', '10.237.214.247'), result2)
+
+ # Then addresses are fetched twice
+ self.getaddrinfo.assert_has_calls(
+ [mock.call('some_host_name', None),
+ mock.call('some_host_name', None)])
+
+ @mock.patch.object(cache, 'LOG')
+ def test_get_addresses_by_invalid_name(self, cache_logger):
+
+ # Given addresses resolution is failing
+ given_error = RuntimeError("I don't know him!")
+
+ def failing_getaddrinfo(name, service):
+ raise given_error
+
+ self.getaddrinfo.side_effect = failing_getaddrinfo
+
+ # When invalid name is requested
+ self.assertRaises(
+ RuntimeError, utils.get_addresses_by_name, 'some_host_name')
+
+ # When invalid name is requested again
+ self.assertRaises(
+ RuntimeError, utils.get_addresses_by_name, 'some_host_name')
+
+ # Then result is fetched only once
+ self.getaddrinfo.assert_has_calls(
+ [mock.call('some_host_name', None)])
+ cache_logger.warning.assert_has_calls(
+ [mock.call(
+ 'Error fetching values for keys: %r', "'some_host_name'",
+ exc_info=(RuntimeError, given_error, mock.ANY)),
+ mock.call(
+ 'Error fetching values for keys: %r', "'some_host_name'",
+ exc_info=(RuntimeError, given_error, mock.ANY))])
+
+ @mock.patch.object(cache, 'LOG')
+ def test_get_addresses_failing_when_expired_in_cache(self, cache_logger):
+ self.getaddrinfo.return_value = [
+ (2, 1, 6, '', ('127.0.0.1', 0)),
+ (2, 2, 17, '', ('127.0.0.1', 0)),
+ (2, 3, 0, '', ('127.0.0.1', 0)),
+ (2, 1, 6, '', ('10.237.214.247', 0)),
+ (2, 2, 17, '', ('10.237.214.247', 0)),
+ (2, 3, 0, '', ('10.237.214.247', 0))]
+
+        # Given a valid result is in the cache but expired
+ utils.get_addresses_by_name('some_host_name')
+ self.clock.return_value = 1.0e6
+
+ # Given addresses resolution is now failing
+ given_error = RuntimeError("This is top secret.")
+
+ def failing_getaddrinfo(name, service):
+ raise given_error
+
+ self.getaddrinfo.side_effect = failing_getaddrinfo
+
+ self.assertRaises(
+ RuntimeError, utils.get_addresses_by_name, 'some_host_name')
+
+        # Then the address is fetched again (two calls in total)
+ self.getaddrinfo.assert_has_calls(
+ [mock.call('some_host_name', None),
+ mock.call('some_host_name', None)])
+ cache_logger.warning.assert_called_once_with(
+ 'Error fetching values for keys: %r', "'some_host_name'",
+ exc_info=(RuntimeError, given_error, mock.ANY))
diff --git a/networking-odl/networking_odl/tests/unit/db/__init__.py b/networking-odl/networking_odl/tests/unit/db/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/db/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/db/test_db.py b/networking-odl/networking_odl/tests/unit/db/test_db.py
new file mode 100644
index 0000000..72749ad
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/db/test_db.py
@@ -0,0 +1,243 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+
+from datetime import datetime
+from datetime import timedelta
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+from networking_odl.db import models
+
+from neutron.db import api as neutron_db_api
+from neutron.tests.unit.testlib_api import SqlTestCaseLight
+from oslo_db.exception import DBDeadlock
+from unittest2.case import TestCase
+
+
+class DbTestCase(SqlTestCaseLight, TestCase):
+
+ UPDATE_ROW = [odl_const.ODL_NETWORK, 'id', odl_const.ODL_UPDATE,
+ {'test': 'data'}]
+
+ def setUp(self):
+ super(DbTestCase, self).setUp()
+ self.db_session = neutron_db_api.get_session()
+ self.addCleanup(self._db_cleanup)
+
+ def _db_cleanup(self):
+ self.db_session.query(models.OpendaylightJournal).delete()
+ self.db_session.query(models.OpendaylightMaintenance).delete()
+
+ def _update_row(self, row):
+ self.db_session.merge(row)
+ self.db_session.flush()
+
+ def _test_validate_updates(self, rows, time_deltas, expected_validations):
+ for row in rows:
+ db.create_pending_row(self.db_session, *row)
+
+ # update row created_at
+ rows = db.get_all_db_rows(self.db_session)
+ now = datetime.now()
+ for row, time_delta in zip(rows, time_deltas):
+ row.created_at = now - timedelta(hours=time_delta)
+ self._update_row(row)
+
+ # validate if there are older rows
+ for row, expected_valid in zip(rows, expected_validations):
+ valid = not db.check_for_older_ops(self.db_session, row)
+ self.assertEqual(expected_valid, valid)
+
+ def _test_retry_count(self, retry_num, max_retry,
+ expected_retry_count, expected_state):
+ # add new pending row
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+
+ # update the row with the requested retry_num
+ row = db.get_all_db_rows(self.db_session)[0]
+ row.retry_count = retry_num - 1
+ db.update_pending_db_row_retry(self.db_session, row, max_retry)
+
+ # validate the state and the retry_count of the row
+ row = db.get_all_db_rows(self.db_session)[0]
+ self.assertEqual(expected_state, row.state)
+ self.assertEqual(expected_retry_count, row.retry_count)
+
+ def _test_update_row_state(self, from_state, to_state):
+ # add new pending row
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+
+ row = db.get_all_db_rows(self.db_session)[0]
+ for state in [from_state, to_state]:
+ # update the row state
+ db.update_db_row_state(self.db_session, row, state)
+
+ # validate the new state
+ row = db.get_all_db_rows(self.db_session)[0]
+ self.assertEqual(state, row.state)
+
+ def test_validate_updates_same_object_uuid(self):
+ self._test_validate_updates(
+ [self.UPDATE_ROW, self.UPDATE_ROW], [1, 0], [True, False])
+
+ def test_validate_updates_same_created_time(self):
+ self._test_validate_updates(
+ [self.UPDATE_ROW, self.UPDATE_ROW], [0, 0], [True, True])
+
+ def test_validate_updates_different_object_uuid(self):
+ other_row = list(self.UPDATE_ROW)
+ other_row[1] += 'a'
+ self._test_validate_updates(
+ [self.UPDATE_ROW, other_row], [1, 0], [True, True])
+
+ def test_validate_updates_different_object_type(self):
+ other_row = list(self.UPDATE_ROW)
+ other_row[0] = odl_const.ODL_PORT
+ other_row[1] += 'a'
+ self._test_validate_updates(
+ [self.UPDATE_ROW, other_row], [1, 0], [True, True])
+
+ def test_get_oldest_pending_row_none_when_no_rows(self):
+ row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+ self.assertIsNone(row)
+
+ def _test_get_oldest_pending_row_none(self, state):
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+ row = db.get_all_db_rows(self.db_session)[0]
+ row.state = state
+ self._update_row(row)
+
+ row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+ self.assertIsNone(row)
+
+ def test_get_oldest_pending_row_none_when_row_processing(self):
+ self._test_get_oldest_pending_row_none(odl_const.PROCESSING)
+
+ def test_get_oldest_pending_row_none_when_row_failed(self):
+ self._test_get_oldest_pending_row_none(odl_const.FAILED)
+
+ def test_get_oldest_pending_row_none_when_row_completed(self):
+ self._test_get_oldest_pending_row_none(odl_const.COMPLETED)
+
+ def test_get_oldest_pending_row(self):
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+ row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+ self.assertIsNotNone(row)
+ self.assertEqual(odl_const.PROCESSING, row.state)
+
+ def test_get_oldest_pending_row_order(self):
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+ older_row = db.get_all_db_rows(self.db_session)[0]
+ older_row.last_retried -= timedelta(minutes=1)
+ self._update_row(older_row)
+
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+ row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+ self.assertEqual(older_row, row)
+
+ def test_get_oldest_pending_row_when_deadlock(self):
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+ update_mock = mock.MagicMock(side_effect=(DBDeadlock, mock.DEFAULT))
+
+ # Mocking is mandatory to achieve a deadlock regardless of the DB
+ # backend being used when running the tests
+ with mock.patch.object(db, 'update_db_row_state', new=update_mock):
+ row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+ self.assertIsNotNone(row)
+
+ self.assertEqual(2, update_mock.call_count)
+
+ def _test_delete_rows_by_state_and_time(self, last_retried, row_retention,
+ state, expected_rows):
+ db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+
+ # update state and last retried
+ row = db.get_all_db_rows(self.db_session)[0]
+ row.state = state
+ row.last_retried = row.last_retried - timedelta(seconds=last_retried)
+ self._update_row(row)
+
+ db.delete_rows_by_state_and_time(self.db_session,
+ odl_const.COMPLETED,
+ timedelta(seconds=row_retention))
+
+ # validate the number of rows in the journal
+ rows = db.get_all_db_rows(self.db_session)
+ self.assertEqual(expected_rows, len(rows))
+
+ def test_delete_completed_rows_no_new_rows(self):
+ self._test_delete_rows_by_state_and_time(0, 10, odl_const.COMPLETED, 1)
+
+ def test_delete_completed_rows_one_new_row(self):
+ self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0)
+
+ def test_delete_completed_rows_wrong_state(self):
+ self._test_delete_rows_by_state_and_time(10, 8, odl_const.PENDING, 1)
+
+ def test_valid_retry_count(self):
+ self._test_retry_count(1, 1, 1, odl_const.PENDING)
+
+ def test_invalid_retry_count(self):
+ self._test_retry_count(2, 1, 1, odl_const.FAILED)
+
+ def test_update_row_state_to_pending(self):
+ self._test_update_row_state(odl_const.PROCESSING, odl_const.PENDING)
+
+ def test_update_row_state_to_processing(self):
+ self._test_update_row_state(odl_const.PENDING, odl_const.PROCESSING)
+
+ def test_update_row_state_to_failed(self):
+ self._test_update_row_state(odl_const.PROCESSING, odl_const.FAILED)
+
+ def test_update_row_state_to_completed(self):
+ self._test_update_row_state(odl_const.PROCESSING, odl_const.COMPLETED)
+
+ def _test_maintenance_lock_unlock(self, db_func, existing_state,
+ expected_state, expected_result):
+ row = models.OpendaylightMaintenance(id='test',
+ state=existing_state)
+ self.db_session.add(row)
+ self.db_session.flush()
+
+ self.assertEqual(expected_result, db_func(self.db_session))
+ row = self.db_session.query(models.OpendaylightMaintenance).one()
+ self.assertEqual(expected_state, row['state'])
+
+ def test_lock_maintenance(self):
+ self._test_maintenance_lock_unlock(db.lock_maintenance,
+ odl_const.PENDING,
+ odl_const.PROCESSING,
+ True)
+
+ def test_lock_maintenance_fails_when_processing(self):
+ self._test_maintenance_lock_unlock(db.lock_maintenance,
+ odl_const.PROCESSING,
+ odl_const.PROCESSING,
+ False)
+
+ def test_unlock_maintenance(self):
+ self._test_maintenance_lock_unlock(db.unlock_maintenance,
+ odl_const.PROCESSING,
+ odl_const.PENDING,
+ True)
+
+ def test_unlock_maintenance_fails_when_pending(self):
+ self._test_maintenance_lock_unlock(db.unlock_maintenance,
+ odl_const.PENDING,
+ odl_const.PENDING,
+ False)
diff --git a/networking-odl/networking_odl/tests/unit/fwaas/__init__.py b/networking-odl/networking_odl/tests/unit/fwaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/fwaas/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py b/networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py
new file mode 100644
index 0000000..b50016c
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_fwaas_odl
+----------------------------------
+
+Tests for the L3 FWaaS plugin for networking-odl.
+"""
+
+from networking_odl.fwaas import driver as fwaas_odl
+
+from neutron.tests import base
+
+
+class TestODL_FWaaS(base.BaseTestCase):
+
+ def test_init(self):
+ # just create an instance of OpenDaylightFwaasDriver
+ fwaas_odl.OpenDaylightFwaasDriver()
diff --git a/networking-odl/networking_odl/tests/unit/journal/__init__.py b/networking-odl/networking_odl/tests/unit/journal/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/journal/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py b/networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py
new file mode 100644
index 0000000..39a4b98
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2016 Intel Corp. Isaku Yamahata <isaku.yamahata@gmail com>
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+
+from neutron.tests import base
+
+from networking_odl.journal import dependency_validations
+
+
+class DependencyValidationsTestCase(base.DietTestCase):
+ _RESOURCE_DUMMY = 'test_type'
+
+ def setUp(self):
+ super(DependencyValidationsTestCase, self).setUp()
+ mock_validation_map = mock.patch.dict(
+ dependency_validations._VALIDATION_MAP)
+ mock_validation_map.start()
+ self.addCleanup(mock_validation_map.stop)
+
+ def test_register_validator(self):
+ mock_session = mock.Mock()
+ mock_validator = mock.Mock(return_value=False)
+ mock_row = mock.Mock()
+ mock_row.object_type = self._RESOURCE_DUMMY
+ dependency_validations.register_validator(self._RESOURCE_DUMMY,
+ mock_validator)
+ valid = dependency_validations.validate(mock_session, mock_row)
+ mock_validator.assert_called_once_with(mock_session, mock_row)
+ self.assertFalse(valid)
diff --git a/networking-odl/networking_odl/tests/unit/journal/test_full_sync.py b/networking-odl/networking_odl/tests/unit/journal/test_full_sync.py
new file mode 100644
index 0000000..cedccbd
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/journal/test_full_sync.py
@@ -0,0 +1,152 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+import requests
+
+from neutron.db import api as neutron_db_api
+from neutron import manager
+from neutron.tests.unit.testlib_api import SqlTestCaseLight
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+from networking_odl.db import models
+from networking_odl.journal import full_sync
+
+
+class FullSyncTestCase(SqlTestCaseLight):
+ def setUp(self):
+ super(FullSyncTestCase, self).setUp()
+ self.db_session = neutron_db_api.get_session()
+
+ full_sync._CLIENT = mock.MagicMock()
+ self.plugin_mock = mock.patch.object(manager.NeutronManager,
+ 'get_plugin').start()
+ self.l3_plugin_mock = mock.patch.object(manager.NeutronManager,
+ 'get_service_plugins').start()
+
+ self.addCleanup(self._db_cleanup)
+
+ def _db_cleanup(self):
+ self.db_session.query(models.OpendaylightJournal).delete()
+
+ def test_no_full_sync_when_canary_exists(self):
+ full_sync.full_sync(self.db_session)
+ self.assertEqual([], db.get_all_db_rows(self.db_session))
+
+ def _mock_l2_resources(self):
+ expected_journal = {odl_const.ODL_NETWORK: '1',
+ odl_const.ODL_SUBNET: '2',
+ odl_const.ODL_PORT: '3'}
+ plugin_instance = self.plugin_mock.return_value
+ plugin_instance.get_networks.return_value = [
+ {'id': expected_journal[odl_const.ODL_NETWORK]}]
+ plugin_instance.get_subnets.return_value = [
+ {'id': expected_journal[odl_const.ODL_SUBNET]}]
+ plugin_instance.get_ports.side_effect = ([
+ {'id': expected_journal[odl_const.ODL_PORT]}], [])
+ return expected_journal
+
+ def _filter_out_canary(self, rows):
+ return [row for row in rows if row['object_uuid'] !=
+ full_sync._CANARY_NETWORK_ID]
+
+ def _test_no_full_sync_when_canary_in_journal(self, state):
+ self._mock_canary_missing()
+ self._mock_l2_resources()
+ db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
+ full_sync._CANARY_NETWORK_ID,
+ odl_const.ODL_CREATE, {})
+ row = db.get_all_db_rows(self.db_session)[0]
+ db.update_db_row_state(self.db_session, row, state)
+
+ full_sync.full_sync(self.db_session)
+
+ rows = db.get_all_db_rows(self.db_session)
+ self.assertEqual([], self._filter_out_canary(rows))
+
+ def test_no_full_sync_when_canary_pending_creation(self):
+ self._test_no_full_sync_when_canary_in_journal(odl_const.PENDING)
+
+ def test_no_full_sync_when_canary_is_processing(self):
+ self._test_no_full_sync_when_canary_in_journal(odl_const.PROCESSING)
+
+ def test_client_error_propagates(self):
+ class TestException(Exception):
+ def __init__(self):
+ pass
+
+ full_sync._CLIENT.get.side_effect = TestException()
+ self.assertRaises(TestException, full_sync.full_sync, self.db_session)
+
+ def _mock_canary_missing(self):
+ get_return = mock.MagicMock()
+ get_return.status_code = requests.codes.not_found
+ full_sync._CLIENT.get.return_value = get_return
+
+ def _assert_canary_created(self):
+ rows = db.get_all_db_rows(self.db_session)
+ self.assertTrue(any(r['object_uuid'] == full_sync._CANARY_NETWORK_ID
+ for r in rows))
+ return rows
+
+ def _test_full_sync_resources(self, expected_journal):
+ self._mock_canary_missing()
+
+ full_sync.full_sync(self.db_session)
+
+ rows = self._assert_canary_created()
+ rows = self._filter_out_canary(rows)
+ self.assertItemsEqual(expected_journal.keys(),
+ [row['object_type'] for row in rows])
+ for row in rows:
+ self.assertEqual(expected_journal[row['object_type']],
+ row['object_uuid'])
+
+ def test_full_sync_removes_pending_rows(self):
+ db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, "uuid",
+ odl_const.ODL_CREATE, {'foo': 'bar'})
+ self._test_full_sync_resources({})
+
+ def test_full_sync_no_resources(self):
+ self._test_full_sync_resources({})
+
+ def test_full_sync_l2_resources(self):
+ self._test_full_sync_resources(self._mock_l2_resources())
+
+ def _mock_router_port(self, port_id):
+ router_port = {'id': port_id,
+ 'device_id': '1',
+ 'tenant_id': '1',
+ 'fixed_ips': [{'subnet_id': '1'}]}
+ plugin_instance = self.plugin_mock.return_value
+ plugin_instance.get_ports.side_effect = ([], [router_port])
+
+ def _mock_l3_resources(self):
+ expected_journal = {odl_const.ODL_ROUTER: '1',
+ odl_const.ODL_FLOATINGIP: '2',
+ odl_const.ODL_ROUTER_INTF: '3'}
+ plugin_instance = self.l3_plugin_mock.return_value.get.return_value
+ plugin_instance.get_routers.return_value = [
+ {'id': expected_journal[odl_const.ODL_ROUTER]}]
+ plugin_instance.get_floatingips.return_value = [
+ {'id': expected_journal[odl_const.ODL_FLOATINGIP]}]
+ self._mock_router_port(expected_journal[odl_const.ODL_ROUTER_INTF])
+
+ return expected_journal
+
+ def test_full_sync_l3_resources(self):
+ self._test_full_sync_resources(self._mock_l3_resources())
diff --git a/networking-odl/networking_odl/tests/unit/journal/test_maintenance.py b/networking-odl/networking_odl/tests/unit/journal/test_maintenance.py
new file mode 100644
index 0000000..eb823cd
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/journal/test_maintenance.py
@@ -0,0 +1,93 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+import threading
+from unittest2.case import TestCase
+
+from neutron.db import api as neutron_db_api
+from neutron.tests.unit.testlib_api import SqlTestCaseLight
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import models
+from networking_odl.journal import maintenance
+
+
+class MaintenanceThreadTestCase(SqlTestCaseLight, TestCase):
+ def setUp(self):
+ super(MaintenanceThreadTestCase, self).setUp()
+ self.db_session = neutron_db_api.get_session()
+
+ row = models.OpendaylightMaintenance(state=odl_const.PENDING)
+ self.db_session.add(row)
+ self.db_session.flush()
+
+ self.thread = maintenance.MaintenanceThread()
+ self.thread.maintenance_interval = 0.01
+
+ def test__execute_op_no_exception(self):
+ with mock.patch.object(maintenance, 'LOG') as mock_log:
+ operation = mock.MagicMock()
+ operation.__name__ = "test"
+ self.thread._execute_op(operation, self.db_session)
+ self.assertTrue(operation.called)
+ self.assertTrue(mock_log.info.called)
+ self.assertFalse(mock_log.exception.called)
+
+ def test__execute_op_with_exception(self):
+ with mock.patch.object(maintenance, 'LOG') as mock_log:
+ operation = mock.MagicMock(side_effect=Exception())
+ operation.__name__ = "test"
+ self.thread._execute_op(operation, self.db_session)
+ self.assertTrue(mock_log.exception.called)
+
+ def test_thread_works(self):
+ callback_event = threading.Event()
+ count = [0]
+
+ def callback_op(**kwargs):
+ count[0] += 1
+
+ # The following should be true on the second call, so we're making
+ # sure that the thread runs more than once.
+ if count[0] > 1:
+ callback_event.set()
+
+ self.thread.register_operation(callback_op)
+ self.thread.start()
+
+ # Make sure the callback event was called and not timed out
+ self.assertTrue(callback_event.wait(timeout=5))
+
+ def test_thread_continues_after_exception(self):
+ exception_event = threading.Event()
+ callback_event = threading.Event()
+
+ def exception_op(**kwargs):
+ if not exception_event.is_set():
+ exception_event.set()
+ raise Exception()
+
+ def callback_op(**kwargs):
+ callback_event.set()
+
+ for op in [exception_op, callback_op]:
+ self.thread.register_operation(op)
+
+ self.thread.start()
+
+ # Make sure the callback event was called and not timed out
+ self.assertTrue(callback_event.wait(timeout=5))
diff --git a/networking-odl/networking_odl/tests/unit/l2gateway/__init__.py b/networking-odl/networking_odl/tests/unit/l2gateway/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/l2gateway/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py b/networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py
new file mode 100644
index 0000000..2506332
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py
@@ -0,0 +1,127 @@
+#
+# Copyright (C) 2016 Ericsson India Global Services Pvt Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import copy
+import mock
+
+from networking_odl.l2gateway import driver
+from neutron.tests import base
+
+
+class TestOpenDaylightL2gwDriver(base.DietTestCase):
+
+ def setUp(self):
+ self.mocked_odlclient = mock.patch(
+ 'networking_odl.common.client'
+ '.OpenDaylightRestClient.create_client').start().return_value
+ self.driver = driver.OpenDaylightL2gwDriver(service_plugin=None,
+ validator=None)
+ super(TestOpenDaylightL2gwDriver, self).setUp()
+
+ def _get_fake_l2_gateway(self):
+ fake_l2_gateway_id = "5227c228-6bba-4bbe-bdb8-6942768ff0f1"
+ fake_l2_gateway = {
+ "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820",
+ "id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1",
+ "name": "test-gateway",
+ "devices": [
+ {
+ "device_name": "switch1",
+ "interfaces": [
+ {
+ "name": "port1",
+ "segmentation_id": [100]
+ },
+ {
+ "name": "port2",
+ "segmentation_id": [151, 152]
+ }
+ ]
+ },
+ {
+ "device_name": "switch2",
+ "interfaces": [
+ {
+ "name": "port5",
+ "segmentation_id": [200]
+ },
+ {
+ "name": "port6",
+ "segmentation_id": [251, 252]
+ }
+ ]
+ }
+ ]
+ }
+ return fake_l2_gateway_id, fake_l2_gateway
+
+ def _get_fake_l2_gateway_connection(self):
+ fake_l2_gateway_connection_id = "5227c228-6bba-4bbe-bdb8-6942768ff02f"
+ fake_l2_gateway_connection = {
+ "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820",
+ "id": "5227c228-6bba-4bbe-bdb8-6942768ff02f",
+ "network_id": "be0a7495-05c4-4be0-b796-1412835c6830",
+ "default_segmentation_id": 77,
+ "l2_gateway_id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1"
+ }
+ return fake_l2_gateway_connection_id, fake_l2_gateway_connection
+
+ def test_create_l2_gateway_postcommit(self):
+ mocked_sendjson = self.mocked_odlclient.sendjson
+ fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway()
+ expected = {"l2_gateway": fake_l2gateway}
+ self.driver.create_l2_gateway_postcommit(mock.ANY, fake_l2gateway)
+ mocked_sendjson.assert_called_once_with('post', driver.L2GATEWAYS,
+ expected)
+
+ def test_delete_l2_gateway_postcommit(self):
+ mocked_trydelete = self.mocked_odlclient.try_delete
+ fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway()
+ self.driver.delete_l2_gateway_postcommit(mock.ANY, fake_l2gateway_id)
+ url = driver.L2GATEWAYS + '/' + fake_l2gateway_id
+ mocked_trydelete.assert_called_once_with(url)
+
+ def test_update_l2_gateway_postcommit(self):
+ mocked_sendjson = self.mocked_odlclient.sendjson
+ fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway()
+ expected = {"l2_gateway": fake_l2gateway}
+ self.driver.update_l2_gateway_postcommit(mock.ANY, fake_l2gateway)
+ url = driver.L2GATEWAYS + '/' + fake_l2gateway_id
+ mocked_sendjson.assert_called_once_with('put', url, expected)
+
+ def test_create_l2_gateway_connection_postcommit(self):
+ mocked_sendjson = self.mocked_odlclient.sendjson
+ (fake_l2gateway_conn_id,
+ fake_l2gateway_conn) = self._get_fake_l2_gateway_connection()
+ expected_l2gateway_conn = copy.deepcopy(fake_l2gateway_conn)
+ expected_l2gateway_conn['gateway_id'] = (
+ fake_l2gateway_conn['l2_gateway_id'])
+ expected_l2gateway_conn.pop('l2_gateway_id')
+ expected = {"l2gateway_connection": expected_l2gateway_conn}
+ self.driver.create_l2_gateway_connection_postcommit(
+ mock.ANY, fake_l2gateway_conn)
+ mocked_sendjson.assert_called_once_with('post',
+ driver.L2GATEWAY_CONNECTIONS,
+ expected)
+
+ def test_delete_l2_gateway_connection_postcommit(self):
+ mocked_trydelete = self.mocked_odlclient.try_delete
+ (fake_l2gateway_conn_id,
+ fake_l2gateway_conn) = self._get_fake_l2_gateway_connection()
+ url = driver.L2GATEWAY_CONNECTIONS + '/' + fake_l2gateway_conn_id
+ self.driver.delete_l2_gateway_connection_postcommit(
+ mock.ANY, fake_l2gateway_conn_id)
+ mocked_trydelete.assert_called_once_with(url)
diff --git a/networking-odl/networking_odl/tests/unit/l3/__init__.py b/networking-odl/networking_odl/tests/unit/l3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/l3/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py b/networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py
new file mode 100644
index 0000000..232864d
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py
@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_l3_odl
+----------------------------------
+
+Tests for the L3 service plugin for networking-odl.
+"""
+import copy
+import mock
+
+from neutron.extensions import l3
+from neutron.extensions import l3_ext_gw_mode
+from neutron.tests.unit.api.v2 import test_base
+from neutron.tests.unit.extensions import base as test_extensions_base
+from webob import exc
+
+_get_path = test_base._get_path
+
+
+class Testodll3(test_extensions_base.ExtensionTestCase):
+
+ fmt = 'json'
+
+ def setUp(self):
+ super(Testodll3, self).setUp()
+ # support ext-gw-mode
+ for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
+ l3.RESOURCE_ATTRIBUTE_MAP[key].update(
+ l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
+ self._setUpExtension(
+ 'neutron.extensions.l3.RouterPluginBase', None,
+ l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '',
+ allow_pagination=True, allow_sorting=True,
+ supported_extension_aliases=['router', 'ext-gw-mode'],
+ use_quota=True)
+
+ @staticmethod
+ def _get_mock_network_operation_context():
+ current = {'status': 'ACTIVE',
+ 'subnets': [],
+ 'name': 'net1',
+ 'provider:physical_network': None,
+ 'admin_state_up': True,
+ 'tenant_id': 'test-tenant',
+ 'provider:network_type': 'local',
+ 'router:external': False,
+ 'shared': False,
+ 'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+ 'provider:segmentation_id': None}
+ context = mock.Mock(current=current)
+ return context
+
+ @staticmethod
+ def _get_router_test():
+ router_id = "234237d4-1e7f-11e5-9bd7-080027328c3a"
+ router = {'router': {'name': 'router1', 'admin_state_up': True,
+ 'tenant_id': router_id,
+ 'external_gateway_info': None}}
+ return router_id, router
+
+ @staticmethod
+ def _get_floating_ip_test():
+ floating_ip_id = "e4997650-6a83-4230-950a-8adab8e524b2"
+ floating_ip = {
+ "floatingip": {"fixed_ip_address": None,
+ "floating_ip_address": None,
+ "floating_network_id": None,
+ "id": floating_ip_id,
+ "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72",
+ "port_id": None,
+ "status": None,
+ "tenant_id": "test-tenant"
+ }
+ }
+ return floating_ip_id, floating_ip
+
+ @staticmethod
+ def _get_port_test():
+ port_id = "3a44f4e5-1694-493a-a1fb-393881c673a4"
+ subnet_id = "a2f1f29d-571b-4533-907f-5803ab96ead1"
+ port = {'id': port_id,
+ 'network_id': "84b126bb-f45e-4b2e-8202-7e5ce9e21fe7",
+ 'fixed_ips': [{'ip_address': '19.4.4.4',
+ 'prefixlen': 24,
+ 'subnet_id': subnet_id}],
+ 'subnets': [{'id': subnet_id,
+ 'cidr': '19.4.4.0/24',
+ 'gateway_ip': '19.4.4.1'}]}
+ return port_id, port
+
+ def test_create_router(self):
+ router_id, router = self._get_router_test()
+
+ return_value = copy.deepcopy(router['router'])
+ return_value.update({'status': "ACTIVE", 'id': router_id})
+
+ instance = self.plugin.return_value
+ instance.create_router.return_value = return_value
+ instance.get_routers_count.return_value = 0
+
+ res = self.api.post(_get_path('routers', fmt=self.fmt),
+ self.serialize(router),
+ content_type='application/%s' % self.fmt)
+
+ instance.create_router.assert_called_once_with(mock.ANY, router=router)
+ self.assertEqual(exc.HTTPCreated.code, res.status_int)
+ res = self.deserialize(res)
+ self.assertIn('router', res)
+ router = res['router']
+ self.assertEqual(router_id, router['id'])
+ self.assertEqual("ACTIVE", router['status'])
+ self.assertEqual(True, router['admin_state_up'])
+
+ def test_update_router(self):
+ router_id, router = self._get_router_test()
+
+ router_request_info = {'external_gateway_info': {
+ "network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8",
+ "enable_snat": True}
+ }
+ return_value = copy.deepcopy(router['router'])
+ return_value.update(router_request_info)
+ return_value.update({'status': "ACTIVE", 'id': router_id})
+
+ instance = self.plugin.return_value
+ instance.update_router.return_value = return_value
+
+ router_request = {'router': router_request_info}
+ res = self.api.put(_get_path('routers', id=router_id, fmt=self.fmt),
+ self.serialize(router_request))
+ instance.update_router.assert_called_once_with(mock.ANY, router_id,
+ router=router_request)
+
+ self.assertEqual(exc.HTTPOk.code, res.status_int)
+ res = self.deserialize(res)
+ self.assertIn('router', res)
+ router = res['router']
+ self.assertEqual(router_id, router['id'])
+ self.assertEqual("3c5bcddd-6af9-4e6b-9c3e-c153e521cab8",
+ router["external_gateway_info"]['network_id'])
+ self.assertEqual(True, router["external_gateway_info"]['enable_snat'])
+
+ def test_delete_router(self):
+ router_id, router = self._get_router_test()
+
+ instance = self.plugin.return_value
+
+ res = self.api.delete(_get_path('routers', id=router_id, fmt=self.fmt))
+ instance.delete_router.assert_called_once_with(mock.ANY, router_id)
+
+ self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+
+ def test_create_floating_ip(self):
+ floating_ip_id, floating_ip = self._get_floating_ip_test()
+ port_id, port = self._get_port_test()
+
+ floating_ip_request_info = {"floating_network_id":
+ "376da547-b977-4cfe-9cba-275c80debf57",
+ "tenant_id": "test-tenant",
+ "fixed_ip_address": "10.0.0.3",
+ "subnet_id": port['subnets'][0]['id'],
+ "port_id": port_id,
+ "floating_ip_address": "172.24.4.228"
+ }
+
+ return_value = copy.deepcopy(floating_ip['floatingip'])
+ return_value.update(floating_ip_request_info)
+ return_value.update({'status': "ACTIVE"})
+
+ instance = self.plugin.return_value
+ instance.create_floatingip.return_value = return_value
+ instance.get_floatingips_count.return_value = 0
+ instance.get_port = mock.Mock(return_value=port)
+
+ floating_ip_request = {'floatingip': floating_ip_request_info}
+
+ res = self.api.post(_get_path('floatingips', fmt=self.fmt),
+ self.serialize(floating_ip_request))
+
+ instance.create_floatingip.\
+ assert_called_once_with(mock.ANY,
+ floatingip=floating_ip_request)
+
+ self.assertEqual(exc.HTTPCreated.code, res.status_int)
+ res = self.deserialize(res)
+ self.assertIn('floatingip', res)
+ floatingip = res['floatingip']
+ self.assertEqual(floating_ip_id, floatingip['id'])
+ self.assertEqual("ACTIVE", floatingip['status'])
+
+ def test_update_floating_ip(self):
+ floating_ip_id, floating_ip = self._get_floating_ip_test()
+
+ floating_ip_request_info = {"port_id": None}
+
+ return_value = copy.deepcopy(floating_ip['floatingip'])
+ return_value.update(floating_ip_request_info)
+ return_value.update({"status": "ACTIVE",
+ "tenant_id": "test-tenant",
+ "floating_network_id":
+ "376da547-b977-4cfe-9cba-275c80debf57",
+ "fixed_ip_address": None,
+ "floating_ip_address": "172.24.4.228"
+ })
+
+ instance = self.plugin.return_value
+ instance.update_floatingip.return_value = return_value
+ port_id, port = self._get_port_test()
+ instance.get_port = mock.Mock(return_value=port)
+
+ floating_ip_request = {'floatingip': floating_ip_request_info}
+
+ res = self.api.put(_get_path('floatingips', id=floating_ip_id,
+ fmt=self.fmt),
+ self.serialize(floating_ip_request))
+
+ instance.update_floatingip.\
+ assert_called_once_with(mock.ANY,
+ floating_ip_id,
+ floatingip=floating_ip_request)
+
+ self.assertEqual(exc.HTTPOk.code, res.status_int)
+ res = self.deserialize(res)
+ self.assertIn('floatingip', res)
+ floatingip = res['floatingip']
+ self.assertEqual(floating_ip_id, floatingip['id'])
+ self.assertIsNone(floatingip['port_id'])
+ self.assertIsNone(floatingip['fixed_ip_address'])
+
+ def test_delete_floating_ip(self):
+ floating_ip_id, floating_ip = self._get_floating_ip_test()
+
+ instance = self.plugin.return_value
+ port_id, port = self._get_port_test()
+ instance.get_port = mock.Mock(return_value=port)
+ res = self.api.delete(_get_path('floatingips', id=floating_ip_id))
+ instance.delete_floatingip.assert_called_once_with(mock.ANY,
+ floating_ip_id)
+
+ self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+
+ def test_add_router_interface(self):
+ router_id, router = self._get_router_test()
+ interface_info = {"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"}
+ return_value = {"tenant_id": "6ba032e4730d42e2ad928f430f5da33e",
+ "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4",
+ "id": router_id
+ }
+ return_value.update(interface_info)
+
+ instance = self.plugin.return_value
+ instance.add_router_interface.return_value = return_value
+
+ res = self.api.put(_get_path('routers', id=router_id,
+ action="add_router_interface",
+ fmt=self.fmt),
+ self.serialize(interface_info)
+ )
+
+ instance.add_router_interface.assert_called_once_with(mock.ANY,
+ router_id,
+ interface_info)
+
+ self.assertEqual(exc.HTTPOk.code, res.status_int)
+ res = self.deserialize(res)
+ self.assertEqual(router_id, res['id'])
+ self.assertEqual("a2f1f29d-571b-4533-907f-5803ab96ead1",
+ res['subnet_id'])
+
+ def test_remove_router_interface(self):
+ router_id, router = self._get_router_test()
+ interface_info = {"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1",
+ "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4"
+ }
+ return_value = {"tenant_id": "6ba032e4730d42e2ad928f430f5da33e",
+ "id": router_id
+ }
+ return_value.update(interface_info)
+
+ instance = self.plugin.return_value
+ instance.remove_router_interface.return_value = return_value
+ res = self.api.put(_get_path('routers', id=router_id,
+ action="remove_router_interface",
+ fmt=self.fmt),
+ self.serialize(interface_info)
+ )
+
+ instance.remove_router_interface.\
+ assert_called_once_with(mock.ANY,
+ router_id,
+ interface_info)
+
+ self.assertEqual(exc.HTTPOk.code, res.status_int)
+ res = self.deserialize(res)
+ self.assertEqual(router_id, res['id'])
+ self.assertEqual("a2f1f29d-571b-4533-907f-5803ab96ead1",
+ res['subnet_id'])
diff --git a/networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py b/networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py
new file mode 100644
index 0000000..da3f644
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py
@@ -0,0 +1,526 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import filters
+from networking_odl.db import db
+from networking_odl.journal import journal
+from networking_odl.l3 import l3_odl_v2
+from networking_odl.ml2 import mech_driver_v2
+
+import mock
+from oslo_serialization import jsonutils
+import requests
+
+from neutron import context
+from neutron.db import api as neutron_db_api
+from neutron.extensions import external_net as external_net
+from neutron import manager
+from neutron.plugins.ml2 import config as config
+from neutron.plugins.ml2 import plugin
+from neutron.tests import base
+from neutron.tests.unit.db import test_db_base_plugin_v2
+from neutron.tests.unit import testlib_api
+
+EMPTY_DEP = []
+FLOATINGIP_ID = 'floatingip_uuid'
+NETWORK_ID = 'network_uuid'
+ROUTER_ID = 'router_uuid'
+SUBNET_ID = 'subnet_uuid'
+PORT_ID = 'port_uuid'
+
+
+class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
+
+ def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
+ password='somepass'):
+ config.cfg.CONF.set_override('mechanism_drivers',
+ ['logger', 'opendaylight'],
+ 'ml2')
+ config.cfg.CONF.set_override('url', url, 'ml2_odl')
+ config.cfg.CONF.set_override('username', username, 'ml2_odl')
+ config.cfg.CONF.set_override('password', password, 'ml2_odl')
+
+ def _test_missing_config(self, **kwargs):
+ self._set_config(**kwargs)
+ self.assertRaises(config.cfg.RequiredOptError,
+ plugin.Ml2Plugin)
+
+ def test_valid_config(self):
+ self._set_config()
+ plugin.Ml2Plugin()
+
+ def test_missing_url_raises_exception(self):
+ self._test_missing_config(url=None)
+
+ def test_missing_username_raises_exception(self):
+ self._test_missing_config(username=None)
+
+ def test_missing_password_raises_exception(self):
+ self._test_missing_config(password=None)
+
+
+class DataMatcher(object):
+
+ def __init__(self, operation, object_type, object_dict):
+ self._data = object_dict.copy()
+ self._object_type = object_type
+ filters.filter_for_odl(object_type, operation, self._data)
+
+ def __eq__(self, s):
+ data = jsonutils.loads(s)
+ if self._object_type == odl_const.ODL_ROUTER_INTF:
+ return self._data == data
+ else:
+ return self._data == data[self._object_type]
+
+ def __ne__(self, s):
+ return not self.__eq__(s)
+
+
+class OpenDaylightL3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
+ base.BaseTestCase):
+ def setUp(self):
+ config.cfg.CONF.set_override("core_plugin",
+ 'neutron.plugins.ml2.plugin.Ml2Plugin')
+ core_plugin = config.cfg.CONF.core_plugin
+ super(OpenDaylightL3TestCase, self).setUp(plugin=core_plugin)
+ config.cfg.CONF.set_override('mechanism_drivers',
+ ['logger', 'opendaylight'], 'ml2')
+ config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
+ config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
+ config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
+ mock.patch.object(journal.OpendaylightJournalThread,
+ 'start_odl_sync_thread').start()
+ self.db_session = neutron_db_api.get_session()
+ self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
+ self.plugin = manager.NeutronManager.get_plugin()
+ self.plugin._network_is_external = mock.Mock(return_value=True)
+ self.driver = l3_odl_v2.OpenDaylightL3RouterPlugin()
+ self.thread = journal.OpendaylightJournalThread()
+ self.driver.get_floatingip = mock.Mock(
+ return_value={'router_id': ROUTER_ID,
+ 'floating_network_id': NETWORK_ID})
+ self.addCleanup(self._db_cleanup)
+
+ @staticmethod
+ def _get_mock_router_operation_info(network, subnet):
+ router_context = context.get_admin_context()
+ router = {odl_const.ODL_ROUTER:
+ {'name': 'router1',
+ 'admin_state_up': True,
+ 'tenant_id': network['network']['tenant_id'],
+ 'external_gateway_info': {'network_id':
+ network['network']['id']}}}
+ return router_context, router
+
+ @staticmethod
+ def _get_mock_floatingip_operation_info(network, subnet):
+ floatingip_context = context.get_admin_context()
+ floatingip = {odl_const.ODL_FLOATINGIP:
+ {'floating_network_id': network['network']['id'],
+ 'tenant_id': network['network']['tenant_id']}}
+ return floatingip_context, floatingip
+
+ @staticmethod
+ def _get_mock_router_interface_operation_info(network, subnet):
+ router_intf_context = context.get_admin_context()
+ router_intf_dict = {'subnet_id': subnet['subnet']['id'],
+ 'id': network['network']['id']}
+ return router_intf_context, router_intf_dict
+
+ @classmethod
+ def _get_mock_operation_info(cls, object_type, *args):
+ getter = getattr(cls, '_get_mock_' + object_type + '_operation_info')
+ return getter(*args)
+
+ def _db_cleanup(self):
+ rows = db.get_all_db_rows(self.db_session)
+ for row in rows:
+ db.delete_row(self.db_session, row=row)
+
+ @classmethod
+ def _get_mock_request_response(cls, status_code):
+ response = mock.Mock(status_code=status_code)
+ response.raise_for_status = mock.Mock() if status_code < 400 else (
+ mock.Mock(side_effect=requests.exceptions.HTTPError(
+ cls._status_code_msgs[status_code])))
+ return response
+
+ def _test_operation(self, status_code, expected_calls, *args, **kwargs):
+ request_response = self._get_mock_request_response(status_code)
+ with mock.patch('requests.request',
+ return_value=request_response) as mock_method:
+ with mock.patch.object(self.thread.event, 'wait',
+ return_value=False):
+ self.thread.run_sync_thread(exit_after_run=True)
+
+ if expected_calls:
+ mock_method.assert_called_with(
+ headers={'Content-Type': 'application/json'},
+ auth=(config.cfg.CONF.ml2_odl.username,
+ config.cfg.CONF.ml2_odl.password),
+ timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs)
+ self.assertEqual(expected_calls, mock_method.call_count)
+
+ def _call_operation_object(self, operation, object_type, object_id,
+ network, subnet):
+ object_context, object_dict = self._get_mock_operation_info(
+ object_type, network, subnet)
+ method = getattr(self.driver, operation + '_' + object_type)
+
+ if operation == odl_const.ODL_CREATE:
+ new_object_dict = method(object_context, object_dict)
+ elif operation == odl_const.ODL_UPDATE:
+ new_object_dict = method(object_context, object_id, object_dict)
+ elif operation in [odl_const.ODL_ADD, odl_const.ODL_REMOVE]:
+ router_dict = method(object_context, object_id, object_dict)
+ new_object_dict = self.driver._generate_router_dict(
+ object_id, object_dict, router_dict)
+ else:
+ new_object_dict = method(object_context, object_id)
+
+ return object_context, new_object_dict
+
+ def _test_operation_thread_processing(self, object_type, operation,
+ network, subnet, object_id,
+ expected_calls=1):
+ http_requests = {odl_const.ODL_CREATE: 'post',
+ odl_const.ODL_UPDATE: 'put',
+ odl_const.ODL_DELETE: 'delete',
+ odl_const.ODL_ADD: 'put',
+ odl_const.ODL_REMOVE: 'put'}
+ status_codes = {odl_const.ODL_CREATE: requests.codes.created,
+ odl_const.ODL_UPDATE: requests.codes.ok,
+ odl_const.ODL_DELETE: requests.codes.no_content,
+ odl_const.ODL_ADD: requests.codes.created,
+ odl_const.ODL_REMOVE: requests.codes.created}
+
+ http_request = http_requests[operation]
+ status_code = status_codes[operation]
+
+ # Create database entry.
+ object_context, new_object_dict = self._call_operation_object(
+ operation, object_type, object_id, network, subnet)
+
+ # Setup expected results.
+ if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
+ url = (config.cfg.CONF.ml2_odl.url + '/' + object_type + 's/' +
+ object_id)
+ elif operation in [odl_const.ODL_ADD, odl_const.ODL_REMOVE]:
+ url = (config.cfg.CONF.ml2_odl.url + '/' + odl_const.ODL_ROUTER +
+ 's/' + object_id + '/' + operation + '_router_interface')
+ else:
+ url = config.cfg.CONF.ml2_odl.url + '/' + object_type + 's'
+
+ if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE,
+ odl_const.ODL_ADD, odl_const.ODL_REMOVE]:
+ kwargs = {
+ 'url': url,
+ 'data': DataMatcher(operation, object_type, new_object_dict)}
+ else:
+ kwargs = {'url': url, 'data': None}
+
+ # Call threading routine to process database entry. Test results.
+ self._test_operation(status_code, expected_calls, http_request,
+ **kwargs)
+
+ return new_object_dict
+
+ def _test_thread_processing(self, object_type):
+ # Create network and subnet.
+ kwargs = {'arg_list': (external_net.EXTERNAL,),
+ external_net.EXTERNAL: True}
+ with self.network(**kwargs) as network:
+ with self.subnet(network=network, cidr='10.0.0.0/24'):
+ # Add and process create request.
+ new_object_dict = self._test_operation_thread_processing(
+ object_type, odl_const.ODL_CREATE, network, None, None)
+ object_id = new_object_dict['id']
+ rows = db.get_all_db_rows_by_state(self.db_session,
+ odl_const.COMPLETED)
+ self.assertEqual(1, len(rows))
+
+ # Add and process 'update' request. Adds to database.
+ self._test_operation_thread_processing(
+ object_type, odl_const.ODL_UPDATE, network, None,
+ object_id)
+ rows = db.get_all_db_rows_by_state(self.db_session,
+ odl_const.COMPLETED)
+ self.assertEqual(2, len(rows))
+
+ # Add and process 'delete' request. Adds to database.
+ self._test_operation_thread_processing(
+ object_type, odl_const.ODL_DELETE, network, None,
+ object_id)
+ rows = db.get_all_db_rows_by_state(self.db_session,
+ odl_const.COMPLETED)
+ self.assertEqual(3, len(rows))
+
+ def _test_db_results(self, object_id, operation, object_type):
+ rows = db.get_all_db_rows(self.db_session)
+
+ self.assertEqual(1, len(rows))
+ self.assertEqual(operation, rows[0]['operation'])
+ self.assertEqual(object_type, rows[0]['object_type'])
+ self.assertEqual(object_id, rows[0]['object_uuid'])
+
+ self._db_cleanup()
+
+ def _test_object_db(self, object_type):
+ # Create network and subnet for testing.
+ kwargs = {'arg_list': (external_net.EXTERNAL,),
+ external_net.EXTERNAL: True}
+ with self.network(**kwargs) as network:
+ with self.subnet(network=network):
+ object_context, object_dict = self._get_mock_operation_info(
+ object_type, network, None)
+
+ # Add and test 'create' database entry.
+ method = getattr(self.driver,
+ odl_const.ODL_CREATE + '_' + object_type)
+ new_object_dict = method(object_context, object_dict)
+ object_id = new_object_dict['id']
+ self._test_db_results(object_id, odl_const.ODL_CREATE,
+ object_type)
+
+ # Add and test 'update' database entry.
+ method = getattr(self.driver,
+ odl_const.ODL_UPDATE + '_' + object_type)
+ method(object_context, object_id, object_dict)
+ self._test_db_results(object_id, odl_const.ODL_UPDATE,
+ object_type)
+
+ # Add and test 'delete' database entry.
+ method = getattr(self.driver,
+ odl_const.ODL_DELETE + '_' + object_type)
+ method(object_context, object_id)
+ self._test_db_results(object_id, odl_const.ODL_DELETE,
+ object_type)
+
+ def _test_dependency_processing(
+ self, test_operation, test_object, test_id, test_context,
+ dep_operation, dep_object, dep_id, dep_context):
+
+ # Mock sendjson to verify that it never gets called.
+ mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
+ 'sendjson').start()
+
+ # Create dependency db row and mark as 'processing' so it won't
+ # be processed by the journal thread.
+ db.create_pending_row(self.db_session, dep_object,
+ dep_id, dep_operation, dep_context)
+ row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+ db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)
+
+ # Create test row with dependent ID.
+ db.create_pending_row(self.db_session, test_object,
+ test_id, test_operation, test_context)
+
+ # Call journal thread.
+ with mock.patch.object(self.thread.event, 'wait',
+ return_value=False):
+ self.thread.run_sync_thread(exit_after_run=True)
+
+ # Verify that dependency row is still set at 'processing'.
+ rows = db.get_all_db_rows_by_state(self.db_session,
+ odl_const.PROCESSING)
+ self.assertEqual(1, len(rows))
+
+ # Verify that the test row was processed and set back to 'pending'
+ # to be processed again.
+ rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+ self.assertEqual(1, len(rows))
+
+ # Verify that _json_data was not called.
+ self.assertFalse(mock_sendjson.call_count)
+
+ def test_router_db(self):
+ self._test_object_db(odl_const.ODL_ROUTER)
+
+ def test_floatingip_db(self):
+ self._test_object_db(odl_const.ODL_FLOATINGIP)
+
    def test_router_intf_db(self):
        """Router-interface add/remove must each produce a journal row.

        Router interface events carry no UUID of their own, so rows are
        keyed with ODL_UUID_NOT_USED.
        """
        # Create network, subnet and router for testing.
        kwargs = {'arg_list': (external_net.EXTERNAL,),
                  external_net.EXTERNAL: True}
        with self.network(**kwargs) as network:
            with self.subnet(cidr='10.0.0.0/24') as subnet:
                router_context, router_dict = (
                    self._get_mock_router_operation_info(network, None))
                new_router_dict = self.driver.create_router(router_context,
                                                            router_dict)
                router_id = new_router_dict['id']

                object_type = odl_const.ODL_ROUTER_INTF
                router_intf_context, router_intf_dict = \
                    self._get_mock_router_interface_operation_info(network,
                                                                   subnet)

                # Remove 'router' database entry to allow tests to pass.
                self._db_cleanup()

                # Add and test router interface 'add' database entry.
                # Note that router interface events do not generate unique
                # UUIDs.
                self.driver.add_router_interface(router_intf_context,
                                                 router_id, router_intf_dict)
                self._test_db_results(odl_const.ODL_UUID_NOT_USED,
                                      odl_const.ODL_ADD, object_type)

                # Add and test 'remove' database entry.
                self.driver.remove_router_interface(router_intf_context,
                                                    router_id,
                                                    router_intf_dict)
                self._test_db_results(odl_const.ODL_UUID_NOT_USED,
                                      odl_const.ODL_REMOVE, object_type)
+
    def test_router_threading(self):
        # Journal thread must drain all router operations to ODL.
        self._test_thread_processing(odl_const.ODL_ROUTER)

    def test_floatingip_threading(self):
        # Journal thread must drain all floating IP operations to ODL.
        self._test_thread_processing(odl_const.ODL_FLOATINGIP)
+
    def test_router_intf_threading(self):
        """Journal thread must process router-interface add/remove rows."""
        # Create network, subnet and router for testing.
        kwargs = {'arg_list': (external_net.EXTERNAL,),
                  external_net.EXTERNAL: True}
        with self.network(**kwargs) as network:
            with self.subnet(cidr='10.0.0.0/24') as subnet:
                router_context, router_dict = (
                    self._get_mock_router_operation_info(network, None))
                new_router_dict = self.driver.create_router(router_context,
                                                            router_dict)
                router_id = new_router_dict['id']
                object_type = odl_const.ODL_ROUTER_INTF

                # Add and process router interface 'add' request. Adds to
                # database. Expected calls = 2 because the create_router db
                # entry is also processed.
                self._test_operation_thread_processing(
                    object_type, odl_const.ODL_ADD, network, subnet, router_id,
                    expected_calls=2)
                rows = db.get_all_db_rows_by_state(self.db_session,
                                                   odl_const.COMPLETED)
                self.assertEqual(2, len(rows))

                # Add and process 'remove' request. Adds to database.
                self._test_operation_thread_processing(
                    object_type, odl_const.ODL_REMOVE, network, subnet,
                    router_id)
                rows = db.get_all_db_rows_by_state(self.db_session,
                                                   odl_const.COMPLETED)
                self.assertEqual(3, len(rows))
+
+ def test_delete_network_validate_ext_delete_router_dep(self):
+ router_context = [NETWORK_ID]
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_NETWORK, NETWORK_ID, None,
+ odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID,
+ router_context)
+
+ def test_create_router_validate_ext_create_port_dep(self):
+ router_context = {'gw_port_id': PORT_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID,
+ router_context,
+ odl_const.ODL_CREATE, odl_const.ODL_PORT, PORT_ID, None)
+
+ def test_delete_router_validate_ext_delete_floatingip_dep(self):
+ floatingip_context = [ROUTER_ID]
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None,
+ odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ floatingip_context)
+
+ def test_delete_router_validate_ext_remove_routerintf_dep(self):
+ router_intf_dict = {'id': ROUTER_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None,
+ odl_const.ODL_REMOVE, odl_const.ODL_ROUTER_INTF,
+ odl_const.ODL_UUID_NOT_USED, router_intf_dict)
+
+ def test_delete_router_validate_self_create_dep(self):
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
+ odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+ def test_delete_router_validate_self_update_dep(self):
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
+ odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+ def test_update_router_validate_self_create_dep(self):
+ router_context = {'gw_port_id': None}
+ self._test_dependency_processing(
+ odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID,
+ router_context,
+ odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+ def test_create_floatingip_validate_ext_create_network_dep(self):
+ floatingip_context = {'floating_network_id': NETWORK_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ floatingip_context,
+ odl_const.ODL_CREATE, odl_const.ODL_NETWORK, NETWORK_ID, None)
+
+ def test_update_floatingip_validate_self_create_dep(self):
+ floatingip_context = {'floating_network_id': NETWORK_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ floatingip_context,
+ odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ EMPTY_DEP)
+
+ def test_delete_floatingip_validate_self_create_dep(self):
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ EMPTY_DEP,
+ odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ None)
+
+ def test_delete_floatingip_validate_self_update_dep(self):
+ self._test_dependency_processing(
+ odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ EMPTY_DEP,
+ odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+ None)
+
+ def test_add_router_intf_validate_ext_create_router_dep(self):
+ router_intf_context = {'subnet_id': SUBNET_ID,
+ 'id': ROUTER_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_ADD, odl_const.ODL_ROUTER_INTF,
+ odl_const.ODL_UUID_NOT_USED, router_intf_context,
+ odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+ def test_add_router_intf_validate_ext_create_subnet_dep(self):
+ router_intf_context = {'subnet_id': SUBNET_ID,
+ 'id': ROUTER_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_ADD, odl_const.ODL_ROUTER_INTF,
+ odl_const.ODL_UUID_NOT_USED, router_intf_context,
+ odl_const.ODL_CREATE, odl_const.ODL_SUBNET, SUBNET_ID, None)
+
+ def test_remove_router_intf_validate_self_remove_router_intf_dep(self):
+ router_intf_context = {'subnet_id': SUBNET_ID,
+ 'id': ROUTER_ID}
+ self._test_dependency_processing(
+ odl_const.ODL_REMOVE, odl_const.ODL_ROUTER_INTF,
+ odl_const.ODL_UUID_NOT_USED, router_intf_context,
+ odl_const.ODL_ADD, odl_const.ODL_ROUTER_INTF,
+ odl_const.ODL_UUID_NOT_USED, router_intf_context)
diff --git a/networking-odl/networking_odl/tests/unit/lbaas/__init__.py b/networking-odl/networking_odl/tests/unit/lbaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/lbaas/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py b/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py
new file mode 100644
index 0000000..bca0ccb
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_lbaas_odl
+----------------------------------
+
+Tests for the LBaaS plugin for networking-odl.
+"""
+
+import mock
+
+from networking_odl.lbaas import driver_v1 as lbaas_odl
+
+from neutron.tests import base
+
+
class TestODL_LBaaS(base.BaseTestCase):
    """Smoke test for the OpenDaylight LBaaS V1 driver."""

    def test_init(self):
        # just create an instance of OpenDaylightLbaasDriverV1; any
        # exception raised during construction fails the test.
        self.plugin = mock.Mock()
        lbaas_odl.OpenDaylightLbaasDriverV1(self.plugin)
diff --git a/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py b/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py
new file mode 100644
index 0000000..f8292f6
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_lbaas_odl
+----------------------------------
+
+Tests for the LBaaS plugin for networking-odl.
+"""
+
+import mock
+
+from networking_odl.lbaas import driver_v2 as lbaas_odl
+
+from neutron.tests import base
+
+
class TestODL_LBaaS(base.BaseTestCase):
    """Smoke test for the OpenDaylight LBaaS V2 driver."""

    def test_init(self):
        # just create an instance of OpenDaylightLbaasDriverV2; any
        # exception raised during construction fails the test.
        self.plugin = mock.Mock()
        lbaas_odl.OpenDaylightLbaasDriverV2(self.plugin)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/__init__.py b/networking-odl/networking_odl/tests/unit/ml2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/__init__.py
diff --git a/networking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh b/networking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh
new file mode 100755
index 0000000..15f9b93
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
# Configure ODL host-config external_ids on the local Open vSwitch row.
uuid=$(sudo ovs-vsctl get Open_vSwitch . _uuid)

# Test data.
# NOTE: expansions are quoted; unquoted $uuid / $config would be subject
# to word splitting and glob expansion.
sudo ovs-vsctl set Open_vSwitch "$uuid" \
    external_ids:odl_os_hostconfig_hostid="devstack"

# sudo ovs-vsctl set Open_vSwitch "$uuid" \
#   external_ids:odl_os_hostconfig_hosttype="ODL L2"

# JSON hostconfig blob advertised to ODL via ovsdb external_ids.
config=$(cat <<____CONFIG
{"supported_vnic_types":[
    {"vnic_type":"normal","vif_type":"ovs","vif_details":{}}],
    "allowed_network_types":["local","vlan","vxlan","gre"],
    "bridge_mappings":{"physnet1":"br-ex"}}
____CONFIG
)

echo "config: $config"

sudo ovs-vsctl set Open_vSwitch "$uuid" \
    external_ids:odl_os_hostconfig_config_odl_l2="$config"
diff --git a/networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js b/networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js
new file mode 100644
index 0000000..1ee02d5
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 OpenStack Foundation
+ * All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License. You may obtain
+ * a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ * $nodejs odl_teststub.js
+ *
+ * local.conf or ml2_conf.ini should be set to the following:
+ *
+ * [ml2_odl]
+ * port_binding_controller = pseudo-agentdb-binding
+ * password = admin
+ * username = admin
+ * url = http://localhost:8080/controller/nb/v2/neutron
+ * restconf_uri = http://localhost:8125/ # for this stub
+ *
+ * To test with ODL *end to end* use below URL for restconf_uri and configure
+ * ovsdb external_ids using the test script: config-ovs-external_ids.sh
+ *
+ * http://localhost:8181/restconf/operational/neutron:neutron/hostconfigs
+ */
+
var http = require('http');

var PORT = 8125;

// Canned hostconfig document mirroring what ODL exposes at
// /restconf/operational/neutron:neutron/hostconfigs.
// NOTE: declared with `var` -- the original assignment had no
// declaration, leaking an implicit global (an error in strict mode).
var __test_odl_hconfig = {"hostconfigs": {"hostconfig": [
    {"host-id": "devstack",
     "host-type": "ODL L2",
     "config": {
         "supported_vnic_types": [
             {"vnic_type": "normal",
              "vif_type": "ovs",
              "vif_details": {}}],
         "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
         "bridge_mappings": {"physnet1":"br-ex"}
     }
    }]
}}


// Answer every request with the canned hostconfig JSON.
function handleRequest(req, res){
    res.setHeader('Content-Type', 'application/json');
    res.end(JSON.stringify(__test_odl_hconfig));
}

var server = http.createServer(handleRequest);

server.listen(PORT, function(){
    console.log("Server listening on: http://localhost:%s", PORT);
});
diff --git a/networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json b/networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json
new file mode 100644
index 0000000..f855ce7
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json
@@ -0,0 +1,171 @@
+{
+ "network-topology": {
+ "topology": [
+ {
+ "topology-id": "flow:1"
+ },
+ {
+ "node": [
+ {
+ "node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-ex",
+ "ovsdb:bridge-external-ids": [
+ {
+ "bridge-external-id-key": "bridge-id",
+ "bridge-external-id-value": "br-ex"
+ }
+ ],
+ "ovsdb:bridge-name": "br-ex",
+ "ovsdb:bridge-other-configs": [
+ {
+ "bridge-other-config-key": "disable-in-band",
+ "bridge-other-config-value": "true"
+ }
+ ],
+ "ovsdb:bridge-uuid": "4ba78705-3ac2-4e36-a2e1-32f1647d97a7",
+ "ovsdb:datapath-id": "00:00:06:87:a7:4b:36:4e",
+ "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+ "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2']",
+ "termination-point": [
+ {
+ "ovsdb:interface-external-ids": [
+ {
+ "external-id-key": "iface-id",
+ "external-id-value": "c44000c6-f199-4609-9325-afd8c72b6777"
+ },
+ {
+ "external-id-key": "iface-status",
+ "external-id-value": "active"
+ },
+ {
+ "external-id-key": "attached-mac",
+ "external-id-value": "fa:16:3e:a0:d5:49"
+ }
+ ],
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "c1081aa3-607f-404e-a71e-ea1dd334b263",
+ "ovsdb:name": "qg-c44000c6-f1",
+ "ovsdb:ofport": 1,
+ "ovsdb:port-uuid": "1a2ef41e-4836-420c-977f-7a662c7abe62",
+ "tp-id": "qg-c44000c6-f1"
+ },
+ {
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "54439f6a-7a88-4cf6-84b7-0645642618f9",
+ "ovsdb:name": "br-ex",
+ "ovsdb:ofport": 65534,
+ "ovsdb:port-uuid": "9bf4c1ab-d111-479d-84ab-1874f166153b",
+ "tp-id": "br-ex"
+ }
+ ]
+ },
+ {
+ "node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2",
+ "ovsdb:connection-info": {
+ "local-ip": "10.237.214.247",
+ "local-port": 6640,
+ "remote-ip": "10.237.214.247",
+ "remote-port": 43247
+ },
+ "ovsdb:managed-node-entry": [
+ {
+ "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int']"
+ },
+ {
+ "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-ex']"
+ }
+ ],
+ "ovsdb:openvswitch-external-ids": [
+ {
+ "external-id-key": "system-id",
+ "external-id-value": "c4dcfd6c-8f0e-43a6-9cf5-d2a0c37f5c52"
+ }
+ ],
+ "ovsdb:openvswitch-other-configs": [
+ {
+ "other-config-key": "local_ip",
+ "other-config-value": "10.237.214.247"
+ },
+ {
+ "other-config-key": "provider_mappings",
+ "other-config-value": "default:ens786f0"
+ }
+ ],
+ "ovsdb:ovs-version": "2.3.2"
+ },
+ {
+ "node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int",
+ "ovsdb:bridge-external-ids": [
+ {
+ "bridge-external-id-key": "bridge-id",
+ "bridge-external-id-value": "br-int"
+ }
+ ],
+ "ovsdb:bridge-name": "br-int",
+ "ovsdb:bridge-uuid": "d3acbe7f-cdab-4ef1-80b8-68e5db3b3b7b",
+ "ovsdb:datapath-id": "00:00:7e:be:ac:d3:f1:4e",
+ "ovsdb:datapath-type": "ovsdb:datapath-type-system",
+ "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2']",
+ "termination-point": [
+ {
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "8164bb4f-2b8c-4405-b8de-4b6b776baa27",
+ "ovsdb:name": "br-int",
+ "ovsdb:ofport": 65534,
+ "ovsdb:port-uuid": "c34e1347-6757-4770-a05e-66cfb4b65167",
+ "tp-id": "br-int"
+ },
+ {
+ "ovsdb:interface-external-ids": [
+ {
+ "external-id-key": "iface-id",
+ "external-id-value": "1d5780fc-da03-4c98-8082-089d70cb65e3"
+ },
+ {
+ "external-id-key": "iface-status",
+ "external-id-value": "active"
+ },
+ {
+ "external-id-key": "attached-mac",
+ "external-id-value": "fa:16:3e:ee:3e:36"
+ }
+ ],
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "00d8d482-abf9-4459-8cb1-9c8e80df4943",
+ "ovsdb:name": "tap1d5780fc-da",
+ "ovsdb:ofport": 1,
+ "ovsdb:port-uuid": "743a236a-a34c-4084-a5ed-8dac56371ca8",
+ "tp-id": "tap1d5780fc-da"
+ },
+ {
+ "ovsdb:interface-external-ids": [
+ {
+ "external-id-key": "iface-id",
+ "external-id-value": "674fd914-74c0-4065-a88a-929919446555"
+ },
+ {
+ "external-id-key": "iface-status",
+ "external-id-value": "active"
+ },
+ {
+ "external-id-key": "attached-mac",
+ "external-id-value": "fa:16:3e:62:0c:d3"
+ }
+ ],
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "41bde142-61bc-4297-a39d-8b0ee86a0731",
+ "ovsdb:name": "qr-674fd914-74",
+ "ovsdb:ofport": 2,
+ "ovsdb:port-uuid": "1c505a53-ccfd-4745-9526-211016d9cbb3",
+ "tp-id": "qr-674fd914-74"
+ }
+ ]
+ }
+ ],
+ "topology-id": "ovsdb:1"
+ },
+ {
+ "topology-id": "netvirt:1"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_driver.py b/networking-odl/networking_odl/tests/unit/ml2/test_driver.py
new file mode 100644
index 0000000..661eb55
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_driver.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2013-2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from neutron import context
+from neutron.tests.unit.plugins.ml2 import test_plugin
+
+from networking_odl.common import constants as const
+from networking_odl.ml2 import mech_driver as driver
+
+
class TestODLShim(test_plugin.Ml2PluginV2TestCase):
    """Verify every ML2 postcommit hook is forwarded to the ODL back-end
    with the expected operation/object-type pair."""

    def setUp(self):
        super(TestODLShim, self).setUp()
        self.context = context.get_admin_context()
        self.plugin = mock.Mock()
        self.driver = driver.OpenDaylightMechanismDriver()
        self.driver.odl_drv = mock.Mock()

    def _assert_synchronized(self, operation, object_type):
        # The mocked back-end must have been asked to synchronize the
        # given operation on the given object type with our context.
        self.driver.odl_drv.synchronize.assert_called_with(
            operation, object_type, self.context)

    def test_create_network_postcommit(self):
        self.driver.create_network_postcommit(self.context)
        self._assert_synchronized(const.ODL_CREATE, const.ODL_NETWORKS)

    def test_update_network_postcommit(self):
        self.driver.update_network_postcommit(self.context)
        self._assert_synchronized(const.ODL_UPDATE, const.ODL_NETWORKS)

    def test_delete_network_postcommit(self):
        self.driver.delete_network_postcommit(self.context)
        self._assert_synchronized(const.ODL_DELETE, const.ODL_NETWORKS)

    def test_create_subnet_postcommit(self):
        self.driver.create_subnet_postcommit(self.context)
        self._assert_synchronized(const.ODL_CREATE, const.ODL_SUBNETS)

    def test_update_subnet_postcommit(self):
        self.driver.update_subnet_postcommit(self.context)
        self._assert_synchronized(const.ODL_UPDATE, const.ODL_SUBNETS)

    def test_delete_subnet_postcommit(self):
        self.driver.delete_subnet_postcommit(self.context)
        self._assert_synchronized(const.ODL_DELETE, const.ODL_SUBNETS)

    def test_create_port_postcommit(self):
        self.driver.create_port_postcommit(self.context)
        self._assert_synchronized(const.ODL_CREATE, const.ODL_PORTS)

    def test_update_port_postcommit(self):
        self.driver.update_port_postcommit(self.context)
        self._assert_synchronized(const.ODL_UPDATE, const.ODL_PORTS)

    def test_delete_port_postcommit(self):
        self.driver.delete_port_postcommit(self.context)
        self._assert_synchronized(const.ODL_DELETE, const.ODL_PORTS)

    def test_bind_port_delegation(self):
        # given front-end with attached back-end
        front_end = self.driver
        front_end.odl_drv = back_end = mock.MagicMock(
            spec=driver.OpenDaylightDriver)
        # given a PortContext to be forwarded to the back-end unchanged
        # (local renamed from 'context', which shadowed the imported
        # neutron context module)
        port_context = object()

        # when binding port
        front_end.bind_port(port_context)

        # then port is bound by back-end
        back_end.bind_port.assert_called_once_with(port_context)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py b/networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py
new file mode 100644
index 0000000..932c961
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2 import driver_context as ctx
+from neutron_lib import constants as n_constants
+
+from networking_odl.ml2 import legacy_port_binding
+from networking_odl.tests import base
+
+
class TestLegacyPortBindingManager(base.DietTestCase):
    """Tests for the legacy (non-pseudo-agent) port binding manager."""

    # valid and invalid segments
    valid_segment = {
        api.ID: 'API_ID',
        api.NETWORK_TYPE: constants.TYPE_LOCAL,
        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}

    invalid_segment = {
        api.ID: 'API_ID',
        api.NETWORK_TYPE: constants.TYPE_NONE,
        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}

    def test_check_segment(self):
        """Validate the _check_segment method."""

        all_network_types = [constants.TYPE_FLAT, constants.TYPE_GRE,
                             constants.TYPE_LOCAL, constants.TYPE_VXLAN,
                             constants.TYPE_VLAN, constants.TYPE_NONE]

        mgr = legacy_port_binding.LegacyPortBindingManager()

        # Only local/gre/vxlan/vlan are bindable; flat and none are not.
        valid_types = {
            network_type
            for network_type in all_network_types
            if mgr._check_segment({api.NETWORK_TYPE: network_type})}

        self.assertEqual({
            constants.TYPE_LOCAL, constants.TYPE_GRE, constants.TYPE_VXLAN,
            constants.TYPE_VLAN}, valid_types)

    def test_bind_port(self):
        """Binding must pick the first valid segment and mark the port
        ACTIVE."""

        network = mock.MagicMock(spec=api.NetworkContext)

        port_context = mock.MagicMock(
            spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'},
            segments_to_bind=[self.valid_segment, self.invalid_segment],
            network=network)

        mgr = legacy_port_binding.LegacyPortBindingManager()
        vif_type = mgr._get_vif_type(port_context)

        mgr.bind_port(port_context)

        port_context.set_binding.assert_called_once_with(
            self.valid_segment[api.ID], vif_type,
            mgr.vif_details, status=n_constants.PORT_STATUS_ACTIVE)

    def test_bind_port_unsupported_vnic_type(self):
        """A direct (SR-IOV) vnic type is unsupported: no binding made."""
        network = mock.MagicMock(spec=api.NetworkContext)
        port_context = mock.MagicMock(
            spec=ctx.PortContext,
            current={'id': 'CURRENT_CONTEXT_ID',
                     portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT},
            segments_to_bind=[self.valid_segment, self.invalid_segment],
            network=network)

        mgr = legacy_port_binding.LegacyPortBindingManager()
        mgr.bind_port(port_context)
        port_context.set_binding.assert_not_called()
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py b/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py
new file mode 100644
index 0000000..95de10c
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py
@@ -0,0 +1,596 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import copy
+import mock
+import socket
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+import requests
+import webob.exc
+
+from neutron.db import segments_db
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import config as config
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2 import driver_context as driver_context
+from neutron.plugins.ml2 import plugin
+from neutron.tests import base
+from neutron.tests.unit.plugins.ml2 import test_plugin
+from neutron.tests.unit import testlib_api
+from neutron_lib import constants as n_constants
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.ml2 import legacy_port_binding
+from networking_odl.ml2 import mech_driver
+from networking_odl.ml2 import network_topology
+
+
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')


HOST = 'fake-host'
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'

# Canned Neutron resource dicts used as fixtures by the sync tests below.
FAKE_NETWORK = {'status': 'ACTIVE',
                'subnets': [],
                'name': 'net1',
                'provider:physical_network': None,
                'admin_state_up': True,
                'tenant_id': 'test-tenant',
                'provider:network_type': 'local',
                'router:external': False,
                'shared': False,
                'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                'provider:segmentation_id': None}

# Subnet belonging to FAKE_NETWORK (matching network_id).
FAKE_SUBNET = {'ipv6_ra_mode': None,
               'allocation_pools': [{'start': '10.0.0.2',
                                     'end': '10.0.1.254'}],
               'host_routes': [],
               'ipv6_address_mode': None,
               'cidr': '10.0.0.0/23',
               'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
               'name': '',
               'enable_dhcp': True,
               'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
               'tenant_id': 'test-tenant',
               'dns_nameservers': [],
               'gateway_ip': '10.0.0.1',
               'ip_version': 4,
               'shared': False}

# Unbound port fixture; note its network_id deliberately differs from
# FAKE_NETWORK.
FAKE_PORT = {'status': 'DOWN',
             'binding:host_id': '',
             'allowed_address_pairs': [],
             'device_owner': 'fake_owner',
             'binding:profile': {},
             'fixed_ips': [],
             'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
             'security_groups': [],
             'device_id': 'fake_device',
             'name': '',
             'admin_state_up': True,
             'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701',
             'tenant_id': 'bad_tenant_id',
             'binding:vif_details': {},
             'binding:vnic_type': 'normal',
             'binding:vif_type': 'unbound',
             'mac_address': '12:34:56:78:21:b6'}

FAKE_SECURITY_GROUP = {'description': 'Default security group',
                       'id': '6875fc07-853f-4230-9ab9-23d1af894240',
                       'name': 'default',
                       'security_group_rules': [],
                       'tenant_id': '04bb5f9a0fa14ad18203035c791ffae2'}

FAKE_SECURITY_GROUP_RULE = {'direction': 'ingress',
                            'ethertype': 'IPv4',
                            'id': '399029df-cefe-4a7a-b6d6-223558627d23',
                            'port_range_max': 0,
                            'port_range_min': 0,
                            'protocol': 0,
                            'remote_group_id': '6875fc07-853f-4230-9ab9',
                            'remote_ip_prefix': 0,
                            'security_group_id': '6875fc07-853f-4230-9ab9',
                            'tenant_id': '04bb5f9a0fa14ad18203035c791ffae2'}
+
+
class OpenDaylightTestCase(test_plugin.Ml2PluginV2TestCase):
    """Base ML2 test case wired to the OpenDaylight mechanism driver.

    Patches out every network access path (REST client, topology client,
    name resolution) so tests never touch a real web service.
    """

    _mechanism_drivers = ['opendaylight']

    def setUp(self):
        # Set URL/user/pass so init doesn't throw a cfg required error.
        # They are not used in these tests since sendjson is overwritten.
        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')

        super(OpenDaylightTestCase, self).setUp()
        self.port_create_status = 'DOWN'
        self.mech = mech_driver.OpenDaylightMechanismDriver()
        mock.patch.object(
            client.OpenDaylightRestClient,
            'sendjson',
            new=self.check_sendjson).start()

        # Prevent test from accidentally connecting to any web service.
        # BUG FIX: the keyword is 'spec', not 'specs'; 'specs=' was
        # silently stored as a plain Mock attribute and the spec was
        # never enforced.
        mock.patch.object(
            network_topology, 'NetworkTopologyClient',
            return_value=mock.Mock(
                spec=network_topology.NetworkTopologyClient,
                get=mock.Mock(side_effect=requests.HTTPError))).start()

        # Prevent hosts resolution from changing the behaviour of tests
        mock.patch.object(
            network_topology.utils,
            'get_addresses_by_name',
            side_effect=socket.gaierror).start()

    def check_sendjson(self, method, urlpath, obj):
        # Replacement for sendjson: assert the driver only ever builds
        # relative URL paths (never an absolute http:// URL).
        self.assertFalse(urlpath.startswith("http://"))
+
+
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
    """ML2 plugin initialization must fail whenever a required ml2_odl
    option (url, username or password) is missing."""

    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
                    password='somepass'):
        # Enable the ODL mechanism driver, then apply the credentials.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'],
                                     'ml2')
        for option, value in (('url', url),
                              ('username', username),
                              ('password', password)):
            config.cfg.CONF.set_override(option, value, 'ml2_odl')

    def _test_missing_config(self, **kwargs):
        # With one option forced to None, plugin construction must raise.
        self._set_config(**kwargs)
        self.assertRaises(config.cfg.RequiredOptError,
                          plugin.Ml2Plugin)

    def test_valid_config(self):
        self._set_config()
        plugin.Ml2Plugin()

    def test_missing_url_raises_exception(self):
        self._test_missing_config(url=None)

    def test_missing_username_raises_exception(self):
        self._test_missing_config(username=None)

    def test_missing_password_raises_exception(self):
        self._test_missing_config(password=None)
+
+
class OpenDaylightMechanismTestBasicGet(test_plugin.TestMl2BasicGet,
                                        OpenDaylightTestCase):
    # Re-run the stock ML2 'basic get' suite with the ODL driver loaded.
    pass


class OpenDaylightMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2,
                                          OpenDaylightTestCase):
    # Re-run the stock ML2 network suite with the ODL driver loaded.
    pass


class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestMl2SubnetsV2,
                                         OpenDaylightTestCase):
    # Re-run the stock ML2 subnet suite with the ODL driver loaded.
    pass
+
+
class OpenDaylightMechanismTestPortsV2(test_plugin.TestMl2PortsV2,
                                       OpenDaylightTestCase):
    """Stock ML2 port suite run against the ODL driver."""

    def setUp(self):
        # Patch out_of_sync to False *before* plugin setup so the driver
        # never attempts a full resync during these tests.
        mock.patch.object(
            mech_driver.OpenDaylightDriver,
            'out_of_sync',
            new_callable=mock.PropertyMock(return_value=False)).start()
        super(OpenDaylightMechanismTestPortsV2, self).setUp()

    def test_update_port_mac(self):
        # Changing the MAC of a bound port must be rejected with 409.
        self.check_update_port_mac(
            host_arg={portbindings.HOST_ID: HOST},
            arg_list=(portbindings.HOST_ID,),
            expected_status=webob.exc.HTTPConflict.code,
            expected_error='PortBound')
+
+
class DataMatcher(object):
    """Equality matcher for the JSON body sent to the ODL controller.

    Builds the expected resource dictionary from the mocked driver
    ``context`` by applying the driver's own attribute filter for the
    given ``operation``/``object_type``, then compares it against the
    serialized request payload.  Instances are meant to be passed as the
    ``data`` argument of mock call assertions.
    """

    def __init__(self, operation, object_type, context):
        self._data = context.current.copy()
        self._object_type = object_type
        # FILTER_MAP is keyed by the plural resource name ('networks', ...).
        filter_cls = mech_driver.OpenDaylightDriver.FILTER_MAP[
            '%ss' % object_type]
        attr_filter = getattr(filter_cls, 'filter_%s_attributes' % operation)
        attr_filter(self._data, context)

    def __eq__(self, s):
        # 's' is the JSON string actually handed to requests.request().
        data = jsonutils.loads(s)
        return self._data == data[self._object_type]

    def __ne__(self, s):
        return not self.__eq__(s)

    def __repr__(self):
        # Show the expected payload in mock assertion failures instead of
        # the default '<DataMatcher object at 0x...>' address.
        return '%s(%r)' % (type(self).__name__, self._data)
+
+
class OpenDaylightSyncTestCase(OpenDaylightTestCase):
    """Tests for the full-synchronization path of the v1 ODL driver."""

    def setUp(self):
        super(OpenDaylightSyncTestCase, self).setUp()
        self.given_back_end = mech_driver.OpenDaylightDriver()

    def test_simple_sync_all_with_HTTPError_not_found(self):
        """Resources that GET as 404 in ODL must be re-created via POST."""
        self.given_back_end.out_of_sync = True
        ml2_plugin = plugin.Ml2Plugin()

        response = mock.Mock(status_code=requests.codes.not_found)
        fake_exception = requests.exceptions.HTTPError('Test',
                                                       response=response)

        def side_eff(*args, **kwargs):
            # HTTP ERROR exception with 404 status code will be raised when use
            # sendjson to get the object in ODL DB
            if args[0] == 'get':
                raise fake_exception

        # Every plugin getter is stubbed so sync_full() sees exactly one
        # fake instance of each resource type.
        with mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
                               side_effect=side_eff), \
            mock.patch.object(plugin.Ml2Plugin, 'get_networks',
                              return_value=[FAKE_NETWORK.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_network',
                              return_value=FAKE_NETWORK.copy()), \
            mock.patch.object(plugin.Ml2Plugin, 'get_subnets',
                              return_value=[FAKE_SUBNET.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_ports',
                              return_value=[FAKE_PORT.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_security_groups',
                              return_value=[FAKE_SECURITY_GROUP.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_security_group_rules',
                              return_value=[FAKE_SECURITY_GROUP_RULE.copy()]):
            self.given_back_end.sync_full(ml2_plugin)

        # All five fake resources are expected to be POSTed, in this order.
        sync_id_list = [FAKE_NETWORK['id'], FAKE_SUBNET['id'],
                        FAKE_PORT['id'],
                        FAKE_SECURITY_GROUP['id'],
                        FAKE_SECURITY_GROUP_RULE['id']]

        # Collect the ids of every resource actually sent with 'post'.
        act = []
        for args, kwargs in \
                client.OpenDaylightRestClient.sendjson.call_args_list:
            if args[0] == 'post':
                for key in args[2]:
                    act.append(args[2][key][0]['id'])
        self.assertEqual(act, sync_id_list)

    def test_simple_sync_all_with_all_synced(self):
        """When every object already exists in ODL, only GETs are issued."""
        self.given_back_end.out_of_sync = True
        ml2_plugin = plugin.Ml2Plugin()

        with mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
                               return_value=None), \
            mock.patch.object(plugin.Ml2Plugin, 'get_networks',
                              return_value=[FAKE_NETWORK.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_subnets',
                              return_value=[FAKE_SUBNET.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_ports',
                              return_value=[FAKE_PORT.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_security_groups',
                              return_value=[FAKE_SECURITY_GROUP.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_security_group_rules',
                              return_value=[FAKE_SECURITY_GROUP_RULE.copy()]):
            self.given_back_end.sync_full(ml2_plugin)

        # it's only called for GET, there is no call for PUT
        # 5 = network, subnet, port, security_group, security_group_rule
        self.assertEqual(5,
                         client.OpenDaylightRestClient.sendjson.call_count)
+
+
class OpenDaylightMechanismDriverTestCase(base.BaseTestCase):
    """Postcommit REST behaviour of the v1 OpenDaylight mechanism driver."""

    def setUp(self):
        super(OpenDaylightMechanismDriverTestCase, self).setUp()
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'], 'ml2')
        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
        self.mech = mech_driver.OpenDaylightMechanismDriver()
        self.mech.initialize()

    @staticmethod
    def _get_mock_network_operation_context():
        context = mock.Mock(current=FAKE_NETWORK.copy())
        return context

    @staticmethod
    def _get_mock_subnet_operation_context():
        context = mock.Mock(current=FAKE_SUBNET.copy())
        return context

    @staticmethod
    def _get_mock_port_operation_context():
        context = mock.Mock(current=FAKE_PORT.copy())
        context._plugin.get_security_group = mock.Mock(return_value={})
        return context

    @classmethod
    def _get_mock_operation_context(cls, object_type):
        # Dispatch to the per-resource context factory above.
        getter = getattr(cls, '_get_mock_%s_operation_context' % object_type)
        return getter()

    # Canonical reason strings raised for each mocked HTTP status.
    _status_code_msgs = {
        200: '',
        201: '',
        204: '',
        400: '400 Client Error: Bad Request',
        401: '401 Client Error: Unauthorized',
        403: '403 Client Error: Forbidden',
        404: '404 Client Error: Not Found',
        409: '409 Client Error: Conflict',
        501: '501 Server Error: Not Implemented',
        503: '503 Server Error: Service Unavailable',
    }

    @classmethod
    def _get_mock_request_response(cls, status_code):
        """Build a fake requests response whose raise_for_status matches
        the given status code (no-op below 400, HTTPError otherwise)."""
        response = mock.Mock(status_code=status_code)
        response.raise_for_status = mock.Mock() if status_code < 400 else (
            mock.Mock(side_effect=requests.exceptions.HTTPError(
                cls._status_code_msgs[status_code], response=response)))
        return response

    def _test_single_operation(self, method, context, status_code,
                               exc_class=None, *args, **kwargs):
        """Invoke one postcommit method against a mocked HTTP layer and
        verify the exact requests.request() call it produces."""
        self.mech.odl_drv.out_of_sync = False
        request_response = self._get_mock_request_response(status_code)
        with mock.patch('requests.request',
                        return_value=request_response) as mock_method:
            if exc_class is not None:
                self.assertRaises(exc_class, method, context)
            else:
                method(context)
            mock_method.assert_called_once_with(
                headers={'Content-Type': 'application/json'},
                auth=(config.cfg.CONF.ml2_odl.username,
                      config.cfg.CONF.ml2_odl.password),
                timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs)

    def _test_create_resource_postcommit(self, object_type, status_code,
                                         exc_class=None):
        # Create maps to POST on the plural collection URL.
        method = getattr(self.mech, 'create_%s_postcommit' % object_type)
        context = self._get_mock_operation_context(object_type)
        url = '%s/%ss' % (config.cfg.CONF.ml2_odl.url, object_type)
        kwargs = {'url': url,
                  'data': DataMatcher(odl_const.ODL_CREATE, object_type,
                                      context)}
        self._test_single_operation(method, context, status_code, exc_class,
                                    'post', **kwargs)

    def _test_update_resource_postcommit(self, object_type, status_code,
                                         exc_class=None):
        # Update maps to PUT on the individual resource URL.
        method = getattr(self.mech, 'update_%s_postcommit' % object_type)
        context = self._get_mock_operation_context(object_type)
        url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
                             context.current['id'])
        kwargs = {'url': url,
                  'data': DataMatcher(odl_const.ODL_UPDATE, object_type,
                                      context)}
        self._test_single_operation(method, context, status_code, exc_class,
                                    'put', **kwargs)

    def _test_delete_resource_postcommit(self, object_type, status_code,
                                         exc_class=None):
        # Delete maps to DELETE with an empty body.
        method = getattr(self.mech, 'delete_%s_postcommit' % object_type)
        context = self._get_mock_operation_context(object_type)
        url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
                             context.current['id'])
        kwargs = {'url': url, 'data': None}
        self._test_single_operation(method, context, status_code, exc_class,
                                    odl_const.ODL_DELETE, **kwargs)

    def test_create_network_postcommit(self):
        self._test_create_resource_postcommit(odl_const.ODL_NETWORK,
                                              requests.codes.created)
        for status_code in (requests.codes.bad_request,
                            requests.codes.unauthorized):
            self._test_create_resource_postcommit(
                odl_const.ODL_NETWORK, status_code,
                requests.exceptions.HTTPError)

    def test_create_subnet_postcommit(self):
        self._test_create_resource_postcommit(odl_const.ODL_SUBNET,
                                              requests.codes.created)
        for status_code in (requests.codes.bad_request,
                            requests.codes.unauthorized,
                            requests.codes.forbidden,
                            requests.codes.not_found,
                            requests.codes.conflict,
                            requests.codes.not_implemented):
            self._test_create_resource_postcommit(
                odl_const.ODL_SUBNET, status_code,
                requests.exceptions.HTTPError)

    def test_create_port_postcommit(self):
        self._test_create_resource_postcommit(odl_const.ODL_PORT,
                                              requests.codes.created)
        for status_code in (requests.codes.bad_request,
                            requests.codes.unauthorized,
                            requests.codes.forbidden,
                            requests.codes.not_found,
                            requests.codes.conflict,
                            requests.codes.not_implemented,
                            requests.codes.service_unavailable):
            self._test_create_resource_postcommit(
                odl_const.ODL_PORT, status_code,
                requests.exceptions.HTTPError)

    def test_update_network_postcommit(self):
        self._test_update_resource_postcommit(odl_const.ODL_NETWORK,
                                              requests.codes.ok)
        for status_code in (requests.codes.bad_request,
                            requests.codes.forbidden,
                            requests.codes.not_found):
            self._test_update_resource_postcommit(
                odl_const.ODL_NETWORK, status_code,
                requests.exceptions.HTTPError)

    def test_update_subnet_postcommit(self):
        self._test_update_resource_postcommit(odl_const.ODL_SUBNET,
                                              requests.codes.ok)
        for status_code in (requests.codes.bad_request,
                            requests.codes.unauthorized,
                            requests.codes.forbidden,
                            requests.codes.not_found,
                            requests.codes.not_implemented):
            self._test_update_resource_postcommit(
                odl_const.ODL_SUBNET, status_code,
                requests.exceptions.HTTPError)

    def test_update_port_postcommit(self):
        self._test_update_resource_postcommit(odl_const.ODL_PORT,
                                              requests.codes.ok)
        for status_code in (requests.codes.bad_request,
                            requests.codes.unauthorized,
                            requests.codes.forbidden,
                            requests.codes.not_found,
                            requests.codes.conflict,
                            requests.codes.not_implemented):
            self._test_update_resource_postcommit(
                odl_const.ODL_PORT, status_code,
                requests.exceptions.HTTPError)

    def test_delete_network_postcommit(self):
        self._test_delete_resource_postcommit(odl_const.ODL_NETWORK,
                                              requests.codes.no_content)
        # A 404 on delete is tolerated: the object is already gone.
        self._test_delete_resource_postcommit(odl_const.ODL_NETWORK,
                                              requests.codes.not_found)
        for status_code in (requests.codes.unauthorized,
                            requests.codes.conflict):
            self._test_delete_resource_postcommit(
                odl_const.ODL_NETWORK, status_code,
                requests.exceptions.HTTPError)

    def test_delete_subnet_postcommit(self):
        self._test_delete_resource_postcommit(odl_const.ODL_SUBNET,
                                              requests.codes.no_content)
        self._test_delete_resource_postcommit(odl_const.ODL_SUBNET,
                                              requests.codes.not_found)
        for status_code in (requests.codes.unauthorized,
                            requests.codes.conflict,
                            requests.codes.not_implemented):
            self._test_delete_resource_postcommit(
                odl_const.ODL_SUBNET, status_code,
                requests.exceptions.HTTPError)

    def test_delete_port_postcommit(self):
        self._test_delete_resource_postcommit(odl_const.ODL_PORT,
                                              requests.codes.no_content)
        self._test_delete_resource_postcommit(odl_const.ODL_PORT,
                                              requests.codes.not_found)
        for status_code in (requests.codes.unauthorized,
                            requests.codes.forbidden,
                            requests.codes.not_implemented):
            self._test_delete_resource_postcommit(
                odl_const.ODL_PORT, status_code,
                requests.exceptions.HTTPError)

    def test_port_emtpy_tenant_id_work_around(self):
        """Validate the work around code of port creation"""
        plugin = mock.Mock()
        plugin_context = mock.Mock()
        network = self._get_mock_operation_context(
            odl_const.ODL_NETWORK).current
        port = self._get_mock_operation_context(odl_const.ODL_PORT).current
        tenant_id = network['tenant_id']
        port['tenant_id'] = ''

        with mock.patch.object(segments_db, 'get_network_segments'):
            context = driver_context.PortContext(
                plugin, plugin_context, port, network, {}, 0, None)
            self.mech.odl_drv.FILTER_MAP[
                odl_const.ODL_PORTS].filter_create_attributes(port, context)
            self.assertEqual(tenant_id, port['tenant_id'])

    def test_update_port_filter(self):
        """Validate the filter code on update port operation"""
        items_to_filter = ['network_id', 'id', 'status', 'tenant_id']
        # Fix: use a mock plugin like the sibling test above does; the
        # original passed the imported neutron 'plugin' *module* here.
        plugin = mock.Mock()
        plugin_context = mock.Mock()
        network = self._get_mock_operation_context(
            odl_const.ODL_NETWORK).current
        subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current
        port = self._get_mock_operation_context(odl_const.ODL_PORT).current
        port['fixed_ips'] = [{'subnet_id': subnet['id'],
                              'ip_address': '10.0.0.10'}]
        port['mac_address'] = port['mac_address'].upper()
        orig_port = copy.deepcopy(port)

        with mock.patch.object(segments_db, 'get_network_segments'):
            context = driver_context.PortContext(
                plugin, plugin_context, port, network, {}, 0, None)
            self.mech.odl_drv.FILTER_MAP[
                odl_const.ODL_PORTS].filter_update_attributes(port, context)
            # Everything except the filtered keys must be left untouched.
            for key, value in port.items():
                if key not in items_to_filter:
                    self.assertEqual(orig_port[key], value)
+
+
class TestOpenDaylightMechanismDriver(base.DietTestCase):
    """Port-binding behaviour of the v1 front-end mechanism driver."""

    # given valid and invalid segments
    valid_segment = {
        api.ID: 'API_ID',
        api.NETWORK_TYPE: constants.TYPE_LOCAL,
        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}

    invalid_segment = {
        api.ID: 'API_ID',
        api.NETWORK_TYPE: constants.TYPE_NONE,
        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}

    def test_bind_port_front_end(self):
        given_front_end = mech_driver.OpenDaylightMechanismDriver()
        given_port_context = self.given_port_context()
        given_back_end = mech_driver.OpenDaylightDriver()
        given_front_end.odl_drv = given_back_end
        given_back_end.port_binding_controller = \
            legacy_port_binding.LegacyPortBindingManager()

        # when port is bound
        given_front_end.bind_port(given_port_context)

        # then context binding is setup with returned vif_type and valid
        # segment API ID
        given_port_context.set_binding.assert_called_once_with(
            self.valid_segment[api.ID], portbindings.VIF_TYPE_OVS,
            given_back_end.port_binding_controller.vif_details,
            status=n_constants.PORT_STATUS_ACTIVE)

    def given_port_context(self):
        """Build a mocked PortContext offering one valid and one invalid
        segment, so only the valid one can be bound."""
        from neutron.plugins.ml2 import driver_context as ctx

        # given NetworkContext
        network = mock.MagicMock(spec=api.NetworkContext)

        # given port context
        return mock.MagicMock(
            spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'},
            segments_to_bind=[self.valid_segment, self.invalid_segment],
            network=network,
            _new_bound_segment=self.valid_segment)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py b/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py
new file mode 100644
index 0000000..7e8c7fc
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py
@@ -0,0 +1,577 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+
+from networking_odl.common import callback
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import filters
+from networking_odl.db import db
+from networking_odl.journal import cleanup
+from networking_odl.journal import journal
+from networking_odl.ml2 import mech_driver_v2
+
+import mock
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+import requests
+
+from neutron.db import api as neutron_db_api
+from neutron import manager
+from neutron.plugins.ml2 import config as config
+from neutron.plugins.ml2 import plugin
+from neutron.tests.unit.plugins.ml2 import test_plugin
+from neutron.tests.unit import testlib_api
+
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')

# Fixed id fixtures shared by the tests below.
SECURITY_GROUP = '2f9244b4-9bee-4e81-bc4a-3f3c2045b3d7'
SG_FAKE_ID = 'sg_fake_uuid'
SG_RULE_FAKE_ID = 'sg_rule_fake_uuid'
+
+
class OpenDaylightConfigBase(test_plugin.Ml2PluginV2TestCase):
    """Base test case wiring up the opendaylight ML2 driver configuration."""

    def setUp(self):
        super(OpenDaylightConfigBase, self).setUp()
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'], 'ml2')
        # Point the driver at a dummy local controller endpoint.
        for option, value in (('url', 'http://127.0.0.1:9999'),
                              ('username', 'someuser'),
                              ('password', 'somepass')):
            config.cfg.CONF.set_override(option, value, 'ml2_odl')
+
+
class OpenDaylightTestCase(OpenDaylightConfigBase):
    """Base case for the v2 driver: journal thread and REST client mocked."""

    def setUp(self):
        super(OpenDaylightTestCase, self).setUp()
        self.port_create_status = 'DOWN'
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        # Keep the background journal sync thread from actually starting.
        mock.patch.object(journal.OpendaylightJournalThread,
                          'start_odl_sync_thread').start()
        self.mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                               'sendjson').start()
        self.mock_sendjson.side_effect = self.check_sendjson

    def check_sendjson(self, method, urlpath, obj):
        # Every request made by the driver must use a relative URL path.
        self.assertFalse(urlpath.startswith("http://"))
+
+
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
    """Verify that ml2_odl options are validated when the plugin starts."""

    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
                    password='somepass'):
        # Enable the opendaylight mechanism driver, then apply the given
        # connection credentials to the ml2_odl option group.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'],
                                     'ml2')
        for option, value in (('url', url),
                              ('username', username),
                              ('password', password)):
            config.cfg.CONF.set_override(option, value, 'ml2_odl')

    def _test_missing_config(self, **kwargs):
        # Any required option set to None must abort plugin construction.
        self._set_config(**kwargs)
        self.assertRaises(config.cfg.RequiredOptError,
                          plugin.Ml2Plugin)

    def test_valid_config(self):
        self._set_config()
        plugin.Ml2Plugin()

    def test_missing_url_raises_exception(self):
        self._test_missing_config(url=None)

    def test_missing_username_raises_exception(self):
        self._test_missing_config(username=None)

    def test_missing_password_raises_exception(self):
        self._test_missing_config(password=None)
+
+
class OpenDaylightMechanismTestBasicGet(test_plugin.TestMl2BasicGet,
                                        OpenDaylightTestCase):
    """Run the stock ML2 basic-GET test suite against the v2 ODL driver."""
    pass
+
+
class OpenDaylightMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2,
                                          OpenDaylightTestCase):
    """Run the stock ML2 network test suite against the v2 ODL driver."""
    pass
+
+
class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestMl2SubnetsV2,
                                         OpenDaylightTestCase):
    """Run the stock ML2 subnet test suite against the v2 ODL driver."""
    pass
+
+
class OpenDaylightMechanismTestPortsV2(test_plugin.TestMl2PortsV2,
                                       OpenDaylightTestCase):
    """Run the stock ML2 port test suite against the v2 ODL driver."""
    pass
+
+
class DataMatcher(object):
    """Equality matcher for the JSON body sent to the ODL controller.

    Builds the expected resource dictionary from the driver ``context``
    (security groups and rules arrive as plain dicts, other resources as
    driver contexts) and applies the common ODL attribute filter, so the
    instance can be compared against the serialized request payload in
    mock call assertions.
    """

    def __init__(self, operation, object_type, context):
        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
            self._data = context[object_type].copy()
        else:
            self._data = context.current.copy()
        self._object_type = object_type
        filters.filter_for_odl(object_type, operation, self._data)

    def __eq__(self, s):
        # 's' is the JSON string actually handed to requests.request().
        data = jsonutils.loads(s)
        return self._data == data[self._object_type]

    def __ne__(self, s):
        return not self.__eq__(s)

    def __repr__(self):
        # Show the expected payload in mock assertion failures instead of
        # the default '<DataMatcher object at 0x...>' address.
        return '%s(%r)' % (type(self).__name__, self._data)
+
+
class AttributeDict(dict):
    """A dict whose entries are also reachable as attributes.

    Binding ``__dict__`` to the mapping itself keeps key access and
    attribute access as two views of the same underlying data.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self
+
+
class OpenDaylightMechanismDriverTestCase(OpenDaylightConfigBase):
    """Journal-based (v2) driver tests: precommit rows and thread replay.

    Each precommit call writes a journal row; the journal thread then
    replays pending rows against a mocked requests layer.  Rows created
    by one test are removed again in _db_cleanup.
    """

    def setUp(self):
        super(OpenDaylightMechanismDriverTestCase, self).setUp()
        self.db_session = neutron_db_api.get_session()
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        # Keep the background sync thread from starting; tests drive the
        # journal synchronously via run_sync_thread(exit_after_run=True).
        self.mock_sync_thread = mock.patch.object(
            journal.OpendaylightJournalThread, 'start_odl_sync_thread').start()
        self.mech.initialize()
        self.thread = journal.OpendaylightJournalThread()
        self.addCleanup(self._db_cleanup)

    @staticmethod
    def _get_mock_network_operation_context():
        # Minimal network dict as seen by ML2 postcommit handlers.
        current = {'status': 'ACTIVE',
                   'subnets': [],
                   'name': 'net1',
                   'provider:physical_network': None,
                   'admin_state_up': True,
                   'tenant_id': 'test-tenant',
                   'provider:network_type': 'local',
                   'router:external': False,
                   'shared': False,
                   'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'provider:segmentation_id': None}
        context = mock.Mock(current=current)
        context._plugin_context.session = neutron_db_api.get_session()
        return context

    @staticmethod
    def _get_mock_subnet_operation_context():
        # Subnet belonging to the fake network above.
        current = {'ipv6_ra_mode': None,
                   'allocation_pools': [{'start': '10.0.0.2',
                                         'end': '10.0.1.254'}],
                   'host_routes': [],
                   'ipv6_address_mode': None,
                   'cidr': '10.0.0.0/23',
                   'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
                   'name': '',
                   'enable_dhcp': True,
                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'tenant_id': 'test-tenant',
                   'dns_nameservers': [],
                   'gateway_ip': '10.0.0.1',
                   'ip_version': 4,
                   'shared': False}
        context = mock.Mock(current=current)
        context._plugin_context.session = neutron_db_api.get_session()
        return context

    @staticmethod
    def _get_mock_port_operation_context():
        # Port contexts are built by hand (AttributeDict) because the
        # filter code reaches into _plugin / _network_context attributes.
        current = {'status': 'DOWN',
                   'binding:host_id': '',
                   'allowed_address_pairs': [],
                   'device_owner': 'fake_owner',
                   'binding:profile': {},
                   'fixed_ips': [{
                       'subnet_id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839'}],
                   'id': '83d56c48-e9b8-4dcf-b3a7-0813bb3bd940',
                   'security_groups': [SECURITY_GROUP],
                   'device_id': 'fake_device',
                   'name': '',
                   'admin_state_up': True,
                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'tenant_id': 'test-tenant',
                   'binding:vif_details': {},
                   'binding:vnic_type': 'normal',
                   'binding:vif_type': 'unbound',
                   'mac_address': '12:34:56:78:21:b6'}
        _network = OpenDaylightMechanismDriverTestCase.\
            _get_mock_network_operation_context().current
        _plugin = manager.NeutronManager.get_plugin()
        _plugin.get_security_group = mock.Mock(return_value=SECURITY_GROUP)
        _plugin.get_port = mock.Mock(return_value=current)
        _plugin.get_network = mock.Mock(return_value=_network)
        _plugin_context_mock = {'session': neutron_db_api.get_session()}
        _network_context_mock = {'_network': _network}
        context = {'current': AttributeDict(current),
                   '_plugin': _plugin,
                   '_plugin_context': AttributeDict(_plugin_context_mock),
                   '_network_context': AttributeDict(_network_context_mock)}
        return AttributeDict(context)

    @staticmethod
    def _get_mock_security_group_operation_context():
        # Security groups arrive via the callback path as plain dicts.
        context = {odl_const.ODL_SG: {'name': 'test_sg',
                                      'id': SG_FAKE_ID}}
        return context

    @staticmethod
    def _get_mock_security_group_rule_operation_context():
        context = {odl_const.ODL_SG_RULE: {'security_group_id': SG_FAKE_ID,
                                           'id': SG_RULE_FAKE_ID}}
        return context

    @classmethod
    def _get_mock_operation_context(cls, object_type):
        # Dispatch to the per-resource context factory above.
        getter = getattr(cls, '_get_mock_%s_operation_context' % object_type)
        return getter()

    # Canonical reason strings raised for each mocked HTTP status.
    _status_code_msgs = {
        200: '',
        201: '',
        204: '',
        400: '400 Client Error: Bad Request',
        401: '401 Client Error: Unauthorized',
        403: '403 Client Error: Forbidden',
        404: '404 Client Error: Not Found',
        409: '409 Client Error: Conflict',
        501: '501 Server Error: Not Implemented',
        503: '503 Server Error: Service Unavailable',
    }

    def _db_cleanup(self):
        # Remove every journal row this test created.
        rows = db.get_all_db_rows(self.db_session)
        for row in rows:
            db.delete_row(self.db_session, row=row)

    @classmethod
    def _get_mock_request_response(cls, status_code):
        # Fake requests response: raise_for_status no-ops below 400 and
        # raises HTTPError otherwise.
        response = mock.Mock(status_code=status_code)
        response.raise_for_status = mock.Mock() if status_code < 400 else (
            mock.Mock(side_effect=requests.exceptions.HTTPError(
                cls._status_code_msgs[status_code])))
        return response

    def _test_operation(self, method, status_code, expected_calls,
                        *args, **kwargs):
        """Run `method` (the journal sync entry point) against a mocked
        HTTP layer and check the last request plus the call count."""
        request_response = self._get_mock_request_response(status_code)
        with mock.patch('requests.request',
                        return_value=request_response) as mock_method:
            method(exit_after_run=True)

        if expected_calls:
            mock_method.assert_called_with(
                headers={'Content-Type': 'application/json'},
                auth=(config.cfg.CONF.ml2_odl.username,
                      config.cfg.CONF.ml2_odl.password),
                timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs)
        self.assertEqual(expected_calls, mock_method.call_count)

    def _call_operation_object(self, operation, object_type):
        """Invoke the driver entry point that records a journal row:
        sync_from_callback for SG/SG-rule, *_precommit otherwise."""
        context = self._get_mock_operation_context(object_type)

        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
            res_type = [rt for rt in callback._RESOURCE_MAPPING.values()
                        if rt.singular == object_type][0]
            self.mech.sync_from_callback(operation, res_type,
                                         context[object_type]['id'], context)
        else:
            method = getattr(self.mech, '%s_%s_precommit' % (operation,
                                                             object_type))
            method(context)

    def _test_operation_object(self, operation, object_type):
        # A precommit call must leave exactly one matching pending row.
        self._call_operation_object(operation, object_type)

        context = self._get_mock_operation_context(object_type)
        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
        self.assertEqual(operation, row['operation'])
        self.assertEqual(object_type, row['object_type'])
        self.assertEqual(context.current['id'], row['object_uuid'])

    def _test_thread_processing(self, operation, object_type,
                                expected_calls=1):
        """Record one journal row, replay the journal thread once, and
        verify the HTTP verb/URL/payload it produced."""
        http_requests = {odl_const.ODL_CREATE: 'post',
                         odl_const.ODL_UPDATE: 'put',
                         odl_const.ODL_DELETE: 'delete'}
        status_codes = {odl_const.ODL_CREATE: requests.codes.created,
                        odl_const.ODL_UPDATE: requests.codes.ok,
                        odl_const.ODL_DELETE: requests.codes.no_content}

        http_request = http_requests[operation]
        status_code = status_codes[operation]

        self._call_operation_object(operation, object_type)

        context = self._get_mock_operation_context(object_type)
        # URL path segments use dashes where the type name uses underscores.
        url_object_type = object_type.replace('_', '-')
        if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
            if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
                uuid = context[object_type]['id']
            else:
                uuid = context.current['id']
            url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, url_object_type,
                                 uuid)
        else:
            url = '%s/%ss' % (config.cfg.CONF.ml2_odl.url, url_object_type)

        if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]:
            kwargs = {
                'url': url,
                'data': DataMatcher(operation, object_type, context)}
        else:
            kwargs = {'url': url, 'data': None}
        with mock.patch.object(self.thread.event, 'wait',
                               return_value=False):
            self._test_operation(self.thread.run_sync_thread, status_code,
                                 expected_calls, http_request, **kwargs)

    def _test_object_type(self, object_type):
        # Add and process create request.
        self._test_thread_processing(odl_const.ODL_CREATE, object_type)
        rows = db.get_all_db_rows_by_state(self.db_session,
                                           odl_const.COMPLETED)
        self.assertEqual(1, len(rows))

        # Add and process update request. Adds to database.
        self._test_thread_processing(odl_const.ODL_UPDATE, object_type)
        rows = db.get_all_db_rows_by_state(self.db_session,
                                           odl_const.COMPLETED)
        self.assertEqual(2, len(rows))

        # Add and process update request. Adds to database.
        self._test_thread_processing(odl_const.ODL_DELETE, object_type)
        rows = db.get_all_db_rows_by_state(self.db_session,
                                           odl_const.COMPLETED)
        self.assertEqual(3, len(rows))

    def _test_object_type_pending_network(self, object_type):
        # Create a network (creates db row in pending state).
        self._call_operation_object(odl_const.ODL_CREATE,
                                    odl_const.ODL_NETWORK)

        # Create object_type database row and process. This results in both
        # the object_type and network rows being processed.
        self._test_thread_processing(odl_const.ODL_CREATE, object_type,
                                     expected_calls=2)

        # Verify both rows are now marked as completed.
        rows = db.get_all_db_rows_by_state(self.db_session,
                                           odl_const.COMPLETED)
        self.assertEqual(2, len(rows))

    def _test_object_type_processing_network(self, object_type):
        self._test_object_operation_pending_another_object_operation(
            object_type, odl_const.ODL_CREATE, odl_const.ODL_NETWORK,
            odl_const.ODL_CREATE)

    def _test_object_operation_pending_object_operation(
            self, object_type, operation, pending_operation):
        self._test_object_operation_pending_another_object_operation(
            object_type, operation, object_type, pending_operation)

    def _test_object_operation_pending_another_object_operation(
            self, object_type, operation, pending_type, pending_operation):
        # Create the object_type (creates db row in pending state).
        self._call_operation_object(pending_operation,
                                    pending_type)

        # Get pending row and mark as processing so that
        # this row will not be processed by journal thread.
        row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
        db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)

        # Create the object_type database row and process.
        # Verify that object request is not processed because the
        # dependent object operation has not been marked as 'completed'.
        self._test_thread_processing(operation,
                                     object_type,
                                     expected_calls=0)

        # Verify that all rows are still in the database.
        rows = db.get_all_db_rows_by_state(self.db_session,
                                           odl_const.PROCESSING)
        self.assertEqual(1, len(rows))
        rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
        self.assertEqual(1, len(rows))

    def _test_parent_delete_pending_child_delete(self, parent, child):
        self._test_object_operation_pending_another_object_operation(
            parent, odl_const.ODL_DELETE, child, odl_const.ODL_DELETE)

    def _test_cleanup_processing_rows(self, last_retried, expected_state):
        # Create a dummy network (creates db row in pending state).
        self._call_operation_object(odl_const.ODL_CREATE,
                                    odl_const.ODL_NETWORK)

        # Get pending row and mark as processing and update
        # the last_retried time
        row = db.get_all_db_rows_by_state(self.db_session,
                                          odl_const.PENDING)[0]
        row.last_retried = last_retried
        db.update_db_row_state(self.db_session, row, odl_const.PROCESSING)

        # Test if the cleanup marks this in the desired state
        # based on the last_retried timestamp
        cleanup.JournalCleanup().cleanup_processing_rows(self.db_session)

        # Verify that the Db row is in the desired state
        rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
        self.assertEqual(1, len(rows))

    def test_driver(self):
        # Every operation/type combination must record a journal row.
        for operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE,
                          odl_const.ODL_DELETE]:
            for object_type in [odl_const.ODL_NETWORK, odl_const.ODL_SUBNET,
                                odl_const.ODL_PORT]:
                self._test_operation_object(operation, object_type)

    def test_port_precommit_no_tenant(self):
        context = self._get_mock_operation_context(odl_const.ODL_PORT)
        context.current['tenant_id'] = ''

        method = getattr(self.mech, 'create_port_precommit')
        method(context)

        # Verify that the Db row has a tenant
        rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
        self.assertEqual(1, len(rows))
        _network = OpenDaylightMechanismDriverTestCase.\
            _get_mock_network_operation_context().current
        self.assertEqual(_network['tenant_id'], rows[0]['data']['tenant_id'])

    def test_network(self):
        self._test_object_type(odl_const.ODL_NETWORK)

    def test_network_update_pending_network_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_NETWORK, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)

    def test_network_delete_pending_network_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_CREATE)

    def test_network_delete_pending_network_update(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)

    def test_network_delete_pending_subnet_delete(self):
        self._test_parent_delete_pending_child_delete(
            odl_const.ODL_NETWORK, odl_const.ODL_SUBNET)

    def test_network_delete_pending_port_delete(self):
        self._test_parent_delete_pending_child_delete(
            odl_const.ODL_NETWORK, odl_const.ODL_PORT)

    def test_subnet(self):
        self._test_object_type(odl_const.ODL_SUBNET)

    def test_subnet_update_pending_subnet_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_SUBNET, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)

    def test_subnet_delete_pending_subnet_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_CREATE)

    def test_subnet_delete_pending_subnet_update(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)

    def test_subnet_pending_network(self):
        self._test_object_type_pending_network(odl_const.ODL_SUBNET)

    def test_subnet_processing_network(self):
        self._test_object_type_processing_network(odl_const.ODL_SUBNET)

    def test_subnet_delete_pending_port_delete(self):
        self._test_parent_delete_pending_child_delete(
            odl_const.ODL_SUBNET, odl_const.ODL_PORT)

    def test_port(self):
        self._test_object_type(odl_const.ODL_PORT)

    def test_port_update_pending_port_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_PORT, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)

    def test_port_delete_pending_port_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_CREATE)

    def test_port_delete_pending_port_update(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)

    def test_port_pending_network(self):
        self._test_object_type_pending_network(odl_const.ODL_PORT)

    def test_port_processing_network(self):
        self._test_object_type_processing_network(odl_const.ODL_PORT)

    def test_cleanup_processing_rows_time_not_expired(self):
        self._test_cleanup_processing_rows(datetime.datetime.utcnow(),
                                           odl_const.PROCESSING)

    def test_cleanup_processing_rows_time_expired(self):
        old_time = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
        self._test_cleanup_processing_rows(old_time, odl_const.PENDING)

    def test_thread_call(self):
        """Verify that the sync thread method is called."""

        # Create any object that would spin up the sync thread via the
        # decorator call_thread_on_end() used by all the event handlers.
        self._call_operation_object(odl_const.ODL_CREATE,
                                    odl_const.ODL_NETWORK)

        # Verify that the thread call was made.
        self.assertTrue(self.mock_sync_thread.called)

    def test_sg(self):
        self._test_object_type(odl_const.ODL_SG)

    def test_sg_rule(self):
        self._test_object_type(odl_const.ODL_SG_RULE)

    def _decrease_row_created_time(self, row):
        # Back-date a journal row by one hour to make it look stale.
        row.created_at -= datetime.timedelta(hours=1)
        self.db_session.merge(row)
        self.db_session.flush()

    def test_sync_multiple_updates(self):
        # add 2 updates
        for i in range(2):
            self._call_operation_object(odl_const.ODL_UPDATE,
                                        odl_const.ODL_NETWORK)

        # get the last update row
        last_row = db.get_all_db_rows(self.db_session)[-1]

        # change the last update created time
        self._decrease_row_created_time(last_row)

        # create 1 more operation to trigger the sync thread
        # verify that there are no calls to ODL controller, because the
        # first row was not valid (exit_after_run = true)
        self._test_thread_processing(odl_const.ODL_UPDATE,
                                     odl_const.ODL_NETWORK, expected_calls=0)

        # validate that all the rows are in 'pending' state
        # first row should be set back to 'pending' because it was not valid
        rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
        self.assertEqual(3, len(rows))
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py b/networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py
new file mode 100644
index 0000000..fb83a7b
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py
@@ -0,0 +1,475 @@
+# Copyright (c) 2015-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from os import path
+
+import mock
+from oslo_log import log
+from oslo_serialization import jsonutils
+import requests
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron.plugins.ml2 import driver_context
+from neutron_lib import constants as n_constants
+
+from networking_odl.common import cache
+from networking_odl.ml2 import mech_driver
+from networking_odl.ml2 import mech_driver_v2
+from networking_odl.ml2 import network_topology
+from networking_odl.tests import base
+
+
+LOG = log.getLogger(__name__)
+
+
+class TestNetworkTopologyManager(base.DietTestCase):
+
+ # pylint: disable=protected-access
+
+ # given valid and invalid segments
+ valid_segment = {
+ driver_api.ID: 'API_ID',
+ driver_api.NETWORK_TYPE: constants.TYPE_LOCAL,
+ driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+ driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+ invalid_segment = {
+ driver_api.ID: 'API_ID',
+ driver_api.NETWORK_TYPE: constants.TYPE_NONE,
+ driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+ driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+ segments_to_bind = [valid_segment, invalid_segment]
+
+ def setUp(self):
+ super(TestNetworkTopologyManager, self).setUp()
+ self.patch(network_topology.LOG, 'isEnabledFor', lambda level: True)
+ # patch given configuration
+ self.cfg = mocked_cfg = self.patch(network_topology.client, 'cfg')
+ mocked_cfg.CONF.ml2_odl.url =\
+ 'http://localhost:8181/controller/nb/v2/neutron'
+ mocked_cfg.CONF.ml2_odl.username = 'admin'
+ mocked_cfg.CONF.ml2_odl.password = 'admin'
+ mocked_cfg.CONF.ml2_odl.timeout = 5
+
+ @mock.patch.object(cache, 'LOG')
+ @mock.patch.object(network_topology, 'LOG')
+ def test_fetch_elements_by_host_with_no_entry(
+ self, network_topology_logger, cache_logger):
+ given_client = self.mock_client('ovs_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '192.168.0.1'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ client=given_client)
+
+ try:
+ next(given_network_topology._fetch_elements_by_host(
+ 'some_host_name'))
+ except ValueError as error:
+ cache_logger.warning.assert_called_once_with(
+ 'Error fetching values for keys: %r',
+ "'some_host_name', '127.0.0.1', '192.168.0.1'",
+ exc_info=(ValueError, error, mock.ANY))
+ network_topology_logger.exception.assert_called_once_with(
+ 'No such network topology elements for given host '
+ '%(host_name)r and given IPs: %(ip_addresses)s.',
+ {'ip_addresses': '127.0.0.1, 192.168.0.1',
+ 'host_name': 'some_host_name'})
+ else:
+ self.fail('Expected ValueError being raised.')
+
+ def test_fetch_element_with_ovs_entry(self):
+ given_client = self.mock_client('ovs_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ client=given_client)
+
+ elements = given_network_topology._fetch_elements_by_host(
+ 'some_host_name.')
+
+ self.assertEqual([
+ {'class':
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+ 'has_datapath_type_netdev': False,
+ 'host_addresses': ['10.237.214.247'],
+ 'support_vhost_user': False,
+ 'uuid': 'c4ad780f-8f91-4fa4-804e-dd16beb191e2',
+ 'valid_vif_types': [portbindings.VIF_TYPE_OVS]}],
+ [e.to_dict() for e in elements])
+
+ def test_fetch_elements_with_vhost_user_entry(self):
+ given_client = self.mock_client('vhostuser_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ client=given_client)
+
+ elements = given_network_topology._fetch_elements_by_host(
+ 'some_host_name.')
+
+ self.assertEqual([
+ {'class':
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+ 'has_datapath_type_netdev': True,
+ 'host_addresses': ['192.168.66.1'],
+ 'support_vhost_user': True,
+ 'uuid': 'c805d82d-a5d8-419d-bc89-6e3713ff9f6c',
+ 'valid_vif_types': [portbindings.VIF_TYPE_VHOST_USER,
+ portbindings.VIF_TYPE_OVS],
+ 'port_prefix': 'vhu',
+ 'vhostuser_socket_dir': '/var/run/openvswitch'}],
+ [e.to_dict() for e in elements])
+
+ def mock_get_addresses_by_name(self, ips):
+ utils = self.patch(
+ network_topology, 'utils',
+ mock.Mock(
+ get_addresses_by_name=mock.Mock(return_value=tuple(ips))))
+ return utils.get_addresses_by_name
+
+ def mock_client(self, topology_name=None):
+
+ mocked_client = mock.NonCallableMock(
+ specs=network_topology.NetworkTopologyClient)
+
+ if topology_name:
+ cached_file_path = path.join(path.dirname(__file__), topology_name)
+
+ with open(cached_file_path, 'rt') as fd:
+ topology = jsonutils.loads(str(fd.read()), encoding='utf-8')
+
+ mocked_client.get().json.return_value = topology
+
+ return mocked_client
+
+ def test_bind_port_from_mech_driver_with_ovs(self):
+
+ given_client = self.mock_client('ovs_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ vif_details={'some': 'detail'},
+ client=given_client)
+ self.patch(
+ network_topology, 'NetworkTopologyManager',
+ return_value=given_network_topology)
+
+ given_driver = mech_driver.OpenDaylightMechanismDriver()
+ given_driver.odl_drv = mech_driver.OpenDaylightDriver()
+ given_port_context = self.given_port_context()
+
+ # when port is bound
+ given_driver.bind_port(given_port_context)
+
+ # then context binding is setup with returned vif_type and valid
+ # segment api ID
+ given_port_context.set_binding.assert_called_once_with(
+ self.valid_segment[driver_api.ID], portbindings.VIF_TYPE_OVS,
+ {'some': 'detail'}, status=n_constants.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_from_mech_driver_with_vhostuser(self):
+
+ given_client = self.mock_client('vhostuser_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ vif_details={'some': 'detail'},
+ client=given_client)
+ self.patch(
+ network_topology, 'NetworkTopologyManager',
+ return_value=given_network_topology)
+
+ given_driver = mech_driver.OpenDaylightMechanismDriver()
+ given_driver.odl_drv = mech_driver.OpenDaylightDriver()
+ given_port_context = self.given_port_context()
+
+ # when port is bound
+ given_driver.bind_port(given_port_context)
+
+ expected_vif_details = {
+ 'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+ 'vhostuser_ovs_plug': True,
+ 'some': 'detail',
+ 'vhostuser_mode': 'client'}
+
+ # then context binding is setup with returned vif_type and valid
+ # segment api ID
+ given_port_context.set_binding.assert_called_once_with(
+ self.valid_segment[driver_api.ID],
+ portbindings.VIF_TYPE_VHOST_USER,
+ expected_vif_details, status=n_constants.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_from_mech_driver_v2_with_ovs(self):
+ given_client = self.mock_client('ovs_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ vif_details={'some': 'detail'},
+ client=given_client)
+ self.patch(
+ network_topology, 'NetworkTopologyManager',
+ return_value=given_network_topology)
+
+ given_driver = mech_driver_v2.OpenDaylightMechanismDriver()
+ given_port_context = self.given_port_context()
+
+ given_driver.initialize()
+ # when port is bound
+ given_driver.bind_port(given_port_context)
+
+ # then context binding is setup with returned vif_type and valid
+ # segment api ID
+ given_port_context.set_binding.assert_called_once_with(
+ self.valid_segment[driver_api.ID], portbindings.VIF_TYPE_OVS,
+ {'some': 'detail'}, status=n_constants.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_from_mech_driver_v2_with_vhostuser(self):
+ given_client = self.mock_client('vhostuser_topology.json')
+ self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
+ given_network_topology = network_topology.NetworkTopologyManager(
+ vif_details={'some': 'detail'},
+ client=given_client)
+ self.patch(
+ network_topology, 'NetworkTopologyManager',
+ return_value=given_network_topology)
+
+ given_driver = mech_driver_v2.OpenDaylightMechanismDriver()
+ given_driver._network_topology = given_network_topology
+ given_port_context = self.given_port_context()
+
+ given_driver.initialize()
+ # when port is bound
+ given_driver.bind_port(given_port_context)
+
+ expected_vif_details = {
+ 'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+ 'vhostuser_ovs_plug': True,
+ 'some': 'detail',
+ 'vhostuser_mode': 'client'}
+
+ # then context binding is setup with returned vif_type and valid
+ # segment api ID
+ given_port_context.set_binding.assert_called_once_with(
+ self.valid_segment[driver_api.ID],
+ portbindings.VIF_TYPE_VHOST_USER,
+ expected_vif_details, status=n_constants.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_with_vif_type_ovs(self):
+ given_topology = self._mock_network_topology(
+ 'ovs_topology.json', vif_details={'much': 'details'})
+ given_port_context = self.given_port_context()
+
+ # when port is bound
+ given_topology.bind_port(given_port_context)
+
+ # then context binding is set up with returned vif_type and valid
+ # segment api ID
+ given_port_context.set_binding.assert_called_once_with(
+ self.valid_segment[driver_api.ID], portbindings.VIF_TYPE_OVS,
+ {'much': 'details'}, status=n_constants.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_with_vif_type_vhost_user(self):
+ given_topology = self._mock_network_topology(
+ 'vhostuser_topology.json', vif_details={'much': 'details'})
+ given_port_context = self.given_port_context()
+
+ # when port is bound
+ given_topology.bind_port(given_port_context)
+
+ # then context binding is set up with returned vif_type and valid
+ # segment api ID
+ given_port_context.set_binding.assert_called_once_with(
+ self.valid_segment[driver_api.ID],
+ portbindings.VIF_TYPE_VHOST_USER,
+ {'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+ 'vhostuser_ovs_plug': True, 'vhostuser_mode': 'client',
+ 'much': 'details'},
+ status=n_constants.PORT_STATUS_ACTIVE)
+
+ @mock.patch.object(network_topology, 'LOG')
+ def test_bind_port_without_valid_segment(self, logger):
+ given_topology = self._mock_network_topology('ovs_topology.json')
+ given_port_context = self.given_port_context(
+ given_segments=[self.invalid_segment])
+
+ # when port is bound
+ given_topology.bind_port(given_port_context)
+
+ self.assertFalse(given_port_context.set_binding.called)
+ logger.exception.assert_called_once_with(
+ 'Network topology element has failed binding port:\n%(element)s',
+ {'element': mock.ANY})
+ logger.error.assert_called_once_with(
+ 'Unable to bind port element for given host and valid VIF types:\n'
+ '\thostname: %(host_name)s\n'
+ '\tvalid VIF types: %(valid_vif_types)s',
+ {'host_name': 'some_host', 'valid_vif_types': 'vhostuser, ovs'})
+
+ def _mock_network_topology(self, given_topology, vif_details=None):
+ self.mock_get_addresses_by_name(
+ ['127.0.0.1', '10.237.214.247', '192.168.66.1'])
+ return network_topology.NetworkTopologyManager(
+ client=self.mock_client(given_topology),
+ vif_details=vif_details)
+
+ def given_port_context(self, given_segments=None):
+ # given NetworkContext
+ network = mock.MagicMock(spec=driver_api.NetworkContext)
+
+ if given_segments is None:
+ given_segments = self.segments_to_bind
+
+ # given port context
+ return mock.MagicMock(
+ spec=driver_context.PortContext,
+ current={'id': 'CURRENT_CONTEXT_ID'},
+ host='some_host',
+ segments_to_bind=given_segments,
+ network=network,
+ _new_bound_segment=self.valid_segment)
+
+ NETOWORK_TOPOLOGY_URL =\
+ 'http://localhost:8181/'\
+ 'restconf/operational/network-topology:network-topology/'
+
+ def mock_request_network_topology(self, file_name):
+ cached_file_path = path.join(
+ path.dirname(__file__), file_name + '.json')
+
+ if path.isfile(cached_file_path):
+ LOG.debug('Loading topology from file: %r', cached_file_path)
+ with open(cached_file_path, 'rt') as fd:
+ topology = jsonutils.loads(str(fd.read()), encoding='utf-8')
+ else:
+ LOG.debug(
+ 'Getting topology from ODL: %r', self.NETOWORK_TOPOLOGY_URL)
+ request = requests.get(
+ self.NETOWORK_TOPOLOGY_URL, auth=('admin', 'admin'),
+ headers={'Content-Type': 'application/json'})
+ request.raise_for_status()
+
+ with open(cached_file_path, 'wt') as fd:
+ LOG.debug('Saving topology to file: %r', cached_file_path)
+ topology = request.json()
+ jsonutils.dump(
+ topology, fd, sort_keys=True, indent=4,
+ separators=(',', ': '))
+
+ mocked_request = self.patch(
+ mech_driver.odl_client.requests, 'request',
+ return_value=mock.MagicMock(
+ spec=requests.Response,
+ json=mock.MagicMock(return_value=topology)))
+
+ return mocked_request
+
+
+class TestNetworkTopologyClient(base.DietTestCase):
+
+ given_host = 'given.host'
+ given_port = 1234
+ given_url_with_port = 'http://{}:{}/'.format(
+ given_host, given_port)
+ given_url_without_port = 'http://{}/'.format(given_host)
+ given_username = 'GIVEN_USERNAME'
+ given_password = 'GIVEN_PASSWORD'
+ given_timeout = 20
+
+ def given_client(
+ self, url=None, username=None, password=None, timeout=None):
+ return network_topology.NetworkTopologyClient(
+ url=url or self.given_url_with_port,
+ username=username or self.given_username,
+ password=password or self.given_password,
+ timeout=timeout or self.given_timeout)
+
+ def test_constructor(self):
+ # When client is created
+ rest_client = network_topology.NetworkTopologyClient(
+ url=self.given_url_with_port,
+ username=self.given_username,
+ password=self.given_password,
+ timeout=self.given_timeout)
+
+ self.assertEqual(
+ self.given_url_with_port +
+ 'restconf/operational/network-topology:network-topology',
+ rest_client.url)
+ self.assertEqual(
+ (self.given_username, self.given_password), rest_client.auth)
+ self.assertEqual(self.given_timeout, rest_client.timeout)
+
+ def test_request_with_port(self):
+ # Given rest client and used 'requests' module
+ given_client = self.given_client()
+ mocked_requests_module = self.mocked_requests()
+
+ # When a request is performed
+ result = given_client.request(
+ 'GIVEN_METHOD', 'given/path', 'GIVEN_DATA')
+
+ # Then request method is called
+ mocked_requests_module.request.assert_called_once_with(
+ 'GIVEN_METHOD',
+ url='http://given.host:1234/restconf/operational/' +
+ 'network-topology:network-topology/given/path',
+ auth=(self.given_username, self.given_password),
+ data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
+ timeout=self.given_timeout)
+
+ # Then request method result is returned
+ self.assertIs(mocked_requests_module.request.return_value, result)
+
+ def test_request_without_port(self):
+ # Given rest client and used 'requests' module
+ given_client = self.given_client(url=self.given_url_without_port)
+ mocked_requests_module = self.mocked_requests()
+
+ # When a request is performed
+ result = given_client.request(
+ 'GIVEN_METHOD', 'given/path', 'GIVEN_DATA')
+
+ # Then request method is called
+ mocked_requests_module.request.assert_called_once_with(
+ 'GIVEN_METHOD',
+ url='http://given.host/restconf/operational/' +
+ 'network-topology:network-topology/given/path',
+ auth=(self.given_username, self.given_password),
+ data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
+ timeout=self.given_timeout)
+
+ # Then request method result is returned
+ self.assertIs(mocked_requests_module.request.return_value, result)
+
+ def test_get(self):
+ # Given rest client and used 'requests' module
+ given_client = self.given_client()
+ mocked_requests_module = self.mocked_requests()
+
+ # When a request is performed
+ result = given_client.get('given/path', 'GIVEN_DATA')
+
+ # Then request method is called
+ mocked_requests_module.request.assert_called_once_with(
+ 'get',
+ url='http://given.host:1234/restconf/operational/' +
+ 'network-topology:network-topology/given/path',
+ auth=(self.given_username, self.given_password),
+ data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
+ timeout=self.given_timeout)
+
+ # Then request method result is returned
+ self.assertIs(mocked_requests_module.request.return_value, result)
+
+ def mocked_requests(self):
+ return self.patch(network_topology.client, 'requests')
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py b/networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py
new file mode 100644
index 0000000..228154d
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from os import path
+
+import mock
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron.plugins.ml2 import driver_context
+from neutron_lib import constants as n_constants
+
+from networking_odl.ml2 import ovsdb_topology
+from networking_odl.tests import base
+
+
+LOG = log.getLogger(__name__)
+
+
+class TestOvsdbTopologyParser(base.DietTestCase):
+
+ def test_parse_network_topology_ovs(self):
+ given_parser = ovsdb_topology.OvsdbNetworkTopologyParser()
+ given_topology = self.load_network_topology('ovs_topology.json')
+
+ # when parse topology
+ elements = list(given_parser.parse_network_topology(given_topology))
+
+ # then parser yields one element supporting only OVS vif type
+ self.assertEqual(
+ [{'class':
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+ 'has_datapath_type_netdev': False,
+ 'host_addresses': ['10.237.214.247'],
+ 'support_vhost_user': False,
+ 'uuid': 'c4ad780f-8f91-4fa4-804e-dd16beb191e2',
+ 'valid_vif_types': [portbindings.VIF_TYPE_OVS]}],
+ [e.to_dict() for e in elements])
+
+ def test_parse_network_topology_vhostuser(self):
+ given_parser = ovsdb_topology.OvsdbNetworkTopologyParser()
+ given_topology = self.load_network_topology('vhostuser_topology.json')
+
+ # when parse topology
+ elements = list(given_parser.parse_network_topology(given_topology))
+
+ # then parser yields one element supporting VHOSTUSER and OVS vif types
+ self.assertEqual(
+ [{'class':
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+ 'has_datapath_type_netdev': True,
+ 'host_addresses': ['192.168.66.1'],
+ 'port_prefix': 'vhu',
+ 'support_vhost_user': True,
+ 'uuid': 'c805d82d-a5d8-419d-bc89-6e3713ff9f6c',
+ 'valid_vif_types': [portbindings.VIF_TYPE_VHOST_USER,
+ portbindings.VIF_TYPE_OVS],
+ 'vhostuser_socket_dir': '/var/run/openvswitch'}],
+ [e.to_dict() for e in elements])
+
+ def load_network_topology(self, file_name):
+ file_path = path.join(path.dirname(__file__), file_name)
+ LOG.debug('Loading topology from file: %r', file_path)
+ with open(file_path, 'rt') as fd:
+ return jsonutils.loads(str(fd.read()), encoding='utf-8')
+
+
+class TestOvsdbNetworkingTopologyElement(base.DietTestCase):
+
+ # given valid and invalid segments
+ VALID_SEGMENT = {
+ driver_api.ID: 'API_ID',
+ driver_api.NETWORK_TYPE: constants.TYPE_LOCAL,
+ driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+ driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+ INVALID_SEGMENT = {
+ driver_api.ID: 'API_ID',
+ driver_api.NETWORK_TYPE: constants.TYPE_NONE,
+ driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+ driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+ segments_to_bind = [INVALID_SEGMENT, VALID_SEGMENT]
+
+ def given_element(self, uuid='some_uuid', **kwargs):
+ return ovsdb_topology.OvsdbNetworkTopologyElement(uuid=uuid, **kwargs)
+
+ def test_valid_vif_types_with_no_positive_value(self):
+ given_element = self.given_element(
+ has_datapath_type_netdev=False, support_vhost_user=False)
+ valid_vif_types = given_element.valid_vif_types
+ self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
+
+ def test_valid_vif_types_with_datapath_type_netdev(self):
+ given_element = self.given_element(
+ has_datapath_type_netdev=True, support_vhost_user=False)
+ valid_vif_types = given_element.valid_vif_types
+ self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
+
+ def test_valid_vif_types_with_support_vhost_user(self):
+ given_element = self.given_element(
+ has_datapath_type_netdev=False, support_vhost_user=True)
+ valid_vif_types = given_element.valid_vif_types
+ self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
+
+ def test_valid_vif_types_with_all_positive_values(self):
+ given_element = self.given_element(
+ has_datapath_type_netdev=True, support_vhost_user=True)
+ valid_vif_types = given_element.valid_vif_types
+ self.assertEqual(
+ [portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS],
+ valid_vif_types)
+
+ def test_to_json_ovs(self):
+ given_element = self.given_element(
+ has_datapath_type_netdev=False, support_vhost_user=True,
+ remote_ip='192.168.99.33')
+ json = given_element.to_json()
+ self.assertEqual(
+ {'class':
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+ 'uuid': 'some_uuid',
+ 'host_addresses': ['192.168.99.33'],
+ 'has_datapath_type_netdev': False,
+ 'support_vhost_user': True,
+ 'valid_vif_types': [portbindings.VIF_TYPE_OVS]},
+ jsonutils.loads(json))
+
+ def test_to_json_vhost_user(self):
+ given_element = self.given_element(
+ has_datapath_type_netdev=True, support_vhost_user=True,
+ remote_ip='192.168.99.66')
+ json = given_element.to_json()
+ self.assertEqual(
+ {'class':
+ 'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+ 'uuid': 'some_uuid',
+ 'host_addresses': ['192.168.99.66'],
+ 'has_datapath_type_netdev': True,
+ 'support_vhost_user': True,
+ 'valid_vif_types':
+ [portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS],
+ 'port_prefix': 'vhu',
+ 'vhostuser_socket_dir': '/var/run/openvswitch'},
+ jsonutils.loads(json))
+
+ def test_set_attr_with_invalid_name(self):
+ element = self.given_element()
+ self.assertRaises(
+ AttributeError, lambda: setattr(element, 'invalid_attribute', 10))
+
+ def test_is_valid_segment(self):
+ """Validate the _check_segment method."""
+
+ # given driver and all network types
+ given_element = self.given_element(
+ has_datapath_type_netdev=True, support_vhost_user=True,
+ remote_ip='192.168.99.66')
+ all_network_types = [constants.TYPE_FLAT, constants.TYPE_GRE,
+ constants.TYPE_LOCAL, constants.TYPE_VXLAN,
+ constants.TYPE_VLAN, constants.TYPE_NONE]
+
+ # when checking segments network type
+ valid_types = {
+ network_type
+ for network_type in all_network_types
+ if given_element._is_valid_segment(
+ {driver_api.NETWORK_TYPE: network_type})}
+
+ # then true is returned only for valid network types
+ self.assertEqual({
+ constants.TYPE_LOCAL, constants.TYPE_GRE, constants.TYPE_VXLAN,
+ constants.TYPE_VLAN}, valid_types)
+
+ def test_bind_port_with_vif_type_ovs(self):
+ given_port_context = self.given_port_context(
+ given_segments=[self.INVALID_SEGMENT, self.VALID_SEGMENT])
+ given_element = self.given_element('some_uuid')
+
+ # When bind port
+ given_element.bind_port(
+ port_context=given_port_context,
+ vif_type=portbindings.VIF_TYPE_OVS,
+ vif_details={'some_details': None})
+
+ given_port_context.set_binding.assert_called_once_with(
+ self.VALID_SEGMENT[driver_api.ID], portbindings.VIF_TYPE_OVS,
+ {'some_details': None}, status=n_constants.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_with_vif_type_vhost_user(self):
+ given_port_context = self.given_port_context(
+ given_segments=[self.INVALID_SEGMENT, self.VALID_SEGMENT])
+ given_element = self.given_element('some_uuid')
+
+ # When bind port
+ given_element.bind_port(
+ port_context=given_port_context,
+ vif_type=portbindings.VIF_TYPE_VHOST_USER,
+ vif_details={'some_details': None})
+
+ given_port_context.set_binding.assert_called_once_with(
+ self.VALID_SEGMENT[driver_api.ID],
+ portbindings.VIF_TYPE_VHOST_USER,
+ {'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+ 'some_details': None, 'vhostuser_ovs_plug': True,
+ 'vhostuser_mode': 'client'},
+ status=n_constants.PORT_STATUS_ACTIVE)
+
+ @mock.patch.object(ovsdb_topology, 'LOG')
+ def test_bind_port_without_valid_segment(self, logger):
+ given_port_context = self.given_port_context(
+ given_segments=[self.INVALID_SEGMENT])
+ given_element = self.given_element('some_uuid')
+
+ # when port is bound
+ self.assertRaises(
+ ValueError, lambda: given_element.bind_port(
+ port_context=given_port_context,
+ vif_type=portbindings.VIF_TYPE_OVS,
+ vif_details={'some_details': None}))
+
+ self.assertFalse(given_port_context.set_binding.called)
+
+ def given_port_context(self, given_segments):
+ # given NetworkContext
+ network = mock.MagicMock(spec=driver_api.NetworkContext)
+
+ # given port context
+ return mock.MagicMock(
+ spec=driver_context.PortContext,
+ current={'id': 'CURRENT_CONTEXT_ID'},
+ segments_to_bind=given_segments,
+ network=network)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py b/networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py
new file mode 100644
index 0000000..35ae9ec
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from networking_odl.ml2 import legacy_port_binding
+from networking_odl.ml2 import port_binding
+from networking_odl.tests import base
+
+
+class TestPortBindingManager(base.DietTestCase):
+
+ def test_create(self):
+ mgr = port_binding.PortBindingManager.create(
+ name="legacy-port-binding")
+ self.assertEqual("legacy-port-binding", mgr.name)
+ self.assertIsInstance(mgr.controller,
+ legacy_port_binding.LegacyPortBindingManager)
+
+ def test_create_with_nonexist_name(self):
+ self.assertRaises(AssertionError,
+ port_binding.PortBindingManager.create,
+ name="nonexist-port-binding")
+
+ @mock.patch.object(legacy_port_binding.LegacyPortBindingManager,
+ "bind_port")
+ def test_bind_port(self, mock_method):
+ port_context = mock.Mock()
+ mgr = port_binding.PortBindingManager.create(
+ name="legacy-port-binding")
+ mgr.controller.bind_port(port_context)
+ mock_method.assert_called_once_with(port_context)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py b/networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py
new file mode 100644
index 0000000..d69150c
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py
@@ -0,0 +1,334 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from copy import deepcopy
+import mock
+from os import path as os_path
+from string import Template
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import config
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2 import driver_context as ctx
+from neutron_lib import constants as n_const
+
+from networking_odl.ml2 import pseudo_agentdb_binding
+from networking_odl.tests import base
+
+AGENTDB_BINARY = 'neutron-odlagent-portbinding'
+L2_TYPE = "ODL L2"
+
+
+class TestPseudoAgentDBBindingController(base.DietTestCase):
+ """Test class for AgentDBPortBinding."""
+
+ # test data hostconfig and hostconfig-dbget
+ sample_odl_hconfigs = {"hostconfigs": {"hostconfig": [
+ {"host-id": "devstack",
+ "host-type": "ODL L2",
+ "config": """{"supported_vnic_types": [
+ {"vnic_type": "normal", "vif_type": "ovs",
+ "vif_details": {}}],
+ "allowed_network_types": [
+ "local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}"""}
+ ]}}
+
+    # Test data for string interpolation of substitutable identifiers
+ # e.g. $PORT_ID identifier in the configurations JSON string below shall
+ # be substituted with portcontext.current['id'] eliminating the check
+ # for specific vif_type making port-binding truly switch agnostic.
+ # Refer: Python string templates and interpolation (string.Template)
+ sample_hconf_str_tmpl_subs_vpp = {
+ "host": "devstack", # host-id in ODL JSON
+ "agent_type": "ODL L2", # host-type in ODL JSON
+ # config in ODL JSON
+ "configurations": """{"supported_vnic_types": [
+ {"vnic_type": "normal", "vif_type": "vhostuser",
+ "vif_details": {
+ "uuid": "TEST_UUID",
+ "has_datapath_type_netdev": true,
+ "support_vhost_user": true,
+ "port_prefix": "socket_",
+ "vhostuser_socket_dir": "/tmp",
+ "vhostuser_ovs_plug": true,
+ "vhostuser_mode": "server",
+ "vhostuser_socket":
+ "/tmp/socket_$PORT_ID"
+ }}],
+ "allowed_network_types": [
+ "local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}"""
+ }
+
+ sample_hconf_str_tmpl_subs_ovs = {
+ "host": "devstack", # host-id in ODL JSON
+ "agent_type": "ODL L2", # host-type in ODL JSON
+ # config in ODL JSON
+ "configurations": """{"supported_vnic_types": [
+ {"vnic_type": "normal", "vif_type": "vhostuser",
+ "vif_details": {
+ "uuid": "TEST_UUID",
+ "has_datapath_type_netdev": true,
+ "support_vhost_user": true,
+ "port_prefix": "vhu_",
+ "vhostuser_socket_dir": "/var/run/openvswitch",
+ "vhostuser_ovs_plug": true,
+ "vhostuser_mode": "client",
+ "vhostuser_socket":
+ "/var/run/openvswitch/vhu_$PORT_ID"
+ }}],
+ "allowed_network_types": [
+ "local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}"""
+ }
+
+ sample_hconf_str_tmpl_nosubs = {
+ "host": "devstack", # host-id in ODL JSON
+ "agent_type": "ODL L2", # host-type in ODL JSON
+ # config in ODL JSON
+ "configurations": """{"supported_vnic_types": [
+ {"vnic_type": "normal", "vif_type": "ovs",
+ "vif_details": {
+ "uuid": "TEST_UUID",
+ "has_datapath_type_netdev": true,
+ "support_vhost_user": true,
+ "port_prefix": "socket_",
+ "vhostuser_socket_dir": "/tmp",
+ "vhostuser_ovs_plug": true,
+ "vhostuser_mode": "server",
+ "vhostuser_socket":
+ "/var/run/openvswitch/PORT_NOSUBS"
+ }}],
+ "allowed_network_types": [
+ "local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}"""
+ }
+
+ # Test data for vanilla OVS
+ sample_hconfig_dbget_ovs = {"configurations": {"supported_vnic_types": [
+ {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_OVS,
+ "vif_details": {
+ "some_test_details": None
+ }}],
+ "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}}
+
+ # Test data for OVS-DPDK
+ sample_hconfig_dbget_ovs_dpdk = {"configurations": {
+ "supported_vnic_types": [{
+ "vnic_type": "normal",
+ "vif_type": portbindings.VIF_TYPE_VHOST_USER,
+ "vif_details": {
+ "uuid": "TEST_UUID",
+ "has_datapath_type_netdev": True,
+ "support_vhost_user": True,
+ "port_prefix": "vhu_",
+ # Assumption: /var/run mounted as tmpfs
+ "vhostuser_socket_dir": "/var/run/openvswitch",
+ "vhostuser_ovs_plug": True,
+ "vhostuser_mode": "client",
+ "vhostuser_socket": "/var/run/openvswitch/vhu_$PORT_ID"}}],
+ "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}}
+
+ # Test data for VPP
+ sample_hconfig_dbget_vpp = {"configurations": {"supported_vnic_types": [
+ {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_VHOST_USER,
+ "vif_details": {
+ "uuid": "TEST_UUID",
+ "has_datapath_type_netdev": True,
+ "support_vhost_user": True,
+ "port_prefix": "socket_",
+ "vhostuser_socket_dir": "/tmp",
+ "vhostuser_ovs_plug": True,
+ "vhostuser_mode": "server",
+ "vhostuser_socket": "/tmp/socket_$PORT_ID"
+ }}],
+ "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+ "bridge_mappings": {"physnet1": "br-ex"}}}
+
+ # test data valid and invalid segments
+ test_valid_segment = {
+ api.ID: 'API_ID',
+ api.NETWORK_TYPE: constants.TYPE_LOCAL,
+ api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+ api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+ test_invalid_segment = {
+ api.ID: 'API_ID',
+ api.NETWORK_TYPE: constants.TYPE_NONE,
+ api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+ api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+ def setUp(self):
+ """Setup test."""
+ super(TestPseudoAgentDBBindingController, self).setUp()
+
+ config.cfg.CONF.set_override('url',
+ 'http://localhost:8080'
+ '/controller/nb/v2/neutron', 'ml2_odl')
+
+ fake_agents_db = mock.MagicMock()
+ fake_agents_db.create_or_update_agent = mock.MagicMock()
+
+ self.mgr = pseudo_agentdb_binding.PseudoAgentDBBindingController(
+ db_plugin=fake_agents_db)
+
+ def test_make_hostconf_uri(self):
+ """test make uri."""
+ test_path = '/restconf/neutron:neutron/hostconfigs'
+ expected = "http://localhost:8080/restconf/neutron:neutron/hostconfigs"
+ test_uri = self.mgr._make_hostconf_uri(path=test_path)
+
+ self.assertEqual(expected, test_uri)
+
+ def test_update_agents_db(self):
+ """test agent update."""
+ self.mgr._update_agents_db(
+ hostconfigs=self.sample_odl_hconfigs['hostconfigs']['hostconfig'])
+ self.mgr.agents_db.create_or_update_agent.assert_called_once()
+
+ def test_is_valid_segment(self):
+ """Validate the _check_segment method."""
+ all_network_types = [constants.TYPE_FLAT, constants.TYPE_GRE,
+ constants.TYPE_LOCAL, constants.TYPE_VXLAN,
+ constants.TYPE_VLAN, constants.TYPE_NONE]
+
+ valid_types = {
+ network_type
+ for network_type in all_network_types
+ if self.mgr._is_valid_segment({api.NETWORK_TYPE: network_type}, {
+ 'allowed_network_types': [
+ constants.TYPE_LOCAL, constants.TYPE_GRE,
+ constants.TYPE_VXLAN, constants.TYPE_VLAN]})}
+
+ self.assertEqual({
+ constants.TYPE_LOCAL, constants.TYPE_GRE, constants.TYPE_VXLAN,
+ constants.TYPE_VLAN}, valid_types)
+
+ def test_bind_port_with_vif_type_ovs(self):
+ """test bind_port with vanilla ovs."""
+ port_context = self._fake_port_context(
+ fake_segments=[self.test_invalid_segment, self.test_valid_segment])
+
+ vif_type = portbindings.VIF_TYPE_OVS
+ vif_details = {'some_test_details': None}
+
+ self.mgr._hconfig_bind_port(
+ port_context, self.sample_hconfig_dbget_ovs)
+
+ port_context.set_binding.assert_called_once_with(
+ self.test_valid_segment[api.ID], vif_type,
+ vif_details, status=n_const.PORT_STATUS_ACTIVE)
+
+ def _set_pass_vif_details(self, port_context, vif_details):
+ """extract vif_details and update vif_details if needed."""
+ vhostuser_socket_dir = vif_details.get(
+ 'vhostuser_socket_dir', '/var/run/openvswitch')
+ port_spec = vif_details.get(
+ 'port_prefix', 'vhu_') + port_context.current['id']
+ socket_path = os_path.join(vhostuser_socket_dir, port_spec)
+ vif_details.update({portbindings.VHOST_USER_SOCKET: socket_path})
+
+ return vif_details
+
+ def test_bind_port_with_vif_type_vhost_user(self):
+ """test bind_port with ovs-dpdk."""
+ port_context = self._fake_port_context(
+ fake_segments=[self.test_invalid_segment, self.test_valid_segment],
+ host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_ovs)])
+
+ self.mgr.bind_port(port_context)
+
+ pass_vif_type = portbindings.VIF_TYPE_VHOST_USER
+ pass_vif_details = self.sample_hconfig_dbget_ovs_dpdk[
+ 'configurations']['supported_vnic_types'][0]['vif_details']
+ self._set_pass_vif_details(port_context, pass_vif_details)
+
+ port_context.set_binding.assert_called_once_with(
+ self.test_valid_segment[api.ID], pass_vif_type,
+ pass_vif_details, status=n_const.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_with_vif_type_vhost_user_vpp(self):
+ """test bind_port with vpp."""
+ port_context = self._fake_port_context(
+ fake_segments=[self.test_invalid_segment, self.test_valid_segment],
+ host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_vpp)])
+
+ self.mgr.bind_port(port_context)
+
+ pass_vif_type = portbindings.VIF_TYPE_VHOST_USER
+ pass_vif_details = self.sample_hconfig_dbget_vpp['configurations'][
+ 'supported_vnic_types'][0]['vif_details']
+ self._set_pass_vif_details(port_context, pass_vif_details)
+
+ port_context.set_binding.assert_called_once_with(
+ self.test_valid_segment[api.ID], pass_vif_type,
+ pass_vif_details, status=n_const.PORT_STATUS_ACTIVE)
+
+ def test_bind_port_without_valid_segment(self):
+ """test bind_port without a valid segment."""
+ port_context = self._fake_port_context(
+ fake_segments=[self.test_invalid_segment])
+
+ self.mgr._hconfig_bind_port(
+ port_context, self.sample_hconfig_dbget_ovs)
+
+ port_context.set_binding.assert_not_called()
+
+ def test_no_str_template_substitution_in_configuration_string(self):
+        """Test for no identifier substitution in config JSON string."""
+ port_context = self._fake_port_context(
+ fake_segments=[self.test_invalid_segment, self.test_valid_segment])
+
+ hconf_dict = self.mgr._substitute_hconfig_tmpl(
+ port_context, self.sample_hconf_str_tmpl_nosubs)
+
+ test_string = hconf_dict['configurations'][
+ 'supported_vnic_types'][0][
+ 'vif_details'][portbindings.VHOST_USER_SOCKET]
+
+ expected_str = '/var/run/openvswitch/PORT_NOSUBS'
+
+ self.assertEqual(expected_str, test_string)
+
+ def test_str_template_substitution_in_configuration_string(self):
+ """Test for identifier substitution in config JSON string."""
+ port_context = self._fake_port_context(
+ fake_segments=[self.test_invalid_segment, self.test_valid_segment])
+
+ hconf_dict = self.mgr._substitute_hconfig_tmpl(
+ port_context, self.sample_hconf_str_tmpl_subs_vpp)
+
+ test_string = hconf_dict['configurations'][
+ 'supported_vnic_types'][0][
+ 'vif_details'][portbindings.VHOST_USER_SOCKET]
+
+ expected_str = Template('/tmp/socket_$PORT_ID')
+ expected_str = expected_str.safe_substitute({
+ 'PORT_ID': port_context.current['id']})
+
+ self.assertEqual(expected_str, test_string)
+
+ def _fake_port_context(self, fake_segments, host_agents=None):
+ network = mock.MagicMock(spec=api.NetworkContext)
+ return mock.MagicMock(
+ spec=ctx.PortContext,
+ current={'id': 'CONTEXT_ID',
+ portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL},
+ segments_to_bind=fake_segments, network=network,
+ host_agents=lambda agent_type: host_agents)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json b/networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json
new file mode 100644
index 0000000..5d6b994
--- /dev/null
+++ b/networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json
@@ -0,0 +1,182 @@
+{
+ "network-topology": {
+ "topology": [
+ {
+ "topology-id": "flow:1"
+ },
+ {
+ "node": [
+ {
+ "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int",
+ "ovsdb:bridge-external-ids": [
+ {
+ "bridge-external-id-key": "opendaylight-iid",
+ "bridge-external-id-value": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int']"
+ }
+ ],
+ "ovsdb:bridge-name": "br-int",
+ "ovsdb:bridge-uuid": "e92ec02d-dba8-46d8-8047-680cab5ee8b0",
+ "ovsdb:controller-entry": [
+ {
+ "controller-uuid": "8521e6df-54bd-48ac-a249-3bb810fd812c",
+ "is-connected": false,
+ "target": "tcp:192.168.66.1:6653"
+ }
+ ],
+ "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+ "ovsdb:fail-mode": "ovsdb:ovsdb-fail-mode-secure",
+ "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
+ "ovsdb:protocol-entry": [
+ {
+ "protocol": "ovsdb:ovsdb-bridge-protocol-openflow-13"
+ }
+ ],
+ "termination-point": [
+ {
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "d21472db-5c3c-4b38-bf18-6ed3a32edff1",
+ "ovsdb:name": "br-int",
+ "ovsdb:port-uuid": "30adf59e-ff0d-478f-b37a-e37ea20dddd3",
+ "tp-id": "br-int"
+ }
+ ]
+ },
+ {
+ "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-nian1_1",
+ "ovsdb:bridge-name": "br-nian1_1",
+ "ovsdb:bridge-uuid": "243e01cb-e413-4615-a044-b254141e407d",
+ "ovsdb:datapath-id": "00:00:ca:01:3e:24:15:46",
+ "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+ "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
+ "termination-point": [
+ {
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "45184fd2-31eb-4c87-a071-2d64a0893662",
+ "ovsdb:name": "br-nian1_1",
+ "ovsdb:ofport": 65534,
+ "ovsdb:port-uuid": "f5952c1b-6b6d-4fd2-b2cd-201b8c9e0779",
+ "tp-id": "br-nian1_1"
+ }
+ ]
+ },
+ {
+ "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-ex",
+ "ovsdb:bridge-external-ids": [
+ {
+ "bridge-external-id-key": "bridge-id",
+ "bridge-external-id-value": "br-ex"
+ }
+ ],
+ "ovsdb:bridge-name": "br-ex",
+ "ovsdb:bridge-other-configs": [
+ {
+ "bridge-other-config-key": "disable-in-band",
+ "bridge-other-config-value": "true"
+ }
+ ],
+ "ovsdb:bridge-uuid": "43f7768e-c2f9-4ae7-8099-8aee5a17add7",
+ "ovsdb:datapath-id": "00:00:8e:76:f7:43:e7:4a",
+ "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+ "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
+ "termination-point": [
+ {
+ "ovsdb:interface-type": "ovsdb:interface-type-internal",
+ "ovsdb:interface-uuid": "bdec1830-e6a5-4476-adff-569c455adb33",
+ "ovsdb:name": "br-ex",
+ "ovsdb:ofport": 65534,
+ "ovsdb:port-uuid": "7ba5939b-ff13-409d-86de-67556021ddff",
+ "tp-id": "br-ex"
+ }
+ ]
+ },
+ {
+ "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c",
+ "ovsdb:connection-info": {
+ "local-ip": "192.168.66.1",
+ "local-port": 6640,
+ "remote-ip": "192.168.66.1",
+ "remote-port": 41817
+ },
+ "ovsdb:datapath-type-entry": [
+ {
+ "datapath-type": "ovsdb:datapath-type-netdev"
+ },
+ {
+ "datapath-type": "ovsdb:datapath-type-system"
+ }
+ ],
+ "ovsdb:interface-type-entry": [
+ {
+ "interface-type": "ovsdb:interface-type-ipsec-gre"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-gre"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-gre64"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-dpdkr"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-vxlan"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-dpdkvhostuser"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-tap"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-geneve"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-dpdk"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-internal"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-system"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-lisp"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-patch"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-ipsec-gre64"
+ },
+ {
+ "interface-type": "ovsdb:interface-type-stt"
+ }
+ ],
+ "ovsdb:managed-node-entry": [
+ {
+ "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-ex']"
+ },
+ {
+ "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int']"
+ },
+ {
+ "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-nian1_1']"
+ }
+ ],
+ "ovsdb:openvswitch-other-configs": [
+ {
+ "other-config-key": "local_ip",
+ "other-config-value": "192.168.66.1"
+ },
+ {
+ "other-config-key": "pmd-cpu-mask",
+ "other-config-value": "400004"
+ }
+ ]
+ }
+ ],
+ "topology-id": "ovsdb:1"
+ }
+ ]
+ }
+}