Diffstat (limited to 'networking-odl/networking_odl/common')
-rw-r--r--  networking-odl/networking_odl/common/__init__.py             |   0
-rw-r--r--  networking-odl/networking_odl/common/cache.py                | 197
-rw-r--r--  networking-odl/networking_odl/common/callback.py             |  73
-rw-r--r--  networking-odl/networking_odl/common/client.py               |  94
-rw-r--r--  networking-odl/networking_odl/common/config.py               |  67
-rw-r--r--  networking-odl/networking_odl/common/constants.py            |  55
-rw-r--r--  networking-odl/networking_odl/common/exceptions.py           |  20
-rw-r--r--  networking-odl/networking_odl/common/filters.py              |  96
-rw-r--r--  networking-odl/networking_odl/common/lightweight_testing.py  | 177
-rw-r--r--  networking-odl/networking_odl/common/utils.py                |  60
10 files changed, 839 insertions, 0 deletions
diff --git a/networking-odl/networking_odl/common/__init__.py b/networking-odl/networking_odl/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking-odl/networking_odl/common/__init__.py
diff --git a/networking-odl/networking_odl/common/cache.py b/networking-odl/networking_odl/common/cache.py
new file mode 100644
index 0000000..6c44cc3
--- /dev/null
+++ b/networking-odl/networking_odl/common/cache.py
@@ -0,0 +1,197 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import six
+import sys
+import time
+
+from oslo_log import log
+
+from networking_odl._i18n import _LW
+
+
+LOG = log.getLogger(__name__)
+
+
+class CacheEntry(collections.namedtuple('CacheEntry', ['timeout', 'values'])):
+
+    error = None
+
+    @classmethod
+    def create(cls, timeout, *values):
+        return CacheEntry(timeout, list(values))
+
+    def add_value(self, value):
+        self.values.append(value)
+
+    def is_expired(self, current_clock):
+        return self.timeout <= current_clock
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, other):
+        return self is other
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class Cache(object):
+    '''Generic mapping class used to cache mappings.
+
+    Examples of use:
+     - host name to IP addresses mapping
+     - IP addresses to ODL networking topology elements mapping
+    '''
+
+    # TODO(Federico Ressi) after Mitaka: this class should store cached data
+    # in a place shared between more hosts using a caching mechanism coherent
+    # with other OpenStack libraries. This is especially interesting in the
+    # context of reliability when there are more Neutron instances and the
+    # direct connection to ODL is broken.
+
+    create_new_entry = CacheEntry.create
+
+    def __init__(self, fetch_all_func):
+        if not callable(fetch_all_func):
+            message = 'Expected callable as parameter, got {!r}.'.format(
+                fetch_all_func)
+            raise TypeError(message)
+        self._fetch_all = fetch_all_func
+        self.clear()
+
+    def clear(self):
+        self._entries = collections.OrderedDict()
+
+    def fetch(self, key, timeout):
+        __, value = self.fetch_any([key], timeout=timeout)
+        return value
+
+    def fetch_any(self, keys, timeout):
+        return next(self.fetch_all(keys=keys, timeout=timeout))
+
+    def fetch_all(self, keys, timeout):
+        # the current time expressed as a number
+        current_clock = time.clock()
+        # this is the moment in the future at which new entries will expire
+        new_entries_timeout = current_clock + timeout
+        # entries to be fetched because missing or expired
+        new_entries = collections.OrderedDict()
+        # all entries missing or expired
+        missing = collections.OrderedDict()
+        # captured error for the case a problem has to be reported
+        cause_exc_info = None
+
+        for key in keys:
+            entry = self._entries.get(key)
+            if entry is None or entry.is_expired(current_clock) or entry.error:
+                # this entry has to be fetched
+                new_entries[key] = missing[key] =\
+                    self.create_new_entry(new_entries_timeout)
+            elif entry.values:
+                # Yield existing entry
+                for value in entry.values:
+                    yield key, value
+            else:
+                # This entry is not expired and there was no error when it
+                # was fetched. Therefore we accept that there are no values
+                # for the given key until it expires. This is going to
+                # produce a KeyError if it is still missing at the end of
+                # this function.
+                missing[key] = entry
+
+        if missing:
+            if new_entries:
+                # Fetch some entries and update the cache
+                try:
+                    new_entry_keys = tuple(new_entries)
+                    for key, value in self._fetch_all(new_entry_keys):
+                        entry = new_entries.get(key)
+                        if entry:
+                            # Add fresh new value
+                            entry.add_value(value)
+                        else:
+                            # This key was not asked, but we take it in any
+                            # way. "Noli equi dentes inspicere donati."
+                            new_entries[key] = entry = self.create_new_entry(
+                                new_entries_timeout, value)
+
+                # pylint: disable=broad-except
+                except Exception:
+                    # Something has gone wrong: update and yield what we got
+                    # until now before raising any error
+                    cause_exc_info = sys.exc_info()
+                    LOG.warning(
+                        _LW('Error fetching values for keys: %r'),
+                        ', '.join(repr(k) for k in new_entry_keys),
+                        exc_info=cause_exc_info)
+
+                # update the cache with new fresh entries
+                self._entries.update(new_entries)
+
+            missing_keys = []
+            for key, entry in six.iteritems(missing):
+                if entry.values:
+                    # yield entries that were missing before
+                    for value in entry.values:
+                        # Yield just fetched entry
+                        yield key, value
+                else:
+                    if cause_exc_info:
+                        # mark this entry as failed
+                        entry.error = cause_exc_info
+                    # after all this entry is still without any value
+                    missing_keys.append(key)
+
+            if missing_keys:
+                # After all some entry is still missing, probably because
+                # the key was invalid. It's time to raise an error.
+                missing_keys = tuple(missing_keys)
+                if not cause_exc_info:
+                    # Search for the error cause in missing entries
+                    for key in missing_keys:
+                        error = self._entries[key].error
+                        if error:
+                            # A cached entry for which the fetch method
+                            # produced an error will produce the same error
+                            # if the fetch method fails to fetch it again
+                            # without giving any error. Is this what we want?
+                            break
+
+                    else:
+                        # If the cause of the problem is not known then
+                        # probably keys were wrong
+                        message = 'Invalid keys: {!r}'.format(
+                            ', '.join(missing_keys))
+                        error = KeyError(message)
+
+                    try:
+                        raise error
+                    except KeyError:
+                        cause_exc_info = sys.exc_info()
+
+                raise CacheFetchError(
+                    missing_keys=missing_keys, cause_exc_info=cause_exc_info)
+
+
+class CacheFetchError(KeyError):
+
+    def __init__(self, missing_keys, cause_exc_info):
+        super(CacheFetchError, self).__init__(str(cause_exc_info[1]))
+        self.cause_exc_info = cause_exc_info
+        self.missing_keys = missing_keys
+
+    def reraise_cause(self):
+        six.reraise(*self.cause_exc_info)
diff --git a/networking-odl/networking_odl/common/callback.py b/networking-odl/networking_odl/common/callback.py
new file mode 100644
index 0000000..d9d168b
--- /dev/null
+++ b/networking-odl/networking_odl/common/callback.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+from oslo_log import log as logging
+
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
+
+from networking_odl.common import constants as odl_const
+
+LOG = logging.getLogger(__name__)
+
+ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural'))
+_RESOURCE_MAPPING = {
+    resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS),
+    resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE,
+                                               odl_const.ODL_SG_RULES),
+}
+_OPERATION_MAPPING = {
+    events.AFTER_CREATE: odl_const.ODL_CREATE,
+    events.AFTER_UPDATE: odl_const.ODL_UPDATE,
+    events.AFTER_DELETE: odl_const.ODL_DELETE,
+}
+
+
+class OdlSecurityGroupsHandler(object):
+
+    def __init__(self, odl_driver):
+        self.odl_driver = odl_driver
+        self._subscribe()
+
+    def _subscribe(self):
+        for event in (events.AFTER_CREATE, events.AFTER_DELETE):
+            registry.subscribe(self.sg_callback, resources.SECURITY_GROUP,
+                               event)
+            registry.subscribe(self.sg_callback, resources.SECURITY_GROUP_RULE,
+                               event)
+
+        registry.subscribe(self.sg_callback, resources.SECURITY_GROUP,
+                           events.AFTER_UPDATE)
+
+    def sg_callback(self, resource, event, trigger, **kwargs):
+        res = kwargs.get(resource)
+        res_id = kwargs.get("%s_id" % resource)
+        odl_res_type = _RESOURCE_MAPPING[resource]
+
+        odl_ops = _OPERATION_MAPPING[event]
+        odl_res_dict = None if res is None else {odl_res_type.singular: res}
+
+        LOG.debug("Calling sync_from_callback with ODL_OPS (%(odl_ops)s) "
+                  "ODL_RES_TYPE (%(odl_res_type)s) RES_ID (%(res_id)s) "
+                  "ODL_RES_DICT (%(odl_res_dict)s) KWARGS (%(kwargs)s)",
+                  {'odl_ops': odl_ops, 'odl_res_type': odl_res_type,
+                   'res_id': res_id, 'odl_res_dict': odl_res_dict,
+                   'kwargs': kwargs})
+
+        self.odl_driver.sync_from_callback(odl_ops, odl_res_type,
+                                           res_id, odl_res_dict)
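For orientation only (not part of the commit): a minimal sketch of how the security-group handler above is expected to be driven, assuming a Neutron environment where neutron.callbacks is importable. FakeDriver is a hypothetical stand-in for the ML2 mechanism driver that normally supplies sync_from_callback().

    # Hypothetical illustration -- FakeDriver and the literal IDs are made up.
    from neutron.callbacks import events
    from neutron.callbacks import resources

    from networking_odl.common import callback


    class FakeDriver(object):
        def sync_from_callback(self, operation, odl_res_type, res_id,
                               res_dict):
            print(operation, odl_res_type.plural, res_id, res_dict)


    # Instantiating the handler subscribes sg_callback() to the registry.
    handler = callback.OdlSecurityGroupsHandler(FakeDriver())
    # Neutron normally fires this through registry.notify() after creating a
    # security group; calling it directly shows the translated payload.
    handler.sg_callback(resources.SECURITY_GROUP, events.AFTER_CREATE, None,
                        security_group={'id': 'sg-uuid', 'name': 'default'},
                        security_group_id='sg-uuid')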
diff --git a/networking-odl/networking_odl/common/client.py b/networking-odl/networking_odl/common/client.py
new file mode 100644
index 0000000..45349e9
--- /dev/null
+++ b/networking-odl/networking_odl/common/client.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import excutils
+import requests
+
+
+LOG = log.getLogger(__name__)
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+
+class OpenDaylightRestClient(object):
+
+    @classmethod
+    def create_client(cls, url=None):
+        if cfg.CONF.ml2_odl.enable_lightweight_testing:
+            LOG.debug("ODL lightweight testing is enabled, "
+                      "returning an OpenDaylightLwtClient instance")
+
+            # Import here, otherwise we create a dependency loop
+            from networking_odl.common import lightweight_testing as lwt
+            cls = lwt.OpenDaylightLwtClient
+
+        return cls(
+            url or cfg.CONF.ml2_odl.url,
+            cfg.CONF.ml2_odl.username,
+            cfg.CONF.ml2_odl.password,
+            cfg.CONF.ml2_odl.timeout)
+
+    def __init__(self, url, username, password, timeout):
+        self.url = url
+        self.timeout = timeout
+        self.auth = (username, password)
+
+    def get(self, urlpath='', data=None):
+        return self.request('get', urlpath, data)
+
+    def put(self, urlpath='', data=None):
+        return self.request('put', urlpath, data)
+
+    def delete(self, urlpath='', data=None):
+        return self.request('delete', urlpath, data)
+
+    def request(self, method, urlpath='', data=None):
+        headers = {'Content-Type': 'application/json'}
+        url = '/'.join([self.url, urlpath])
+        LOG.debug(
+            "Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)",
+            {'method': method, 'url': url, 'data': data})
+        return requests.request(
+            method, url=url, headers=headers, data=data, auth=self.auth,
+            timeout=self.timeout)
+
+    def sendjson(self, method, urlpath, obj):
+        """Send json to the OpenDaylight controller."""
+        data = jsonutils.dumps(obj, indent=2) if obj else None
+        return self._check_rensponse(self.request(method, urlpath, data))
+
+    def try_delete(self, urlpath):
+        rensponse = self.delete(urlpath)
+        if rensponse.status_code == requests.codes.not_found:
+            # The resource is already removed; ignore 404 gracefully
+            LOG.debug("%(urlpath)s doesn't exist", {'urlpath': urlpath})
+            return False
+        else:
+            self._check_rensponse(rensponse)
+            return True
+
+    def _check_rensponse(self, rensponse):
+        try:
+            rensponse.raise_for_status()
+        except requests.HTTPError as error:
+            with excutils.save_and_reraise_exception():
+                LOG.debug("Exception from ODL: %(e)s %(text)s",
+                          {'e': error, 'text': rensponse.text}, exc_info=1)
+        else:
+            LOG.debug("Got response:\n"
+                      "(%(response)s)", {'response': rensponse.text})
+        return rensponse
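A minimal usage sketch for the REST client above (not part of the commit); the URL and credentials are placeholders, and the [ml2_odl] options are normally set in the plugin configuration file rather than overridden in code.

    from oslo_config import cfg

    from networking_odl.common import client

    cfg.CONF.set_override(
        'url', 'http://odl.example.org:8181/controller/nb/v2/neutron',
        group='ml2_odl')
    cfg.CONF.set_override('username', 'admin', group='ml2_odl')
    cfg.CONF.set_override('password', 'admin', group='ml2_odl')

    odl_client = client.OpenDaylightRestClient.create_client()
    # sendjson() serializes the dict and raises through raise_for_status()
    # on a non-2xx reply from the controller.
    odl_client.sendjson('post', 'networks',
                        {'network': {'id': 'net-uuid', 'name': 'example'}})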
diff --git a/networking-odl/networking_odl/common/config.py b/networking-odl/networking_odl/common/config.py
new file mode 100644
index 0000000..c921242
--- /dev/null
+++ b/networking-odl/networking_odl/common/config.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from networking_odl._i18n import _
+
+
+odl_opts = [
+    cfg.StrOpt('url',
+               help=_("HTTP URL of OpenDaylight REST interface.")),
+    cfg.StrOpt('username',
+               help=_("HTTP username for authentication.")),
+    cfg.StrOpt('password', secret=True,
+               help=_("HTTP password for authentication.")),
+    cfg.IntOpt('timeout', default=10,
+               help=_("HTTP timeout in seconds.")),
+    cfg.IntOpt('session_timeout', default=30,
+               help=_("Tomcat session timeout in minutes.")),
+    cfg.IntOpt('sync_timeout', default=10,
+               help=_("(V2 driver) Sync thread timeout in seconds.")),
+    cfg.IntOpt('retry_count', default=5,
+               help=_("(V2 driver) Number of times to retry a row "
+                      "before failing.")),
+    cfg.IntOpt('maintenance_interval', default=300,
+               help=_("(V2 driver) Journal maintenance operations interval "
+                      "in seconds.")),
+    cfg.IntOpt('completed_rows_retention', default=600,
+               help=_("(V2 driver) Time to keep completed rows in seconds. "
+                      "Completed rows retention will be checked every "
+                      "maintenance_interval by the cleanup thread. "
+                      "To disable completed rows deletion "
+                      "the value should be -1.")),
+    cfg.BoolOpt('enable_lightweight_testing',
+                default=False,
+                help=_('Test without real ODL.')),
+    cfg.StrOpt('port_binding_controller',
+               default='network-topology',
+               help=_('Name of the controller to be used for port binding.')),
+    cfg.IntOpt('processing_timeout', default=100,
+               help=_("(V2 driver) Time in seconds to wait before a "
+                      "processing row is marked back to pending.")),
+    cfg.StrOpt('odl_hostconf_uri',
+               help=_("Path for ODL host configuration REST interface."),
+               default="/restconf/operational/neutron:neutron/hostconfigs"),
+    cfg.IntOpt('restconf_poll_interval', default=30,
+               help=_("Poll interval in seconds for getting ODL hostconfig.")),
+]
+
+cfg.CONF.register_opts(odl_opts, "ml2_odl")
+
+
+def list_opts():
+    return [('ml2_odl', odl_opts)]
diff --git a/networking-odl/networking_odl/common/constants.py b/networking-odl/networking_odl/common/constants.py
new file mode 100644
index 0000000..50c0117
--- /dev/null
+++ b/networking-odl/networking_odl/common/constants.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+ODL_NETWORK = 'network'
+ODL_NETWORKS = 'networks'
+ODL_SUBNET = 'subnet'
+ODL_SUBNETS = 'subnets'
+ODL_PORT = 'port'
+ODL_PORTS = 'ports'
+ODL_SG = 'security_group'
+ODL_SGS = 'security_groups'
+ODL_SG_RULE = 'security_group_rule'
+ODL_SG_RULES = 'security_group_rules'
+ODL_ROUTER = 'router'
+ODL_ROUTERS = 'routers'
+ODL_ROUTER_INTF = 'router_interface'
+ODL_FLOATINGIP = 'floatingip'
+ODL_FLOATINGIPS = 'floatingips'
+
+ODL_LOADBALANCER = 'loadbalancer'
+ODL_LOADBALANCERS = 'loadbalancers'
+ODL_LISTENER = 'listener'
+ODL_LISTENERS = 'listeners'
+ODL_POOL = 'pool'
+ODL_POOLS = 'pools'
+ODL_MEMBER = 'member'
+ODL_MEMBERS = 'members'
+ODL_HEALTHMONITOR = 'healthmonitor'
+ODL_HEALTHMONITORS = 'healthmonitors'
+
+ODL_CREATE = 'create'
+ODL_UPDATE = 'update'
+ODL_DELETE = 'delete'
+ODL_ADD = 'add'
+ODL_REMOVE = 'remove'
+
+ODL_UUID_NOT_USED = '0'
+
+# Constants for journal operation states
+PENDING = 'pending'
+PROCESSING = 'processing'
+FAILED = 'failed'
+COMPLETED = 'completed'
diff --git a/networking-odl/networking_odl/common/exceptions.py b/networking-odl/networking_odl/common/exceptions.py
new file mode 100644
index 0000000..f174c10
--- /dev/null
+++ b/networking-odl/networking_odl/common/exceptions.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib import exceptions as exc
+
+
+class OpendaylightAuthError(exc.NeutronException):
+    message = '%(msg)s'
diff --git a/networking-odl/networking_odl/common/filters.py b/networking-odl/networking_odl/common/filters.py
new file mode 100644
index 0000000..fb42a0e
--- /dev/null
+++ b/networking-odl/networking_odl/common/filters.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from networking_odl.common import constants as odl_const
+from networking_odl.common import utils as odl_utils
+
+
+def _filter_unmapped_null(resource_dict, unmapped_keys):
+    # NOTE(yamahata): bug work around
+    # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
+    # A null value for an unmapped element causes the next mapped
+    # collection to contain a null value
+    # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
+    #
+    # Java Object:
+    # class Root {
+    #     Collection<String> mappedCollection = new ArrayList<String>;
+    # }
+    #
+    # Result: mappedCollection contains one element: null
+    #
+    # TODO(yamahata): update along side with neutron and ODL
+    # add when neutron adds more extensions
+    # delete when ODL neutron northbound supports it
+    # TODO(yamahata): do same thing for other resources
+    keys_to_del = [key for key in unmapped_keys
+                   if resource_dict.get(key) is None]
+    if keys_to_del:
+        odl_utils.try_del(resource_dict, keys_to_del)
+
+
+_NETWORK_UNMAPPED_KEYS = ['qos_policy_id']
+_PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name',
+                       'port_security_enabled', 'qos_policy_id']
+
+
+def _filter_network_create(network):
+    odl_utils.try_del(network, ['status', 'subnets'])
+    _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
+
+
+def _filter_network_update(network):
+    odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
+    _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
+
+
+def _filter_subnet_update(subnet):
+    odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
+                               'allocation_pools', 'tenant_id'])
+
+
+def _filter_port_create(port):
+    """Filter out port attributes not required for a create."""
+    odl_utils.try_del(port, ['status'])
+    _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
+
+
+def _filter_port_update(port):
+    """Filter out port attributes for an update operation."""
+    odl_utils.try_del(port, ['network_id', 'id', 'status', 'mac_address',
+                             'tenant_id', 'fixed_ips'])
+    _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
+
+
+def _filter_router_update(router):
+    """Filter out attributes for an update operation."""
+    odl_utils.try_del(router, ['id', 'tenant_id', 'status'])
+
+
+_FILTER_MAP = {
+    (odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create,
+    (odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update,
+    (odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update,
+    (odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create,
+    (odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update,
+    (odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update,
+}
+
+
+def filter_for_odl(object_type, operation, data):
+    """Filter out the attributes before sending the data to ODL."""
+    filter_key = (object_type, operation)
+    if filter_key in _FILTER_MAP:
+        _FILTER_MAP[filter_key](data)
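A minimal sketch (not part of the commit) of how filter_for_odl() above is meant to be applied to a resource dict just before it is pushed to ODL; the port dict is illustrative only and assumes the networking_odl package is importable.

    from networking_odl.common import constants as odl_const
    from networking_odl.common import filters

    port = {'id': 'port-uuid', 'network_id': 'net-uuid', 'status': 'ACTIVE',
            'binding:profile': None, 'qos_policy_id': None,
            'fixed_ips': [{'ip_address': '10.0.0.4'}]}
    filters.filter_for_odl(odl_const.ODL_PORT, odl_const.ODL_CREATE, port)
    # 'status' is dropped and the null-valued unmapped keys
    # ('binding:profile', 'qos_policy_id') are removed in place.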
diff --git a/networking-odl/networking_odl/common/lightweight_testing.py b/networking-odl/networking_odl/common/lightweight_testing.py
new file mode 100644
index 0000000..3d0cf2e
--- /dev/null
+++ b/networking-odl/networking_odl/common/lightweight_testing.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2015 Intel Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from copy import deepcopy
+import requests
+import six
+
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from networking_odl._i18n import _
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+
+
+LOG = logging.getLogger(__name__)
+
+OK = requests.codes.ok
+NO_CONTENT = requests.codes.no_content
+NOT_ALLOWED = requests.codes.not_allowed
+NOT_FOUND = requests.codes.not_found
+BAD_REQUEST = requests.codes.bad_request
+
+
+class OpenDaylightLwtClient(client.OpenDaylightRestClient):
+    """Lightweight testing client"""
+
+    lwt_dict = {odl_const.ODL_NETWORKS: {},
+                odl_const.ODL_SUBNETS: {},
+                odl_const.ODL_PORTS: {},
+                odl_const.ODL_SGS: {},
+                odl_const.ODL_SG_RULES: {},
+                odl_const.ODL_LOADBALANCERS: {},
+                odl_const.ODL_LISTENERS: {},
+                odl_const.ODL_POOLS: {},
+                odl_const.ODL_MEMBERS: {},
+                odl_const.ODL_HEALTHMONITORS: {}}
+
+    @classmethod
+    def _make_response(cls, status_code=OK, content=None):
+        """Only supports 'content-type': 'application/json'"""
+        response = requests.models.Response()
+        response.status_code = status_code
+        if content:
+            response.raw = six.BytesIO(
+                jsonutils.dumps(content).encode('utf-8'))
+
+        return response
+
+    @classmethod
+    def _get_resource_id(cls, urlpath):
+        # resource ID is the last element of urlpath
+        return str(urlpath).rsplit('/', 1)[-1]
+
+    @classmethod
+    def post(cls, resource_type, resource_dict, urlpath, resource_list):
+        """No ID in URL, elements in resource_list must have an ID"""
+
+        if resource_list is None:
+            raise ValueError(_("resource_list can not be None"))
+
+        for resource in resource_list:
+            if resource['id'] in resource_dict:
+                LOG.debug("%s %s already exists", resource_type,
+                          resource['id'])
+                response = cls._make_response(NOT_ALLOWED)
+                raise requests.exceptions.HTTPError(response=response)
+
+            resource_dict[resource['id']] = deepcopy(resource)
+
+        return cls._make_response(NO_CONTENT)
+
+    @classmethod
+    def put(cls, resource_type, resource_dict, urlpath, resource_list):
+
+        resource_id = cls._get_resource_id(urlpath)
+
+        if resource_list is None:
+            raise ValueError(_("resource_list can not be None"))
+
+        if resource_id and len(resource_list) != 1:
+            LOG.debug("Updating %s with multiple resources", urlpath)
+            response = cls._make_response(BAD_REQUEST)
+            raise requests.exceptions.HTTPError(response=response)
+
+        for resource in resource_list:
+            res_id = resource_id or resource['id']
+            if res_id in resource_dict:
+                resource_dict[res_id].update(deepcopy(resource))
+            else:
+                LOG.debug("%s %s does not exist", resource_type, res_id)
+                response = cls._make_response(NOT_FOUND)
+                raise requests.exceptions.HTTPError(response=response)
+
+        return cls._make_response(NO_CONTENT)
+
+    @classmethod
+    def delete(cls, resource_type, resource_dict, urlpath, resource_list):
+
+        if resource_list is None:
+            resource_id = cls._get_resource_id(urlpath)
+            id_list = [resource_id]
+        else:
+            id_list = [res['id'] for res in resource_list]
+
+        for res_id in id_list:
+            removed = resource_dict.pop(res_id, None)
+            if removed is None:
+                LOG.debug("%s %s does not exist", resource_type, res_id)
+                response = cls._make_response(NOT_FOUND)
+                raise requests.exceptions.HTTPError(response=response)
+
+        return cls._make_response(NO_CONTENT)
+
+    @classmethod
+    def get(cls, resource_type, resource_dict, urlpath, resource_list=None):
+
+        resource_id = cls._get_resource_id(urlpath)
+
+        if resource_id:
+            resource = resource_dict.get(resource_id)
+            if resource is None:
+                LOG.debug("%s %s does not exist", resource_type, resource_id)
+                response = cls._make_response(NOT_FOUND)
+                raise requests.exceptions.HTTPError(response=response)
+            else:
+                # When getting a single resource, the return value is a dict
+                r_list = {resource_type[:-1]: deepcopy(resource)}
+                return cls._make_response(OK, r_list)
+
+        r_list = [{resource_type[:-1]: deepcopy(res)}
+                  for res in six.itervalues(resource_dict)]
+
+        return cls._make_response(OK, r_list)
+
+    def sendjson(self, method, urlpath, obj=None):
+        """Lightweight testing without ODL"""
+
+        if '/' not in urlpath:
+            urlpath += '/'
+
+        resource_type = str(urlpath).split('/', 1)[0]
+        resource_type = resource_type.replace('-', '_')
+
+        resource_dict = self.lwt_dict.get(resource_type)
+
+        if resource_dict is None:
+            LOG.debug("Resource type %s is not supported", resource_type)
+            response = self._make_response(NOT_FOUND)
+            raise requests.exceptions.HTTPError(response=response)
+
+        func = getattr(self, str(method).lower())
+
+        resource_list = None
+        if obj:
+            # If obj is not None, it can only have one entry
+            assert len(obj) == 1, "Obj can only have one entry"
+            key, resource_list = list(obj.items())[0]
+
+            if not isinstance(resource_list, list):
+                # Need to transform resource_list to a real list, i.e. [res]
+                resource_list = [resource_list]
+
+        return func(resource_type, resource_dict, urlpath, resource_list)
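A minimal sketch (not part of the commit) of the lightweight testing path: with enable_lightweight_testing set, create_client() hands back an OpenDaylightLwtClient whose sendjson() operates on the in-memory lwt_dict instead of issuing HTTP requests. The IDs are placeholders.

    from oslo_config import cfg

    from networking_odl.common import client

    cfg.CONF.set_override('enable_lightweight_testing', True, group='ml2_odl')
    lwt_client = client.OpenDaylightRestClient.create_client()

    lwt_client.sendjson('post', 'networks',
                        {'networks': [{'id': 'net-uuid', 'name': 'example'}]})
    reply = lwt_client.sendjson('get', 'networks/net-uuid')
    # Expected to decode to {'network': {'id': 'net-uuid', 'name': 'example'}}
    print(reply.json())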
diff --git a/networking-odl/networking_odl/common/utils.py b/networking-odl/networking_odl/common/utils.py
new file mode 100644
index 0000000..a01a14a
--- /dev/null
+++ b/networking-odl/networking_odl/common/utils.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import socket
+
+from oslo_log import log
+
+from networking_odl.common import cache
+
+LOG = log.getLogger(__name__)
+
+
+def try_del(d, keys):
+    """Ignore key errors when deleting from a dictionary."""
+    for key in keys:
+        try:
+            del d[key]
+        except KeyError:
+            pass
+
+
+def _fetch_all_addresses_by_hostnames(hostnames):
+    for name in hostnames:
+        # use an ordered dict to avoid duplicates and keep order
+        entries = collections.OrderedDict(
+            (info[4][0], None) for info in socket.getaddrinfo(name, None))
+        for entry in entries:
+            yield name, entry
+
+
+_addresses_by_name_cache = cache.Cache(_fetch_all_addresses_by_hostnames)
+
+
+def get_addresses_by_name(name, time_to_live=60.0):
+    """Gets and caches addresses for the given name.
+
+    This is a cached wrapper for function 'socket.getaddrinfo'.
+
+    :returns: a sequence of unique addresses bound to the given hostname.
+    """
+
+    try:
+        results = _addresses_by_name_cache.fetch_all(
+            [name], timeout=time_to_live)
+        return tuple(address for name, address in results)
+    except cache.CacheFetchError as error:
+        error.reraise_cause()
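A minimal sketch (not part of the commit) of the cached resolver above: the first call resolves through socket.getaddrinfo() and fills the module-level Cache; later calls within time_to_live seconds are served from the cache.

    from networking_odl.common import utils

    addresses = utils.get_addresses_by_name('localhost', time_to_live=60.0)
    # e.g. ('127.0.0.1', '::1'), depending on the local resolver
    print(addresses)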