author    RHE <rebirthmonkey@gmail.com>  2017-11-24 13:54:26 +0100
committer RHE <rebirthmonkey@gmail.com>  2017-11-24 13:54:26 +0100
commit    920a49cfa055733d575282973e23558c33087a4a (patch)
tree      d371dab34efa5028600dad2e7ca58063626e7ba4 /keystone-moon/keystone/common/cache
parent    ef3eefca70d8abb4a00dafb9419ad32738e934b2 (diff)
remove keystone-moon
Change-Id: I80d7c9b669f19d5f6607e162de8e0e55c2f80fdd
Signed-off-by: RHE <rebirthmonkey@gmail.com>
Diffstat (limited to 'keystone-moon/keystone/common/cache')
-rw-r--r--  keystone-moon/keystone/common/cache/__init__.py                 15
-rw-r--r--  keystone-moon/keystone/common/cache/_context_cache.py          129
-rw-r--r--  keystone-moon/keystone/common/cache/_memcache_pool.py          244
-rw-r--r--  keystone-moon/keystone/common/cache/backends/__init__.py         0
-rw-r--r--  keystone-moon/keystone/common/cache/backends/memcache_pool.py   28
-rw-r--r--  keystone-moon/keystone/common/cache/backends/mongo.py           25
-rw-r--r--  keystone-moon/keystone/common/cache/backends/noop.py            56
-rw-r--r--  keystone-moon/keystone/common/cache/core.py                    124
8 files changed, 0 insertions, 621 deletions
diff --git a/keystone-moon/keystone/common/cache/__init__.py b/keystone-moon/keystone/common/cache/__init__.py
deleted file mode 100644
index 49502399..00000000
--- a/keystone-moon/keystone/common/cache/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.common.cache.core import * # noqa
diff --git a/keystone-moon/keystone/common/cache/_context_cache.py b/keystone-moon/keystone/common/cache/_context_cache.py
deleted file mode 100644
index 3895ca1f..00000000
--- a/keystone-moon/keystone/common/cache/_context_cache.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A dogpile.cache proxy that caches objects in the request local cache."""
-from dogpile.cache import api
-from dogpile.cache import proxy
-from oslo_context import context as oslo_context
-from oslo_serialization import msgpackutils
-
-from keystone.models import revoke_model
-
-
-class _RevokeModelHandler(object):
-    # NOTE(morganfainberg): Reserved "registry" entries need to be set in
-    # oslo_serialization for application-specific handlers. We picked 127
-    # here since it is far beyond anything oslo_serialization will use itself.
- identity = 127
- handles = (revoke_model.RevokeTree,)
-
- def __init__(self, registry):
- self._registry = registry
-
- def serialize(self, obj):
- return msgpackutils.dumps(obj.revoke_map,
- registry=self._registry)
-
- def deserialize(self, data):
- revoke_map = msgpackutils.loads(data, registry=self._registry)
- revoke_tree = revoke_model.RevokeTree()
- revoke_tree.revoke_map = revoke_map
- return revoke_tree
-
-
-# Register our new handler.
-_registry = msgpackutils.default_registry
-_registry.frozen = False
-_registry.register(_RevokeModelHandler(registry=_registry))
-_registry.frozen = True
-
-
-class _ResponseCacheProxy(proxy.ProxyBackend):
-
- __key_pfx = '_request_cache_%s'
-
- def _get_request_context(self):
- # Return the current context or a new/empty context.
- return oslo_context.get_current() or oslo_context.RequestContext()
-
- def _get_request_key(self, key):
- return self.__key_pfx % key
-
- def _set_local_cache(self, key, value, ctx=None):
- # Set a serialized version of the returned value in local cache for
- # subsequent calls to the memoized method.
- if not ctx:
- ctx = self._get_request_context()
- serialize = {'payload': value.payload, 'metadata': value.metadata}
- setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
- ctx.update_store()
-
- def _get_local_cache(self, key):
- # Return the version from our local request cache if it exists.
- ctx = self._get_request_context()
- try:
- value = getattr(ctx, self._get_request_key(key))
- except AttributeError:
- return api.NO_VALUE
-
- value = msgpackutils.loads(value)
- return api.CachedValue(payload=value['payload'],
- metadata=value['metadata'])
-
- def _delete_local_cache(self, key):
- # On invalidate/delete remove the value from the local request cache
- ctx = self._get_request_context()
- try:
- delattr(ctx, self._get_request_key(key))
- ctx.update_store()
- except AttributeError: # nosec
-            # NOTE(morganfainberg): We simply pass here; this value has
-            # not been cached locally in the request.
- pass
-
- def get(self, key):
- value = self._get_local_cache(key)
- if value is api.NO_VALUE:
- value = self.proxied.get(key)
- if value is not api.NO_VALUE:
- self._set_local_cache(key, value)
- return value
-
- def set(self, key, value):
- self._set_local_cache(key, value)
- self.proxied.set(key, value)
-
- def delete(self, key):
- self._delete_local_cache(key)
- self.proxied.delete(key)
-
- def get_multi(self, keys):
- values = {}
- for key in keys:
- v = self._get_local_cache(key)
- if v is not api.NO_VALUE:
- values[key] = v
- query_keys = set(keys).difference(set(values.keys()))
- values.update(dict(
- zip(query_keys, self.proxied.get_multi(query_keys))))
- return [values[k] for k in keys]
-
- def set_multi(self, mapping):
- ctx = self._get_request_context()
- for k, v in mapping.items():
- self._set_local_cache(k, v, ctx)
- self.proxied.set_multi(mapping)
-
- def delete_multi(self, keys):
- for k in keys:
- self._delete_local_cache(k)
- self.proxied.delete_multi(keys)
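
For reference, a minimal sketch (not part of this commit) of how a
dogpile.cache ProxyBackend such as _ResponseCacheProxy is attached to a
region; the LoggingProxy class and the memory backend choice are
illustrative assumptions:

    from dogpile.cache import make_region
    from dogpile.cache.proxy import ProxyBackend

    class LoggingProxy(ProxyBackend):
        # Hypothetical proxy: log each lookup, then delegate to the real
        # backend via self.proxied, just as _ResponseCacheProxy does.
        def get(self, key):
            print('cache get: %s' % key)
            return self.proxied.get(key)

    region = make_region().configure('dogpile.cache.memory',
                                     wrap=[LoggingProxy])

    @region.cache_on_arguments()
    def double(x):
        return x * 2

    double(21)  # miss: logged, computed, stored
    double(21)  # hit: logged, served from the backend

keystone relies on the same wrap() hook (see core.py below) to layer this
request-local proxy over whichever backend the operator configured.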
diff --git a/keystone-moon/keystone/common/cache/_memcache_pool.py b/keystone-moon/keystone/common/cache/_memcache_pool.py
deleted file mode 100644
index 2bfcc3bb..00000000
--- a/keystone-moon/keystone/common/cache/_memcache_pool.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2014 Mirantis Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Thread-safe connection pool for python-memcached."""
-
-# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
-# and should be kept in sync until we can use external library for this.
-
-import collections
-import contextlib
-import itertools
-import logging
-import threading
-import time
-
-import memcache
-from oslo_log import log
-from six.moves import queue, zip
-
-from keystone import exception
-from keystone.i18n import _
-
-
-LOG = log.getLogger(__name__)
-
-
-class _MemcacheClient(memcache.Client):
- """Thread global memcache client
-
- As client is inherited from threading.local we have to restore object
- methods overloaded by threading.local so we can reuse clients in
- different threads
- """
- __delattr__ = object.__delattr__
- __getattribute__ = object.__getattribute__
- __new__ = object.__new__
- __setattr__ = object.__setattr__
-
- def __del__(self):
- pass
-
-
-_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
-
-
-class ConnectionPool(queue.Queue):
- """Base connection pool class
-
- This class implements the basic connection pool logic as an abstract base
- class.
- """
- def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
- """Initialize the connection pool.
-
- :param maxsize: maximum number of client connections for the pool
- :type maxsize: int
- :param unused_timeout: idle time to live for unused clients (in
- seconds). If a client connection object has been
- in the pool and idle for longer than the
- unused_timeout, it will be reaped. This is to
- ensure resources are released as utilization
- goes down.
- :type unused_timeout: int
- :param conn_get_timeout: maximum time in seconds to wait for a
- connection. If set to `None` timeout is
- indefinite.
- :type conn_get_timeout: int
- """
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- queue.Queue.__init__(self, maxsize)
- self._unused_timeout = unused_timeout
- self._connection_get_timeout = conn_get_timeout
- self._acquired = 0
-
- def _create_connection(self):
- """Returns a connection instance.
-
- This is called when the pool needs another instance created.
-
- :returns: a new connection instance
-
- """
- raise NotImplementedError
-
- def _destroy_connection(self, conn):
- """Destroy and cleanup a connection instance.
-
- This is called when the pool wishes to get rid of an existing
-        connection. This is the opportunity for a subclass to free up
-        resources and clean up after itself.
-
- :param conn: the connection object to destroy
-
- """
- raise NotImplementedError
-
- def _debug_logger(self, msg, *args, **kwargs):
- if LOG.isEnabledFor(logging.DEBUG):
- thread_id = threading.current_thread().ident
- args = (id(self), thread_id) + args
- prefix = 'Memcached pool %s, thread %s: '
- LOG.debug(prefix + msg, *args, **kwargs)
-
- @contextlib.contextmanager
- def acquire(self):
- self._debug_logger('Acquiring connection')
- try:
- conn = self.get(timeout=self._connection_get_timeout)
- except queue.Empty:
- raise exception.UnexpectedError(
- _('Unable to get a connection from pool id %(id)s after '
- '%(seconds)s seconds.') %
- {'id': id(self), 'seconds': self._connection_get_timeout})
- self._debug_logger('Acquired connection %s', id(conn))
- try:
- yield conn
- finally:
- self._debug_logger('Releasing connection %s', id(conn))
- self._drop_expired_connections()
- try:
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- queue.Queue.put(self, conn, block=False)
- except queue.Full:
-                self._debug_logger('Reaping excess connection %s', id(conn))
- self._destroy_connection(conn)
-
- def _qsize(self):
- if self.maxsize:
- return self.maxsize - self._acquired
- else:
- # A value indicating there is always a free connection
- # if maxsize is None or 0
- return 1
-
- # NOTE(dstanek): stdlib and eventlet Queue implementations
- # have different names for the qsize method. This ensures
- # that we override both of them.
- if not hasattr(queue.Queue, '_qsize'):
- qsize = _qsize
-
- def _get(self):
- if self.queue:
- conn = self.queue.pop().connection
- else:
- conn = self._create_connection()
- self._acquired += 1
- return conn
-
- def _drop_expired_connections(self):
- """Drop all expired connections from the right end of the queue."""
- now = time.time()
- while self.queue and self.queue[0].ttl < now:
- conn = self.queue.popleft().connection
- self._debug_logger('Reaping connection %s', id(conn))
- self._destroy_connection(conn)
-
- def _put(self, conn):
- self.queue.append(_PoolItem(
- ttl=time.time() + self._unused_timeout,
- connection=conn,
- ))
- self._acquired -= 1
-
-
-class MemcacheClientPool(ConnectionPool):
- def __init__(self, urls, arguments, **kwargs):
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- ConnectionPool.__init__(self, **kwargs)
- self.urls = urls
- self._arguments = arguments
- # NOTE(morganfainberg): The host objects expect an int for the
- # deaduntil value. Initialize this at 0 for each host with 0 indicating
- # the host is not dead.
- self._hosts_deaduntil = [0] * len(urls)
-
- def _create_connection(self):
- return _MemcacheClient(self.urls, **self._arguments)
-
- def _destroy_connection(self, conn):
- conn.disconnect_all()
-
- def _get(self):
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- conn = ConnectionPool._get(self)
- try:
- # Propagate host state known to us to this client's list
- now = time.time()
- for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
- if deaduntil > now and host.deaduntil <= now:
- host.mark_dead('propagating death mark from the pool')
- host.deaduntil = deaduntil
- except Exception:
-            # We need to be sure that the connection doesn't leak from the
- # This code runs before we enter context manager's try-finally
- # block, so we need to explicitly release it here.
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- ConnectionPool._put(self, conn)
- raise
- return conn
-
- def _put(self, conn):
- try:
- # If this client found that one of the hosts is dead, mark it as
- # such in our internal list
- now = time.time()
- for i, host in zip(itertools.count(), conn.servers):
- deaduntil = self._hosts_deaduntil[i]
- # Do nothing if we already know this host is dead
- if deaduntil <= now:
- if host.deaduntil > now:
- self._hosts_deaduntil[i] = host.deaduntil
- self._debug_logger(
- 'Marked host %s dead until %s',
- self.urls[i], host.deaduntil)
- else:
- self._hosts_deaduntil[i] = 0
-            # If all hosts are dead, we should forget that they're dead. This
-            # way we won't be completely shut off until dead_retry seconds
-            # pass, but will be checking servers as frequently as we can
-            # (over the much smaller socket_timeout).
- if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
- self._debug_logger('All hosts are dead. Marking them as live.')
- self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
- finally:
- # super() cannot be used here because Queue in stdlib is an
- # old-style class
- ConnectionPool._put(self, conn)
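
A hedged usage sketch for the pool above (not part of this commit); the
server address and sizing values are illustrative and assume a reachable
memcached instance:

    pool = MemcacheClientPool(['127.0.0.1:11211'], arguments={},
                              maxsize=10, unused_timeout=60,
                              conn_get_timeout=10)

    # acquire() pops a pooled client (or creates one), yields it, then
    # returns it to the queue with a fresh ttl; clients idle longer than
    # unused_timeout are reaped on release.
    with pool.acquire() as client:
        client.set('greeting', 'hello', 300)  # cache for 300 seconds
        assert client.get('greeting') == 'hello'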
diff --git a/keystone-moon/keystone/common/cache/backends/__init__.py b/keystone-moon/keystone/common/cache/backends/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/keystone-moon/keystone/common/cache/backends/__init__.py
+++ /dev/null
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
deleted file mode 100644
index bbe4785a..00000000
--- a/keystone-moon/keystone/common/cache/backends/memcache_pool.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2014 Mirantis Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""This module is deprecated."""
-
-from oslo_cache.backends import memcache_pool
-from oslo_log import versionutils
-
-
-@versionutils.deprecated(
- versionutils.deprecated.MITAKA,
- what='keystone.cache.memcache_pool backend',
- in_favor_of='oslo_cache.memcache_pool backend',
- remove_in=+1)
-class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
- pass
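
Since the class above is only a deprecation shim around the oslo_cache
backend, deployments were expected to reference the new backend name
directly. A hypothetical keystone.conf excerpt making that switch, using
standard oslo.cache option names:

    [cache]
    enabled = true
    backend = oslo_cache.memcache_pool
    memcache_servers = 127.0.0.1:11211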
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
deleted file mode 100644
index 861aefed..00000000
--- a/keystone-moon/keystone/common/cache/backends/mongo.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_cache.backends import mongo
-from oslo_log import versionutils
-
-
-@versionutils.deprecated(
- versionutils.deprecated.MITAKA,
- what='keystone.cache.mongo backend',
- in_favor_of='oslo_cache.mongo backend',
- remove_in=+1)
-class MongoCacheBackend(mongo.MongoCacheBackend):
- pass
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
deleted file mode 100644
index eda06ec8..00000000
--- a/keystone-moon/keystone/common/cache/backends/noop.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from dogpile.cache import api
-from oslo_log import versionutils
-
-
-NO_VALUE = api.NO_VALUE
-
-
-@versionutils.deprecated(
- versionutils.deprecated.MITAKA,
- what='keystone.common.cache.noop backend',
- in_favor_of="dogpile.cache's Null backend",
- remove_in=+1)
-class NoopCacheBackend(api.CacheBackend):
- """A no op backend as a default caching backend.
-
- The no op backend is provided as the default caching backend for keystone
- to ensure that ``dogpile.cache.memory`` is not used in any real-world
- circumstances unintentionally. ``dogpile.cache.memory`` does not have a
- mechanism to cleanup it's internal dict and therefore could cause run-away
- memory utilization.
- """
-
- def __init__(self, *args):
- return
-
- def get(self, key):
- return NO_VALUE
-
- def get_multi(self, keys):
- return [NO_VALUE for x in keys]
-
- def set(self, key, value):
- return
-
- def set_multi(self, mapping):
- return
-
- def delete(self, key):
- return
-
- def delete_multi(self, keys):
- return
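
A hedged sketch (not part of this commit) of what the no-op behavior means
in practice; it assumes keystone.common.cache.core has been imported so
that the 'keystone.common.cache.noop' backend key is registered with
dogpile:

    from dogpile.cache import make_region

    import keystone.common.cache.core  # noqa: registers the noop backend

    region = make_region().configure('keystone.common.cache.noop')

    @region.cache_on_arguments()
    def lookup(name):
        print('computing %s' % name)
        return name.upper()

    lookup('alice')  # prints "computing alice"
    lookup('alice')  # prints again: get() always returns NO_VALUE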
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
deleted file mode 100644
index 6bb0af51..00000000
--- a/keystone-moon/keystone/common/cache/core.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Keystone Caching Layer Implementation."""
-import dogpile.cache
-from dogpile.cache import api
-from oslo_cache import core as cache
-from oslo_config import cfg
-
-from keystone.common.cache import _context_cache
-
-
-CONF = cfg.CONF
-CACHE_REGION = cache.create_region()
-
-
-def configure_cache(region=None):
- if region is None:
- region = CACHE_REGION
-    # NOTE(morganfainberg): running cache.configure_cache_region()
-    # sets region.is_configured, so the flag must be captured before
-    # cache.configure_cache_region() is called.
- configured = region.is_configured
- cache.configure_cache_region(CONF, region)
- # Only wrap the region if it was not configured. This should be pushed
- # to oslo_cache lib somehow.
- if not configured:
- region.wrap(_context_cache._ResponseCacheProxy)
-
-
-def get_memoization_decorator(group, expiration_group=None, region=None):
- if region is None:
- region = CACHE_REGION
- return cache.get_memoization_decorator(CONF, region, group,
- expiration_group=expiration_group)
-
-
-# NOTE(stevemar): When memcache_pool, mongo and noop backends are removed
-# we no longer need to register the backends here.
-dogpile.cache.register_backend(
- 'keystone.common.cache.noop',
- 'keystone.common.cache.backends.noop',
- 'NoopCacheBackend')
-
-dogpile.cache.register_backend(
- 'keystone.cache.mongo',
- 'keystone.common.cache.backends.mongo',
- 'MongoCacheBackend')
-
-dogpile.cache.register_backend(
- 'keystone.cache.memcache_pool',
- 'keystone.common.cache.backends.memcache_pool',
- 'PooledMemcachedBackend')
-
-
-# TODO(morganfainberg): Move this logic up into oslo.cache directly
-# so we can handle region-wide invalidations or alternatively propose
-# a fix to dogpile.cache to make region-wide invalidates possible to
-# work across distributed processes.
-class _RegionInvalidator(object):
-
- def __init__(self, region, region_name):
- self.region = region
- self.region_name = region_name
- region_key = '_RegionExpiration.%(type)s.%(region_name)s'
- self.soft_region_key = region_key % {'type': 'soft',
- 'region_name': self.region_name}
- self.hard_region_key = region_key % {'type': 'hard',
- 'region_name': self.region_name}
-
- @property
- def hard_invalidated(self):
- invalidated = self.region.backend.get(self.hard_region_key)
- if invalidated is not api.NO_VALUE:
- return invalidated.payload
- return None
-
- @hard_invalidated.setter
- def hard_invalidated(self, value):
- self.region.set(self.hard_region_key, value)
-
- @hard_invalidated.deleter
- def hard_invalidated(self):
- self.region.delete(self.hard_region_key)
-
- @property
- def soft_invalidated(self):
- invalidated = self.region.backend.get(self.soft_region_key)
- if invalidated is not api.NO_VALUE:
- return invalidated.payload
- return None
-
- @soft_invalidated.setter
- def soft_invalidated(self, value):
- self.region.set(self.soft_region_key, value)
-
- @soft_invalidated.deleter
- def soft_invalidated(self):
- self.region.delete(self.soft_region_key)
-
-
-def apply_invalidation_patch(region, region_name):
- """Patch the region interfaces to ensure we share the expiration time.
-
- This method is used to patch region.invalidate, region._hard_invalidated,
- and region._soft_invalidated.
- """
- # Patch the region object. This logic needs to be moved up into dogpile
- # itself. Patching the internal interfaces, unfortunately, is the only
- # way to handle this at the moment.
- invalidator = _RegionInvalidator(region=region, region_name=region_name)
- setattr(region, '_hard_invalidated', invalidator.hard_invalidated)
- setattr(region, '_soft_invalidated', invalidator.soft_invalidated)
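
A hedged sketch of how keystone's other subsystems consumed this module;
the 'role' group name and the get_role function are illustrative, and
keystone's configuration is assumed to be loaded before configure_cache()
runs:

    from keystone.common.cache import core as cache

    cache.configure_cache()  # configure CACHE_REGION and wrap it once

    MEMOIZE = cache.get_memoization_decorator(group='role')

    @MEMOIZE
    def get_role(role_id):
        # Expensive driver call; the result is cached with the expiration
        # that oslo.cache resolves from the [role]/[cache] config groups.
        return {'id': role_id}

    get_role('123')             # first call runs the function body
    get_role('123')             # second call is served from the region
    get_role.invalidate('123')  # dogpile hook: drop the cached entry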