author    DUVAL Thomas <thomas.duval@orange.com>    2016-06-09 09:11:50 +0200
committer DUVAL Thomas <thomas.duval@orange.com>    2016-06-09 09:11:50 +0200
commit    2e7b4f2027a1147ca28301e4f88adf8274b39a1f (patch)
tree      8b8d94001ebe6cc34106cf813b538911a8d66d9a /keystone-moon/keystone/common/cache
parent    a33bdcb627102a01244630a54cb4b5066b385a6a (diff)
Update Keystone core to Mitaka.
Change-Id: Ia10d6add16f4a9d25d1f42d420661c46332e69db
Diffstat (limited to 'keystone-moon/keystone/common/cache')
-rw-r--r--  keystone-moon/keystone/common/cache/_context_cache.py          129
-rw-r--r--  keystone-moon/keystone/common/cache/backends/memcache_pool.py   53
-rw-r--r--  keystone-moon/keystone/common/cache/backends/mongo.py          554
-rw-r--r--  keystone-moon/keystone/common/cache/backends/noop.py             7
-rw-r--r--  keystone-moon/keystone/common/cache/core.py                    352
5 files changed, 239 insertions, 856 deletions
diff --git a/keystone-moon/keystone/common/cache/_context_cache.py b/keystone-moon/keystone/common/cache/_context_cache.py
new file mode 100644
index 00000000..3895ca1f
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/_context_cache.py
@@ -0,0 +1,129 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A dogpile.cache proxy that caches objects in the request local cache."""
+from dogpile.cache import api
+from dogpile.cache import proxy
+from oslo_context import context as oslo_context
+from oslo_serialization import msgpackutils
+
+from keystone.models import revoke_model
+
+
+class _RevokeModelHandler(object):
+ # NOTE(morganfainberg): Reserved "registry" entries need to be set in
+ # oslo_serialization for application-specific handlers. We picked 127
+ # since it is far beyond anything oslo_serialization will use itself.
+ identity = 127
+ handles = (revoke_model.RevokeTree,)
+
+ def __init__(self, registry):
+ self._registry = registry
+
+ def serialize(self, obj):
+ return msgpackutils.dumps(obj.revoke_map,
+ registry=self._registry)
+
+ def deserialize(self, data):
+ revoke_map = msgpackutils.loads(data, registry=self._registry)
+ revoke_tree = revoke_model.RevokeTree()
+ revoke_tree.revoke_map = revoke_map
+ return revoke_tree
+
+
+# Register our new handler.
+_registry = msgpackutils.default_registry
+_registry.frozen = False
+_registry.register(_RevokeModelHandler(registry=_registry))
+_registry.frozen = True
+
+
+class _ResponseCacheProxy(proxy.ProxyBackend):
+
+ __key_pfx = '_request_cache_%s'
+
+ def _get_request_context(self):
+ # Return the current context or a new/empty context.
+ return oslo_context.get_current() or oslo_context.RequestContext()
+
+ def _get_request_key(self, key):
+ return self.__key_pfx % key
+
+ def _set_local_cache(self, key, value, ctx=None):
+ # Set a serialized version of the returned value in local cache for
+ # subsequent calls to the memoized method.
+ if not ctx:
+ ctx = self._get_request_context()
+ serialize = {'payload': value.payload, 'metadata': value.metadata}
+ setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
+ ctx.update_store()
+
+ def _get_local_cache(self, key):
+ # Return the version from our local request cache if it exists.
+ ctx = self._get_request_context()
+ try:
+ value = getattr(ctx, self._get_request_key(key))
+ except AttributeError:
+ return api.NO_VALUE
+
+ value = msgpackutils.loads(value)
+ return api.CachedValue(payload=value['payload'],
+ metadata=value['metadata'])
+
+ def _delete_local_cache(self, key):
+ # On invalidate/delete, remove the value from the local request cache.
+ ctx = self._get_request_context()
+ try:
+ delattr(ctx, self._get_request_key(key))
+ ctx.update_store()
+ except AttributeError: # nosec
+ # NOTE(morganfainberg): Simply pass here; this value has not been
+ # cached locally in the request.
+ pass
+
+ def get(self, key):
+ value = self._get_local_cache(key)
+ if value is api.NO_VALUE:
+ value = self.proxied.get(key)
+ if value is not api.NO_VALUE:
+ self._set_local_cache(key, value)
+ return value
+
+ def set(self, key, value):
+ self._set_local_cache(key, value)
+ self.proxied.set(key, value)
+
+ def delete(self, key):
+ self._delete_local_cache(key)
+ self.proxied.delete(key)
+
+ def get_multi(self, keys):
+ values = {}
+ for key in keys:
+ v = self._get_local_cache(key)
+ if v is not api.NO_VALUE:
+ values[key] = v
+ query_keys = set(keys).difference(set(values.keys()))
+ values.update(dict(
+ zip(query_keys, self.proxied.get_multi(query_keys))))
+ return [values[k] for k in keys]
+
+ def set_multi(self, mapping):
+ ctx = self._get_request_context()
+ for k, v in mapping.items():
+ self._set_local_cache(k, v, ctx)
+ self.proxied.set_multi(mapping)
+
+ def delete_multi(self, keys):
+ for k in keys:
+ self._delete_local_cache(k)
+ self.proxied.delete_multi(keys)
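
(Editor's illustration, not part of the commit: the proxy above is meant to be
layered onto a dogpile region so that, within one oslo.context request,
repeated reads are served from the request-local store rather than the real
backend. A minimal sketch, assuming dogpile.cache's memory backend and an
active RequestContext; 'greeting' is just a placeholder key.)

    from dogpile.cache import make_region
    from oslo_context import context as oslo_context

    from keystone.common.cache import _context_cache

    region = make_region()
    region.configure('dogpile.cache.memory')
    region.wrap(_context_cache._ResponseCacheProxy)

    oslo_context.RequestContext()    # registers itself as the current context
    region.set('greeting', 'hello')  # written to backend and request cache
    assert region.get('greeting') == 'hello'  # served from the request cache
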
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
index f3990b12..bbe4785a 100644
--- a/keystone-moon/keystone/common/cache/backends/memcache_pool.py
+++ b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
@@ -13,49 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""dogpile.cache backend that uses Memcached connection pool"""
+"""This module is deprecated."""
-import functools
-import logging
+from oslo_cache.backends import memcache_pool
+from oslo_log import versionutils
-from dogpile.cache.backends import memcached as memcached_backend
-from keystone.common.cache import _memcache_pool
-
-
-LOG = logging.getLogger(__name__)
-
-
-# Helper to ease backend refactoring
-class ClientProxy(object):
- def __init__(self, client_pool):
- self.client_pool = client_pool
-
- def _run_method(self, __name, *args, **kwargs):
- with self.client_pool.acquire() as client:
- return getattr(client, __name)(*args, **kwargs)
-
- def __getattr__(self, name):
- return functools.partial(self._run_method, name)
-
-
-class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
- # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
- def __init__(self, arguments):
- super(PooledMemcachedBackend, self).__init__(arguments)
- self.client_pool = _memcache_pool.MemcacheClientPool(
- self.url,
- arguments={
- 'dead_retry': arguments.get('dead_retry', 5 * 60),
- 'socket_timeout': arguments.get('socket_timeout', 3),
- },
- maxsize=arguments.get('pool_maxsize', 10),
- unused_timeout=arguments.get('pool_unused_timeout', 60),
- conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
- )
-
- # Since all methods in backend just call one of methods of client, this
- # lets us avoid need to hack it too much
- @property
- def client(self):
- return ClientProxy(self.client_pool)
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.cache.memcache_pool backend',
+ in_favor_of='oslo_cache.memcache_pool backend',
+ remove_in=+1)
+class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
+ pass
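
(Editor's illustration: new configurations can point dogpile at the oslo.cache
backend named in the deprecation notice; the shim above only keeps the old
keystone name working for one more cycle. A sketch, assuming oslo.cache is
installed and a memcached server is reachable at the placeholder address.)

    from dogpile.cache import make_region

    region = make_region()
    region.configure(
        'oslo_cache.memcache_pool',  # preferred backend, shipped by oslo.cache
        arguments={
            'url': ['127.0.0.1:11211'],  # placeholder memcached endpoint
            'pool_maxsize': 10,
        },
    )
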
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
index cb5ad833..861aefed 100644
--- a/keystone-moon/keystone/common/cache/backends/mongo.py
+++ b/keystone-moon/keystone/common/cache/backends/mongo.py
@@ -12,550 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import abc
-import datetime
+from oslo_cache.backends import mongo
+from oslo_log import versionutils
-from dogpile.cache import api
-from dogpile.cache import util as dp_util
-from oslo_log import log
-from oslo_utils import importutils
-from oslo_utils import timeutils
-import six
-from keystone import exception
-from keystone.i18n import _, _LW
-
-
-NO_VALUE = api.NO_VALUE
-LOG = log.getLogger(__name__)
-
-
-class MongoCacheBackend(api.CacheBackend):
- """A MongoDB based caching backend implementing dogpile backend APIs.
-
- Arguments accepted in the arguments dictionary:
-
- :param db_hosts: string (required), hostname or IP address of the
- MongoDB server instance. This can be a single MongoDB connection URI,
- or a list of MongoDB connection URIs.
-
- :param db_name: string (required), the name of the database to be used.
-
- :param cache_collection: string (required), the name of collection to store
- cached data.
- *Note:* Different collection name can be provided if there is need to
- create separate container (i.e. collection) for cache data. So region
- configuration is done per collection.
-
- Following are optional parameters for MongoDB backend configuration,
-
- :param username: string, the name of the user to authenticate.
-
- :param password: string, the password of the user to authenticate.
-
- :param max_pool_size: integer, the maximum number of connections that the
- pool will open simultaneously. By default the pool size is 10.
-
- :param w: integer, write acknowledgement for MongoDB client
-
- If not provided, then no default is set on MongoDB and then write
- acknowledgement behavior occurs as per MongoDB default. This parameter
- name is same as what is used in MongoDB docs. This value is specified
- at collection level so its applicable to `cache_collection` db write
- operations.
-
- If this is a replica set, write operations will block until they have
- been replicated to the specified number or tagged set of servers.
- Setting w=0 disables write acknowledgement and all other write concern
- options.
-
- :param read_preference: string, the read preference mode for MongoDB client
- Expected value is ``primary``, ``primaryPreferred``, ``secondary``,
- ``secondaryPreferred``, or ``nearest``. This read_preference is
- specified at collection level so its applicable to `cache_collection`
- db read operations.
-
- :param use_replica: boolean, flag to indicate if replica client to be
- used. Default is `False`. `replicaset_name` value is required if
- `True`.
-
- :param replicaset_name: string, name of replica set.
- Becomes required if `use_replica` is `True`
-
- :param son_manipulator: string, name of class with module name which
- implements MongoDB SONManipulator.
- Default manipulator used is :class:`.BaseTransform`.
-
- This manipulator is added per database. In multiple cache
- configurations, the manipulator name should be same if same
- database name ``db_name`` is used in those configurations.
-
- SONManipulator is used to manipulate custom data types as they are
- saved or retrieved from MongoDB. Custom impl is only needed if cached
- data is custom class and needs transformations when saving or reading
- from db. If dogpile cached value contains built-in data types, then
- BaseTransform class is sufficient as it already handles dogpile
- CachedValue class transformation.
-
- :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
- time-to-live value.
- If value is greater than 0, then its assumed that cache_collection
- needs to be TTL type (has index at 'doc_date' field).
- By default, the value is -1 and its disabled.
- Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
-
- .. NOTE::
-
- This parameter is different from Dogpile own
- expiration_time, which is the number of seconds after which Dogpile
- will consider the value to be expired. When Dogpile considers a
- value to be expired, it continues to use the value until generation
- of a new value is complete, when using CacheRegion.get_or_create().
- Therefore, if you are setting `mongo_ttl_seconds`, you will want to
- make sure it is greater than expiration_time by at least enough
- seconds for new values to be generated, else the value would not
- be available during a regeneration, forcing all threads to wait for
- a regeneration each time a value expires.
-
- :param ssl: boolean, If True, create the connection to the server
- using SSL. Default is `False`. Client SSL connection parameters depends
- on server side SSL setup. For further reference on SSL configuration:
- <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
-
- :param ssl_keyfile: string, the private keyfile used to identify the
- local connection against mongod. If included with the certfile then
- only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
-
- :param ssl_certfile: string, the certificate file used to identify the
- local connection against mongod. Used only when `ssl` is `True`.
-
- :param ssl_ca_certs: string, the ca_certs file contains a set of
- concatenated 'certification authority' certificates, which are used to
- validate certificates passed from the other end of the connection.
- Used only when `ssl` is `True`.
-
- :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
- a certificate is required from the other side of the connection, and
- whether it will be validated if provided. It must be one of the three
- values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
- (not required, but validated if provided), or
- ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
- parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
- must point to a file of CA certificates. Used only when `ssl`
- is `True`.
-
- Rest of arguments are passed to mongo calls for read, write and remove.
- So related options can be specified to pass to these operations.
-
- Further details of various supported arguments can be referred from
- <http://api.mongodb.org/python/current/api/pymongo/>
-
- """
-
- def __init__(self, arguments):
- self.api = MongoApi(arguments)
-
- @dp_util.memoized_property
- def client(self):
- """Initializes MongoDB connection and collection defaults.
-
- This initialization is done only once and performed as part of lazy
- inclusion of MongoDB dependency i.e. add imports only if related
- backend is used.
-
- :return: :class:`.MongoApi` instance
- """
- self.api.get_cache_collection()
- return self.api
-
- def get(self, key):
- value = self.client.get(key)
- if value is None:
- return NO_VALUE
- else:
- return value
-
- def get_multi(self, keys):
- values = self.client.get_multi(keys)
- return [
- NO_VALUE if key not in values
- else values[key] for key in keys
- ]
-
- def set(self, key, value):
- self.client.set(key, value)
-
- def set_multi(self, mapping):
- self.client.set_multi(mapping)
-
- def delete(self, key):
- self.client.delete(key)
-
- def delete_multi(self, keys):
- self.client.delete_multi(keys)
-
-
-class MongoApi(object):
- """Class handling MongoDB specific functionality.
-
- This class uses PyMongo APIs internally to create database connection
- with configured pool size, ensures unique index on key, does database
- authentication and ensure TTL collection index if configured so.
- This class also serves as handle to cache collection for dogpile cache
- APIs.
-
- In a single deployment, multiple cache configuration can be defined. In
- that case of multiple cache collections usage, db client connection pool
- is shared when cache collections are within same database.
- """
-
- # class level attributes for re-use of db client connection and collection
- _DB = {} # dict of db_name: db connection reference
- _MONGO_COLLS = {} # dict of cache_collection : db collection reference
-
- def __init__(self, arguments):
- self._init_args(arguments)
- self._data_manipulator = None
-
- def _init_args(self, arguments):
- """Helper logic for collecting and parsing MongoDB specific arguments.
-
- The arguments passed in are separated out in connection specific
- setting and rest of arguments are passed to create/update/delete
- db operations.
- """
- self.conn_kwargs = {} # connection specific arguments
-
- self.hosts = arguments.pop('db_hosts', None)
- if self.hosts is None:
- msg = _('db_hosts value is required')
- raise exception.ValidationError(message=msg)
-
- self.db_name = arguments.pop('db_name', None)
- if self.db_name is None:
- msg = _('database db_name is required')
- raise exception.ValidationError(message=msg)
-
- self.cache_collection = arguments.pop('cache_collection', None)
- if self.cache_collection is None:
- msg = _('cache_collection name is required')
- raise exception.ValidationError(message=msg)
-
- self.username = arguments.pop('username', None)
- self.password = arguments.pop('password', None)
- self.max_pool_size = arguments.pop('max_pool_size', 10)
-
- self.w = arguments.pop('w', -1)
- try:
- self.w = int(self.w)
- except ValueError:
- msg = _('integer value expected for w (write concern attribute)')
- raise exception.ValidationError(message=msg)
-
- self.read_preference = arguments.pop('read_preference', None)
-
- self.use_replica = arguments.pop('use_replica', False)
- if self.use_replica:
- if arguments.get('replicaset_name') is None:
- msg = _('replicaset_name required when use_replica is True')
- raise exception.ValidationError(message=msg)
- self.replicaset_name = arguments.get('replicaset_name')
-
- self.son_manipulator = arguments.pop('son_manipulator', None)
-
- # set if mongo collection needs to be TTL type.
- # This needs to be max ttl for any cache entry.
- # By default, -1 means don't use TTL collection.
- # With ttl set, it creates related index and have doc_date field with
- # needed expiration interval
- self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
- try:
- self.ttl_seconds = int(self.ttl_seconds)
- except ValueError:
- msg = _('integer value expected for mongo_ttl_seconds')
- raise exception.ValidationError(message=msg)
-
- self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
- if self.conn_kwargs['ssl']:
- ssl_keyfile = arguments.pop('ssl_keyfile', None)
- ssl_certfile = arguments.pop('ssl_certfile', None)
- ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
- ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
- if ssl_keyfile:
- self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
- if ssl_certfile:
- self.conn_kwargs['ssl_certfile'] = ssl_certfile
- if ssl_ca_certs:
- self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
- if ssl_cert_reqs:
- self.conn_kwargs['ssl_cert_reqs'] = (
- self._ssl_cert_req_type(ssl_cert_reqs))
-
- # rest of arguments are passed to mongo crud calls
- self.meth_kwargs = arguments
-
- def _ssl_cert_req_type(self, req_type):
- try:
- import ssl
- except ImportError:
- raise exception.ValidationError(_('no ssl support available'))
- req_type = req_type.upper()
- try:
- return {
- 'NONE': ssl.CERT_NONE,
- 'OPTIONAL': ssl.CERT_OPTIONAL,
- 'REQUIRED': ssl.CERT_REQUIRED
- }[req_type]
- except KeyError:
- msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
- '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
- raise exception.ValidationError(message=msg)
-
- def _get_db(self):
- # defer imports until backend is used
- global pymongo
- import pymongo
- if self.use_replica:
- connection = pymongo.MongoReplicaSetClient(
- host=self.hosts, replicaSet=self.replicaset_name,
- max_pool_size=self.max_pool_size, **self.conn_kwargs)
- else: # used for standalone node or mongos in sharded setup
- connection = pymongo.MongoClient(
- host=self.hosts, max_pool_size=self.max_pool_size,
- **self.conn_kwargs)
-
- database = getattr(connection, self.db_name)
-
- self._assign_data_mainpulator()
- database.add_son_manipulator(self._data_manipulator)
- if self.username and self.password:
- database.authenticate(self.username, self.password)
- return database
-
- def _assign_data_mainpulator(self):
- if self._data_manipulator is None:
- if self.son_manipulator:
- self._data_manipulator = importutils.import_object(
- self.son_manipulator)
- else:
- self._data_manipulator = BaseTransform()
-
- def _get_doc_date(self):
- if self.ttl_seconds > 0:
- expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
- doc_date = timeutils.utcnow() + expire_delta
- else:
- doc_date = timeutils.utcnow()
- return doc_date
-
- def get_cache_collection(self):
- if self.cache_collection not in self._MONGO_COLLS:
- global pymongo
- import pymongo
- # re-use db client connection if already defined as part of
- # earlier dogpile cache configuration
- if self.db_name not in self._DB:
- self._DB[self.db_name] = self._get_db()
- coll = getattr(self._DB[self.db_name], self.cache_collection)
-
- self._assign_data_mainpulator()
- if self.read_preference:
- # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name
- f = getattr(pymongo.read_preferences,
- 'read_pref_mode_from_name', None)
- if not f:
- f = pymongo.read_preferences.mongos_enum
- self.read_preference = f(self.read_preference)
- coll.read_preference = self.read_preference
- if self.w > -1:
- coll.write_concern['w'] = self.w
- if self.ttl_seconds > 0:
- kwargs = {'expireAfterSeconds': self.ttl_seconds}
- coll.ensure_index('doc_date', cache_for=5, **kwargs)
- else:
- self._validate_ttl_index(coll, self.cache_collection,
- self.ttl_seconds)
- self._MONGO_COLLS[self.cache_collection] = coll
-
- return self._MONGO_COLLS[self.cache_collection]
-
- def _get_cache_entry(self, key, value, meta, doc_date):
- """MongoDB cache data representation.
-
- Storing cache key as ``_id`` field as MongoDB by default creates
- unique index on this field. So no need to create separate field and
- index for storing cache key. Cache data has additional ``doc_date``
- field for MongoDB TTL collection support.
- """
- return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
-
- def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
- """Checks if existing TTL index is removed on a collection.
-
- This logs warning when existing collection has TTL index defined and
- new cache configuration tries to disable index with
- ``mongo_ttl_seconds < 0``. In that case, existing index needs
- to be addressed first to make new configuration effective.
- Refer to MongoDB documentation around TTL index for further details.
- """
- indexes = collection.index_information()
- for indx_name, index_data in indexes.items():
- if all(k in index_data for k in ('key', 'expireAfterSeconds')):
- existing_value = index_data['expireAfterSeconds']
- fld_present = 'doc_date' in index_data['key'][0]
- if fld_present and existing_value > -1 and ttl_seconds < 1:
- msg = _LW('TTL index already exists on db collection '
- '<%(c_name)s>, remove index <%(indx_name)s> '
- 'first to make updated mongo_ttl_seconds value '
- 'to be effective')
- LOG.warn(msg, {'c_name': coll_name,
- 'indx_name': indx_name})
-
- def get(self, key):
- critieria = {'_id': key}
- result = self.get_cache_collection().find_one(spec_or_id=critieria,
- **self.meth_kwargs)
- if result:
- return result['value']
- else:
- return None
-
- def get_multi(self, keys):
- db_results = self._get_results_as_dict(keys)
- return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
-
- def _get_results_as_dict(self, keys):
- critieria = {'_id': {'$in': keys}}
- db_results = self.get_cache_collection().find(spec=critieria,
- **self.meth_kwargs)
- return {doc['_id']: doc for doc in db_results}
-
- def set(self, key, value):
- doc_date = self._get_doc_date()
- ref = self._get_cache_entry(key, value.payload, value.metadata,
- doc_date)
- spec = {'_id': key}
- # find and modify does not have manipulator support
- # so need to do conversion as part of input document
- ref = self._data_manipulator.transform_incoming(ref, self)
- self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
- **self.meth_kwargs)
-
- def set_multi(self, mapping):
- """Insert multiple documents specified as key, value pairs.
-
- In this case, multiple documents can be added via insert provided they
- do not exist.
- Update of multiple existing documents is done one by one
- """
- doc_date = self._get_doc_date()
- insert_refs = []
- update_refs = []
- existing_docs = self._get_results_as_dict(list(mapping.keys()))
- for key, value in mapping.items():
- ref = self._get_cache_entry(key, value.payload, value.metadata,
- doc_date)
- if key in existing_docs:
- ref['_id'] = existing_docs[key]['_id']
- update_refs.append(ref)
- else:
- insert_refs.append(ref)
- if insert_refs:
- self.get_cache_collection().insert(insert_refs, manipulate=True,
- **self.meth_kwargs)
- for upd_doc in update_refs:
- self.get_cache_collection().save(upd_doc, manipulate=True,
- **self.meth_kwargs)
-
- def delete(self, key):
- critieria = {'_id': key}
- self.get_cache_collection().remove(spec_or_id=critieria,
- **self.meth_kwargs)
-
- def delete_multi(self, keys):
- critieria = {'_id': {'$in': keys}}
- self.get_cache_collection().remove(spec_or_id=critieria,
- **self.meth_kwargs)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AbstractManipulator(object):
- """Abstract class with methods which need to be implemented for custom
- manipulation.
-
- Adding this as a base class for :class:`.BaseTransform` instead of adding
- import dependency of pymongo specific class i.e.
- `pymongo.son_manipulator.SONManipulator` and using that as base class.
- This is done to avoid pymongo dependency if MongoDB backend is not used.
- """
- @abc.abstractmethod
- def transform_incoming(self, son, collection):
- """Used while saving data to MongoDB.
-
- :param son: the SON object to be inserted into the database
- :param collection: the collection the object is being inserted into
-
- :returns: transformed SON object
-
- """
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def transform_outgoing(self, son, collection):
- """Used while reading data from MongoDB.
-
- :param son: the SON object being retrieved from the database
- :param collection: the collection this object was stored in
-
- :returns: transformed SON object
- """
- raise exception.NotImplemented() # pragma: no cover
-
- def will_copy(self):
- """Will this SON manipulator make a copy of the incoming document?
-
- Derived classes that do need to make a copy should override this
- method, returning `True` instead of `False`.
-
- :returns: boolean
- """
- return False
-
-
-class BaseTransform(AbstractManipulator):
- """Base transformation class to store and read dogpile cached data
- from MongoDB.
-
- This is needed as dogpile internally stores data as a custom class
- i.e. dogpile.cache.api.CachedValue
-
- Note: Custom manipulator needs to always override ``transform_incoming``
- and ``transform_outgoing`` methods. MongoDB manipulator logic specifically
- checks that overridden method in instance and its super are different.
- """
-
- def transform_incoming(self, son, collection):
- """Used while saving data to MongoDB."""
- for (key, value) in list(son.items()):
- if isinstance(value, api.CachedValue):
- son[key] = value.payload # key is 'value' field here
- son['meta'] = value.metadata
- elif isinstance(value, dict): # Make sure we recurse into sub-docs
- son[key] = self.transform_incoming(value, collection)
- return son
-
- def transform_outgoing(self, son, collection):
- """Used while reading data from MongoDB."""
- metadata = None
- # make sure its top level dictionary with all expected fields names
- # present
- if isinstance(son, dict) and all(k in son for k in
- ('_id', 'value', 'meta', 'doc_date')):
- payload = son.pop('value', None)
- metadata = son.pop('meta', None)
- for (key, value) in list(son.items()):
- if isinstance(value, dict):
- son[key] = self.transform_outgoing(value, collection)
- if metadata is not None:
- son['value'] = api.CachedValue(payload, metadata)
- return son
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.cache.mongo backend',
+ in_favor_of='oslo_cache.mongo backend',
+ remove_in=+1)
+class MongoCacheBackend(mongo.MongoCacheBackend):
+ pass
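
(Editor's illustration: as with the memcache_pool shim, the keystone-named
class now only subclasses the oslo.cache implementation and adds a Mitaka
deprecation warning. Assuming both packages are importable, the relationship
can be checked directly.)

    from keystone.common.cache.backends import mongo as ks_mongo
    from oslo_cache.backends import mongo as oslo_mongo

    # The shim contributes nothing beyond the deprecation warning.
    assert issubclass(ks_mongo.MongoCacheBackend, oslo_mongo.MongoCacheBackend)
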
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
index 38329c94..eda06ec8 100644
--- a/keystone-moon/keystone/common/cache/backends/noop.py
+++ b/keystone-moon/keystone/common/cache/backends/noop.py
@@ -13,11 +13,17 @@
# under the License.
from dogpile.cache import api
+from oslo_log import versionutils
NO_VALUE = api.NO_VALUE
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.common.cache.noop backend',
+ in_favor_of="dogpile.cache's Null backend",
+ remove_in=+1)
class NoopCacheBackend(api.CacheBackend):
"""A no op backend as a default caching backend.
@@ -27,6 +33,7 @@ class NoopCacheBackend(api.CacheBackend):
mechanism to clean up its internal dict and therefore could cause runaway
memory utilization.
"""
+
def __init__(self, *args):
return
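
(Editor's illustration: dogpile.cache's built-in null backend, the replacement
named in the deprecation notice above, behaves the same way: writes are
discarded and every read is a miss. A minimal sketch.)

    from dogpile.cache import make_region
    from dogpile.cache.api import NO_VALUE

    region = make_region().configure('dogpile.cache.null')
    region.set('key', 'value')            # silently discarded
    assert region.get('key') is NO_VALUE  # every lookup is a cache miss
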
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
index 306587b3..6bb0af51 100644
--- a/keystone-moon/keystone/common/cache/core.py
+++ b/keystone-moon/keystone/common/cache/core.py
@@ -13,23 +13,41 @@
# under the License.
"""Keystone Caching Layer Implementation."""
-
import dogpile.cache
-from dogpile.cache import proxy
-from dogpile.cache import util
+from dogpile.cache import api
+from oslo_cache import core as cache
from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import importutils
-from keystone import exception
-from keystone.i18n import _, _LE
+from keystone.common.cache import _context_cache
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
+CACHE_REGION = cache.create_region()
+
+
+def configure_cache(region=None):
+ if region is None:
+ region = CACHE_REGION
+ # NOTE(morganfainberg): running cache.configure_cache_region()
+ # sets region.is_configured, so the flag must be captured before
+ # cache.configure_cache_region is called.
+ configured = region.is_configured
+ cache.configure_cache_region(CONF, region)
+ # Only wrap the region if it was not configured. This should be pushed
+ # to oslo_cache lib somehow.
+ if not configured:
+ region.wrap(_context_cache._ResponseCacheProxy)
+
+
+def get_memoization_decorator(group, expiration_group=None, region=None):
+ if region is None:
+ region = CACHE_REGION
+ return cache.get_memoization_decorator(CONF, region, group,
+ expiration_group=expiration_group)
-make_region = dogpile.cache.make_region
+# NOTE(stevemar): When the memcache_pool, mongo and noop backends are removed,
+# we no longer need to register the backends here.
dogpile.cache.register_backend(
'keystone.common.cache.noop',
'keystone.common.cache.backends.noop',
@@ -46,263 +64,61 @@ dogpile.cache.register_backend(
'PooledMemcachedBackend')
-class DebugProxy(proxy.ProxyBackend):
- """Extra Logging ProxyBackend."""
- # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
- # a clean description of the information. Without use of repr, it might
- # be possible to run into encode/decode error(s). For logging/debugging
- # purposes encode/decode is irrelevant and we should be looking at the
- # data exactly as it stands.
-
- def get(self, key):
- value = self.proxied.get(key)
- LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
- {'key': key, 'value': value})
- return value
-
- def get_multi(self, keys):
- values = self.proxied.get_multi(keys)
- LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
- {'keys': keys, 'values': values})
- return values
-
- def set(self, key, value):
- LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
- {'key': key, 'value': value})
- return self.proxied.set(key, value)
-
- def set_multi(self, keys):
- LOG.debug('CACHE_SET_MULTI: "%r"', keys)
- self.proxied.set_multi(keys)
-
- def delete(self, key):
- self.proxied.delete(key)
- LOG.debug('CACHE_DELETE: "%r"', key)
-
- def delete_multi(self, keys):
- LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
- self.proxied.delete_multi(keys)
-
-
-def build_cache_config():
- """Build the cache region dictionary configuration.
-
- :returns: dict
+# TODO(morganfainberg): Move this logic up into oslo.cache directly
+# so we can handle region-wide invalidations or alternatively propose
+# a fix to dogpile.cache so that region-wide invalidation can
+# work across distributed processes.
+class _RegionInvalidator(object):
+
+ def __init__(self, region, region_name):
+ self.region = region
+ self.region_name = region_name
+ region_key = '_RegionExpiration.%(type)s.%(region_name)s'
+ self.soft_region_key = region_key % {'type': 'soft',
+ 'region_name': self.region_name}
+ self.hard_region_key = region_key % {'type': 'hard',
+ 'region_name': self.region_name}
+
+ @property
+ def hard_invalidated(self):
+ invalidated = self.region.backend.get(self.hard_region_key)
+ if invalidated is not api.NO_VALUE:
+ return invalidated.payload
+ return None
+
+ @hard_invalidated.setter
+ def hard_invalidated(self, value):
+ self.region.set(self.hard_region_key, value)
+
+ @hard_invalidated.deleter
+ def hard_invalidated(self):
+ self.region.delete(self.hard_region_key)
+
+ @property
+ def soft_invalidated(self):
+ invalidated = self.region.backend.get(self.soft_region_key)
+ if invalidated is not api.NO_VALUE:
+ return invalidated.payload
+ return None
+
+ @soft_invalidated.setter
+ def soft_invalidated(self, value):
+ self.region.set(self.soft_region_key, value)
+
+ @soft_invalidated.deleter
+ def soft_invalidated(self):
+ self.region.delete(self.soft_region_key)
+
+
+def apply_invalidation_patch(region, region_name):
+ """Patch the region interfaces to ensure we share the expiration time.
+
+ This method is used to patch region.invalidate, region._hard_invalidated,
+ and region._soft_invalidated.
"""
- prefix = CONF.cache.config_prefix
- conf_dict = {}
- conf_dict['%s.backend' % prefix] = CONF.cache.backend
- conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
- for argument in CONF.cache.backend_argument:
- try:
- (argname, argvalue) = argument.split(':', 1)
- except ValueError:
- msg = _LE('Unable to build cache config-key. Expected format '
- '"<argname>:<value>". Skipping unknown format: %s')
- LOG.error(msg, argument)
- continue
-
- arg_key = '.'.join([prefix, 'arguments', argname])
- conf_dict[arg_key] = argvalue
-
- LOG.debug('Keystone Cache Config: %s', conf_dict)
- # NOTE(yorik-sar): these arguments will be used for memcache-related
- # backends. Use setdefault for url to support old-style setting through
- # backend_argument=url:127.0.0.1:11211
- conf_dict.setdefault('%s.arguments.url' % prefix,
- CONF.cache.memcache_servers)
- for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
- 'pool_unused_timeout', 'pool_connection_get_timeout'):
- value = getattr(CONF.cache, 'memcache_' + arg)
- conf_dict['%s.arguments.%s' % (prefix, arg)] = value
-
- return conf_dict
-
-
-def configure_cache_region(region):
- """Configure a cache region.
-
- :param region: optional CacheRegion object, if not provided a new region
- will be instantiated
- :raises: exception.ValidationError
- :returns: dogpile.cache.CacheRegion
- """
- if not isinstance(region, dogpile.cache.CacheRegion):
- raise exception.ValidationError(
- _('region not type dogpile.cache.CacheRegion'))
-
- if not region.is_configured:
- # NOTE(morganfainberg): this is how you tell if a region is configured.
- # There is a request logged with dogpile.cache upstream to make this
- # easier / less ugly.
-
- config_dict = build_cache_config()
- region.configure_from_config(config_dict,
- '%s.' % CONF.cache.config_prefix)
-
- if CONF.cache.debug_cache_backend:
- region.wrap(DebugProxy)
-
- # NOTE(morganfainberg): if the backend requests the use of a
- # key_mangler, we should respect that key_mangler function. If a
- # key_mangler is not defined by the backend, use the sha1_mangle_key
- # mangler provided by dogpile.cache. This ensures we always use a fixed
- # size cache-key.
- if region.key_mangler is None:
- region.key_mangler = util.sha1_mangle_key
-
- for class_path in CONF.cache.proxies:
- # NOTE(morganfainberg): if we have any proxy wrappers, we should
- # ensure they are added to the cache region's backend. Since
- # configure_from_config doesn't handle the wrap argument, we need
- # to manually add the Proxies. For information on how the
- # ProxyBackends work, see the dogpile.cache documents on
- # "changing-backend-behavior"
- cls = importutils.import_class(class_path)
- LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
- region.wrap(cls)
-
- return region
-
-
-def get_should_cache_fn(section):
- """Build a function that returns a config section's caching status.
-
- For any given driver in keystone that has caching capabilities, a boolean
- config option for that driver's section (e.g. ``token``) should exist and
- default to ``True``. This function will use that value to tell the caching
- decorator if caching for that driver is enabled. To properly use this
- with the decorator, pass this function the configuration section and assign
- the result to a variable. Pass the new variable to the caching decorator
- as the named argument ``should_cache_fn``. e.g.::
-
- from keystone.common import cache
-
- SHOULD_CACHE = cache.get_should_cache_fn('token')
-
- @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
- def function(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :returns: function reference
- """
- def should_cache(value):
- if not CONF.cache.enabled:
- return False
- conf_group = getattr(CONF, section)
- return getattr(conf_group, 'caching', True)
- return should_cache
-
-
-def get_expiration_time_fn(section):
- """Build a function that returns a config section's expiration time status.
-
- For any given driver in keystone that has caching capabilities, an int
- config option called ``cache_time`` for that driver's section
- (e.g. ``token``) should exist and typically default to ``None``. This
- function will use that value to tell the caching decorator of the TTL
- override for caching the resulting objects. If the value of the config
- option is ``None`` the default value provided in the
- ``[cache] expiration_time`` option will be used by the decorator. The
- default may be set to something other than ``None`` in cases where the
- caching TTL should not be tied to the global default(s) (e.g.
- revocation_list changes very infrequently and can be cached for >1h by
- default).
-
- To properly use this with the decorator, pass this function the
- configuration section and assign the result to a variable. Pass the new
- variable to the caching decorator as the named argument
- ``expiration_time``. e.g.::
-
- from keystone.common import cache
-
- EXPIRATION_TIME = cache.get_expiration_time_fn('token')
-
- @cache.on_arguments(expiration_time=EXPIRATION_TIME)
- def function(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :rtype: function reference
- """
- def get_expiration_time():
- conf_group = getattr(CONF, section)
- return getattr(conf_group, 'cache_time', None)
- return get_expiration_time
-
-
-def key_generate_to_str(s):
- # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
- # to stringify and handle the Unicode error explicitly as needed.
- try:
- return str(s)
- except UnicodeEncodeError:
- return s.encode('utf-8')
-
-
-def function_key_generator(namespace, fn, to_str=key_generate_to_str):
- # NOTE(morganfainberg): This wraps dogpile.cache's default
- # function_key_generator to change the default to_str mechanism.
- return util.function_key_generator(namespace, fn, to_str=to_str)
-
-
-REGION = dogpile.cache.make_region(
- function_key_generator=function_key_generator)
-on_arguments = REGION.cache_on_arguments
-
-
-def get_memoization_decorator(section, expiration_section=None):
- """Build a function based on the `on_arguments` decorator for the section.
-
- For any given driver in Keystone that has caching capabilities, a
- pair of functions is required to properly determine the status of the
- caching capabilities (a toggle to indicate caching is enabled and any
- override of the default TTL for cached data). This function will return
- an object that has the memoization decorator ``on_arguments``
- pre-configured for the driver.
-
- Example usage::
-
- from keystone.common import cache
-
- MEMOIZE = cache.get_memoization_decorator(section='token')
-
- @MEMOIZE
- def function(arg1, arg2):
- ...
-
-
- ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
- section='token', expiration_section='revoke')
-
- @ALTERNATE_MEMOIZE
- def function2(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :param expiration_section: name of the configuration section to examine
- for the expiration option. This will fall back
- to using ``section`` if the value is unspecified
- or ``None``
- :type expiration_section: string
- :rtype: function reference
- """
- if expiration_section is None:
- expiration_section = section
- should_cache = get_should_cache_fn(section)
- expiration_time = get_expiration_time_fn(expiration_section)
-
- memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
- expiration_time=expiration_time)
-
- # Make sure the actual "should_cache" and "expiration_time" methods are
- # available. This is potentially interesting/useful to pre-seed cache
- # values.
- memoize.should_cache = should_cache
- memoize.get_expiration_time = expiration_time
-
- return memoize
+ # Patch the region object. This logic needs to be moved up into dogpile
+ # itself. Patching the internal interfaces, unfortunately, is the only
+ # way to handle this at the moment.
+ invalidator = _RegionInvalidator(region=region, region_name=region_name)
+ setattr(region, '_hard_invalidated', invalidator.hard_invalidated)
+ setattr(region, '_soft_invalidated', invalidator.soft_invalidated)
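
(Editor's illustration: after this change the module exposes a much smaller
surface - configure the shared region once at startup, then build per-driver
memoization decorators from config groups. A usage sketch, assuming a
configured [cache] section and keystone's 'token' config group; the decorated
function is a hypothetical example.)

    from keystone.common.cache import core as cache

    cache.configure_cache()  # configures CACHE_REGION and wraps it with the
                             # request-local proxy on first configuration

    MEMOIZE = cache.get_memoization_decorator('token')

    @MEMOIZE
    def get_token_data(token_id):
        # Hypothetical expensive lookup, cached per [token]/[cache] settings.
        ...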