path: root/keystone-moon/keystone/common/cache
author     WuKong <rebirthmonkey@gmail.com>  2015-06-30 18:47:29 +0200
committer  WuKong <rebirthmonkey@gmail.com>  2015-06-30 18:47:29 +0200
commit     b8c756ecdd7cced1db4300935484e8c83701c82e (patch)
tree       87e51107d82b217ede145de9d9d59e2100725bd7 /keystone-moon/keystone/common/cache
parent     c304c773bae68fb854ed9eab8fb35c4ef17cf136 (diff)

migrate moon code from github to opnfv

Change-Id: Ice53e368fd1114d56a75271aa9f2e598e3eba604
Signed-off-by: WuKong <rebirthmonkey@gmail.com>
Diffstat (limited to 'keystone-moon/keystone/common/cache')
-rw-r--r--  keystone-moon/keystone/common/cache/__init__.py                 15
-rw-r--r--  keystone-moon/keystone/common/cache/_memcache_pool.py          233
-rw-r--r--  keystone-moon/keystone/common/cache/backends/__init__.py         0
-rw-r--r--  keystone-moon/keystone/common/cache/backends/memcache_pool.py   61
-rw-r--r--  keystone-moon/keystone/common/cache/backends/mongo.py          557
-rw-r--r--  keystone-moon/keystone/common/cache/backends/noop.py            49
-rw-r--r--  keystone-moon/keystone/common/cache/core.py                    308
7 files changed, 1223 insertions(+), 0 deletions(-)
diff --git a/keystone-moon/keystone/common/cache/__init__.py b/keystone-moon/keystone/common/cache/__init__.py
new file mode 100644
index 00000000..49502399
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.cache.core import * # noqa
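The wildcard import re-exports the public helpers defined in core.py below (the registered backends, configure_cache_region(), get_memoization_decorator(), REGION, and so on), so callers import only the package. A minimal driver-side sketch, not part of this commit; the 'token' section name is illustrative:

    from keystone.common import cache

    # Memoization decorator driven by the [token] section's caching options;
    # 'token' is an example section name, not something this commit defines.
    MEMOIZE = cache.get_memoization_decorator(section='token')

    @MEMOIZE
    def lookup(token_id):
        # Placeholder body; a real driver would do its expensive work here.
        return {'id': token_id}

    # Cached entries can later be dropped through dogpile's invalidate hook:
    #   lookup.invalidate(token_id)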
diff --git a/keystone-moon/keystone/common/cache/_memcache_pool.py b/keystone-moon/keystone/common/cache/_memcache_pool.py
new file mode 100644
index 00000000..b15332db
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/_memcache_pool.py
@@ -0,0 +1,233 @@
+# Copyright 2014 Mirantis Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Thread-safe connection pool for python-memcached."""
+
+# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
+# and should be kept in sync until we can use external library for this.
+
+import collections
+import contextlib
+import itertools
+import logging
+import threading
+import time
+
+import memcache
+from oslo_log import log
+from six.moves import queue
+
+from keystone import exception
+from keystone.i18n import _
+
+
+LOG = log.getLogger(__name__)
+
+# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
+# Don't inherit client from threading.local so that we can reuse clients in
+# different threads
+_MemcacheClient = type('_MemcacheClient', (object,),
+ dict(memcache.Client.__dict__))
+
+_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
+
+
+class ConnectionPool(queue.Queue):
+ """Base connection pool class
+
+ This class implements the basic connection pool logic as an abstract base
+ class.
+ """
+ def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
+ """Initialize the connection pool.
+
+ :param maxsize: maximum number of client connections for the pool
+ :type maxsize: int
+ :param unused_timeout: idle time to live for unused clients (in
+ seconds). If a client connection object has been
+ in the pool and idle for longer than the
+ unused_timeout, it will be reaped. This is to
+ ensure resources are released as utilization
+ goes down.
+ :type unused_timeout: int
+ :param conn_get_timeout: maximum time in seconds to wait for a
+ connection. If set to `None` timeout is
+ indefinite.
+ :type conn_get_timeout: int
+ """
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ queue.Queue.__init__(self, maxsize)
+ self._unused_timeout = unused_timeout
+ self._connection_get_timeout = conn_get_timeout
+ self._acquired = 0
+
+ def _create_connection(self):
+ """Returns a connection instance.
+
+ This is called when the pool needs another instance created.
+
+ :returns: a new connection instance
+
+ """
+ raise NotImplementedError
+
+ def _destroy_connection(self, conn):
+ """Destroy and cleanup a connection instance.
+
+ This is called when the pool wishes to get rid of an existing
+ connection. This is the opportunity for a subclass to free up
+ resources and clean up after itself.
+
+ :param conn: the connection object to destroy
+
+ """
+ raise NotImplementedError
+
+ def _debug_logger(self, msg, *args, **kwargs):
+ if LOG.isEnabledFor(logging.DEBUG):
+ thread_id = threading.current_thread().ident
+ args = (id(self), thread_id) + args
+ prefix = 'Memcached pool %s, thread %s: '
+ LOG.debug(prefix + msg, *args, **kwargs)
+
+ @contextlib.contextmanager
+ def acquire(self):
+ self._debug_logger('Acquiring connection')
+ try:
+ conn = self.get(timeout=self._connection_get_timeout)
+ except queue.Empty:
+ raise exception.UnexpectedError(
+ _('Unable to get a connection from pool id %(id)s after '
+ '%(seconds)s seconds.') %
+ {'id': id(self), 'seconds': self._connection_get_timeout})
+ self._debug_logger('Acquired connection %s', id(conn))
+ try:
+ yield conn
+ finally:
+ self._debug_logger('Releasing connection %s', id(conn))
+ self._drop_expired_connections()
+ try:
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ queue.Queue.put(self, conn, block=False)
+ except queue.Full:
+ self._debug_logger('Reaping excess connection %s', id(conn))
+ self._destroy_connection(conn)
+
+ def _qsize(self):
+ if self.maxsize:
+ return self.maxsize - self._acquired
+ else:
+ # A value indicating there is always a free connection
+ # if maxsize is None or 0
+ return 1
+
+ # NOTE(dstanek): stdlib and eventlet Queue implementations
+ # have different names for the qsize method. This ensures
+ # that we override both of them.
+ if not hasattr(queue.Queue, '_qsize'):
+ qsize = _qsize
+
+ def _get(self):
+ if self.queue:
+ conn = self.queue.pop().connection
+ else:
+ conn = self._create_connection()
+ self._acquired += 1
+ return conn
+
+ def _drop_expired_connections(self):
+ """Drop all expired connections from the right end of the queue."""
+ now = time.time()
+ while self.queue and self.queue[0].ttl < now:
+ conn = self.queue.popleft().connection
+ self._debug_logger('Reaping connection %s', id(conn))
+ self._destroy_connection(conn)
+
+ def _put(self, conn):
+ self.queue.append(_PoolItem(
+ ttl=time.time() + self._unused_timeout,
+ connection=conn,
+ ))
+ self._acquired -= 1
+
+
+class MemcacheClientPool(ConnectionPool):
+ def __init__(self, urls, arguments, **kwargs):
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ ConnectionPool.__init__(self, **kwargs)
+ self.urls = urls
+ self._arguments = arguments
+ # NOTE(morganfainberg): The host objects expect an int for the
+ # deaduntil value. Initialize this at 0 for each host with 0 indicating
+ # the host is not dead.
+ self._hosts_deaduntil = [0] * len(urls)
+
+ def _create_connection(self):
+ return _MemcacheClient(self.urls, **self._arguments)
+
+ def _destroy_connection(self, conn):
+ conn.disconnect_all()
+
+ def _get(self):
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ conn = ConnectionPool._get(self)
+ try:
+ # Propagate host state known to us to this client's list
+ now = time.time()
+ for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
+ if deaduntil > now and host.deaduntil <= now:
+ host.mark_dead('propagating death mark from the pool')
+ host.deaduntil = deaduntil
+ except Exception:
+ # We need to be sure that connection doesn't leak from the pool.
+ # This code runs before we enter context manager's try-finally
+ # block, so we need to explicitly release it here.
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ ConnectionPool._put(self, conn)
+ raise
+ return conn
+
+ def _put(self, conn):
+ try:
+ # If this client found that one of the hosts is dead, mark it as
+ # such in our internal list
+ now = time.time()
+ for i, host in zip(itertools.count(), conn.servers):
+ deaduntil = self._hosts_deaduntil[i]
+ # Do nothing if we already know this host is dead
+ if deaduntil <= now:
+ if host.deaduntil > now:
+ self._hosts_deaduntil[i] = host.deaduntil
+ self._debug_logger(
+ 'Marked host %s dead until %s',
+ self.urls[i], host.deaduntil)
+ else:
+ self._hosts_deaduntil[i] = 0
+ # If all hosts are dead we should forget that they're dead. This
+ # way we won't get completely shut off until dead_retry seconds
+ # pass, but will be checking servers as frequently as we can (over a
+ # much smaller socket_timeout)
+ if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
+ self._debug_logger('All hosts are dead. Marking them as live.')
+ self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
+ finally:
+ # super() cannot be used here because Queue in stdlib is an
+ # old-style class
+ ConnectionPool._put(self, conn)
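The two abstract hooks above (_create_connection and _destroy_connection) are all a subclass has to supply. A minimal sketch, not part of this commit and assuming keystone and its dependencies are importable, with a fake connection type standing in for a real client:

    from keystone.common.cache import _memcache_pool


    class FakeConnection(object):
        def close(self):
            pass


    class FakeConnectionPool(_memcache_pool.ConnectionPool):
        def _create_connection(self):
            # Called whenever the pool needs a brand new client.
            return FakeConnection()

        def _destroy_connection(self, conn):
            # Called when an idle client outlives unused_timeout or the
            # pool is already full on release.
            conn.close()


    pool = FakeConnectionPool(maxsize=2, unused_timeout=60, conn_get_timeout=5)
    with pool.acquire() as conn:
        # conn is checked out here and returned to the pool on exit, where it
        # is stamped with ttl = now + unused_timeout.
        pass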
diff --git a/keystone-moon/keystone/common/cache/backends/__init__.py b/keystone-moon/keystone/common/cache/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/__init__.py
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
new file mode 100644
index 00000000..f3990b12
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
@@ -0,0 +1,61 @@
+# Copyright 2014 Mirantis Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""dogpile.cache backend that uses Memcached connection pool"""
+
+import functools
+import logging
+
+from dogpile.cache.backends import memcached as memcached_backend
+
+from keystone.common.cache import _memcache_pool
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Helper to ease backend refactoring
+class ClientProxy(object):
+ def __init__(self, client_pool):
+ self.client_pool = client_pool
+
+ def _run_method(self, __name, *args, **kwargs):
+ with self.client_pool.acquire() as client:
+ return getattr(client, __name)(*args, **kwargs)
+
+ def __getattr__(self, name):
+ return functools.partial(self._run_method, name)
+
+
+class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
+ # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
+ def __init__(self, arguments):
+ super(PooledMemcachedBackend, self).__init__(arguments)
+ self.client_pool = _memcache_pool.MemcacheClientPool(
+ self.url,
+ arguments={
+ 'dead_retry': arguments.get('dead_retry', 5 * 60),
+ 'socket_timeout': arguments.get('socket_timeout', 3),
+ },
+ maxsize=arguments.get('pool_maxsize', 10),
+ unused_timeout=arguments.get('pool_unused_timeout', 60),
+ conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
+ )
+
+ # Since all methods in the backend just call one of the client's methods,
+ # this lets us avoid the need to hack it too much
+ @property
+ def client(self):
+ return ClientProxy(self.client_pool)
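A hedged configuration sketch, not part of this commit, for wiring this backend straight through dogpile.cache; the backend name 'keystone.cache.memcache_pool' is registered in core.py below, the argument keys mirror the ones read in __init__ above, and the server address and numbers are example values:

    from dogpile.cache import make_region

    import keystone.common.cache.core  # noqa: registers 'keystone.cache.memcache_pool'

    region = make_region().configure(
        'keystone.cache.memcache_pool',
        expiration_time=600,                     # example TTL
        arguments={
            'url': ['127.0.0.1:11211'],          # example memcached server
            'dead_retry': 5 * 60,
            'socket_timeout': 3,
            'pool_maxsize': 10,
            'pool_unused_timeout': 60,
            'pool_connection_get_timeout': 10,
        })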
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
new file mode 100644
index 00000000..b5de9bc4
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/mongo.py
@@ -0,0 +1,557 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import datetime
+
+from dogpile.cache import api
+from dogpile.cache import util as dp_util
+from oslo_log import log
+from oslo_utils import importutils
+from oslo_utils import timeutils
+import six
+
+from keystone import exception
+from keystone.i18n import _, _LW
+
+
+NO_VALUE = api.NO_VALUE
+LOG = log.getLogger(__name__)
+
+
+class MongoCacheBackend(api.CacheBackend):
+ """A MongoDB based caching backend implementing dogpile backend APIs.
+
+ Arguments accepted in the arguments dictionary:
+
+ :param db_hosts: string (required), hostname or IP address of the
+ MongoDB server instance. This can be a single MongoDB connection URI,
+ or a list of MongoDB connection URIs.
+
+ :param db_name: string (required), the name of the database to be used.
+
+ :param cache_collection: string (required), the name of the collection used
+ to store cached data.
+ *Note:* A different collection name can be provided if there is a need to
+ create a separate container (i.e. collection) for cache data, so region
+ configuration is done per collection.
+
+ The following are optional parameters for MongoDB backend configuration:
+
+ :param username: string, the name of the user to authenticate.
+
+ :param password: string, the password of the user to authenticate.
+
+ :param max_pool_size: integer, the maximum number of connections that the
+ pool will open simultaneously. By default the pool size is 10.
+
+ :param w: integer, write acknowledgement for MongoDB client
+
+ If not provided, no default is set on MongoDB and write acknowledgement
+ behavior follows the MongoDB default. This parameter name is the same as
+ the one used in the MongoDB docs. The value is specified at collection
+ level, so it applies to `cache_collection` db write operations.
+
+ If this is a replica set, write operations will block until they have
+ been replicated to the specified number or tagged set of servers.
+ Setting w=0 disables write acknowledgement and all other write concern
+ options.
+
+ :param read_preference: string, the read preference mode for MongoDB client
+ Expected value is ``primary``, ``primaryPreferred``, ``secondary``,
+ ``secondaryPreferred``, or ``nearest``. This read_preference is
+ specified at collection level, so it applies to `cache_collection`
+ db read operations.
+
+ :param use_replica: boolean, flag to indicate whether a replica client is
+ to be used. Default is `False`. A `replicaset_name` value is required
+ if `True`.
+
+ :param replicaset_name: string, name of the replica set.
+ Required if `use_replica` is `True`.
+
+ :param son_manipulator: string, fully qualified module and class name of a
+ MongoDB SONManipulator implementation.
+ The default manipulator is :class:`.BaseTransform`.
+
+ This manipulator is added per database. In multiple cache
+ configurations, the manipulator name should be the same if the same
+ database name ``db_name`` is used in those configurations.
+
+ SONManipulator is used to manipulate custom data types as they are
+ saved or retrieved from MongoDB. A custom implementation is only needed
+ if the cached data is a custom class that needs transformations when
+ saving to or reading from the db. If the dogpile cached value contains
+ only built-in data types, the BaseTransform class is sufficient, as it
+ already handles the dogpile CachedValue class transformation.
+
+ :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
+ time-to-live value.
+ If the value is greater than 0, it is assumed that cache_collection
+ needs to be a TTL type (with an index on the 'doc_date' field).
+ By default, the value is -1 and TTL is disabled.
+ Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
+
+ .. NOTE::
+
+ This parameter is different from Dogpile's own
+ expiration_time, which is the number of seconds after which Dogpile
+ will consider the value to be expired. When Dogpile considers a
+ value to be expired, it continues to use the value until generation
+ of a new value is complete, when using CacheRegion.get_or_create().
+ Therefore, if you are setting `mongo_ttl_seconds`, you will want to
+ make sure it is greater than expiration_time by at least enough
+ seconds for new values to be generated, else the value would not
+ be available during a regeneration, forcing all threads to wait for
+ a regeneration each time a value expires.
+
+ :param ssl: boolean, If True, create the connection to the server
+ using SSL. Default is `False`. Client SSL connection parameters depend
+ on the server-side SSL setup. For further reference on SSL configuration:
+ <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
+
+ :param ssl_keyfile: string, the private keyfile used to identify the
+ local connection against mongod. If included with the certfile then
+ only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
+
+ :param ssl_certfile: string, the certificate file used to identify the
+ local connection against mongod. Used only when `ssl` is `True`.
+
+ :param ssl_ca_certs: string, the ca_certs file contains a set of
+ concatenated 'certification authority' certificates, which are used to
+ validate certificates passed from the other end of the connection.
+ Used only when `ssl` is `True`.
+
+ :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
+ a certificate is required from the other side of the connection, and
+ whether it will be validated if provided. It must be one of the three
+ values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
+ (not required, but validated if provided), or
+ ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
+ parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
+ must point to a file of CA certificates. Used only when `ssl`
+ is `True`.
+
+ The rest of the arguments are passed to mongo calls for read, write and
+ remove, so related options can be specified for these operations.
+
+ Further details of the supported arguments can be found at
+ <http://api.mongodb.org/python/current/api/pymongo/>
+
+ """
+
+ def __init__(self, arguments):
+ self.api = MongoApi(arguments)
+
+ @dp_util.memoized_property
+ def client(self):
+ """Initializes MongoDB connection and collection defaults.
+
+ This initialization is done only once, as part of the lazy inclusion of
+ the MongoDB dependency, i.e. imports are added only if the related
+ backend is used.
+
+ :return: :class:`.MongoApi` instance
+ """
+ self.api.get_cache_collection()
+ return self.api
+
+ def get(self, key):
+ value = self.client.get(key)
+ if value is None:
+ return NO_VALUE
+ else:
+ return value
+
+ def get_multi(self, keys):
+ values = self.client.get_multi(keys)
+ return [
+ NO_VALUE if key not in values
+ else values[key] for key in keys
+ ]
+
+ def set(self, key, value):
+ self.client.set(key, value)
+
+ def set_multi(self, mapping):
+ self.client.set_multi(mapping)
+
+ def delete(self, key):
+ self.client.delete(key)
+
+ def delete_multi(self, keys):
+ self.client.delete_multi(keys)
+
+
+class MongoApi(object):
+ """Class handling MongoDB specific functionality.
+
+ This class uses PyMongo APIs internally to create a database connection
+ with the configured pool size, ensures a unique index on the key, performs
+ database authentication and ensures a TTL collection index if so
+ configured. It also serves as the handle to the cache collection for
+ dogpile cache APIs.
+
+ In a single deployment, multiple cache configurations can be defined. When
+ multiple cache collections are used, the db client connection pool is
+ shared if the collections are within the same database.
+ """
+
+ # class level attributes for re-use of db client connection and collection
+ _DB = {} # dict of db_name: db connection reference
+ _MONGO_COLLS = {} # dict of cache_collection : db collection reference
+
+ def __init__(self, arguments):
+ self._init_args(arguments)
+ self._data_manipulator = None
+
+ def _init_args(self, arguments):
+ """Helper logic for collecting and parsing MongoDB specific arguments.
+
+ The arguments passed in are separated into connection-specific settings;
+ the rest of the arguments are passed to create/update/delete db
+ operations.
+ """
+ self.conn_kwargs = {} # connection specific arguments
+
+ self.hosts = arguments.pop('db_hosts', None)
+ if self.hosts is None:
+ msg = _('db_hosts value is required')
+ raise exception.ValidationError(message=msg)
+
+ self.db_name = arguments.pop('db_name', None)
+ if self.db_name is None:
+ msg = _('database db_name is required')
+ raise exception.ValidationError(message=msg)
+
+ self.cache_collection = arguments.pop('cache_collection', None)
+ if self.cache_collection is None:
+ msg = _('cache_collection name is required')
+ raise exception.ValidationError(message=msg)
+
+ self.username = arguments.pop('username', None)
+ self.password = arguments.pop('password', None)
+ self.max_pool_size = arguments.pop('max_pool_size', 10)
+
+ self.w = arguments.pop('w', -1)
+ try:
+ self.w = int(self.w)
+ except ValueError:
+ msg = _('integer value expected for w (write concern attribute)')
+ raise exception.ValidationError(message=msg)
+
+ self.read_preference = arguments.pop('read_preference', None)
+
+ self.use_replica = arguments.pop('use_replica', False)
+ if self.use_replica:
+ if arguments.get('replicaset_name') is None:
+ msg = _('replicaset_name required when use_replica is True')
+ raise exception.ValidationError(message=msg)
+ self.replicaset_name = arguments.get('replicaset_name')
+
+ self.son_manipulator = arguments.pop('son_manipulator', None)
+
+ # set if mongo collection needs to be TTL type.
+ # This needs to be max ttl for any cache entry.
+ # By default, -1 means don't use TTL collection.
+ # With ttl set, it creates the related index and a doc_date field with
+ # the needed expiration interval
+ self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
+ try:
+ self.ttl_seconds = int(self.ttl_seconds)
+ except ValueError:
+ msg = _('integer value expected for mongo_ttl_seconds')
+ raise exception.ValidationError(message=msg)
+
+ self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
+ if self.conn_kwargs['ssl']:
+ ssl_keyfile = arguments.pop('ssl_keyfile', None)
+ ssl_certfile = arguments.pop('ssl_certfile', None)
+ ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
+ ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
+ if ssl_keyfile:
+ self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
+ if ssl_certfile:
+ self.conn_kwargs['ssl_certfile'] = ssl_certfile
+ if ssl_ca_certs:
+ self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
+ if ssl_cert_reqs:
+ self.conn_kwargs['ssl_cert_reqs'] = (
+ self._ssl_cert_req_type(ssl_cert_reqs))
+
+ # rest of arguments are passed to mongo crud calls
+ self.meth_kwargs = arguments
+
+ def _ssl_cert_req_type(self, req_type):
+ try:
+ import ssl
+ except ImportError:
+ raise exception.ValidationError(_('no ssl support available'))
+ req_type = req_type.upper()
+ try:
+ return {
+ 'NONE': ssl.CERT_NONE,
+ 'OPTIONAL': ssl.CERT_OPTIONAL,
+ 'REQUIRED': ssl.CERT_REQUIRED
+ }[req_type]
+ except KeyError:
+ msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
+ '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
+ raise exception.ValidationError(message=msg)
+
+ def _get_db(self):
+ # defer imports until backend is used
+ global pymongo
+ import pymongo
+ if self.use_replica:
+ connection = pymongo.MongoReplicaSetClient(
+ host=self.hosts, replicaSet=self.replicaset_name,
+ max_pool_size=self.max_pool_size, **self.conn_kwargs)
+ else: # used for standalone node or mongos in sharded setup
+ connection = pymongo.MongoClient(
+ host=self.hosts, max_pool_size=self.max_pool_size,
+ **self.conn_kwargs)
+
+ database = getattr(connection, self.db_name)
+
+ self._assign_data_manipulator()
+ database.add_son_manipulator(self._data_manipulator)
+ if self.username and self.password:
+ database.authenticate(self.username, self.password)
+ return database
+
+ def _assign_data_manipulator(self):
+ if self._data_manipulator is None:
+ if self.son_manipulator:
+ self._data_manipulator = importutils.import_object(
+ self.son_manipulator)
+ else:
+ self._data_manipulator = BaseTransform()
+
+ def _get_doc_date(self):
+ if self.ttl_seconds > 0:
+ expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
+ doc_date = timeutils.utcnow() + expire_delta
+ else:
+ doc_date = timeutils.utcnow()
+ return doc_date
+
+ def get_cache_collection(self):
+ if self.cache_collection not in self._MONGO_COLLS:
+ global pymongo
+ import pymongo
+ # re-use db client connection if already defined as part of
+ # earlier dogpile cache configuration
+ if self.db_name not in self._DB:
+ self._DB[self.db_name] = self._get_db()
+ coll = getattr(self._DB[self.db_name], self.cache_collection)
+
+ self._assign_data_manipulator()
+ if self.read_preference:
+ self.read_preference = pymongo.read_preferences.mongos_enum(
+ self.read_preference)
+ coll.read_preference = self.read_preference
+ if self.w > -1:
+ coll.write_concern['w'] = self.w
+ if self.ttl_seconds > 0:
+ kwargs = {'expireAfterSeconds': self.ttl_seconds}
+ coll.ensure_index('doc_date', cache_for=5, **kwargs)
+ else:
+ self._validate_ttl_index(coll, self.cache_collection,
+ self.ttl_seconds)
+ self._MONGO_COLLS[self.cache_collection] = coll
+
+ return self._MONGO_COLLS[self.cache_collection]
+
+ def _get_cache_entry(self, key, value, meta, doc_date):
+ """MongoDB cache data representation.
+
+ The cache key is stored as the ``_id`` field, since MongoDB by default
+ creates a unique index on this field; there is no need to create a
+ separate field and index for the cache key. Cache data has an additional
+ ``doc_date`` field for MongoDB TTL collection support.
+ """
+ return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
+
+ def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
+ """Checks if existing TTL index is removed on a collection.
+
+ This logs warning when existing collection has TTL index defined and
+ new cache configuration tries to disable index with
+ ``mongo_ttl_seconds < 0``. In that case, existing index needs
+ to be addressed first to make new configuration effective.
+ Refer to MongoDB documentation around TTL index for further details.
+ """
+ indexes = collection.index_information()
+ for indx_name, index_data in six.iteritems(indexes):
+ if all(k in index_data for k in ('key', 'expireAfterSeconds')):
+ existing_value = index_data['expireAfterSeconds']
+ fld_present = 'doc_date' in index_data['key'][0]
+ if fld_present and existing_value > -1 and ttl_seconds < 1:
+ msg = _LW('TTL index already exists on db collection '
+ '<%(c_name)s>, remove index <%(indx_name)s> '
+ 'first to make the updated mongo_ttl_seconds '
+ 'value effective')
+ LOG.warn(msg, {'c_name': coll_name,
+ 'indx_name': indx_name})
+
+ def get(self, key):
+ criteria = {'_id': key}
+ result = self.get_cache_collection().find_one(spec_or_id=criteria,
+ **self.meth_kwargs)
+ if result:
+ return result['value']
+ else:
+ return None
+
+ def get_multi(self, keys):
+ db_results = self._get_results_as_dict(keys)
+ return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
+
+ def _get_results_as_dict(self, keys):
+ criteria = {'_id': {'$in': keys}}
+ db_results = self.get_cache_collection().find(spec=criteria,
+ **self.meth_kwargs)
+ return {doc['_id']: doc for doc in db_results}
+
+ def set(self, key, value):
+ doc_date = self._get_doc_date()
+ ref = self._get_cache_entry(key, value.payload, value.metadata,
+ doc_date)
+ spec = {'_id': key}
+ # find and modify does not have manipulator support
+ # so need to do conversion as part of input document
+ ref = self._data_manipulator.transform_incoming(ref, self)
+ self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
+ **self.meth_kwargs)
+
+ def set_multi(self, mapping):
+ """Insert multiple documents specified as key, value pairs.
+
+ In this case, multiple documents can be added via a single insert,
+ provided they do not already exist.
+ Updates of multiple existing documents are done one by one.
+ """
+ doc_date = self._get_doc_date()
+ insert_refs = []
+ update_refs = []
+ existing_docs = self._get_results_as_dict(mapping.keys())
+ for key, value in mapping.items():
+ ref = self._get_cache_entry(key, value.payload, value.metadata,
+ doc_date)
+ if key in existing_docs:
+ ref['_id'] = existing_docs[key]['_id']
+ update_refs.append(ref)
+ else:
+ insert_refs.append(ref)
+ if insert_refs:
+ self.get_cache_collection().insert(insert_refs, manipulate=True,
+ **self.meth_kwargs)
+ for upd_doc in update_refs:
+ self.get_cache_collection().save(upd_doc, manipulate=True,
+ **self.meth_kwargs)
+
+ def delete(self, key):
+ criteria = {'_id': key}
+ self.get_cache_collection().remove(spec_or_id=criteria,
+ **self.meth_kwargs)
+
+ def delete_multi(self, keys):
+ criteria = {'_id': {'$in': keys}}
+ self.get_cache_collection().remove(spec_or_id=criteria,
+ **self.meth_kwargs)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class AbstractManipulator(object):
+ """Abstract class with methods which need to be implemented for custom
+ manipulation.
+
+ This is used as the base class for :class:`.BaseTransform` instead of the
+ pymongo-specific class `pymongo.son_manipulator.SONManipulator`, so that
+ the pymongo dependency is avoided when the MongoDB backend is not used.
+ """
+ @abc.abstractmethod
+ def transform_incoming(self, son, collection):
+ """Used while saving data to MongoDB.
+
+ :param son: the SON object to be inserted into the database
+ :param collection: the collection the object is being inserted into
+
+ :returns: transformed SON object
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def transform_outgoing(self, son, collection):
+ """Used while reading data from MongoDB.
+
+ :param son: the SON object being retrieved from the database
+ :param collection: the collection this object was stored in
+
+ :returns: transformed SON object
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ def will_copy(self):
+ """Will this SON manipulator make a copy of the incoming document?
+
+ Derived classes that do need to make a copy should override this
+ method, returning `True` instead of `False`.
+
+ :returns: boolean
+ """
+ return False
+
+
+class BaseTransform(AbstractManipulator):
+ """Base transformation class to store and read dogpile cached data
+ from MongoDB.
+
+ This is needed as dogpile internally stores data as a custom class
+ i.e. dogpile.cache.api.CachedValue
+
+ Note: A custom manipulator always needs to override the
+ ``transform_incoming`` and ``transform_outgoing`` methods, because the
+ MongoDB manipulator logic specifically checks that the overridden methods
+ in an instance and its superclass are different.
+ """
+
+ def transform_incoming(self, son, collection):
+ """Used while saving data to MongoDB."""
+ for (key, value) in son.items():
+ if isinstance(value, api.CachedValue):
+ son[key] = value.payload # key is 'value' field here
+ son['meta'] = value.metadata
+ elif isinstance(value, dict): # Make sure we recurse into sub-docs
+ son[key] = self.transform_incoming(value, collection)
+ return son
+
+ def transform_outgoing(self, son, collection):
+ """Used while reading data from MongoDB."""
+ metadata = None
+ # make sure it is a top-level dictionary with all expected field names
+ # present
+ if isinstance(son, dict) and all(k in son for k in
+ ('_id', 'value', 'meta', 'doc_date')):
+ payload = son.pop('value', None)
+ metadata = son.pop('meta', None)
+ for (key, value) in son.items():
+ if isinstance(value, dict):
+ son[key] = self.transform_outgoing(value, collection)
+ if metadata is not None:
+ son['value'] = api.CachedValue(payload, metadata)
+ return son
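A hedged configuration sketch for this backend, not part of this commit, using the 'keystone.cache.mongo' name that core.py registers below; the host, database and collection names are example values and only the required arguments plus a TTL are shown:

    from dogpile.cache import make_region

    import keystone.common.cache.core  # noqa: registers 'keystone.cache.mongo'

    region = make_region().configure(
        'keystone.cache.mongo',
        arguments={
            'db_hosts': 'localhost:27017',   # example MongoDB URI
            'db_name': 'ks_cache',           # example database name
            'cache_collection': 'cache',     # example collection name
            'mongo_ttl_seconds': 600,        # optional TTL index on doc_date
        })

    # With a reachable mongod, the region is then used like any other:
    #   region.set('some_key', 'some_value')
    #   assert region.get('some_key') == 'some_value'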
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
new file mode 100644
index 00000000..38329c94
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/backends/noop.py
@@ -0,0 +1,49 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dogpile.cache import api
+
+
+NO_VALUE = api.NO_VALUE
+
+
+class NoopCacheBackend(api.CacheBackend):
+ """A no op backend as a default caching backend.
+
+ The no op backend is provided as the default caching backend for keystone
+ to ensure that ``dogpile.cache.memory`` is not used in any real-world
+ circumstances unintentionally. ``dogpile.cache.memory`` does not have a
+ mechanism to clean up its internal dict and therefore could cause runaway
+ memory utilization.
+ """
+ def __init__(self, *args):
+ return
+
+ def get(self, key):
+ return NO_VALUE
+
+ def get_multi(self, keys):
+ return [NO_VALUE for x in keys]
+
+ def set(self, key, value):
+ return
+
+ def set_multi(self, mapping):
+ return
+
+ def delete(self, key):
+ return
+
+ def delete_multi(self, keys):
+ return
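Because every read returns NO_VALUE, anything decorated against a region using this backend is simply recomputed on each call. A minimal sketch, not part of this commit, using the 'keystone.common.cache.noop' name that core.py registers below:

    from dogpile.cache import make_region
    from dogpile.cache.api import NO_VALUE

    import keystone.common.cache.core  # noqa: registers 'keystone.common.cache.noop'

    region = make_region().configure('keystone.common.cache.noop')
    region.set('key', 'value')             # silently discarded
    assert region.get('key') is NO_VALUE   # nothing is ever stored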
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
new file mode 100644
index 00000000..306587b3
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/core.py
@@ -0,0 +1,308 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone Caching Layer Implementation."""
+
+import dogpile.cache
+from dogpile.cache import proxy
+from dogpile.cache import util
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+
+from keystone import exception
+from keystone.i18n import _, _LE
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+make_region = dogpile.cache.make_region
+
+dogpile.cache.register_backend(
+ 'keystone.common.cache.noop',
+ 'keystone.common.cache.backends.noop',
+ 'NoopCacheBackend')
+
+dogpile.cache.register_backend(
+ 'keystone.cache.mongo',
+ 'keystone.common.cache.backends.mongo',
+ 'MongoCacheBackend')
+
+dogpile.cache.register_backend(
+ 'keystone.cache.memcache_pool',
+ 'keystone.common.cache.backends.memcache_pool',
+ 'PooledMemcachedBackend')
+
+
+class DebugProxy(proxy.ProxyBackend):
+ """Extra Logging ProxyBackend."""
+ # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
+ # a clean description of the information. Without use of repr, it might
+ # be possible to run into encode/decode error(s). For logging/debugging
+ # purposes encode/decode is irrelevant and we should be looking at the
+ # data exactly as it stands.
+
+ def get(self, key):
+ value = self.proxied.get(key)
+ LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
+ {'key': key, 'value': value})
+ return value
+
+ def get_multi(self, keys):
+ values = self.proxied.get_multi(keys)
+ LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
+ {'keys': keys, 'values': values})
+ return values
+
+ def set(self, key, value):
+ LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
+ {'key': key, 'value': value})
+ return self.proxied.set(key, value)
+
+ def set_multi(self, keys):
+ LOG.debug('CACHE_SET_MULTI: "%r"', keys)
+ self.proxied.set_multi(keys)
+
+ def delete(self, key):
+ self.proxied.delete(key)
+ LOG.debug('CACHE_DELETE: "%r"', key)
+
+ def delete_multi(self, keys):
+ LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
+ self.proxied.delete_multi(keys)
+
+
+def build_cache_config():
+ """Build the cache region dictionary configuration.
+
+ :returns: dict
+ """
+ prefix = CONF.cache.config_prefix
+ conf_dict = {}
+ conf_dict['%s.backend' % prefix] = CONF.cache.backend
+ conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
+ for argument in CONF.cache.backend_argument:
+ try:
+ (argname, argvalue) = argument.split(':', 1)
+ except ValueError:
+ msg = _LE('Unable to build cache config-key. Expected format '
+ '"<argname>:<value>". Skipping unknown format: %s')
+ LOG.error(msg, argument)
+ continue
+
+ arg_key = '.'.join([prefix, 'arguments', argname])
+ conf_dict[arg_key] = argvalue
+
+ LOG.debug('Keystone Cache Config: %s', conf_dict)
+ # NOTE(yorik-sar): these arguments will be used for memcache-related
+ # backends. Use setdefault for url to support old-style setting through
+ # backend_argument=url:127.0.0.1:11211
+ conf_dict.setdefault('%s.arguments.url' % prefix,
+ CONF.cache.memcache_servers)
+ for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
+ 'pool_unused_timeout', 'pool_connection_get_timeout'):
+ value = getattr(CONF.cache, 'memcache_' + arg)
+ conf_dict['%s.arguments.%s' % (prefix, arg)] = value
+
+ return conf_dict
+
+
+def configure_cache_region(region):
+ """Configure a cache region.
+
+ :param region: the dogpile.cache.CacheRegion object to configure
+ :raises: exception.ValidationError
+ :returns: dogpile.cache.CacheRegion
+ """
+ if not isinstance(region, dogpile.cache.CacheRegion):
+ raise exception.ValidationError(
+ _('region not type dogpile.cache.CacheRegion'))
+
+ if not region.is_configured:
+ # NOTE(morganfainberg): this is how you tell if a region is configured.
+ # There is a request logged with dogpile.cache upstream to make this
+ # easier / less ugly.
+
+ config_dict = build_cache_config()
+ region.configure_from_config(config_dict,
+ '%s.' % CONF.cache.config_prefix)
+
+ if CONF.cache.debug_cache_backend:
+ region.wrap(DebugProxy)
+
+ # NOTE(morganfainberg): if the backend requests the use of a
+ # key_mangler, we should respect that key_mangler function. If a
+ # key_mangler is not defined by the backend, use the sha1_mangle_key
+ # mangler provided by dogpile.cache. This ensures we always use a fixed
+ # size cache-key.
+ if region.key_mangler is None:
+ region.key_mangler = util.sha1_mangle_key
+
+ for class_path in CONF.cache.proxies:
+ # NOTE(morganfainberg): if we have any proxy wrappers, we should
+ # ensure they are added to the cache region's backend. Since
+ # configure_from_config doesn't handle the wrap argument, we need
+ # to manually add the Proxies. For information on how the
+ # ProxyBackends work, see the dogpile.cache documents on
+ # "changing-backend-behavior"
+ cls = importutils.import_class(class_path)
+ LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
+ region.wrap(cls)
+
+ return region
+
+
+def get_should_cache_fn(section):
+ """Build a function that returns a config section's caching status.
+
+ For any given driver in keystone that has caching capabilities, a boolean
+ config option for that driver's section (e.g. ``token``) should exist and
+ default to ``True``. This function will use that value to tell the caching
+ decorator if caching for that driver is enabled. To properly use this
+ with the decorator, pass this function the configuration section and assign
+ the result to a variable. Pass the new variable to the caching decorator
+ as the named argument ``should_cache_fn``. e.g.::
+
+ from keystone.common import cache
+
+ SHOULD_CACHE = cache.get_should_cache_fn('token')
+
+ @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
+ def function(arg1, arg2):
+ ...
+
+ :param section: name of the configuration section to examine
+ :type section: string
+ :returns: function reference
+ """
+ def should_cache(value):
+ if not CONF.cache.enabled:
+ return False
+ conf_group = getattr(CONF, section)
+ return getattr(conf_group, 'caching', True)
+ return should_cache
+
+
+def get_expiration_time_fn(section):
+ """Build a function that returns a config section's expiration time status.
+
+ For any given driver in keystone that has caching capabilities, an int
+ config option called ``cache_time`` for that driver's section
+ (e.g. ``token``) should exist and typically default to ``None``. This
+ function will use that value to tell the caching decorator of the TTL
+ override for caching the resulting objects. If the value of the config
+ option is ``None`` the default value provided in the
+ ``[cache] expiration_time`` option will be used by the decorator. The
+ default may be set to something other than ``None`` in cases where the
+ caching TTL should not be tied to the global default(s) (e.g.
+ revocation_list changes very infrequently and can be cached for >1h by
+ default).
+
+ To properly use this with the decorator, pass this function the
+ configuration section and assign the result to a variable. Pass the new
+ variable to the caching decorator as the named argument
+ ``expiration_time``. e.g.::
+
+ from keystone.common import cache
+
+ EXPIRATION_TIME = cache.get_expiration_time_fn('token')
+
+ @cache.on_arguments(expiration_time=EXPIRATION_TIME)
+ def function(arg1, arg2):
+ ...
+
+ :param section: name of the configuration section to examine
+ :type section: string
+ :rtype: function reference
+ """
+ def get_expiration_time():
+ conf_group = getattr(CONF, section)
+ return getattr(conf_group, 'cache_time', None)
+ return get_expiration_time
+
+
+def key_generate_to_str(s):
+ # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
+ # to stringify and handle the Unicode error explicitly as needed.
+ try:
+ return str(s)
+ except UnicodeEncodeError:
+ return s.encode('utf-8')
+
+
+def function_key_generator(namespace, fn, to_str=key_generate_to_str):
+ # NOTE(morganfainberg): This wraps dogpile.cache's default
+ # function_key_generator to change the default to_str mechanism.
+ return util.function_key_generator(namespace, fn, to_str=to_str)
+
+
+REGION = dogpile.cache.make_region(
+ function_key_generator=function_key_generator)
+on_arguments = REGION.cache_on_arguments
+
+
+def get_memoization_decorator(section, expiration_section=None):
+ """Build a function based on the `on_arguments` decorator for the section.
+
+ For any given driver in Keystone that has caching capabilities, a
+ pair of functions is required to properly determine the status of the
+ caching capabilities (a toggle to indicate caching is enabled and any
+ override of the default TTL for cached data). This function will return
+ an object that has the memoization decorator ``on_arguments``
+ pre-configured for the driver.
+
+ Example usage::
+
+ from keystone.common import cache
+
+ MEMOIZE = cache.get_memoization_decorator(section='token')
+
+ @MEMOIZE
+ def function(arg1, arg2):
+ ...
+
+
+ ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
+ section='token', expiration_section='revoke')
+
+ @ALTERNATE_MEMOIZE
+ def function2(arg1, arg2):
+ ...
+
+ :param section: name of the configuration section to examine
+ :type section: string
+ :param expiration_section: name of the configuration section to examine
+ for the expiration option. This will fall back
+ to using ``section`` if the value is unspecified
+ or ``None``
+ :type expiration_section: string
+ :rtype: function reference
+ """
+ if expiration_section is None:
+ expiration_section = section
+ should_cache = get_should_cache_fn(section)
+ expiration_time = get_expiration_time_fn(expiration_section)
+
+ memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
+ expiration_time=expiration_time)
+
+ # Make sure the actual "should_cache" and "expiration_time" methods are
+ # available. This is potentially interesting/useful to pre-seed cache
+ # values.
+ memoize.should_cache = should_cache
+ memoize.get_expiration_time = expiration_time
+
+ return memoize
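To tie core.py together: build_cache_config() flattens the [cache] options (defined in keystone.common.config, outside this diff) into the dictionary layout that dogpile's configure_from_config() expects, and configure_cache_region() applies it to a region. A hedged, self-contained sketch of that wiring with an explicit dictionary; the 'cache' prefix and all values are example stand-ins for the real CONF.cache settings:

    import dogpile.cache

    import keystone.common.cache.core  # noqa: registers the keystone backends

    # Roughly what build_cache_config() would produce for a memcache_pool
    # setup (the prefix comes from CONF.cache.config_prefix in the real code):
    config_dict = {
        'cache.backend': 'keystone.cache.memcache_pool',
        'cache.expiration_time': 600,
        'cache.arguments.url': '127.0.0.1:11211',
        'cache.arguments.dead_retry': 300,
        'cache.arguments.socket_timeout': 3,
        'cache.arguments.pool_maxsize': 10,
        'cache.arguments.pool_unused_timeout': 60,
        'cache.arguments.pool_connection_get_timeout': 10,
    }

    region = dogpile.cache.make_region()
    region.configure_from_config(config_dict, 'cache.')

    # In keystone itself this is done once at startup by calling
    # cache.configure_cache_region(cache.REGION), which additionally applies
    # the DebugProxy wrapper, the key mangler, and any configured proxies.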