aboutsummaryrefslogtreecommitdiffstats
path: root/keystone-moon/keystone/token/persistence
diff options
context:
space:
mode:
authorWuKong <rebirthmonkey@gmail.com>2015-06-30 18:47:29 +0200
committerWuKong <rebirthmonkey@gmail.com>2015-06-30 18:47:29 +0200
commitb8c756ecdd7cced1db4300935484e8c83701c82e (patch)
tree87e51107d82b217ede145de9d9d59e2100725bd7 /keystone-moon/keystone/token/persistence
parentc304c773bae68fb854ed9eab8fb35c4ef17cf136 (diff)
migrate moon code from github to opnfv
Change-Id: Ice53e368fd1114d56a75271aa9f2e598e3eba604 Signed-off-by: WuKong <rebirthmonkey@gmail.com>
Diffstat (limited to 'keystone-moon/keystone/token/persistence')
-rw-r--r--keystone-moon/keystone/token/persistence/__init__.py16
-rw-r--r--keystone-moon/keystone/token/persistence/backends/__init__.py0
-rw-r--r--keystone-moon/keystone/token/persistence/backends/kvs.py357
-rw-r--r--keystone-moon/keystone/token/persistence/backends/memcache.py33
-rw-r--r--keystone-moon/keystone/token/persistence/backends/memcache_pool.py28
-rw-r--r--keystone-moon/keystone/token/persistence/backends/sql.py279
-rw-r--r--keystone-moon/keystone/token/persistence/core.py361
7 files changed, 1074 insertions, 0 deletions
diff --git a/keystone-moon/keystone/token/persistence/__init__.py b/keystone-moon/keystone/token/persistence/__init__.py
new file mode 100644
index 00000000..29ad5653
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/__init__.py
@@ -0,0 +1,16 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token.persistence.core import * # noqa
+
+
+__all__ = ['Manager', 'Driver', 'backends']
diff --git a/keystone-moon/keystone/token/persistence/backends/__init__.py b/keystone-moon/keystone/token/persistence/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/__init__.py
diff --git a/keystone-moon/keystone/token/persistence/backends/kvs.py b/keystone-moon/keystone/token/persistence/backends/kvs.py
new file mode 100644
index 00000000..b4807bf1
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/kvs.py
@@ -0,0 +1,357 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+import copy
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import kvs
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+from keystone import token
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
class Token(token.persistence.Driver):
    """KeyValueStore backend for tokens.

    This is the base implementation for any/all key-value-stores (e.g.
    memcached) for the Token backend. It is recommended to only use the base
    in-memory implementation for testing purposes.
    """

    # Store key under which the flattened revocation list lives.
    revocation_key = 'revocation-list'
    # Dogpile KVS backend name; subclasses override (e.g. memcached).
    kvs_backend = 'openstack.kvs.Memory'

    def __init__(self, backing_store=None, **kwargs):
        super(Token, self).__init__()
        self._store = kvs.get_key_value_store('token-driver')
        if backing_store is not None:
            self.kvs_backend = backing_store
        if not self._store.is_configured:
            # Do not re-configure the backend if the store has been initialized
            self._store.configure(backing_store=self.kvs_backend, **kwargs)
        if self.__class__ == Token:
            # NOTE(morganfainberg): Only warn if the base KVS implementation
            # is instantiated.
            # LOG.warn is a deprecated alias of LOG.warning (both in oslo.log
            # and stdlib logging); use the canonical name, consistent with
            # _add_to_revocation_list below.
            LOG.warning(_LW('It is recommended to only use the base '
                            'key-value-store implementation for the token '
                            'driver for testing purposes. Please use '
                            'keystone.token.persistence.backends.memcache.'
                            'Token '
                            'or keystone.token.persistence.backends.sql.Token '
                            'instead.'))

    def _prefix_token_id(self, token_id):
        """Return the store key under which an individual token is kept."""
        return 'token-%s' % token_id.encode('utf-8')

    def _prefix_user_id(self, user_id):
        """Return the store key for a user's token-index list."""
        return 'usertokens-%s' % user_id.encode('utf-8')

    def _get_key_or_default(self, key, default=None):
        """Fetch ``key`` from the store, returning ``default`` when absent."""
        try:
            return self._store.get(key)
        except exception.NotFound:
            return default

    def _get_key(self, key):
        """Fetch ``key`` from the store; propagates NotFound when absent."""
        return self._store.get(key)

    def _set_key(self, key, value, lock=None):
        """Write ``value`` under ``key``, honoring an optional held lock."""
        self._store.set(key, value, lock)

    def _delete_key(self, key):
        """Remove ``key`` from the store."""
        return self._store.delete(key)

    def get_token(self, token_id):
        """Return the stored data for ``token_id``.

        :raises keystone.exception.TokenNotFound: if the token is absent.
        """
        ptk = self._prefix_token_id(token_id)
        try:
            token_ref = self._get_key(ptk)
        except exception.NotFound:
            raise exception.TokenNotFound(token_id=token_id)

        return token_ref

    def create_token(self, token_id, data):
        """Create a token by id and data.

        It is assumed the caller has performed data validation on the "data"
        parameter.

        :returns: the (deep-copied) token data actually stored, with
            ``expires`` and ``user_id`` defaulted if they were missing.
        """
        data_copy = copy.deepcopy(data)
        ptk = self._prefix_token_id(token_id)
        if not data_copy.get('expires'):
            data_copy['expires'] = provider.default_expire_time()
        if not data_copy.get('user_id'):
            data_copy['user_id'] = data_copy['user']['id']

        # NOTE(morganfainberg): for ease of manipulating the data without
        # concern about the backend, always store the value(s) in the
        # index as the isotime (string) version so this is where the string is
        # built.
        expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)

        self._set_key(ptk, data_copy)
        user_id = data['user']['id']
        user_key = self._prefix_user_id(user_id)
        self._update_user_token_list(user_key, token_id, expires_str)
        if CONF.trust.enabled and data.get('trust_id'):
            # NOTE(morganfainberg): If trusts are enabled and this is a trust
            # scoped token, we add the token to the trustee list as well. This
            # allows password changes of the trustee to also expire the token.
            # There is no harm in placing the token in multiple lists, as
            # _list_tokens is smart enough to handle almost any case of
            # valid/invalid/expired for a given token.
            token_data = data_copy['token_data']
            if data_copy['token_version'] == token.provider.V2:
                trustee_user_id = token_data['access']['trust'][
                    'trustee_user_id']
            elif data_copy['token_version'] == token.provider.V3:
                trustee_user_id = token_data['OS-TRUST:trust'][
                    'trustee_user_id']
            else:
                raise exception.UnsupportedTokenVersionException(
                    _('Unknown token version %s') %
                    data_copy.get('token_version'))

            trustee_key = self._prefix_user_id(trustee_user_id)
            self._update_user_token_list(trustee_key, token_id, expires_str)

        return data_copy

    def _get_user_token_list_with_expiry(self, user_key):
        """Return a list of tuples in the format (token_id, token_expiry) for
        the user_key.
        """
        return self._get_key_or_default(user_key, default=[])

    def _get_user_token_list(self, user_key):
        """Return a list of token_ids for the user_key."""
        token_list = self._get_user_token_list_with_expiry(user_key)
        # Each element is a tuple of (token_id, token_expiry). Most code does
        # not care about the expiry, it is stripped out and only a
        # list of token_ids are returned.
        return [t[0] for t in token_list]

    def _update_user_token_list(self, user_key, token_id, expires_isotime_str):
        """Add ``token_id`` to the user's index, pruning dead entries.

        Expired and revoked entries are dropped while the list is held
        under the store's distributed lock, keeping the index small.
        """
        current_time = self._get_current_time()
        revoked_token_list = set([t['id'] for t in
                                  self.list_revoked_tokens()])

        with self._store.get_lock(user_key) as lock:
            filtered_list = []
            token_list = self._get_user_token_list_with_expiry(user_key)
            for item in token_list:
                try:
                    item_id, expires = self._format_token_index_item(item)
                except (ValueError, TypeError):
                    # NOTE(morganfainberg): Skip on expected errors
                    # possibilities from the `_format_token_index_item` method.
                    continue

                if expires < current_time:
                    LOG.debug(('Token `%(token_id)s` is expired, removing '
                               'from `%(user_key)s`.'),
                              {'token_id': item_id, 'user_key': user_key})
                    continue

                if item_id in revoked_token_list:
                    # NOTE(morganfainberg): If the token has been revoked, it
                    # can safely be removed from this list. This helps to keep
                    # the user_token_list as reasonably small as possible.
                    LOG.debug(('Token `%(token_id)s` is revoked, removing '
                               'from `%(user_key)s`.'),
                              {'token_id': item_id, 'user_key': user_key})
                    continue
                filtered_list.append(item)
            filtered_list.append((token_id, expires_isotime_str))
            self._set_key(user_key, filtered_list, lock)
            return filtered_list

    def _get_current_time(self):
        """Return the current UTC time, normalized for comparisons."""
        return timeutils.normalize_time(timeutils.utcnow())

    def _add_to_revocation_list(self, data, lock):
        """Record ``data`` on the revocation list under the held ``lock``.

        Expired entries (including ``data`` itself, if already expired)
        are pruned so the revocation list stays minimal.
        """
        filtered_list = []
        revoked_token_data = {}

        current_time = self._get_current_time()
        expires = data['expires']

        if isinstance(expires, six.string_types):
            expires = timeutils.parse_isotime(expires)

        expires = timeutils.normalize_time(expires)

        if expires < current_time:
            LOG.warning(_LW('Token `%s` is expired, not adding to the '
                            'revocation list.'), data['id'])
            return

        revoked_token_data['expires'] = timeutils.isotime(expires,
                                                          subsecond=True)
        revoked_token_data['id'] = data['id']

        token_list = self._get_key_or_default(self.revocation_key, default=[])
        if not isinstance(token_list, list):
            # NOTE(morganfainberg): In the case that the revocation list is not
            # in a format we understand, reinitialize it. This is an attempt to
            # not allow the revocation list to be completely broken if
            # somehow the key is changed outside of keystone (e.g. memcache
            # that is shared by multiple applications). Logging occurs at error
            # level so that the cloud administrators have some awareness that
            # the revocation_list needed to be cleared out. In all, this should
            # be recoverable. Keystone cannot control external applications
            # from changing a key in some backends, however, it is possible to
            # gracefully handle and notify of this event.
            LOG.error(_LE('Reinitializing revocation list due to error '
                          'in loading revocation list from backend. '
                          'Expected `list` type got `%(type)s`. Old '
                          'revocation list data: %(list)r'),
                      {'type': type(token_list), 'list': token_list})
            token_list = []

        # NOTE(morganfainberg): on revocation, cleanup the expired entries, try
        # to keep the list of tokens revoked at the minimum.
        for token_data in token_list:
            try:
                expires_at = timeutils.normalize_time(
                    timeutils.parse_isotime(token_data['expires']))
            except ValueError:
                LOG.warning(_LW('Removing `%s` from revocation list due to '
                                'invalid expires data in revocation list.'),
                            token_data.get('id', 'INVALID_TOKEN_DATA'))
                continue
            if expires_at > current_time:
                filtered_list.append(token_data)
        filtered_list.append(revoked_token_data)
        self._set_key(self.revocation_key, filtered_list, lock)

    def delete_token(self, token_id):
        """Invalidate ``token_id``: remove it and record the revocation.

        :raises keystone.exception.TokenNotFound: if the token is absent.
        """
        # Test for existence
        with self._store.get_lock(self.revocation_key) as lock:
            data = self.get_token(token_id)
            ptk = self._prefix_token_id(token_id)
            result = self._delete_key(ptk)
            self._add_to_revocation_list(data, lock)
        return result

    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Delete all tokens matching the filters via the base driver."""
        return super(Token, self).delete_tokens(
            user_id=user_id,
            tenant_id=tenant_id,
            trust_id=trust_id,
            consumer_id=consumer_id,
        )

    def _format_token_index_item(self, item):
        """Unpack an index entry into ``(token_id, expires_datetime)``.

        :raises TypeError: if ``item`` is not a 2-sequence.
        :raises ValueError: if the expiry string cannot be parsed.
        """
        try:
            token_id, expires = item
        except (TypeError, ValueError):
            LOG.debug(('Invalid token entry expected tuple of '
                       '`(<token_id>, <expires>)` got: `%(item)r`'),
                      dict(item=item))
            raise

        try:
            expires = timeutils.normalize_time(
                timeutils.parse_isotime(expires))
        except ValueError:
            LOG.debug(('Invalid expires time on token `%(token_id)s`:'
                       ' %(expires)r'),
                      dict(token_id=token_id, expires=expires))
            raise
        return token_id, expires

    def _token_match_tenant(self, token_ref, tenant_id):
        """Return True when the token is scoped to ``tenant_id``."""
        if token_ref.get('tenant'):
            return token_ref['tenant'].get('id') == tenant_id
        return False

    def _token_match_trust(self, token_ref, trust_id):
        """Return True when the token belongs to trust ``trust_id``."""
        if not token_ref.get('trust_id'):
            return False
        return token_ref['trust_id'] == trust_id

    def _token_match_consumer(self, token_ref, consumer_id):
        """Return True when the token's OAuth1 consumer matches."""
        try:
            oauth = token_ref['token_data']['token']['OS-OAUTH1']
            return oauth.get('consumer_id') == consumer_id
        except KeyError:
            return False

    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        """Return ids of the user's live tokens matching the filters."""
        # This function is used to generate the list of tokens that should be
        # revoked when revoking by token identifiers. This approach will be
        # deprecated soon, probably in the Juno release. Setting revoke_by_id
        # to False indicates that this kind of recording should not be
        # performed. In order to test the revocation events, tokens shouldn't
        # be deleted from the backends. This check ensures that tokens are
        # still recorded.
        if not CONF.token.revoke_by_id:
            return []
        tokens = []
        user_key = self._prefix_user_id(user_id)
        token_list = self._get_user_token_list_with_expiry(user_key)
        current_time = self._get_current_time()
        for item in token_list:
            try:
                token_id, expires = self._format_token_index_item(item)
            except (TypeError, ValueError):
                # NOTE(morganfainberg): Skip on expected error possibilities
                # from the `_format_token_index_item` method.
                continue

            if expires < current_time:
                continue

            try:
                token_ref = self.get_token(token_id)
            except exception.TokenNotFound:
                # NOTE(morganfainberg): Token doesn't exist, skip it.
                continue
            if token_ref:
                if tenant_id is not None:
                    if not self._token_match_tenant(token_ref, tenant_id):
                        continue
                if trust_id is not None:
                    if not self._token_match_trust(token_ref, trust_id):
                        continue
                if consumer_id is not None:
                    if not self._token_match_consumer(token_ref, consumer_id):
                        continue

                tokens.append(token_id)
        return tokens

    def list_revoked_tokens(self):
        """Return the revocation list, or ``[]`` if it is missing/corrupt."""
        revoked_token_list = self._get_key_or_default(self.revocation_key,
                                                      default=[])
        if isinstance(revoked_token_list, list):
            return revoked_token_list
        return []

    def flush_expired_tokens(self):
        """Archive or delete tokens that have expired."""
        raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache.py b/keystone-moon/keystone/token/persistence/backends/memcache.py
new file mode 100644
index 00000000..03f27eaf
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/memcache.py
@@ -0,0 +1,33 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.token.persistence.backends import kvs
+
+
+CONF = cfg.CONF
+
+
class Token(kvs.Token):
    """Memcached-backed token persistence driver.

    Thin specialization of the generic KVS driver that selects the
    dogpile memcached backend and wires in keystone's memcache settings.
    """

    kvs_backend = 'openstack.kvs.Memcached'
    memcached_backend = 'memcached'

    def __init__(self, *args, **kwargs):
        # Inject the memcached-specific options before handing off to the
        # generic KVS driver.  The revocation list must never be evicted
        # by the cache's TTL, so it is registered under no_expiry_keys.
        kwargs.update(
            memcached_backend=self.memcached_backend,
            no_expiry_keys=[self.revocation_key],
            memcached_expire_time=CONF.token.expiration,
            url=CONF.memcache.servers,
        )
        super(Token, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache_pool.py b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
new file mode 100644
index 00000000..55f9e8ae
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.token.persistence.backends import memcache
+
+
+CONF = cfg.CONF
+
+
class Token(memcache.Token):
    """Token driver backed by a pooled memcached client."""

    memcached_backend = 'pooled_memcached'

    def __init__(self, *args, **kwargs):
        # Copy the connection-pool tuning knobs straight from the
        # [memcache] configuration section into the driver's kwargs.
        pool_options = ('dead_retry', 'socket_timeout', 'pool_maxsize',
                        'pool_unused_timeout', 'pool_connection_get_timeout')
        for option in pool_options:
            kwargs[option] = getattr(CONF.memcache, option)
        super(Token, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/token/persistence/backends/sql.py b/keystone-moon/keystone/token/persistence/backends/sql.py
new file mode 100644
index 00000000..fc70fb92
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/sql.py
@@ -0,0 +1,279 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import functools
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _LI
+from keystone import token
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
class TokenModel(sql.ModelBase, sql.DictBase):
    """SQLAlchemy model for the ``token`` table."""

    __tablename__ = 'token'
    # Attributes surfaced by DictBase's to_dict()/from_dict() round-trip;
    # everything else lands in the `extra` JSON blob.
    attributes = ['id', 'expires', 'user_id', 'trust_id']
    id = sql.Column(sql.String(64), primary_key=True)
    expires = sql.Column(sql.DateTime(), default=None)
    extra = sql.Column(sql.JsonBlob())
    # Soft-delete flag: revoked tokens are marked invalid rather than
    # removed, so list_revoked_tokens can still report them.
    valid = sql.Column(sql.Boolean(), default=True, nullable=False)
    user_id = sql.Column(sql.String(64))
    trust_id = sql.Column(sql.String(64))
    __table_args__ = (
        sql.Index('ix_token_expires', 'expires'),
        sql.Index('ix_token_expires_valid', 'expires', 'valid'),
        sql.Index('ix_token_user_id', 'user_id'),
        sql.Index('ix_token_trust_id', 'trust_id')
    )
+
+
def _expiry_range_batched(session, upper_bound_func, batch_size):
    """Returns the stop point of the next batch for expiration.

    Return the timestamp of the next token that is `batch_size` rows from
    being the oldest expired token.

    :param session: open SQLAlchemy session used for the range query
    :param upper_bound_func: zero-argument callable returning the overall
        expiry cutoff (typically ``timeutils.utcnow``)
    :param batch_size: approximate number of rows each yielded bound covers
    """

    # This expiry strategy splits the tokens into roughly equal sized batches
    # to be deleted. It does this by finding the timestamp of a token
    # `batch_size` rows from the oldest token and yielding that to the caller.
    # It's expected that the caller will then delete all rows with a timestamp
    # equal to or older than the one yielded. This may delete slightly more
    # tokens than the batch_size, but that should be ok in almost all cases.
    LOG.debug('Token expiration batch size: %d', batch_size)
    query = session.query(TokenModel.expires)
    query = query.filter(TokenModel.expires < upper_bound_func())
    query = query.order_by(TokenModel.expires)
    query = query.offset(batch_size - 1)
    query = query.limit(1)
    while True:
        try:
            # NOTE: upper_bound_func() is re-evaluated inside the filter
            # above only once; each loop re-runs the same query against
            # whatever rows remain after the caller's delete.
            next_expiration = query.one()[0]
        except sql.NotFound:
            # There are less than `batch_size` rows remaining, so fall
            # through to the normal delete
            break
        yield next_expiration
    # Final bound sweeps up the tail of fewer-than-batch_size rows.
    yield upper_bound_func()
+
+
+def _expiry_range_all(session, upper_bound_func):
+ """Expires all tokens in one pass."""
+
+ yield upper_bound_func()
+
+
class Token(token.persistence.Driver):
    """SQL backend for token persistence.

    Tokens are soft-deleted: revocation flips the ``valid`` column to
    False instead of removing the row, so revoked-but-unexpired tokens
    remain queryable via list_revoked_tokens().
    """

    # Public interface
    def get_token(self, token_id):
        """Return the stored data for ``token_id``.

        :raises keystone.exception.TokenNotFound: if the token does not
            exist or has been invalidated.
        """
        if token_id is None:
            raise exception.TokenNotFound(token_id=token_id)
        session = sql.get_session()
        token_ref = session.query(TokenModel).get(token_id)
        if not token_ref or not token_ref.valid:
            raise exception.TokenNotFound(token_id=token_id)
        return token_ref.to_dict()

    def create_token(self, token_id, data):
        """Persist a new token row, defaulting ``expires`` and ``user_id``.

        :returns: dict form of the stored token.
        """
        data_copy = copy.deepcopy(data)
        if not data_copy.get('expires'):
            data_copy['expires'] = provider.default_expire_time()
        if not data_copy.get('user_id'):
            data_copy['user_id'] = data_copy['user']['id']

        token_ref = TokenModel.from_dict(data_copy)
        token_ref.valid = True
        session = sql.get_session()
        with session.begin():
            session.add(token_ref)
        return token_ref.to_dict()

    def delete_token(self, token_id):
        """Soft-delete ``token_id`` by marking the row invalid.

        :raises keystone.exception.TokenNotFound: if the token does not
            exist or was already invalidated.
        """
        session = sql.get_session()
        with session.begin():
            token_ref = session.query(TokenModel).get(token_id)
            if not token_ref or not token_ref.valid:
                raise exception.TokenNotFound(token_id=token_id)
            token_ref.valid = False

    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Deletes all tokens in one session

        The user_id will be ignored if the trust_id is specified. user_id
        will always be specified.
        If using a trust, the token's user_id is set to the trustee's user ID
        or the trustor's user ID, so will use trust_id to query the tokens.

        """
        session = sql.get_session()
        with session.begin():
            now = timeutils.utcnow()
            query = session.query(TokenModel)
            query = query.filter_by(valid=True)
            # Only live tokens need invalidating; expired rows are ignored.
            query = query.filter(TokenModel.expires > now)
            if trust_id:
                query = query.filter(TokenModel.trust_id == trust_id)
            else:
                query = query.filter(TokenModel.user_id == user_id)

            for token_ref in query.all():
                # tenant/consumer scoping live inside the JSON blob, so
                # they are filtered in Python rather than in SQL.
                if tenant_id:
                    token_ref_dict = token_ref.to_dict()
                    if not self._tenant_matches(tenant_id, token_ref_dict):
                        continue
                if consumer_id:
                    token_ref_dict = token_ref.to_dict()
                    if not self._consumer_matches(consumer_id, token_ref_dict):
                        continue

                token_ref.valid = False

    def _tenant_matches(self, tenant_id, token_ref_dict):
        """Return True if the token is scoped to ``tenant_id`` (or no filter)."""
        return ((tenant_id is None) or
                (token_ref_dict.get('tenant') and
                 token_ref_dict['tenant'].get('id') == tenant_id))

    def _consumer_matches(self, consumer_id, ref):
        """Return True if the token's OAuth1 consumer matches (or no filter)."""
        if consumer_id is None:
            return True
        else:
            try:
                oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
                # KeyError from the 'consumer_id' lookup is treated as
                # "not an OAuth token" rather than an error.
                return oauth and oauth['consumer_id'] == consumer_id
            except KeyError:
                return False

    def _list_tokens_for_trust(self, trust_id):
        """Return ids of live, valid tokens issued under ``trust_id``."""
        session = sql.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        query = query.filter(TokenModel.trust_id == trust_id)

        token_references = query.filter_by(valid=True)
        for token_ref in token_references:
            token_ref_dict = token_ref.to_dict()
            tokens.append(token_ref_dict['id'])
        return tokens

    def _list_tokens_for_user(self, user_id, tenant_id=None):
        """Return ids of the user's live tokens, optionally tenant-scoped."""
        session = sql.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        query = query.filter(TokenModel.user_id == user_id)

        token_references = query.filter_by(valid=True)
        for token_ref in token_references:
            token_ref_dict = token_ref.to_dict()
            if self._tenant_matches(tenant_id, token_ref_dict):
                tokens.append(token_ref['id'])
        return tokens

    def _list_tokens_for_consumer(self, user_id, consumer_id):
        """Return ids of the user's live tokens issued to ``consumer_id``."""
        tokens = []
        session = sql.get_session()
        with session.begin():
            now = timeutils.utcnow()
            query = session.query(TokenModel)
            query = query.filter(TokenModel.expires > now)
            query = query.filter(TokenModel.user_id == user_id)
            token_references = query.filter_by(valid=True)

            for token_ref in token_references:
                token_ref_dict = token_ref.to_dict()
                if self._consumer_matches(consumer_id, token_ref_dict):
                    tokens.append(token_ref_dict['id'])
        return tokens

    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        """Dispatch to the trust/consumer/user listing helper.

        Returns [] when revoke-by-id is disabled, since this listing only
        exists to support revocation by token identifier.
        """
        if not CONF.token.revoke_by_id:
            return []
        if trust_id:
            return self._list_tokens_for_trust(trust_id)
        if consumer_id:
            return self._list_tokens_for_consumer(user_id, consumer_id)
        else:
            return self._list_tokens_for_user(user_id, tenant_id)

    def list_revoked_tokens(self):
        """Return ``{'id', 'expires'}`` records for unexpired revoked tokens."""
        session = sql.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel.id, TokenModel.expires)
        query = query.filter(TokenModel.expires > now)
        token_references = query.filter_by(valid=False)
        for token_ref in token_references:
            record = {
                'id': token_ref[0],
                'expires': token_ref[1],
            }
            tokens.append(record)
        return tokens

    def _expiry_range_strategy(self, dialect):
        """Choose a token range expiration strategy

        Based on the DB dialect, select an expiry range callable that is
        appropriate.
        """

        # DB2 and MySQL can both benefit from a batched strategy. On DB2 the
        # transaction log can fill up and on MySQL w/Galera, large
        # transactions can exceed the maximum write set size.
        if dialect == 'ibm_db_sa':
            # Limit of 100 is known to not fill a transaction log
            # of default maximum size while not significantly
            # impacting the performance of large token purges on
            # systems where the maximum transaction log size has
            # been increased beyond the default.
            return functools.partial(_expiry_range_batched,
                                     batch_size=100)
        elif dialect == 'mysql':
            # We want somewhat more than 100, since Galera replication delay is
            # at least RTT*2. This can be a significant amount of time if
            # doing replication across a WAN.
            return functools.partial(_expiry_range_batched,
                                     batch_size=1000)
        return _expiry_range_all

    def flush_expired_tokens(self):
        """Delete expired token rows, batched per the dialect's strategy."""
        session = sql.get_session()
        dialect = session.bind.dialect.name
        expiry_range_func = self._expiry_range_strategy(dialect)
        query = session.query(TokenModel.expires)
        total_removed = 0
        upper_bound_func = timeutils.utcnow
        for expiry_time in expiry_range_func(session, upper_bound_func):
            delete_query = query.filter(TokenModel.expires <=
                                        expiry_time)
            # synchronize_session=False skips in-session state sync; safe
            # here because no loaded TokenModel objects are reused after.
            row_count = delete_query.delete(synchronize_session=False)
            total_removed += row_count
            LOG.debug('Removed %d total expired tokens', total_removed)

        session.flush()
        LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
diff --git a/keystone-moon/keystone/token/persistence/core.py b/keystone-moon/keystone/token/persistence/core.py
new file mode 100644
index 00000000..19f0df35
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/core.py
@@ -0,0 +1,361 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Token persistence service."""
+
+import abc
+import copy
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='token')
+REVOCATION_MEMOIZE = cache.get_memoization_decorator(
+ section='token', expiration_section='revoke')
+
+
+@dependency.requires('assignment_api', 'identity_api', 'resource_api',
+                     'token_provider_api', 'trust_api')
+class PersistenceManager(manager.Manager):
+    """Default pivot point for the Token backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+
+    def __init__(self):
+        # Load the concrete token driver named by CONF.token.driver.
+        super(PersistenceManager, self).__init__(CONF.token.driver)
+
+    def _assert_valid(self, token_id, token_ref):
+        """Raise TokenNotFound if the token is expired."""
+        current_time = timeutils.normalize_time(timeutils.utcnow())
+        expires = token_ref.get('expires')
+        if not expires or current_time > timeutils.normalize_time(expires):
+            raise exception.TokenNotFound(token_id=token_id)
+
+    def get_token(self, token_id):
+        """Return the token data for ``token_id``.
+
+        :raises keystone.exception.TokenNotFound: if the id is empty,
+            unknown to the driver, or the token has expired.
+        """
+        if not token_id:
+            # NOTE(morganfainberg): There are cases when the
+            # context['token_id'] will in-fact be None. This also saves
+            # a round-trip to the backend if we don't have a token_id.
+            raise exception.TokenNotFound(token_id='')
+        unique_id = self.token_provider_api.unique_id(token_id)
+        token_ref = self._get_token(unique_id)
+        # NOTE(morganfainberg): Lift expired checking to the manager, there is
+        # no reason to make the drivers implement this check. With caching,
+        # self._get_token could return an expired token. Make sure we behave
+        # as expected and raise TokenNotFound on those instances.
+        self._assert_valid(token_id, token_ref)
+        return token_ref
+
+    @MEMOIZE
+    def _get_token(self, token_id):
+        # Only ever use the "unique" id in the cache key.
+        return self.driver.get_token(token_id)
+
+    def create_token(self, token_id, data):
+        """Persist a new token and warm the per-token cache.
+
+        A deep copy of ``data`` is stored so the caller's dict is never
+        mutated; the copy's 'id' is normalized to the unique id form.
+        """
+        unique_id = self.token_provider_api.unique_id(token_id)
+        data_copy = copy.deepcopy(data)
+        data_copy['id'] = unique_id
+        ret = self.driver.create_token(unique_id, data_copy)
+        if MEMOIZE.should_cache(ret):
+            # NOTE(morganfainberg): when doing a cache set, you must pass the
+            # same arguments through, the same as invalidate (this includes
+            # "self"). First argument is always the value to be cached
+            self._get_token.set(ret, self, unique_id)
+        return ret
+
+    def delete_token(self, token_id):
+        """Delete one token and invalidate the related caches.
+
+        No-op when revoke-by-id is disabled in configuration.
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        unique_id = self.token_provider_api.unique_id(token_id)
+        self.driver.delete_token(unique_id)
+        self._invalidate_individual_token_cache(unique_id)
+        self.invalidate_revocation_list()
+
+    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+                      consumer_id=None):
+        """Delete all of a user's tokens matching the optional filters.
+
+        The matching token ids are listed first so each one's cache entry
+        can be invalidated after the bulk driver delete.
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        token_list = self.driver._list_tokens(user_id, tenant_id, trust_id,
+                                              consumer_id)
+        self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id)
+        for token_id in token_list:
+            unique_id = self.token_provider_api.unique_id(token_id)
+            self._invalidate_individual_token_cache(unique_id)
+        self.invalidate_revocation_list()
+
+    @REVOCATION_MEMOIZE
+    def list_revoked_tokens(self):
+        # Cached with the 'revoke' expiration section (REVOCATION_MEMOIZE);
+        # invalidated explicitly via invalidate_revocation_list().
+        return self.driver.list_revoked_tokens()
+
+    def invalidate_revocation_list(self):
+        # NOTE(morganfainberg): Note that ``self`` needs to be passed to
+        # invalidate() because of the way the invalidation method works on
+        # determining cache-keys.
+        self.list_revoked_tokens.invalidate(self)
+
+    def delete_tokens_for_domain(self, domain_id):
+        """Delete all tokens for a given domain.
+
+        It will delete all the project-scoped tokens for the projects
+        that are owned by the given domain, as well as any tokens issued
+        to users that are owned by this domain.
+
+        However, deletion of domain_scoped tokens will still need to be
+        implemented as stated in TODO below.
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        projects = self.resource_api.list_projects()
+        for project in projects:
+            if project['domain_id'] == domain_id:
+                for user_id in self.assignment_api.list_user_ids_for_project(
+                        project['id']):
+                    self.delete_tokens_for_user(user_id, project['id'])
+        # TODO(morganfainberg): implement deletion of domain_scoped tokens.
+
+        users = self.identity_api.list_users(domain_id)
+        user_ids = (user['id'] for user in users)
+        self.delete_tokens_for_users(user_ids)
+
+    def delete_tokens_for_user(self, user_id, project_id=None):
+        """Delete all tokens for a given user or user-project combination.
+
+        This method adds in the extra logic for handling trust-scoped token
+        revocations in a single call instead of needing to explicitly handle
+        trusts in the caller's logic.
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        self.delete_tokens(user_id, tenant_id=project_id)
+        for trust in self.trust_api.list_trusts_for_trustee(user_id):
+            # Ensure we revoke tokens associated to the trust / project
+            # user_id combination.
+            self.delete_tokens(user_id, trust_id=trust['id'],
+                               tenant_id=project_id)
+        for trust in self.trust_api.list_trusts_for_trustor(user_id):
+            # Ensure we revoke tokens associated to the trust / project /
+            # user_id combination where the user_id is the trustor.
+
+            # NOTE(morganfainberg): This revocation is a bit coarse, but it
+            # covers a number of cases such as disabling of the trustor user,
+            # deletion of the trustor user (for any number of reasons). It
+            # might make sense to refine this and be more surgical on the
+            # deletions (e.g. don't revoke tokens for the trusts when the
+            # trustor changes password). For now, to maintain previous
+            # functionality, this will continue to be a bit overzealous on
+            # revocations.
+            self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],
+                               tenant_id=project_id)
+
+    def delete_tokens_for_users(self, user_ids, project_id=None):
+        """Delete all tokens for a list of user_ids.
+
+        :param user_ids: list of user identifiers
+        :param project_id: optional project identifier
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        for user_id in user_ids:
+            self.delete_tokens_for_user(user_id, project_id=project_id)
+
+    def _invalidate_individual_token_cache(self, token_id):
+        # NOTE(morganfainberg): invalidate takes the exact same arguments as
+        # the normal method, this means we need to pass "self" in (which gets
+        # stripped off).
+
+        # FIXME(morganfainberg): Does this cache actually need to be
+        # invalidated? We maintain a cached revocation list, which should be
+        # consulted before accepting a token as valid.  For now we will
+        # do the explicit individual token invalidation.
+        self._get_token.invalidate(self, token_id)
+        self.token_provider_api.invalidate_individual_token_cache(token_id)
+
+
+# NOTE(morganfainberg): @dependency.optional() is required here to ensure the
+# class-level optional dependency control attribute is populated as empty
+# this is because of the override of .__getattr__ and ensures that if the
+# optional dependency injector changes attributes, this class doesn't break.
+@dependency.optional()
+@dependency.requires('token_provider_api')
+@dependency.provider('token_api')
+class Manager(object):
+    """The token_api provider.
+
+    This class is a proxy class to the token_provider_api's persistence
+    manager.  Attribute access is forwarded (and memoized on first use) via
+    ``__getattr__``, emitting a deprecation warning per attribute.
+    """
+    def __init__(self):
+        # NOTE(morganfainberg): __init__ is required for dependency processing.
+        super(Manager, self).__init__()
+
+    def __getattr__(self, item):
+        """Forward calls to the `token_provider_api` persistence manager."""
+
+        # NOTE(morganfainberg): Prevent infinite recursion, raise an
+        # AttributeError for 'token_provider_api' ensuring that the dep
+        # injection doesn't infinitely try and lookup self.token_provider_api
+        # on _process_dependencies. This doesn't need an exception string as
+        # it should only ever be hit on instantiation.
+        if item == 'token_provider_api':
+            raise AttributeError()
+
+        f = getattr(self.token_provider_api._persistence, item)
+        LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
+                        'utilizing methods on `token_provider_api` and may be '
+                        'removed in Kilo.'), item)
+        # Cache the resolved attribute on the instance so subsequent access
+        # bypasses __getattr__ (and the warning fires only once per name).
+        setattr(self, item, f)
+        return f
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for a Token driver."""
+
+    @abc.abstractmethod
+    def get_token(self, token_id):
+        """Get a token by id.
+
+        :param token_id: identity of the token
+        :type token_id: string
+        :returns: token_ref
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_token(self, token_id, data):
+        """Create a token by id and data.
+
+        :param token_id: identity of the token
+        :type token_id: string
+        :param data: dictionary with additional reference information
+
+        ::
+
+            {
+                expires=''
+                id=token_id,
+                user=user_ref,
+                tenant=tenant_ref,
+                metadata=metadata_ref
+            }
+
+        :type data: dict
+        :returns: token_ref or None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_token(self, token_id):
+        """Deletes a token by id.
+
+        :param token_id: identity of the token
+        :type token_id: string
+        :returns: None.
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+                      consumer_id=None):
+        """Deletes tokens by user.
+
+        If the tenant_id is not None, only delete the tokens by user id under
+        the specified tenant.
+
+        If the trust_id is not None, it will be used to query tokens and the
+        user_id will be ignored.
+
+        If the consumer_id is not None, only delete the tokens by consumer id
+        that match the specified consumer id.
+
+        :param user_id: identity of user
+        :type user_id: string
+        :param tenant_id: identity of the tenant
+        :type tenant_id: string
+        :param trust_id: identity of the trust
+        :type trust_id: string
+        :param consumer_id: identity of the consumer
+        :type consumer_id: string
+        :returns: None.
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        # NOTE: unlike the other abstract methods, this one ships a working
+        # default implementation: list the matching ids via the concrete
+        # driver's _list_tokens() and delete them one at a time.
+        if not CONF.token.revoke_by_id:
+            return
+        token_list = self._list_tokens(user_id,
+                                       tenant_id=tenant_id,
+                                       trust_id=trust_id,
+                                       consumer_id=consumer_id)
+
+        for token in token_list:
+            try:
+                self.delete_token(token)
+            except exception.NotFound:
+                # Token already gone (e.g. concurrent delete); best-effort
+                # semantics, so keep going.
+                pass
+
+    @abc.abstractmethod
+    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
+                     consumer_id=None):
+        """Returns a list of current token_id's for a user
+
+        This is effectively a private method only used by the ``delete_tokens``
+        method and should not be called by anything outside of the
+        ``token_api`` manager or the token driver itself.
+
+        :param user_id: identity of the user
+        :type user_id: string
+        :param tenant_id: identity of the tenant
+        :type tenant_id: string
+        :param trust_id: identity of the trust
+        :type trust_id: string
+        :param consumer_id: identity of the consumer
+        :type consumer_id: string
+        :returns: list of token_id's
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_revoked_tokens(self):
+        """Returns a list of all revoked tokens
+
+        :returns: list of token_id's
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def flush_expired_tokens(self):
+        """Archive or delete tokens that have expired.
+        """
+        raise exception.NotImplemented()  # pragma: no cover