aboutsummaryrefslogtreecommitdiffstats
path: root/keystone-moon/keystone/token
diff options
context:
space:
mode:
authorWuKong <rebirthmonkey@gmail.com>2015-06-30 18:47:29 +0200
committerWuKong <rebirthmonkey@gmail.com>2015-06-30 18:47:29 +0200
commitb8c756ecdd7cced1db4300935484e8c83701c82e (patch)
tree87e51107d82b217ede145de9d9d59e2100725bd7 /keystone-moon/keystone/token
parentc304c773bae68fb854ed9eab8fb35c4ef17cf136 (diff)
migrate moon code from github to opnfv
Change-Id: Ice53e368fd1114d56a75271aa9f2e598e3eba604 Signed-off-by: WuKong <rebirthmonkey@gmail.com>
Diffstat (limited to 'keystone-moon/keystone/token')
-rw-r--r--keystone-moon/keystone/token/__init__.py18
-rw-r--r--keystone-moon/keystone/token/controllers.py523
-rw-r--r--keystone-moon/keystone/token/persistence/__init__.py16
-rw-r--r--keystone-moon/keystone/token/persistence/backends/__init__.py0
-rw-r--r--keystone-moon/keystone/token/persistence/backends/kvs.py357
-rw-r--r--keystone-moon/keystone/token/persistence/backends/memcache.py33
-rw-r--r--keystone-moon/keystone/token/persistence/backends/memcache_pool.py28
-rw-r--r--keystone-moon/keystone/token/persistence/backends/sql.py279
-rw-r--r--keystone-moon/keystone/token/persistence/core.py361
-rw-r--r--keystone-moon/keystone/token/provider.py584
-rw-r--r--keystone-moon/keystone/token/providers/__init__.py0
-rw-r--r--keystone-moon/keystone/token/providers/common.py709
-rw-r--r--keystone-moon/keystone/token/providers/fernet/__init__.py13
-rw-r--r--keystone-moon/keystone/token/providers/fernet/core.py267
-rw-r--r--keystone-moon/keystone/token/providers/fernet/token_formatters.py545
-rw-r--r--keystone-moon/keystone/token/providers/fernet/utils.py243
-rw-r--r--keystone-moon/keystone/token/providers/pki.py53
-rw-r--r--keystone-moon/keystone/token/providers/pkiz.py51
-rw-r--r--keystone-moon/keystone/token/providers/uuid.py33
-rw-r--r--keystone-moon/keystone/token/routers.py59
20 files changed, 4172 insertions, 0 deletions
diff --git a/keystone-moon/keystone/token/__init__.py b/keystone-moon/keystone/token/__init__.py
new file mode 100644
index 00000000..a73e19f9
--- /dev/null
+++ b/keystone-moon/keystone/token/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token import controllers # noqa
+from keystone.token import persistence # noqa
+from keystone.token import provider # noqa
+from keystone.token import routers # noqa
diff --git a/keystone-moon/keystone/token/controllers.py b/keystone-moon/keystone/token/controllers.py
new file mode 100644
index 00000000..3304acb5
--- /dev/null
+++ b/keystone-moon/keystone/token/controllers.py
@@ -0,0 +1,523 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import sys
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import six
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class ExternalAuthNotApplicable(Exception):
+ """External authentication is not applicable."""
+ pass
+
+
+@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
+ 'resource_api', 'role_api', 'token_provider_api',
+ 'trust_api')
+class Auth(controller.V2Controller):
+
+ @controller.v2_deprecated
+ def ca_cert(self, context, auth=None):
+ ca_file = open(CONF.signing.ca_certs, 'r')
+ data = ca_file.read()
+ ca_file.close()
+ return data
+
+ @controller.v2_deprecated
+ def signing_cert(self, context, auth=None):
+ cert_file = open(CONF.signing.certfile, 'r')
+ data = cert_file.read()
+ cert_file.close()
+ return data
+
+ @controller.v2_deprecated
+ def authenticate(self, context, auth=None):
+ """Authenticate credentials and return a token.
+
+ Accept auth as a dict that looks like::
+
+ {
+ "auth":{
+ "passwordCredentials":{
+ "username":"test_user",
+ "password":"mypass"
+ },
+ "tenantName":"customer-x"
+ }
+ }
+
+ In this case, tenant is optional, if not provided the token will be
+ considered "unscoped" and can later be used to get a scoped token.
+
+ Alternatively, this call accepts auth with only a token and tenant
+ that will return a token that is scoped to that tenant.
+ """
+
+ if auth is None:
+ raise exception.ValidationError(attribute='auth',
+ target='request body')
+
+ if "token" in auth:
+ # Try to authenticate using a token
+ auth_info = self._authenticate_token(
+ context, auth)
+ else:
+ # Try external authentication
+ try:
+ auth_info = self._authenticate_external(
+ context, auth)
+ except ExternalAuthNotApplicable:
+ # Try local authentication
+ auth_info = self._authenticate_local(
+ context, auth)
+
+ user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id = auth_info
+ # Validate that the auth info is valid and nothing is disabled
+ try:
+ self.identity_api.assert_user_enabled(
+ user_id=user_ref['id'], user=user_ref)
+ if tenant_ref:
+ self.resource_api.assert_project_enabled(
+ project_id=tenant_ref['id'], project=tenant_ref)
+ except AssertionError as e:
+ six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+ sys.exc_info()[2])
+ # NOTE(morganfainberg): Make sure the data is in correct form since it
+ # might be consumed external to Keystone and this is a v2.0 controller.
+ # The user_ref is encoded into the auth_token_data which is returned as
+ # part of the token data. The token provider doesn't care about the
+ # format.
+ user_ref = self.v3_to_v2_user(user_ref)
+ if tenant_ref:
+ tenant_ref = self.filter_domain_id(tenant_ref)
+ auth_token_data = self._get_auth_token_data(user_ref,
+ tenant_ref,
+ metadata_ref,
+ expiry,
+ audit_id)
+
+ if tenant_ref:
+ catalog_ref = self.catalog_api.get_catalog(
+ user_ref['id'], tenant_ref['id'])
+ else:
+ catalog_ref = {}
+
+ auth_token_data['id'] = 'placeholder'
+ if bind:
+ auth_token_data['bind'] = bind
+
+ roles_ref = []
+ for role_id in metadata_ref.get('roles', []):
+ role_ref = self.role_api.get_role(role_id)
+ roles_ref.append(dict(name=role_ref['name']))
+
+ (token_id, token_data) = self.token_provider_api.issue_v2_token(
+ auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)
+
+ # NOTE(wanghong): We consume a trust use only when we are using trusts
+ # and have successfully issued a token.
+ if CONF.trust.enabled and 'trust_id' in auth:
+ self.trust_api.consume_use(auth['trust_id'])
+
+ return token_data
+
+ def _restrict_scope(self, token_model_ref):
+ # A trust token cannot be used to get another token
+ if token_model_ref.trust_scoped:
+ raise exception.Forbidden()
+ if not CONF.token.allow_rescope_scoped_token:
+ # Do not allow conversion from scoped tokens.
+ if token_model_ref.project_scoped or token_model_ref.domain_scoped:
+ raise exception.Forbidden(action=_("rescope a scoped token"))
+
+ def _authenticate_token(self, context, auth):
+ """Try to authenticate using an already existing token.
+
+ Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
+ """
+ if 'token' not in auth:
+ raise exception.ValidationError(
+ attribute='token', target='auth')
+
+ if "id" not in auth['token']:
+ raise exception.ValidationError(
+ attribute="id", target="token")
+
+ old_token = auth['token']['id']
+ if len(old_token) > CONF.max_token_size:
+ raise exception.ValidationSizeError(attribute='token',
+ size=CONF.max_token_size)
+
+ try:
+ token_model_ref = token_model.KeystoneToken(
+ token_id=old_token,
+ token_data=self.token_provider_api.validate_token(old_token))
+ except exception.NotFound as e:
+ raise exception.Unauthorized(e)
+
+ wsgi.validate_token_bind(context, token_model_ref)
+
+ self._restrict_scope(token_model_ref)
+ user_id = token_model_ref.user_id
+ tenant_id = self._get_project_id_from_auth(auth)
+
+ if not CONF.trust.enabled and 'trust_id' in auth:
+ raise exception.Forbidden('Trusts are disabled.')
+ elif CONF.trust.enabled and 'trust_id' in auth:
+ trust_ref = self.trust_api.get_trust(auth['trust_id'])
+ if trust_ref is None:
+ raise exception.Forbidden()
+ if user_id != trust_ref['trustee_user_id']:
+ raise exception.Forbidden()
+ if (trust_ref['project_id'] and
+ tenant_id != trust_ref['project_id']):
+ raise exception.Forbidden()
+ if ('expires' in trust_ref) and (trust_ref['expires']):
+ expiry = trust_ref['expires']
+ if expiry < timeutils.parse_isotime(timeutils.isotime()):
+ raise exception.Forbidden()
+ user_id = trust_ref['trustor_user_id']
+ trustor_user_ref = self.identity_api.get_user(
+ trust_ref['trustor_user_id'])
+ if not trustor_user_ref['enabled']:
+ raise exception.Forbidden()
+ trustee_user_ref = self.identity_api.get_user(
+ trust_ref['trustee_user_id'])
+ if not trustee_user_ref['enabled']:
+ raise exception.Forbidden()
+
+ if trust_ref['impersonation'] is True:
+ current_user_ref = trustor_user_ref
+ else:
+ current_user_ref = trustee_user_ref
+
+ else:
+ current_user_ref = self.identity_api.get_user(user_id)
+
+ metadata_ref = {}
+ tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+ user_id, tenant_id)
+
+ expiry = token_model_ref.expires
+ if CONF.trust.enabled and 'trust_id' in auth:
+ trust_id = auth['trust_id']
+ trust_roles = []
+ for role in trust_ref['roles']:
+ if 'roles' not in metadata_ref:
+ raise exception.Forbidden()
+ if role['id'] in metadata_ref['roles']:
+ trust_roles.append(role['id'])
+ else:
+ raise exception.Forbidden()
+ if 'expiry' in trust_ref and trust_ref['expiry']:
+ trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
+ if trust_expiry < expiry:
+ expiry = trust_expiry
+ metadata_ref['roles'] = trust_roles
+ metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
+ metadata_ref['trust_id'] = trust_id
+
+ bind = token_model_ref.bind
+ audit_id = token_model_ref.audit_chain_id
+
+ return (current_user_ref, tenant_ref, metadata_ref, expiry, bind,
+ audit_id)
+
+ def _authenticate_local(self, context, auth):
+ """Try to authenticate against the identity backend.
+
+ Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
+ """
+ if 'passwordCredentials' not in auth:
+ raise exception.ValidationError(
+ attribute='passwordCredentials', target='auth')
+
+ if "password" not in auth['passwordCredentials']:
+ raise exception.ValidationError(
+ attribute='password', target='passwordCredentials')
+
+ password = auth['passwordCredentials']['password']
+ if password and len(password) > CONF.identity.max_password_length:
+ raise exception.ValidationSizeError(
+ attribute='password', size=CONF.identity.max_password_length)
+
+ if (not auth['passwordCredentials'].get("userId") and
+ not auth['passwordCredentials'].get("username")):
+ raise exception.ValidationError(
+ attribute='username or userId',
+ target='passwordCredentials')
+
+ user_id = auth['passwordCredentials'].get('userId')
+ if user_id and len(user_id) > CONF.max_param_size:
+ raise exception.ValidationSizeError(attribute='userId',
+ size=CONF.max_param_size)
+
+ username = auth['passwordCredentials'].get('username', '')
+
+ if username:
+ if len(username) > CONF.max_param_size:
+ raise exception.ValidationSizeError(attribute='username',
+ size=CONF.max_param_size)
+ try:
+ user_ref = self.identity_api.get_user_by_name(
+ username, CONF.identity.default_domain_id)
+ user_id = user_ref['id']
+ except exception.UserNotFound as e:
+ raise exception.Unauthorized(e)
+
+ try:
+ user_ref = self.identity_api.authenticate(
+ context,
+ user_id=user_id,
+ password=password)
+ except AssertionError as e:
+ raise exception.Unauthorized(e.args[0])
+
+ metadata_ref = {}
+ tenant_id = self._get_project_id_from_auth(auth)
+ tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+ user_id, tenant_id)
+
+ expiry = provider.default_expire_time()
+ bind = None
+ audit_id = None
+ return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
+
+ def _authenticate_external(self, context, auth):
+ """Try to authenticate an external user via REMOTE_USER variable.
+
+ Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
+ """
+ environment = context.get('environment', {})
+ if not environment.get('REMOTE_USER'):
+ raise ExternalAuthNotApplicable()
+
+ username = environment['REMOTE_USER']
+ try:
+ user_ref = self.identity_api.get_user_by_name(
+ username, CONF.identity.default_domain_id)
+ user_id = user_ref['id']
+ except exception.UserNotFound as e:
+ raise exception.Unauthorized(e)
+
+ metadata_ref = {}
+ tenant_id = self._get_project_id_from_auth(auth)
+ tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+ user_id, tenant_id)
+
+ expiry = provider.default_expire_time()
+ bind = None
+ if ('kerberos' in CONF.token.bind and
+ environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
+ bind = {'kerberos': username}
+ audit_id = None
+
+ return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
+
+ def _get_auth_token_data(self, user, tenant, metadata, expiry, audit_id):
+ return dict(user=user,
+ tenant=tenant,
+ metadata=metadata,
+ expires=expiry,
+ parent_audit_id=audit_id)
+
+ def _get_project_id_from_auth(self, auth):
+ """Extract tenant information from auth dict.
+
+ Returns a valid tenant_id if it exists, or None if not specified.
+ """
+ tenant_id = auth.get('tenantId')
+ if tenant_id and len(tenant_id) > CONF.max_param_size:
+ raise exception.ValidationSizeError(attribute='tenantId',
+ size=CONF.max_param_size)
+
+ tenant_name = auth.get('tenantName')
+ if tenant_name and len(tenant_name) > CONF.max_param_size:
+ raise exception.ValidationSizeError(attribute='tenantName',
+ size=CONF.max_param_size)
+
+ if tenant_name:
+ try:
+ tenant_ref = self.resource_api.get_project_by_name(
+ tenant_name, CONF.identity.default_domain_id)
+ tenant_id = tenant_ref['id']
+ except exception.ProjectNotFound as e:
+ raise exception.Unauthorized(e)
+ return tenant_id
+
+ def _get_project_roles_and_ref(self, user_id, tenant_id):
+ """Returns the project roles for this user, and the project ref."""
+
+ tenant_ref = None
+ role_list = []
+ if tenant_id:
+ try:
+ tenant_ref = self.resource_api.get_project(tenant_id)
+ role_list = self.assignment_api.get_roles_for_user_and_project(
+ user_id, tenant_id)
+ except exception.ProjectNotFound:
+ pass
+
+ if not role_list:
+ msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
+ msg = msg % {'u_id': user_id, 't_id': tenant_id}
+ LOG.warning(msg)
+ raise exception.Unauthorized(msg)
+
+ return (tenant_ref, role_list)
+
+ def _get_token_ref(self, token_id, belongs_to=None):
+ """Returns a token if a valid one exists.
+
+ Optionally, limited to a token owned by a specific tenant.
+
+ """
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id,
+ token_data=self.token_provider_api.validate_token(token_id))
+ if belongs_to:
+ if not token_ref.project_scoped:
+ raise exception.Unauthorized(
+ _('Token does not belong to specified tenant.'))
+ if token_ref.project_id != belongs_to:
+ raise exception.Unauthorized(
+ _('Token does not belong to specified tenant.'))
+ return token_ref
+
+ @controller.v2_deprecated
+ @controller.protected()
+ def validate_token_head(self, context, token_id):
+ """Check that a token is valid.
+
+ Optionally, also ensure that it is owned by a specific tenant.
+
+ Identical to ``validate_token``, except does not return a response.
+
+ The code in ``keystone.common.wsgi.render_response`` will remove
+ the content body.
+
+ """
+ belongs_to = context['query_string'].get('belongsTo')
+ return self.token_provider_api.validate_v2_token(token_id, belongs_to)
+
+ @controller.v2_deprecated
+ @controller.protected()
+ def validate_token(self, context, token_id):
+ """Check that a token is valid.
+
+ Optionally, also ensure that it is owned by a specific tenant.
+
+ Returns metadata about the token along any associated roles.
+
+ """
+ belongs_to = context['query_string'].get('belongsTo')
+ # TODO(ayoung) validate against revocation API
+ return self.token_provider_api.validate_v2_token(token_id, belongs_to)
+
+ @controller.v2_deprecated
+ def delete_token(self, context, token_id):
+ """Delete a token, effectively invalidating it for authz."""
+ # TODO(termie): this stuff should probably be moved to middleware
+ self.assert_admin(context)
+ self.token_provider_api.revoke_token(token_id)
+
+ @controller.v2_deprecated
+ @controller.protected()
+ def revocation_list(self, context, auth=None):
+ if not CONF.token.revoke_by_id:
+ raise exception.Gone()
+ tokens = self.token_provider_api.list_revoked_tokens()
+
+ for t in tokens:
+ expires = t['expires']
+ if expires and isinstance(expires, datetime.datetime):
+ t['expires'] = timeutils.isotime(expires)
+ data = {'revoked': tokens}
+ json_data = jsonutils.dumps(data)
+ signed_text = cms.cms_sign_text(json_data,
+ CONF.signing.certfile,
+ CONF.signing.keyfile)
+
+ return {'signed': signed_text}
+
+ @controller.v2_deprecated
+ def endpoints(self, context, token_id):
+ """Return a list of endpoints available to the token."""
+ self.assert_admin(context)
+
+ token_ref = self._get_token_ref(token_id)
+
+ catalog_ref = None
+ if token_ref.project_id:
+ catalog_ref = self.catalog_api.get_catalog(
+ token_ref.user_id,
+ token_ref.project_id)
+
+ return Auth.format_endpoint_list(catalog_ref)
+
+ @classmethod
+ def format_endpoint_list(cls, catalog_ref):
+ """Formats a list of endpoints according to Identity API v2.
+
+ The v2.0 API wants an endpoint list to look like::
+
+ {
+ 'endpoints': [
+ {
+ 'id': $endpoint_id,
+ 'name': $SERVICE[name],
+ 'type': $SERVICE,
+ 'tenantId': $tenant_id,
+ 'region': $REGION,
+ }
+ ],
+ 'endpoints_links': [],
+ }
+
+ """
+ if not catalog_ref:
+ return {}
+
+ endpoints = []
+ for region_name, region_ref in six.iteritems(catalog_ref):
+ for service_type, service_ref in six.iteritems(region_ref):
+ endpoints.append({
+ 'id': service_ref.get('id'),
+ 'name': service_ref.get('name'),
+ 'type': service_type,
+ 'region': region_name,
+ 'publicURL': service_ref.get('publicURL'),
+ 'internalURL': service_ref.get('internalURL'),
+ 'adminURL': service_ref.get('adminURL'),
+ })
+
+ return {'endpoints': endpoints, 'endpoints_links': []}
diff --git a/keystone-moon/keystone/token/persistence/__init__.py b/keystone-moon/keystone/token/persistence/__init__.py
new file mode 100644
index 00000000..29ad5653
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/__init__.py
@@ -0,0 +1,16 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token.persistence.core import * # noqa
+
+
+__all__ = ['Manager', 'Driver', 'backends']
diff --git a/keystone-moon/keystone/token/persistence/backends/__init__.py b/keystone-moon/keystone/token/persistence/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/__init__.py
diff --git a/keystone-moon/keystone/token/persistence/backends/kvs.py b/keystone-moon/keystone/token/persistence/backends/kvs.py
new file mode 100644
index 00000000..b4807bf1
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/kvs.py
@@ -0,0 +1,357 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+import copy
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import kvs
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+from keystone import token
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Token(token.persistence.Driver):
+ """KeyValueStore backend for tokens.
+
+ This is the base implementation for any/all key-value-stores (e.g.
+ memcached) for the Token backend. It is recommended to only use the base
+ in-memory implementation for testing purposes.
+ """
+
+ revocation_key = 'revocation-list'
+ kvs_backend = 'openstack.kvs.Memory'
+
+ def __init__(self, backing_store=None, **kwargs):
+ super(Token, self).__init__()
+ self._store = kvs.get_key_value_store('token-driver')
+ if backing_store is not None:
+ self.kvs_backend = backing_store
+ if not self._store.is_configured:
+ # Do not re-configure the backend if the store has been initialized
+ self._store.configure(backing_store=self.kvs_backend, **kwargs)
+ if self.__class__ == Token:
+ # NOTE(morganfainberg): Only warn if the base KVS implementation
+ # is instantiated.
+ LOG.warn(_LW('It is recommended to only use the base '
+ 'key-value-store implementation for the token driver '
+ 'for testing purposes. Please use '
+ 'keystone.token.persistence.backends.memcache.Token '
+ 'or keystone.token.persistence.backends.sql.Token '
+ 'instead.'))
+
+ def _prefix_token_id(self, token_id):
+ return 'token-%s' % token_id.encode('utf-8')
+
+ def _prefix_user_id(self, user_id):
+ return 'usertokens-%s' % user_id.encode('utf-8')
+
+ def _get_key_or_default(self, key, default=None):
+ try:
+ return self._store.get(key)
+ except exception.NotFound:
+ return default
+
+ def _get_key(self, key):
+ return self._store.get(key)
+
+ def _set_key(self, key, value, lock=None):
+ self._store.set(key, value, lock)
+
+ def _delete_key(self, key):
+ return self._store.delete(key)
+
+ def get_token(self, token_id):
+ ptk = self._prefix_token_id(token_id)
+ try:
+ token_ref = self._get_key(ptk)
+ except exception.NotFound:
+ raise exception.TokenNotFound(token_id=token_id)
+
+ return token_ref
+
+ def create_token(self, token_id, data):
+ """Create a token by id and data.
+
+ It is assumed the caller has performed data validation on the "data"
+ parameter.
+ """
+ data_copy = copy.deepcopy(data)
+ ptk = self._prefix_token_id(token_id)
+ if not data_copy.get('expires'):
+ data_copy['expires'] = provider.default_expire_time()
+ if not data_copy.get('user_id'):
+ data_copy['user_id'] = data_copy['user']['id']
+
+ # NOTE(morganfainberg): for ease of manipulating the data without
+ # concern about the backend, always store the value(s) in the
+ # index as the isotime (string) version so this is where the string is
+ # built.
+ expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)
+
+ self._set_key(ptk, data_copy)
+ user_id = data['user']['id']
+ user_key = self._prefix_user_id(user_id)
+ self._update_user_token_list(user_key, token_id, expires_str)
+ if CONF.trust.enabled and data.get('trust_id'):
+ # NOTE(morganfainberg): If trusts are enabled and this is a trust
+ # scoped token, we add the token to the trustee list as well. This
+ # allows password changes of the trustee to also expire the token.
+ # There is no harm in placing the token in multiple lists, as
+ # _list_tokens is smart enough to handle almost any case of
+ # valid/invalid/expired for a given token.
+ token_data = data_copy['token_data']
+ if data_copy['token_version'] == token.provider.V2:
+ trustee_user_id = token_data['access']['trust'][
+ 'trustee_user_id']
+ elif data_copy['token_version'] == token.provider.V3:
+ trustee_user_id = token_data['OS-TRUST:trust'][
+ 'trustee_user_id']
+ else:
+ raise exception.UnsupportedTokenVersionException(
+ _('Unknown token version %s') %
+ data_copy.get('token_version'))
+
+ trustee_key = self._prefix_user_id(trustee_user_id)
+ self._update_user_token_list(trustee_key, token_id, expires_str)
+
+ return data_copy
+
+ def _get_user_token_list_with_expiry(self, user_key):
+ """Return a list of tuples in the format (token_id, token_expiry) for
+ the user_key.
+ """
+ return self._get_key_or_default(user_key, default=[])
+
+ def _get_user_token_list(self, user_key):
+ """Return a list of token_ids for the user_key."""
+ token_list = self._get_user_token_list_with_expiry(user_key)
+ # Each element is a tuple of (token_id, token_expiry). Most code does
+ # not care about the expiry, it is stripped out and only a
+ # list of token_ids are returned.
+ return [t[0] for t in token_list]
+
+ def _update_user_token_list(self, user_key, token_id, expires_isotime_str):
+ current_time = self._get_current_time()
+ revoked_token_list = set([t['id'] for t in
+ self.list_revoked_tokens()])
+
+ with self._store.get_lock(user_key) as lock:
+ filtered_list = []
+ token_list = self._get_user_token_list_with_expiry(user_key)
+ for item in token_list:
+ try:
+ item_id, expires = self._format_token_index_item(item)
+ except (ValueError, TypeError):
+ # NOTE(morganfainberg): Skip on expected errors
+ # possibilities from the `_format_token_index_item` method.
+ continue
+
+ if expires < current_time:
+ LOG.debug(('Token `%(token_id)s` is expired, removing '
+ 'from `%(user_key)s`.'),
+ {'token_id': item_id, 'user_key': user_key})
+ continue
+
+ if item_id in revoked_token_list:
+ # NOTE(morganfainberg): If the token has been revoked, it
+ # can safely be removed from this list. This helps to keep
+ # the user_token_list as reasonably small as possible.
+ LOG.debug(('Token `%(token_id)s` is revoked, removing '
+ 'from `%(user_key)s`.'),
+ {'token_id': item_id, 'user_key': user_key})
+ continue
+ filtered_list.append(item)
+ filtered_list.append((token_id, expires_isotime_str))
+ self._set_key(user_key, filtered_list, lock)
+ return filtered_list
+
+ def _get_current_time(self):
+ return timeutils.normalize_time(timeutils.utcnow())
+
+ def _add_to_revocation_list(self, data, lock):
+ filtered_list = []
+ revoked_token_data = {}
+
+ current_time = self._get_current_time()
+ expires = data['expires']
+
+ if isinstance(expires, six.string_types):
+ expires = timeutils.parse_isotime(expires)
+
+ expires = timeutils.normalize_time(expires)
+
+ if expires < current_time:
+ LOG.warning(_LW('Token `%s` is expired, not adding to the '
+ 'revocation list.'), data['id'])
+ return
+
+ revoked_token_data['expires'] = timeutils.isotime(expires,
+ subsecond=True)
+ revoked_token_data['id'] = data['id']
+
+ token_list = self._get_key_or_default(self.revocation_key, default=[])
+ if not isinstance(token_list, list):
+ # NOTE(morganfainberg): In the case that the revocation list is not
+ # in a format we understand, reinitialize it. This is an attempt to
+ # not allow the revocation list to be completely broken if
+ # somehow the key is changed outside of keystone (e.g. memcache
+ # that is shared by multiple applications). Logging occurs at error
+ # level so that the cloud administrators have some awareness that
+ # the revocation_list needed to be cleared out. In all, this should
+ # be recoverable. Keystone cannot control external applications
+ # from changing a key in some backends, however, it is possible to
+ # gracefully handle and notify of this event.
+ LOG.error(_LE('Reinitializing revocation list due to error '
+ 'in loading revocation list from backend. '
+ 'Expected `list` type got `%(type)s`. Old '
+ 'revocation list data: %(list)r'),
+ {'type': type(token_list), 'list': token_list})
+ token_list = []
+
+ # NOTE(morganfainberg): on revocation, cleanup the expired entries, try
+ # to keep the list of tokens revoked at the minimum.
+ for token_data in token_list:
+ try:
+ expires_at = timeutils.normalize_time(
+ timeutils.parse_isotime(token_data['expires']))
+ except ValueError:
+ LOG.warning(_LW('Removing `%s` from revocation list due to '
+ 'invalid expires data in revocation list.'),
+ token_data.get('id', 'INVALID_TOKEN_DATA'))
+ continue
+ if expires_at > current_time:
+ filtered_list.append(token_data)
+ filtered_list.append(revoked_token_data)
+ self._set_key(self.revocation_key, filtered_list, lock)
+
+ def delete_token(self, token_id):
+ # Test for existence
+ with self._store.get_lock(self.revocation_key) as lock:
+ data = self.get_token(token_id)
+ ptk = self._prefix_token_id(token_id)
+ result = self._delete_key(ptk)
+ self._add_to_revocation_list(data, lock)
+ return result
+
+ def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+ consumer_id=None):
+ return super(Token, self).delete_tokens(
+ user_id=user_id,
+ tenant_id=tenant_id,
+ trust_id=trust_id,
+ consumer_id=consumer_id,
+ )
+
+ def _format_token_index_item(self, item):
+ try:
+ token_id, expires = item
+ except (TypeError, ValueError):
+ LOG.debug(('Invalid token entry expected tuple of '
+ '`(<token_id>, <expires>)` got: `%(item)r`'),
+ dict(item=item))
+ raise
+
+ try:
+ expires = timeutils.normalize_time(
+ timeutils.parse_isotime(expires))
+ except ValueError:
+ LOG.debug(('Invalid expires time on token `%(token_id)s`:'
+ ' %(expires)r'),
+ dict(token_id=token_id, expires=expires))
+ raise
+ return token_id, expires
+
+ def _token_match_tenant(self, token_ref, tenant_id):
+ if token_ref.get('tenant'):
+ return token_ref['tenant'].get('id') == tenant_id
+ return False
+
+ def _token_match_trust(self, token_ref, trust_id):
+ if not token_ref.get('trust_id'):
+ return False
+ return token_ref['trust_id'] == trust_id
+
+ def _token_match_consumer(self, token_ref, consumer_id):
+ try:
+ oauth = token_ref['token_data']['token']['OS-OAUTH1']
+ return oauth.get('consumer_id') == consumer_id
+ except KeyError:
+ return False
+
+ def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
+ consumer_id=None):
+ # This function is used to generate the list of tokens that should be
+ # revoked when revoking by token identifiers. This approach will be
+ # deprecated soon, probably in the Juno release. Setting revoke_by_id
+ # to False indicates that this kind of recording should not be
+ # performed. In order to test the revocation events, tokens shouldn't
+ # be deleted from the backends. This check ensures that tokens are
+ # still recorded.
+ if not CONF.token.revoke_by_id:
+ return []
+ tokens = []
+ user_key = self._prefix_user_id(user_id)
+ token_list = self._get_user_token_list_with_expiry(user_key)
+ current_time = self._get_current_time()
+ for item in token_list:
+ try:
+ token_id, expires = self._format_token_index_item(item)
+ except (TypeError, ValueError):
+ # NOTE(morganfainberg): Skip on expected error possibilities
+ # from the `_format_token_index_item` method.
+ continue
+
+ if expires < current_time:
+ continue
+
+ try:
+ token_ref = self.get_token(token_id)
+ except exception.TokenNotFound:
+ # NOTE(morganfainberg): Token doesn't exist, skip it.
+ continue
+ if token_ref:
+ if tenant_id is not None:
+ if not self._token_match_tenant(token_ref, tenant_id):
+ continue
+ if trust_id is not None:
+ if not self._token_match_trust(token_ref, trust_id):
+ continue
+ if consumer_id is not None:
+ if not self._token_match_consumer(token_ref, consumer_id):
+ continue
+
+ tokens.append(token_id)
+ return tokens
+
+ def list_revoked_tokens(self):
+ revoked_token_list = self._get_key_or_default(self.revocation_key,
+ default=[])
+ if isinstance(revoked_token_list, list):
+ return revoked_token_list
+ return []
+
+ def flush_expired_tokens(self):
+ """Archive or delete tokens that have expired."""
+ raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache.py b/keystone-moon/keystone/token/persistence/backends/memcache.py
new file mode 100644
index 00000000..03f27eaf
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/memcache.py
@@ -0,0 +1,33 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.token.persistence.backends import kvs
+
+
+CONF = cfg.CONF
+
+
class Token(kvs.Token):
    """Memcached token persistence built on top of the generic KVS driver."""

    kvs_backend = 'openstack.kvs.Memcached'
    memcached_backend = 'memcached'

    def __init__(self, *args, **kwargs):
        # Force the memcached-specific settings before delegating to the
        # KVS constructor; the revocation list must never expire, while
        # ordinary token keys use the configured token lifetime.
        kwargs.update(
            memcached_backend=self.memcached_backend,
            no_expiry_keys=[self.revocation_key],
            memcached_expire_time=CONF.token.expiration,
            url=CONF.memcache.servers,
        )
        super(Token, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache_pool.py b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
new file mode 100644
index 00000000..55f9e8ae
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.token.persistence.backends import memcache
+
+
+CONF = cfg.CONF
+
+
class Token(memcache.Token):
    """Token backend using a pooled memcached client."""

    memcached_backend = 'pooled_memcached'

    def __init__(self, *args, **kwargs):
        # Copy the connection-pool tuning options verbatim from the
        # [memcache] config section into the driver keyword arguments.
        pool_options = ('dead_retry', 'socket_timeout', 'pool_maxsize',
                        'pool_unused_timeout', 'pool_connection_get_timeout')
        for option in pool_options:
            kwargs[option] = getattr(CONF.memcache, option)
        super(Token, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/token/persistence/backends/sql.py b/keystone-moon/keystone/token/persistence/backends/sql.py
new file mode 100644
index 00000000..fc70fb92
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/backends/sql.py
@@ -0,0 +1,279 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import functools
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _LI
+from keystone import token
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
class TokenModel(sql.ModelBase, sql.DictBase):
    """SQL model for a persisted token.

    Revocation is a soft delete in this driver: ``valid`` is flipped to
    False rather than removing the row (see ``Token.delete_token`` below).
    NOTE(review): the full token document presumably lives in the ``extra``
    JSON blob, with the remaining columns denormalized for querying —
    confirm against ``sql.DictBase``.
    """

    __tablename__ = 'token'
    # Attributes surfaced by DictBase's to_dict()/from_dict() round-trip.
    attributes = ['id', 'expires', 'user_id', 'trust_id']
    id = sql.Column(sql.String(64), primary_key=True)
    expires = sql.Column(sql.DateTime(), default=None)
    extra = sql.Column(sql.JsonBlob())
    valid = sql.Column(sql.Boolean(), default=True, nullable=False)
    user_id = sql.Column(sql.String(64))
    trust_id = sql.Column(sql.String(64))
    # Indexes match the lookups performed by this driver: expiry flushes,
    # revocation-list queries, and per-user / per-trust token listing.
    __table_args__ = (
        sql.Index('ix_token_expires', 'expires'),
        sql.Index('ix_token_expires_valid', 'expires', 'valid'),
        sql.Index('ix_token_user_id', 'user_id'),
        sql.Index('ix_token_trust_id', 'trust_id')
    )
+
+
def _expiry_range_batched(session, upper_bound_func, batch_size):
    """Returns the stop point of the next batch for expiration.

    Return the timestamp of the next token that is `batch_size` rows from
    being the oldest expired token.
    """

    # Split the expired tokens into roughly equal-sized batches by locating
    # the timestamp of the token `batch_size` rows past the oldest one and
    # yielding it; the caller deletes everything at or before each yielded
    # timestamp. A batch may run slightly over batch_size, which is fine.
    LOG.debug('Token expiration batch size: %d', batch_size)
    batch_end = (session.query(TokenModel.expires)
                 .filter(TokenModel.expires < upper_bound_func())
                 .order_by(TokenModel.expires)
                 .offset(batch_size - 1)
                 .limit(1))
    while True:
        try:
            next_stop = batch_end.one()[0]
        except sql.NotFound:
            # Fewer than `batch_size` rows remain; fall through to the
            # final full-range delete below.
            break
        yield next_stop
    yield upper_bound_func()
+
+
+def _expiry_range_all(session, upper_bound_func):
+ """Expires all tokens in one pass."""
+
+ yield upper_bound_func()
+
+
class Token(token.persistence.Driver):
    """SQL-backed token persistence driver.

    Revocation is a soft delete: rows are marked ``valid=False`` rather
    than removed, so the revocation list can be served from the same
    table.  Physically removing expired rows is left to
    ``flush_expired_tokens``.
    """

    # Public interface
    def get_token(self, token_id):
        """Return the token data dict.

        :raises keystone.exception.TokenNotFound: if token_id is None, the
            row is absent, or the token has been revoked (valid=False).
        """
        if token_id is None:
            raise exception.TokenNotFound(token_id=token_id)
        session = sql.get_session()
        token_ref = session.query(TokenModel).get(token_id)
        if not token_ref or not token_ref.valid:
            raise exception.TokenNotFound(token_id=token_id)
        return token_ref.to_dict()

    def create_token(self, token_id, data):
        """Persist a token, defaulting 'expires' and 'user_id' if absent."""
        data_copy = copy.deepcopy(data)
        if not data_copy.get('expires'):
            data_copy['expires'] = provider.default_expire_time()
        if not data_copy.get('user_id'):
            data_copy['user_id'] = data_copy['user']['id']

        token_ref = TokenModel.from_dict(data_copy)
        token_ref.valid = True
        session = sql.get_session()
        with session.begin():
            session.add(token_ref)
        return token_ref.to_dict()

    def delete_token(self, token_id):
        """Soft-delete a token by flipping its ``valid`` flag.

        :raises keystone.exception.TokenNotFound: if the token is absent or
            already invalid.
        """
        session = sql.get_session()
        with session.begin():
            token_ref = session.query(TokenModel).get(token_id)
            if not token_ref or not token_ref.valid:
                raise exception.TokenNotFound(token_id=token_id)
            token_ref.valid = False

    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Deletes all tokens in one session

        The user_id will be ignored if the trust_id is specified. user_id
        will always be specified.
        If using a trust, the token's user_id is set to the trustee's user ID
        or the trustor's user ID, so will use trust_id to query the tokens.

        """
        session = sql.get_session()
        with session.begin():
            now = timeutils.utcnow()
            query = session.query(TokenModel)
            query = query.filter_by(valid=True)
            query = query.filter(TokenModel.expires > now)
            if trust_id:
                query = query.filter(TokenModel.trust_id == trust_id)
            else:
                query = query.filter(TokenModel.user_id == user_id)

            # tenant/consumer filtering requires the deserialized token
            # document, so it is done in Python rather than in SQL.
            for token_ref in query.all():
                if tenant_id:
                    token_ref_dict = token_ref.to_dict()
                    if not self._tenant_matches(tenant_id, token_ref_dict):
                        continue
                if consumer_id:
                    token_ref_dict = token_ref.to_dict()
                    if not self._consumer_matches(consumer_id,
                                                  token_ref_dict):
                        continue

                token_ref.valid = False

    def _tenant_matches(self, tenant_id, token_ref_dict):
        # A None tenant_id matches everything (no project filtering).
        return ((tenant_id is None) or
                (token_ref_dict.get('tenant') and
                 token_ref_dict['tenant'].get('id') == tenant_id))

    def _consumer_matches(self, consumer_id, ref):
        # A None consumer_id matches everything (no OAuth filtering).
        if consumer_id is None:
            return True
        else:
            try:
                oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
                return oauth and oauth['consumer_id'] == consumer_id
            except KeyError:
                # Malformed token document: treat as no match.
                return False

    def _list_tokens_for_trust(self, trust_id):
        """List ids of valid, unexpired tokens issued under a trust."""
        session = sql.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        query = query.filter(TokenModel.trust_id == trust_id)

        token_references = query.filter_by(valid=True)
        for token_ref in token_references:
            token_ref_dict = token_ref.to_dict()
            tokens.append(token_ref_dict['id'])
        return tokens

    def _list_tokens_for_user(self, user_id, tenant_id=None):
        """List ids of valid, unexpired tokens for a user (and project)."""
        session = sql.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        query = query.filter(TokenModel.user_id == user_id)

        token_references = query.filter_by(valid=True)
        for token_ref in token_references:
            token_ref_dict = token_ref.to_dict()
            if self._tenant_matches(tenant_id, token_ref_dict):
                # Read the id from the dict form for consistency with the
                # other _list_tokens_for_* helpers.
                tokens.append(token_ref_dict['id'])
        return tokens

    def _list_tokens_for_consumer(self, user_id, consumer_id):
        """List ids of valid, unexpired tokens for an OAuth consumer."""
        tokens = []
        session = sql.get_session()
        with session.begin():
            now = timeutils.utcnow()
            query = session.query(TokenModel)
            query = query.filter(TokenModel.expires > now)
            query = query.filter(TokenModel.user_id == user_id)
            token_references = query.filter_by(valid=True)

            for token_ref in token_references:
                token_ref_dict = token_ref.to_dict()
                if self._consumer_matches(consumer_id, token_ref_dict):
                    tokens.append(token_ref_dict['id'])
        return tokens

    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        """Dispatch to the appropriate listing helper (see Driver)."""
        if not CONF.token.revoke_by_id:
            return []
        if trust_id:
            return self._list_tokens_for_trust(trust_id)
        if consumer_id:
            return self._list_tokens_for_consumer(user_id, consumer_id)
        else:
            return self._list_tokens_for_user(user_id, tenant_id)

    def list_revoked_tokens(self):
        """Return {'id', 'expires'} records for revoked, unexpired tokens."""
        session = sql.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel.id, TokenModel.expires)
        query = query.filter(TokenModel.expires > now)
        token_references = query.filter_by(valid=False)
        for token_ref in token_references:
            record = {
                'id': token_ref[0],
                'expires': token_ref[1],
            }
            tokens.append(record)
        return tokens

    def _expiry_range_strategy(self, dialect):
        """Choose a token range expiration strategy

        Based on the DB dialect, select an expiry range callable that is
        appropriate.
        """

        # DB2 and MySQL can both benefit from a batched strategy. On DB2 the
        # transaction log can fill up and on MySQL w/Galera, large
        # transactions can exceed the maximum write set size.
        if dialect == 'ibm_db_sa':
            # Limit of 100 is known to not fill a transaction log
            # of default maximum size while not significantly
            # impacting the performance of large token purges on
            # systems where the maximum transaction log size has
            # been increased beyond the default.
            return functools.partial(_expiry_range_batched,
                                     batch_size=100)
        elif dialect == 'mysql':
            # We want somewhat more than 100, since Galera replication delay is
            # at least RTT*2. This can be a significant amount of time if
            # doing replication across a WAN.
            return functools.partial(_expiry_range_batched,
                                     batch_size=1000)
        return _expiry_range_all

    def flush_expired_tokens(self):
        """Physically delete expired token rows, in dialect-sized batches."""
        session = sql.get_session()
        dialect = session.bind.dialect.name
        expiry_range_func = self._expiry_range_strategy(dialect)
        query = session.query(TokenModel.expires)
        total_removed = 0
        upper_bound_func = timeutils.utcnow
        for expiry_time in expiry_range_func(session, upper_bound_func):
            delete_query = query.filter(TokenModel.expires <=
                                        expiry_time)
            row_count = delete_query.delete(synchronize_session=False)
            total_removed += row_count
            LOG.debug('Removed %d total expired tokens', total_removed)

        session.flush()
        LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
diff --git a/keystone-moon/keystone/token/persistence/core.py b/keystone-moon/keystone/token/persistence/core.py
new file mode 100644
index 00000000..19f0df35
--- /dev/null
+++ b/keystone-moon/keystone/token/persistence/core.py
@@ -0,0 +1,361 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Token persistence service."""
+
+import abc
+import copy
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='token')
+REVOCATION_MEMOIZE = cache.get_memoization_decorator(
+ section='token', expiration_section='revoke')
+
+
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
                     'token_provider_api', 'trust_api')
class PersistenceManager(manager.Manager):
    """Default pivot point for the Token backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        # CONF.token.driver names the persistence backend to load.
        super(PersistenceManager, self).__init__(CONF.token.driver)

    def _assert_valid(self, token_id, token_ref):
        """Raise TokenNotFound if the token is expired."""
        current_time = timeutils.normalize_time(timeutils.utcnow())
        expires = token_ref.get('expires')
        if not expires or current_time > timeutils.normalize_time(expires):
            raise exception.TokenNotFound(token_id=token_id)

    def get_token(self, token_id):
        """Return the token reference, raising TokenNotFound when the id is
        empty, the token is absent, or it has expired.
        """
        if not token_id:
            # NOTE(morganfainberg): There are cases when the
            # context['token_id'] will in-fact be None. This also saves
            # a round-trip to the backend if we don't have a token_id.
            raise exception.TokenNotFound(token_id='')
        unique_id = self.token_provider_api.unique_id(token_id)
        token_ref = self._get_token(unique_id)
        # NOTE(morganfainberg): Lift expired checking to the manager, there is
        # no reason to make the drivers implement this check. With caching,
        # self._get_token could return an expired token. Make sure we behave
        # as expected and raise TokenNotFound on those instances.
        self._assert_valid(token_id, token_ref)
        return token_ref

    @MEMOIZE
    def _get_token(self, token_id):
        # Only ever use the "unique" id in the cache key.
        return self.driver.get_token(token_id)

    def create_token(self, token_id, data):
        """Persist a token and prime the per-token cache with the result."""
        unique_id = self.token_provider_api.unique_id(token_id)
        data_copy = copy.deepcopy(data)
        data_copy['id'] = unique_id
        ret = self.driver.create_token(unique_id, data_copy)
        if MEMOIZE.should_cache(ret):
            # NOTE(morganfainberg): when doing a cache set, you must pass the
            # same arguments through, the same as invalidate (this includes
            # "self"). First argument is always the value to be cached
            self._get_token.set(ret, self, unique_id)
        return ret

    def delete_token(self, token_id):
        """Revoke a single token and invalidate the relevant caches."""
        # No-op when revocation-by-id is disabled in configuration.
        if not CONF.token.revoke_by_id:
            return
        unique_id = self.token_provider_api.unique_id(token_id)
        self.driver.delete_token(unique_id)
        self._invalidate_individual_token_cache(unique_id)
        self.invalidate_revocation_list()

    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Revoke all matching tokens for a user and invalidate caches.

        The token list is captured before deletion so each affected cache
        entry can be invalidated individually afterwards.
        """
        if not CONF.token.revoke_by_id:
            return
        token_list = self.driver._list_tokens(user_id, tenant_id, trust_id,
                                              consumer_id)
        self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id)
        for token_id in token_list:
            unique_id = self.token_provider_api.unique_id(token_id)
            self._invalidate_individual_token_cache(unique_id)
        self.invalidate_revocation_list()

    @REVOCATION_MEMOIZE
    def list_revoked_tokens(self):
        # Cached under the [revoke] expiration window (REVOCATION_MEMOIZE).
        return self.driver.list_revoked_tokens()

    def invalidate_revocation_list(self):
        # NOTE(morganfainberg): Note that ``self`` needs to be passed to
        # invalidate() because of the way the invalidation method works on
        # determining cache-keys.
        self.list_revoked_tokens.invalidate(self)

    def delete_tokens_for_domain(self, domain_id):
        """Delete all tokens for a given domain.

        It will delete all the project-scoped tokens for the projects
        that are owned by the given domain, as well as any tokens issued
        to users that are owned by this domain.

        However, deletion of domain_scoped tokens will still need to be
        implemented as stated in TODO below.
        """
        if not CONF.token.revoke_by_id:
            return
        projects = self.resource_api.list_projects()
        for project in projects:
            if project['domain_id'] == domain_id:
                for user_id in self.assignment_api.list_user_ids_for_project(
                        project['id']):
                    self.delete_tokens_for_user(user_id, project['id'])
        # TODO(morganfainberg): implement deletion of domain_scoped tokens.

        users = self.identity_api.list_users(domain_id)
        user_ids = (user['id'] for user in users)
        self.delete_tokens_for_users(user_ids)

    def delete_tokens_for_user(self, user_id, project_id=None):
        """Delete all tokens for a given user or user-project combination.

        This method adds in the extra logic for handling trust-scoped token
        revocations in a single call instead of needing to explicitly handle
        trusts in the caller's logic.
        """
        if not CONF.token.revoke_by_id:
            return
        self.delete_tokens(user_id, tenant_id=project_id)
        for trust in self.trust_api.list_trusts_for_trustee(user_id):
            # Ensure we revoke tokens associated to the trust / project
            # user_id combination.
            self.delete_tokens(user_id, trust_id=trust['id'],
                               tenant_id=project_id)
        for trust in self.trust_api.list_trusts_for_trustor(user_id):
            # Ensure we revoke tokens associated to the trust / project /
            # user_id combination where the user_id is the trustor.

            # NOTE(morganfainberg): This revocation is a bit coarse, but it
            # covers a number of cases such as disabling of the trustor user,
            # deletion of the trustor user (for any number of reasons). It
            # might make sense to refine this and be more surgical on the
            # deletions (e.g. don't revoke tokens for the trusts when the
            # trustor changes password). For now, to maintain previous
            # functionality, this will continue to be a bit overzealous on
            # revocations.
            self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],
                               tenant_id=project_id)

    def delete_tokens_for_users(self, user_ids, project_id=None):
        """Delete all tokens for a list of user_ids.

        :param user_ids: list of user identifiers
        :param project_id: optional project identifier
        """
        if not CONF.token.revoke_by_id:
            return
        for user_id in user_ids:
            self.delete_tokens_for_user(user_id, project_id=project_id)

    def _invalidate_individual_token_cache(self, token_id):
        # NOTE(morganfainberg): invalidate takes the exact same arguments as
        # the normal method, this means we need to pass "self" in (which gets
        # stripped off).

        # FIXME(morganfainberg): Does this cache actually need to be
        # invalidated? We maintain a cached revocation list, which should be
        # consulted before accepting a token as valid. For now we will
        # do the explicit individual token invalidation.
        self._get_token.invalidate(self, token_id)
        self.token_provider_api.invalidate_individual_token_cache(token_id)
+
+
+# NOTE(morganfainberg): @dependency.optional() is required here to ensure the
+# class-level optional dependency control attribute is populated as empty
+# this is because of the override of .__getattr__ and ensures that if the
+# optional dependency injector changes attributes, this class doesn't break.
@dependency.optional()
@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(object):
    """Deprecated ``token_api`` facade.

    Every attribute access is forwarded to the persistence manager hanging
    off ``token_provider_api``; the resolved attribute is then memoized on
    this instance via setattr, so the deprecation warning is emitted only on
    the first access of each attribute.
    """

    def __init__(self):
        # Dependency processing requires an explicit __init__.
        super(Manager, self).__init__()

    def __getattr__(self, item):
        """Forward calls to the `token_provider_api` persistence manager."""

        # Guard against infinite recursion while the dependency injector
        # probes for `token_provider_api` during _process_dependencies; an
        # empty AttributeError suffices since this only happens at
        # instantiation time.
        if item == 'token_provider_api':
            raise AttributeError()

        target = getattr(self.token_provider_api._persistence, item)
        LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
                        'utilizing methods on `token_provider_api` and may be '
                        'removed in Kilo.'), item)
        setattr(self, item, target)
        return target
+
+
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface description for a Token driver."""

    @abc.abstractmethod
    def get_token(self, token_id):
        """Get a token by id.

        :param token_id: identity of the token
        :type token_id: string
        :returns: token_ref
        :raises: keystone.exception.TokenNotFound

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def create_token(self, token_id, data):
        """Create a token by id and data.

        :param token_id: identity of the token
        :type token_id: string
        :param data: dictionary with additional reference information

        ::

            {
                expires=''
                id=token_id,
                user=user_ref,
                tenant=tenant_ref,
                metadata=metadata_ref
            }

        :type data: dict
        :returns: token_ref or None.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_token(self, token_id):
        """Deletes a token by id.

        :param token_id: identity of the token
        :type token_id: string
        :returns: None.
        :raises: keystone.exception.TokenNotFound

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Deletes tokens by user.

        If the tenant_id is not None, only delete the tokens by user id under
        the specified tenant.

        If the trust_id is not None, it will be used to query tokens and the
        user_id will be ignored.

        If the consumer_id is not None, only delete the tokens by consumer id
        that match the specified consumer id.

        Although abstract, this method carries a default implementation that
        lists matching tokens via ``_list_tokens`` and deletes them one at a
        time; subclasses may call it via super() or replace it wholesale.
        It is a no-op when ``[token] revoke_by_id`` is disabled.

        :param user_id: identity of user
        :type user_id: string
        :param tenant_id: identity of the tenant
        :type tenant_id: string
        :param trust_id: identity of the trust
        :type trust_id: string
        :param consumer_id: identity of the consumer
        :type consumer_id: string
        :returns: None.
        :raises: keystone.exception.TokenNotFound

        """
        if not CONF.token.revoke_by_id:
            return
        token_list = self._list_tokens(user_id,
                                       tenant_id=tenant_id,
                                       trust_id=trust_id,
                                       consumer_id=consumer_id)

        for token in token_list:
            try:
                self.delete_token(token)
            except exception.NotFound:
                # A concurrent delete of the same token is not an error.
                pass

    @abc.abstractmethod
    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        """Returns a list of current token_id's for a user

        This is effectively a private method only used by the ``delete_tokens``
        method and should not be called by anything outside of the
        ``token_api`` manager or the token driver itself.

        :param user_id: identity of the user
        :type user_id: string
        :param tenant_id: identity of the tenant
        :type tenant_id: string
        :param trust_id: identity of the trust
        :type trust_id: string
        :param consumer_id: identity of the consumer
        :type consumer_id: string
        :returns: list of token_id's

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_revoked_tokens(self):
        """Returns a list of all revoked tokens

        :returns: list of token_id's

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def flush_expired_tokens(self):
        """Archive or delete tokens that have expired.
        """
        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/token/provider.py b/keystone-moon/keystone/token/provider.py
new file mode 100644
index 00000000..fb41d4bb
--- /dev/null
+++ b/keystone-moon/keystone/token/provider.py
@@ -0,0 +1,584 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Token provider interface."""
+
+import abc
+import base64
+import datetime
+import sys
+import uuid
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.models import token_model
+from keystone import notifications
+from keystone.token import persistence
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='token')
+
+# NOTE(morganfainberg): This is for compatibility in case someone was relying
+# on the old location of the UnsupportedTokenVersionException for their code.
+UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException
+
+# supported token versions
+V2 = token_model.V2
+V3 = token_model.V3
+VERSIONS = token_model.VERSIONS
+
+
def base64_encode(s):
    """Encode a URL-safe string, stripping the '=' padding.

    :param s: text or bytes to encode
    :returns: a text (native str) base64url value without padding
    """
    if not isinstance(s, bytes):
        s = s.encode('utf-8')
    # rstrip must use a bytes argument on Python 3 (b64encode returns
    # bytes); decode afterwards so callers always get text back.
    return base64.urlsafe_b64encode(s).rstrip(b'=').decode('utf-8')
+
+
def random_urlsafe_str():
    """Generate a random URL-safe string.

    :returns: a 22-character text string, decodable by
              ``random_urlsafe_str_to_bytes()``
    """
    # chop the padding (==) off the end of the encoding to save space;
    # decode so the result is text, which is what
    # random_urlsafe_str_to_bytes() expects to concatenate '==' onto.
    return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2].decode('utf-8')
+
+
def random_urlsafe_str_to_bytes(s):
    """Convert a string generated by ``random_urlsafe_str()`` to bytes."""
    # The generator strips the trailing '==' padding; restore it so the
    # base64 decoder accepts the value.
    padded = s + '=='
    return base64.urlsafe_b64decode(padded)
+
+
def default_expire_time():
    """Determine when a fresh token should expire.

    Expiration time varies based on configuration (see ``[token] expiration``).

    :returns: a naive UTC datetime.datetime object

    """
    lifetime = datetime.timedelta(seconds=CONF.token.expiration)
    return timeutils.utcnow() + lifetime
+
+
def audit_info(parent_audit_id):
    """Build the audit data for a token.

    A fresh audit_id is always generated. Without a ``parent_audit_id`` the
    result is a one-element list; with one, the parent is appended so it is
    always at index 1.

    :param parent_audit_id: the audit of the original token in the chain
    :type parent_audit_id: str
    :returns: Keystone token audit data
    """
    new_audit_id = random_urlsafe_str()
    if parent_audit_id is None:
        return [new_audit_id]
    return [new_audit_id, parent_audit_id]
+
+
+@dependency.provider('token_provider_api')
+@dependency.requires('assignment_api', 'revoke_api')
+class Manager(manager.Manager):
+ """Default pivot point for the token provider backend.
+
+ See :mod:`keystone.common.manager.Manager` for more details on how this
+ dynamically calls the backend.
+
+ """
+
+ V2 = V2
+ V3 = V3
+ VERSIONS = VERSIONS
+ INVALIDATE_PROJECT_TOKEN_PERSISTENCE = 'invalidate_project_tokens'
+ INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
+ _persistence_manager = None
+
    def __init__(self):
        # CONF.token.provider names the provider driver to load dynamically.
        super(Manager, self).__init__(CONF.token.provider)
        self._register_callback_listeners()
+
    def _register_callback_listeners(self):
        # This is used by the @dependency.provider decorator to register the
        # provider (token_provider_api) manager to listen for trust deletions.
        # NOTE(review): each callback presumably deletes persisted tokens for
        # the deleted/disabled resource — their definitions are elsewhere in
        # this class; confirm there.
        callbacks = {
            notifications.ACTIONS.deleted: [
                ['OS-TRUST:trust', self._trust_deleted_event_callback],
                ['user', self._delete_user_tokens_callback],
                ['domain', self._delete_domain_tokens_callback],
            ],
            notifications.ACTIONS.disabled: [
                ['user', self._delete_user_tokens_callback],
                ['domain', self._delete_domain_tokens_callback],
                ['project', self._delete_project_tokens_callback],
            ],
            notifications.ACTIONS.internal: [
                [notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
                 self._delete_user_tokens_callback],
                [notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
                 self._delete_user_project_tokens_callback],
                [notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
                 self._delete_user_oauth_consumer_tokens_callback],
            ]
        }

        for event, cb_info in six.iteritems(callbacks):
            for resource_type, callback_fns in cb_info:
                notifications.register_event_callback(event, resource_type,
                                                      callback_fns)
+
    @property
    def _needs_persistence(self):
        # Whether the configured provider stores tokens in a backend, as
        # opposed to being self-contained (see the Fernet note in
        # validate_v3_token below).
        return self.driver.needs_persistence()
+
    @property
    def _persistence(self):
        """Lazily constructed token persistence manager."""
        # NOTE(morganfainberg): This should not be handled via __init__ to
        # avoid dependency injection oddities circular dependencies (where
        # the provider manager requires the token persistence manager, which
        # requires the token provider manager).
        if self._persistence_manager is None:
            self._persistence_manager = persistence.PersistenceManager()
        return self._persistence_manager
+
    def unique_id(self, token_id):
        """Return a unique ID for a token.

        The returned value is useful as the primary key of a database table,
        memcache store, or other lookup table.

        :returns: Given a PKI token, returns its hashed value. Otherwise,
                  returns the passed-in value (such as a UUID token ID or an
                  existing hash).
        """
        return cms.cms_hash_token(token_id, mode=CONF.token.hash_algorithm)
+
    def _create_token(self, token_id, token_data):
        """Persist token data, tolerating a concurrent identical create."""
        try:
            # Normalize a string 'expires' into a datetime before storing.
            if isinstance(token_data['expires'], six.string_types):
                token_data['expires'] = timeutils.normalize_time(
                    timeutils.parse_isotime(token_data['expires']))
            self._persistence.create_token(token_id, token_data)
        except Exception:
            exc_info = sys.exc_info()
            # an identical token may have been created already.
            # if so, return the token_data as it is also identical
            try:
                self._persistence.get_token(token_id)
            except exception.TokenNotFound:
                # The token truly failed to persist; re-raise the original
                # error with its traceback intact.
                six.reraise(*exc_info)
+
    def validate_token(self, token_id, belongs_to=None):
        """Validate a token of either supported version and return its data.

        :param belongs_to: NOTE(review): presumably a project scope the
            token must match — confirm against _token_belongs_to.
        """
        unique_id = self.unique_id(token_id)
        # NOTE(morganfainberg): Ensure we never use the long-form token_id
        # (PKI) as part of the cache_key.
        token = self._validate_token(unique_id)
        self._token_belongs_to(token, belongs_to)
        self._is_valid_token(token)
        return token
+
    def check_revocation_v2(self, token):
        """Check a v2 token against the revocation events."""
        try:
            # v2 token documents are rooted at the 'access' key.
            token_data = token['access']
        except KeyError:
            raise exception.TokenNotFound(_('Failed to validate token'))

        token_values = self.revoke_api.model.build_token_values_v2(
            token_data, CONF.identity.default_domain_id)
        self.revoke_api.check_token(token_values)
+
    def validate_v2_token(self, token_id, belongs_to=None):
        """Validate a v2 token, fetching it from persistence when the
        provider requires a backend (non-self-contained tokens).
        """
        unique_id = self.unique_id(token_id)
        if self._needs_persistence:
            # NOTE(morganfainberg): Ensure we never use the long-form token_id
            # (PKI) as part of the cache_key.
            token_ref = self._persistence.get_token(unique_id)
        else:
            token_ref = token_id
        token = self._validate_v2_token(token_ref)
        self._token_belongs_to(token, belongs_to)
        self._is_valid_token(token)
        return token
+
def check_revocation_v3(self, token):
    """Check a v3 token against the revocation API.

    :raises keystone.exception.TokenNotFound: if the token is malformed
        (no 'token' section) or has been revoked
    """
    try:
        token_data = token['token']
    except KeyError:
        raise exception.TokenNotFound(_('Failed to validate token'))
    token_values = self.revoke_api.model.build_token_values(token_data)
    self.revoke_api.check_token(token_values)
+
def check_revocation(self, token):
    """Dispatch revocation checking based on the token's version."""
    version = self.driver.get_token_version(token)
    if version == V2:
        return self.check_revocation_v2(token)
    # Anything that is not V2 is treated as a v3 token here.
    return self.check_revocation_v3(token)
+
def validate_v3_token(self, token_id):
    """Validate a v3 token and return its token data.

    :raises keystone.exception.TokenNotFound: if invalid or expired
    """
    unique_id = self.unique_id(token_id)
    # NOTE(lbragstad): Only go to persistent storage if we have a token to
    # fetch from the backend. If the Fernet token provider is being used
    # this step isn't necessary. The Fernet token reference is persisted in
    # the token_id, so in this case set the token_ref as the identifier of
    # the token.
    if not self._needs_persistence:
        token_ref = token_id
    else:
        # NOTE(morganfainberg): Ensure we never use the long-form token_id
        # (PKI) as part of the cache_key.
        token_ref = self._persistence.get_token(unique_id)
    token = self._validate_v3_token(token_ref)
    self._is_valid_token(token)
    return token
+
@MEMOIZE
def _validate_token(self, token_id):
    """Validate a token of unknown version (memoized by token_id).

    :raises keystone.exception.UnsupportedTokenVersionException: if the
        stored token data matches neither V2 nor V3
    """
    if not self._needs_persistence:
        # Non-persistent providers only issue v3-validatable references.
        return self.driver.validate_v3_token(token_id)
    token_ref = self._persistence.get_token(token_id)
    version = self.driver.get_token_version(token_ref)
    if version == self.V3:
        return self.driver.validate_v3_token(token_ref)
    elif version == self.V2:
        return self.driver.validate_v2_token(token_ref)
    raise exception.UnsupportedTokenVersionException()
+
@MEMOIZE
def _validate_v2_token(self, token_id):
    """Memoized v2 validation; cache key is the (short-form) token_id."""
    return self.driver.validate_v2_token(token_id)
+
@MEMOIZE
def _validate_v3_token(self, token_id):
    """Memoized v3 validation; cache key is the (short-form) token_id."""
    return self.driver.validate_v3_token(token_id)
+
def _is_valid_token(self, token):
    """Verify the token is valid format and has not expired.

    :returns: None on success
    :raises keystone.exception.TokenNotFound: if malformed, expired,
        or revoked
    """

    current_time = timeutils.normalize_time(timeutils.utcnow())

    try:
        # Get the data we need from the correct location (V2 and V3 tokens
        # differ in structure, Try V3 first, fall back to V2 second)
        token_data = token.get('token', token.get('access'))
        expires_at = token_data.get('expires_at',
                                    token_data.get('expires'))
        if not expires_at:
            # Fallback for token payloads that nest the expiry one level
            # deeper — NOTE(review): presumably a legacy v2 shape; confirm.
            expires_at = token_data['token']['expires']
        expiry = timeutils.normalize_time(
            timeutils.parse_isotime(expires_at))
    except Exception:
        # Deliberately broad: any failure to locate/parse the expiry means
        # the token is treated as not found rather than crashing.
        LOG.exception(_LE('Unexpected error or malformed token '
                          'determining token expiry: %s'), token)
        raise exception.TokenNotFound(_('Failed to validate token'))

    if current_time < expiry:
        self.check_revocation(token)
        # Token has not expired and has not been revoked.
        return None
    else:
        raise exception.TokenNotFound(_('Failed to validate token'))
+
def _token_belongs_to(self, token, belongs_to):
    """Check if the token belongs to the right tenant.

    This is only used on v2 tokens. The structural validity of the token
    will have already been checked before this method is called.

    """
    if not belongs_to:
        # No scope requirement was supplied; nothing to enforce.
        return
    token_data = token['access']['token']
    if 'tenant' not in token_data:
        raise exception.Unauthorized()
    if token_data['tenant']['id'] != belongs_to:
        raise exception.Unauthorized()
+
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
    """Issue a v2 token via the driver and optionally persist it.

    :returns: (token_id, token_data) tuple from the driver
    """
    token_id, token_data = self.driver.issue_v2_token(
        token_ref, roles_ref, catalog_ref)

    if self._needs_persistence:
        # Shape expected by the persistence backend; 'key' and 'id' are
        # intentionally the same value.
        data = dict(key=token_id,
                    id=token_id,
                    expires=token_data['access']['token']['expires'],
                    user=token_ref['user'],
                    tenant=token_ref['tenant'],
                    metadata=token_ref['metadata'],
                    token_data=token_data,
                    bind=token_ref.get('bind'),
                    trust_id=token_ref['metadata'].get('trust_id'),
                    token_version=self.V2)
        self._create_token(token_id, data)

    return token_id, token_data
+
def issue_v3_token(self, user_id, method_names, expires_at=None,
                   project_id=None, domain_id=None, auth_context=None,
                   trust=None, metadata_ref=None, include_catalog=True,
                   parent_audit_id=None):
    """Issue a v3 token via the driver and optionally persist it.

    :returns: (token_id, token_data) tuple from the driver
    """
    token_id, token_data = self.driver.issue_v3_token(
        user_id, method_names, expires_at, project_id, domain_id,
        auth_context, trust, metadata_ref, include_catalog,
        parent_audit_id)

    if metadata_ref is None:
        metadata_ref = {}

    if 'project' in token_data['token']:
        # project-scoped token, fill in the v2 token data
        # all we care are the role IDs

        # FIXME(gyee): is there really a need to store roles in metadata?
        role_ids = [r['id'] for r in token_data['token']['roles']]
        metadata_ref = {'roles': role_ids}

    if trust:
        metadata_ref.setdefault('trust_id', trust['id'])
        metadata_ref.setdefault('trustee_user_id',
                                trust['trustee_user_id'])

    # Built unconditionally but only written when the provider requires
    # backend persistence (e.g. UUID/PKI, not Fernet).
    data = dict(key=token_id,
                id=token_id,
                expires=token_data['token']['expires_at'],
                user=token_data['token']['user'],
                tenant=token_data['token'].get('project'),
                metadata=metadata_ref,
                token_data=token_data,
                trust_id=trust['id'] if trust else None,
                token_version=self.V3)
    if self._needs_persistence:
        self._create_token(token_id, data)
    return token_id, token_data
+
def invalidate_individual_token_cache(self, token_id):
    """Evict a single token from all three validation memoization caches."""
    # NOTE(morganfainberg): invalidate takes the exact same arguments as
    # the normal method, this means we need to pass "self" in (which gets
    # stripped off).

    # FIXME(morganfainberg): Does this cache actually need to be
    # invalidated? We maintain a cached revocation list, which should be
    # consulted before accepting a token as valid. For now we will
    # do the explicit individual token invalidation.

    self._validate_token.invalidate(self, token_id)
    self._validate_v2_token.invalidate(self, token_id)
    self._validate_v3_token.invalidate(self, token_id)
+
def revoke_token(self, token_id, revoke_chain=False):
    """Revoke a token (and optionally its whole audit chain).

    :param token_id: the token to revoke
    :param revoke_chain: when True, revoke every token sharing this
        token's audit_chain_id instead of just this token
    """
    revoke_by_expires = False
    project_id = None
    domain_id = None

    # Validate first so we revoke based on trusted token data.
    token_ref = token_model.KeystoneToken(
        token_id=token_id,
        token_data=self.validate_token(token_id))

    user_id = token_ref.user_id
    expires_at = token_ref.expires
    audit_id = token_ref.audit_id
    audit_chain_id = token_ref.audit_chain_id
    if token_ref.project_scoped:
        project_id = token_ref.project_id
    if token_ref.domain_scoped:
        domain_id = token_ref.domain_id

    # Tokens lacking the needed audit identifier fall back to revocation
    # by (user, expiry) instead.
    if audit_id is None and not revoke_chain:
        LOG.debug('Received token with no audit_id.')
        revoke_by_expires = True

    if audit_chain_id is None and revoke_chain:
        LOG.debug('Received token with no audit_chain_id.')
        revoke_by_expires = True

    if revoke_by_expires:
        self.revoke_api.revoke_by_expiration(user_id, expires_at,
                                             project_id=project_id,
                                             domain_id=domain_id)
    elif revoke_chain:
        self.revoke_api.revoke_by_audit_chain_id(audit_chain_id,
                                                 project_id=project_id,
                                                 domain_id=domain_id)
    else:
        self.revoke_api.revoke_by_audit_id(audit_id)

    # Persistent backends also drop the stored token when revoke-by-id
    # is enabled.
    if CONF.token.revoke_by_id and self._needs_persistence:
        self._persistence.delete_token(token_id=token_id)
+
def list_revoked_tokens(self):
    """Return the revoked-token list from the persistence backend."""
    return self._persistence.list_revoked_tokens()
+
def _trust_deleted_event_callback(self, service, resource_type, operation,
                                  payload):
    """Notification callback: delete the trustor's tokens for a deleted trust."""
    if CONF.token.revoke_by_id:
        trust_id = payload['resource_info']
        # deleted=True: the trust is already gone; fetch its record anyway
        # to learn the trustor.
        trust = self.trust_api.get_trust(trust_id, deleted=True)
        self._persistence.delete_tokens(user_id=trust['trustor_user_id'],
                                        trust_id=trust_id)
+
def _delete_user_tokens_callback(self, service, resource_type, operation,
                                 payload):
    """Notification callback: delete all persisted tokens for a user."""
    if CONF.token.revoke_by_id:
        user_id = payload['resource_info']
        self._persistence.delete_tokens_for_user(user_id)
+
def _delete_domain_tokens_callback(self, service, resource_type,
                                   operation, payload):
    """Notification callback: delete all persisted tokens for a domain."""
    if CONF.token.revoke_by_id:
        domain_id = payload['resource_info']
        self._persistence.delete_tokens_for_domain(domain_id=domain_id)
+
def _delete_user_project_tokens_callback(self, service, resource_type,
                                         operation, payload):
    """Notification callback: delete a user's tokens scoped to one project."""
    if CONF.token.revoke_by_id:
        user_id = payload['resource_info']['user_id']
        project_id = payload['resource_info']['project_id']
        self._persistence.delete_tokens_for_user(user_id=user_id,
                                                 project_id=project_id)
+
def _delete_project_tokens_callback(self, service, resource_type,
                                    operation, payload):
    """Notification callback: delete project-scoped tokens for every member user."""
    if CONF.token.revoke_by_id:
        project_id = payload['resource_info']
        self._persistence.delete_tokens_for_users(
            self.assignment_api.list_user_ids_for_project(project_id),
            project_id=project_id)
+
def _delete_user_oauth_consumer_tokens_callback(self, service,
                                                resource_type, operation,
                                                payload):
    """Notification callback: delete a user's tokens tied to an OAuth consumer."""
    if CONF.token.revoke_by_id:
        user_id = payload['resource_info']['user_id']
        consumer_id = payload['resource_info']['consumer_id']
        self._persistence.delete_tokens(user_id=user_id,
                                        consumer_id=consumer_id)
+
+
@six.add_metaclass(abc.ABCMeta)
class Provider(object):
    """Interface description for a Token provider."""

    @abc.abstractmethod
    def needs_persistence(self):
        """Determine if the token should be persisted.

        If the token provider requires that the token be persisted to a
        backend this should return True, otherwise return False.

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_token_version(self, token_data):
        """Return the version of the given token data.

        If the given token data is unrecognizable,
        UnsupportedTokenVersionException is raised.

        :param token_data: token_data
        :type token_data: dict
        :returns: token version string
        :raises: keystone.token.provider.UnsupportedTokenVersionException
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
        """Issue a V2 token.

        :param token_ref: token data to generate token from
        :type token_ref: dict
        :param roles_ref: optional roles list
        :type roles_ref: dict
        :param catalog_ref: optional catalog information
        :type catalog_ref: dict
        :returns: (token_id, token_data)
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def issue_v3_token(self, user_id, method_names, expires_at=None,
                       project_id=None, domain_id=None, auth_context=None,
                       trust=None, metadata_ref=None, include_catalog=True,
                       parent_audit_id=None):
        """Issue a V3 Token.

        :param user_id: identity of the user
        :type user_id: string
        :param method_names: names of authentication methods
        :type method_names: list
        :param expires_at: optional time the token will expire
        :type expires_at: string
        :param project_id: optional project identity
        :type project_id: string
        :param domain_id: optional domain identity
        :type domain_id: string
        :param auth_context: optional context from the authorization plugins
        :type auth_context: dict
        :param trust: optional trust reference
        :type trust: dict
        :param metadata_ref: optional metadata reference
        :type metadata_ref: dict
        :param include_catalog: optional, include the catalog in token data
        :type include_catalog: boolean
        :param parent_audit_id: optional, the audit id of the parent token
        :type parent_audit_id: string
        :returns: (token_id, token_data)
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def validate_v2_token(self, token_ref):
        """Validate the given V2 token and return the token data.

        Must raise Unauthorized exception if unable to validate token.

        :param token_ref: the token reference
        :type token_ref: dict
        :returns: token data
        :raises: keystone.exception.TokenNotFound

        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def validate_v3_token(self, token_ref):
        """Validate the given V3 token and return the token_data.

        :param token_ref: the token reference
        :type token_ref: dict
        :returns: token data
        :raises: keystone.exception.TokenNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def _get_token_id(self, token_data):
        """Generate the token_id based upon the data in token_data.

        :param token_data: token information
        :type token_data: dict
        :returns: token identifier
        """
        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/token/providers/__init__.py b/keystone-moon/keystone/token/providers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/__init__.py
diff --git a/keystone-moon/keystone/token/providers/common.py b/keystone-moon/keystone/token/providers/common.py
new file mode 100644
index 00000000..717e1495
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/common.py
@@ -0,0 +1,709 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import six
+from six.moves.urllib import parse
+
+from keystone.common import controller as common_controller
+from keystone.common import dependency
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.openstack.common import versionutils
+from keystone import token
+from keystone.token import provider
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
@dependency.requires('catalog_api', 'resource_api')
class V2TokenDataHelper(object):
    """Creates V2 token data."""

    def v3_to_v2_token(self, token_id, v3_token_data):
        """Convert v3 token data into the v2 'access' response shape."""
        token_data = {}
        # Build v2 token
        v3_token = v3_token_data['token']

        token = {}
        token['id'] = token_id
        token['expires'] = v3_token.get('expires_at')
        token['issued_at'] = v3_token.get('issued_at')
        token['audit_ids'] = v3_token.get('audit_ids')

        if 'project' in v3_token:
            # v3 token_data does not contain all tenant attributes
            tenant = self.resource_api.get_project(
                v3_token['project']['id'])
            token['tenant'] = common_controller.V2Controller.filter_domain_id(
                tenant)
        token_data['token'] = token

        # Build v2 user
        v3_user = v3_token['user']
        user = common_controller.V2Controller.v3_to_v2_user(v3_user)

        # Set user roles
        user['roles'] = []
        role_ids = []
        for role in v3_token.get('roles', []):
            # Filter role id since it's not included in v2 token response
            role_ids.append(role.pop('id'))
            user['roles'].append(role)
        user['roles_links'] = []

        token_data['user'] = user

        # Get and build v2 service catalog
        token_data['serviceCatalog'] = []
        if 'tenant' in token:
            catalog_ref = self.catalog_api.get_catalog(
                user['id'], token['tenant']['id'])
            if catalog_ref:
                token_data['serviceCatalog'] = self.format_catalog(catalog_ref)

        # Build v2 metadata
        metadata = {}
        metadata['roles'] = role_ids
        # Setting is_admin to keep consistency in v2 response
        metadata['is_admin'] = 0
        token_data['metadata'] = metadata

        return {'access': token_data}

    @classmethod
    def format_token(cls, token_ref, roles_ref=None, catalog_ref=None,
                     trust_ref=None):
        """Build a v2 'access' document from a stored token reference.

        :param token_ref: persisted token record (user, metadata, etc.)
        :param roles_ref: optional list of role dicts for the user
        :param catalog_ref: optional internal-format service catalog
        :param trust_ref: optional trust record; included when trusts
            are enabled
        :returns: dict with a single 'access' key
        """
        audit_info = None
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        if roles_ref is None:
            roles_ref = []
        expires = token_ref.get('expires', provider.default_expire_time())
        if expires is not None:
            if not isinstance(expires, six.text_type):
                expires = timeutils.isotime(expires)

        token_data = token_ref.get('token_data')
        if token_data:
            # Reuse audit IDs already embedded in the stored token data
            # (works for both v2 'access' and v3 shapes).
            token_audit = token_data.get(
                'access', token_data).get('token', {}).get('audit_ids')
            audit_info = token_audit

        if audit_info is None:
            audit_info = provider.audit_info(token_ref.get('parent_audit_id'))

        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  'issued_at': timeutils.strtime(),
                                  'audit_ids': audit_info
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get('roles_links',
                                                                 [])
                                 }
                        }
             }
        if 'bind' in token_ref:
            o['access']['token']['bind'] = token_ref['bind']
        if 'tenant' in token_ref and token_ref['tenant']:
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
                catalog_ref)
        if metadata_ref:
            if 'is_admin' in metadata_ref:
                o['access']['metadata'] = {'is_admin':
                                           metadata_ref['is_admin']}
            else:
                o['access']['metadata'] = {'is_admin': 0}
        if 'roles' in metadata_ref:
            o['access']['metadata']['roles'] = metadata_ref['roles']
        if CONF.trust.enabled and trust_ref:
            o['access']['trust'] = {'trustee_user_id':
                                    trust_ref['trustee_user_id'],
                                    'id': trust_ref['id'],
                                    'trustor_user_id':
                                    trust_ref['trustor_user_id'],
                                    'impersonation':
                                    trust_ref['impersonation']
                                    }
        return o

    @classmethod
    def format_catalog(cls, catalog_ref):
        """Munge catalogs from internal to output format.

        Internal catalogs look like::

          {$REGION: {
              {$SERVICE: {
                  $key1: $value1,
                  ...
                  }
              }
          }

        The legacy api wants them to look like::

          [{'name': $SERVICE[name],
            'type': $SERVICE,
            'endpoints': [{
                'tenantId': $tenant_id,
                ...
                'region': $REGION,
                }],
            'endpoints_links': [],
           }]

        """
        if not catalog_ref:
            return []

        services = {}
        for region, region_ref in six.iteritems(catalog_ref):
            for service, service_ref in six.iteritems(region_ref):
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                # The region is folded into each endpoint entry.
                service_ref['region'] = region

                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)

                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref

        return services.values()
+
+
@dependency.requires('assignment_api', 'catalog_api', 'federation_api',
                     'identity_api', 'resource_api', 'role_api', 'trust_api')
class V3TokenDataHelper(object):
    """Token data helper."""
    def __init__(self):
        # Keep __init__ around to ensure dependency injection works.
        super(V3TokenDataHelper, self).__init__()

    def _get_filtered_domain(self, domain_id):
        """Return only the id/name of a domain."""
        domain_ref = self.resource_api.get_domain(domain_id)
        return {'id': domain_ref['id'], 'name': domain_ref['name']}

    def _get_filtered_project(self, project_id):
        """Return id/name of a project plus its filtered domain."""
        project_ref = self.resource_api.get_project(project_id)
        filtered_project = {
            'id': project_ref['id'],
            'name': project_ref['name']}
        filtered_project['domain'] = self._get_filtered_domain(
            project_ref['domain_id'])
        return filtered_project

    def _populate_scope(self, token_data, domain_id, project_id):
        """Add domain/project scope to token_data if not already present."""
        if 'domain' in token_data or 'project' in token_data:
            # scope already exist, no need to populate it again
            return

        if domain_id:
            token_data['domain'] = self._get_filtered_domain(domain_id)
        if project_id:
            token_data['project'] = self._get_filtered_project(project_id)

    def _get_roles_for_user(self, user_id, domain_id, project_id):
        """Return full role dicts for a user's domain or project scope.

        When both IDs are given, project roles win (the project lookup
        overwrites the domain lookup).
        """
        roles = []
        if domain_id:
            roles = self.assignment_api.get_roles_for_user_and_domain(
                user_id, domain_id)
        if project_id:
            roles = self.assignment_api.get_roles_for_user_and_project(
                user_id, project_id)
        return [self.role_api.get_role(role_id) for role_id in roles]

    def _populate_roles_for_groups(self, group_ids,
                                   project_id=None, domain_id=None,
                                   user_id=None):
        """Return roles granted to the given groups on the given scope.

        :raises keystone.exception.Unauthorized: if the groups carry no
            roles on the requested scope
        """
        def _check_roles(roles, user_id, project_id, domain_id):
            # User was granted roles so simply exit this function.
            if roles:
                return
            if project_id:
                msg = _('User %(user_id)s has no access '
                        'to project %(project_id)s') % {
                            'user_id': user_id,
                            'project_id': project_id}
            elif domain_id:
                msg = _('User %(user_id)s has no access '
                        'to domain %(domain_id)s') % {
                            'user_id': user_id,
                            'domain_id': domain_id}
            # Since no roles were found a user is not authorized to
            # perform any operations. Raise an exception with
            # appropriate error message.
            raise exception.Unauthorized(msg)

        roles = self.assignment_api.get_roles_for_groups(group_ids,
                                                         project_id,
                                                         domain_id)
        _check_roles(roles, user_id, project_id, domain_id)
        return roles

    def _populate_user(self, token_data, user_id, trust):
        """Add the (possibly impersonated) user section to token_data.

        :raises keystone.exception.Forbidden: if the trustor is disabled
        """
        if 'user' in token_data:
            # no need to repopulate user if it already exists
            return

        user_ref = self.identity_api.get_user(user_id)
        if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data:
            trustor_user_ref = (self.identity_api.get_user(
                trust['trustor_user_id']))
            try:
                self.identity_api.assert_user_enabled(trust['trustor_user_id'])
            except AssertionError:
                raise exception.Forbidden(_('Trustor is disabled.'))
            if trust['impersonation']:
                # Impersonation: the token presents the trustor as the user.
                user_ref = trustor_user_ref
            token_data['OS-TRUST:trust'] = (
                {
                    'id': trust['id'],
                    'trustor_user': {'id': trust['trustor_user_id']},
                    'trustee_user': {'id': trust['trustee_user_id']},
                    'impersonation': trust['impersonation']
                })
        filtered_user = {
            'id': user_ref['id'],
            'name': user_ref['name'],
            'domain': self._get_filtered_domain(user_ref['domain_id'])}
        token_data['user'] = filtered_user

    def _populate_oauth_section(self, token_data, access_token):
        """Add the OS-OAUTH1 section when an OAuth access token is present."""
        if access_token:
            access_token_id = access_token['id']
            consumer_id = access_token['consumer_id']
            token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id,
                                        'consumer_id': consumer_id})

    def _populate_roles(self, token_data, user_id, domain_id, project_id,
                        trust, access_token):
        """Add the effective role list, honoring OAuth and trust delegation.

        :raises keystone.exception.Forbidden: if a trust role was not
            actually held by the trustor
        :raises keystone.exception.Unauthorized: if no roles apply to the
            requested scope
        """
        if 'roles' in token_data:
            # no need to repopulate roles
            return

        if access_token:
            # OAuth: only roles that were delegated to the access token.
            filtered_roles = []
            authed_role_ids = jsonutils.loads(access_token['role_ids'])
            all_roles = self.role_api.list_roles()
            for role in all_roles:
                for authed_role in authed_role_ids:
                    if authed_role == role['id']:
                        filtered_roles.append({'id': role['id'],
                                               'name': role['name']})
            token_data['roles'] = filtered_roles
            return

        if CONF.trust.enabled and trust:
            token_user_id = trust['trustor_user_id']
            token_project_id = trust['project_id']
            # trusts do not support domains yet
            token_domain_id = None
        else:
            token_user_id = user_id
            token_project_id = project_id
            token_domain_id = domain_id

        if token_domain_id or token_project_id:
            roles = self._get_roles_for_user(token_user_id,
                                             token_domain_id,
                                             token_project_id)
            filtered_roles = []
            if CONF.trust.enabled and trust:
                # Keep only the delegated roles the trustor still holds.
                for trust_role in trust['roles']:
                    match_roles = [x for x in roles
                                   if x['id'] == trust_role['id']]
                    if match_roles:
                        filtered_roles.append(match_roles[0])
                    else:
                        raise exception.Forbidden(
                            _('Trustee has no delegated roles.'))
            else:
                for role in roles:
                    filtered_roles.append({'id': role['id'],
                                           'name': role['name']})

            # user has no project or domain roles, therefore access denied
            if not filtered_roles:
                if token_project_id:
                    msg = _('User %(user_id)s has no access '
                            'to project %(project_id)s') % {
                                'user_id': user_id,
                                'project_id': token_project_id}
                else:
                    msg = _('User %(user_id)s has no access '
                            'to domain %(domain_id)s') % {
                                'user_id': user_id,
                                'domain_id': token_domain_id}
                LOG.debug(msg)
                raise exception.Unauthorized(msg)

            token_data['roles'] = filtered_roles

    def _populate_service_catalog(self, token_data, user_id,
                                  domain_id, project_id, trust):
        """Add the service catalog for scoped tokens."""
        if 'catalog' in token_data:
            # no need to repopulate service catalog
            return

        if CONF.trust.enabled and trust:
            user_id = trust['trustor_user_id']
        if project_id or domain_id:
            service_catalog = self.catalog_api.get_v3_catalog(
                user_id, project_id)
            # TODO(ayoung): Enforce Endpoints for trust
            token_data['catalog'] = service_catalog

    def _populate_service_providers(self, token_data):
        """Add enabled federation service providers, if any."""
        if 'service_providers' in token_data:
            return

        service_providers = self.federation_api.get_enabled_service_providers()
        if service_providers:
            token_data['service_providers'] = service_providers

    def _populate_token_dates(self, token_data, expires=None, trust=None,
                              issued_at=None):
        """Set expires_at / issued_at, defaulting and ISO-formatting them."""
        if not expires:
            expires = provider.default_expire_time()
        if not isinstance(expires, six.string_types):
            expires = timeutils.isotime(expires, subsecond=True)
        token_data['expires_at'] = expires
        token_data['issued_at'] = (issued_at or
                                   timeutils.isotime(subsecond=True))

    def _populate_audit_info(self, token_data, audit_info=None):
        """Set audit_ids from a parent audit id (str/None) or a ready list.

        :raises keystone.exception.UnexpectedError: for any other type
        """
        if audit_info is None or isinstance(audit_info, six.string_types):
            token_data['audit_ids'] = provider.audit_info(audit_info)
        elif isinstance(audit_info, list):
            token_data['audit_ids'] = audit_info
        else:
            msg = (_('Invalid audit info data type: %(data)s (%(type)s)') %
                   {'data': audit_info, 'type': type(audit_info)})
            LOG.error(msg)
            raise exception.UnexpectedError(msg)

    def get_token_data(self, user_id, method_names, extras=None,
                       domain_id=None, project_id=None, expires=None,
                       trust=None, token=None, include_catalog=True,
                       bind=None, access_token=None, issued_at=None,
                       audit_info=None):
        """Assemble the full v3 token body as {'token': {...}}.

        Sections already present in the optional ``token`` argument are
        carried over rather than recomputed.
        """
        if extras is None:
            extras = {}
        if extras:
            versionutils.deprecated(
                what='passing token data with "extras"',
                as_of=versionutils.deprecated.KILO,
                in_favor_of='well-defined APIs')
        token_data = {'methods': method_names,
                      'extras': extras}

        # We've probably already written these to the token
        if token:
            for x in ('roles', 'user', 'catalog', 'project', 'domain'):
                if x in token:
                    token_data[x] = token[x]

        if CONF.trust.enabled and trust:
            if user_id != trust['trustee_user_id']:
                raise exception.Forbidden(_('User is not a trustee.'))

        if bind:
            token_data['bind'] = bind

        self._populate_scope(token_data, domain_id, project_id)
        self._populate_user(token_data, user_id, trust)
        self._populate_roles(token_data, user_id, domain_id, project_id, trust,
                             access_token)
        self._populate_audit_info(token_data, audit_info)

        if include_catalog:
            self._populate_service_catalog(token_data, user_id, domain_id,
                                           project_id, trust)
        self._populate_service_providers(token_data)
        self._populate_token_dates(token_data, expires=expires, trust=trust,
                                   issued_at=issued_at)
        self._populate_oauth_section(token_data, access_token)
        return {'token': token_data}
+
+
+@dependency.requires('catalog_api', 'identity_api', 'oauth_api',
+ 'resource_api', 'role_api', 'trust_api')
+class BaseProvider(provider.Provider):
def __init__(self, *args, **kwargs):
    """Initialize the provider and its v2/v3 token-data helpers."""
    super(BaseProvider, self).__init__(*args, **kwargs)
    self.v3_token_data_helper = V3TokenDataHelper()
    self.v2_token_data_helper = V2TokenDataHelper()
+
def get_token_version(self, token_data):
    """Return the token version (V2/V3) of the given token data dict.

    :raises keystone.exception.UnsupportedTokenVersionException: if the
        data matches neither version
    """
    if token_data and isinstance(token_data, dict):
        if 'token_version' in token_data:
            if token_data['token_version'] in token.provider.VERSIONS:
                return token_data['token_version']
        # FIXME(morganfainberg): deprecate the following logic in future
        # revisions. It is better to just specify the token_version in
        # the token_data itself. This way we can support future versions
        # that might have the same fields.
        if 'access' in token_data:
            return token.provider.V2
        if 'token' in token_data and 'methods' in token_data['token']:
            return token.provider.V3
    raise exception.UnsupportedTokenVersionException()
+
def issue_v2_token(self, token_ref, roles_ref=None,
                   catalog_ref=None):
    """Format v2 token data and stamp it with a freshly generated ID."""
    metadata_ref = token_ref['metadata']
    trust_ref = None
    if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref:
        trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])

    token_data = self.v2_token_data_helper.format_token(
        token_ref, roles_ref, catalog_ref, trust_ref)
    # The ID depends on the token data (e.g. PKI signing), so it is
    # generated after formatting and then written back into the body.
    token_id = self._get_token_id(token_data)
    token_data['access']['token']['id'] = token_id
    return token_id, token_data
+
def _is_mapped_token(self, auth_context):
    """Return True when the auth context carries federation mapping keys."""
    required_keys = (federation.IDENTITY_PROVIDER, federation.PROTOCOL)
    return all(key in auth_context for key in required_keys)
+
def issue_v3_token(self, user_id, method_names, expires_at=None,
                   project_id=None, domain_id=None, auth_context=None,
                   trust=None, metadata_ref=None, include_catalog=True,
                   parent_audit_id=None):
    """Build v3 token data (handling trusts, federation, OAuth) and an ID."""
    # for V2, trust is stashed in metadata_ref
    if (CONF.trust.enabled and not trust and metadata_ref and
            'trust_id' in metadata_ref):
        trust = self.trust_api.get_trust(metadata_ref['trust_id'])

    token_ref = None
    if auth_context and self._is_mapped_token(auth_context):
        # Federated authentication: pre-build user/role sections from the
        # mapping result.
        token_ref = self._handle_mapped_tokens(
            auth_context, project_id, domain_id)

    access_token = None
    if 'oauth1' in method_names:
        access_token_id = auth_context['access_token_id']
        access_token = self.oauth_api.get_access_token(access_token_id)

    token_data = self.v3_token_data_helper.get_token_data(
        user_id,
        method_names,
        auth_context.get('extras') if auth_context else None,
        domain_id=domain_id,
        project_id=project_id,
        expires=expires_at,
        trust=trust,
        bind=auth_context.get('bind') if auth_context else None,
        token=token_ref,
        include_catalog=include_catalog,
        access_token=access_token,
        audit_info=parent_audit_id)

    token_id = self._get_token_id(token_data)
    return token_id, token_data
+
def _handle_mapped_tokens(self, auth_context, project_id, domain_id):
    """Build the user (and roles/groups) sections for a federated token."""
    def get_federated_domain():
        # Fall back to the well-known keyword when no domain name is
        # configured for federated users.
        return (CONF.federation.federated_domain_name or
                federation.FEDERATED_DOMAIN_KEYWORD)

    federated_domain = get_federated_domain()
    user_id = auth_context['user_id']
    group_ids = auth_context['group_ids']
    idp = auth_context[federation.IDENTITY_PROVIDER]
    protocol = auth_context[federation.PROTOCOL]
    token_data = {
        'user': {
            'id': user_id,
            # user_id is URL-quoted for federated users; unquote for the
            # display name.
            'name': parse.unquote(user_id),
            federation.FEDERATION: {
                'identity_provider': {'id': idp},
                'protocol': {'id': protocol}
            },
            'domain': {
                'id': federated_domain,
                'name': federated_domain
            }
        }
    }

    if project_id or domain_id:
        # Scoped token: resolve the groups' roles on the scope.
        roles = self.v3_token_data_helper._populate_roles_for_groups(
            group_ids, project_id, domain_id, user_id)
        token_data.update({'roles': roles})
    else:
        # Unscoped token: just record group membership.
        token_data['user'][federation.FEDERATION].update({
            'groups': [{'id': x} for x in group_ids]
        })
    return token_data
+
def _verify_token_ref(self, token_ref):
    """Verify and return the given token_ref."""
    if token_ref:
        return token_ref
    # A falsy reference (None, empty dict) means the token is unusable.
    raise exception.Unauthorized()
+
def _assert_is_not_federation_token(self, token_ref):
    """Make sure we aren't using v2 auth on a federation token.

    :raises keystone.exception.Unauthorized: if the token is a v3
        federation token
    """
    token_data = token_ref.get('token_data')
    if (token_data and self.get_token_version(token_data) ==
            token.provider.V3):
        if 'OS-FEDERATION' in token_data['token']['user']:
            msg = _('Attempting to use OS-FEDERATION token with V2 '
                    'Identity Service, use V3 Authentication')
            raise exception.Unauthorized(msg)
+
def _assert_default_domain(self, token_ref):
    """Make sure we are operating on default domain only.

    Applies only when validating a v3-issued token through the v2 API;
    v2 has no concept of non-default domains.

    :raises keystone.exception.Unauthorized: if the token involves a
        non-default domain, domain scoping, or a trust whose parties or
        project are outside the default domain
    """
    if (token_ref.get('token_data') and
            self.get_token_version(token_ref.get('token_data')) ==
            token.provider.V3):
        # this is a V3 token
        msg = _('Non-default domain is not supported')
        # user in a non-default is prohibited
        if (token_ref['token_data']['token']['user']['domain']['id'] !=
                CONF.identity.default_domain_id):
            raise exception.Unauthorized(msg)
        # domain scoping is prohibited
        if token_ref['token_data']['token'].get('domain'):
            raise exception.Unauthorized(
                _('Domain scoped token is not supported'))
        # project in non-default domain is prohibited
        if token_ref['token_data']['token'].get('project'):
            project = token_ref['token_data']['token']['project']
            project_domain_id = project['domain']['id']
            # scoped to project in non-default domain is prohibited
            if project_domain_id != CONF.identity.default_domain_id:
                raise exception.Unauthorized(msg)
        # if token is scoped to trust, both trustor and trustee must
        # be in the default domain. Furthermore, the delegated project
        # must also be in the default domain
        metadata_ref = token_ref['metadata']
        if CONF.trust.enabled and 'trust_id' in metadata_ref:
            trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
            trustee_user_ref = self.identity_api.get_user(
                trust_ref['trustee_user_id'])
            if (trustee_user_ref['domain_id'] !=
                    CONF.identity.default_domain_id):
                raise exception.Unauthorized(msg)
            trustor_user_ref = self.identity_api.get_user(
                trust_ref['trustor_user_id'])
            if (trustor_user_ref['domain_id'] !=
                    CONF.identity.default_domain_id):
                raise exception.Unauthorized(msg)
            project_ref = self.resource_api.get_project(
                trust_ref['project_id'])
            if (project_ref['domain_id'] !=
                    CONF.identity.default_domain_id):
                raise exception.Unauthorized(msg)
+
+ def validate_v2_token(self, token_ref):
+ try:
+ self._assert_is_not_federation_token(token_ref)
+ self._assert_default_domain(token_ref)
+ # FIXME(gyee): performance or correctness? Should we return the
+ # cached token or reconstruct it? Obviously if we are going with
+ # the cached token, any role, project, or domain name changes
+ # will not be reflected. One may argue that with PKI tokens,
+ # we are essentially doing cached token validation anyway.
+ # Lets go with the cached token strategy. Since token
+ # management layer is now pluggable, one can always provide
+ # their own implementation to suit their needs.
+ token_data = token_ref.get('token_data')
+ if (not token_data or
+ self.get_token_version(token_data) !=
+ token.provider.V2):
+ # token is created by old v2 logic
+ metadata_ref = token_ref['metadata']
+ roles_ref = []
+ for role_id in metadata_ref.get('roles', []):
+ roles_ref.append(self.role_api.get_role(role_id))
+
+ # Get a service catalog if possible
+ # This is needed for on-behalf-of requests
+ catalog_ref = None
+ if token_ref.get('tenant'):
+ catalog_ref = self.catalog_api.get_catalog(
+ token_ref['user']['id'],
+ token_ref['tenant']['id'])
+
+ trust_ref = None
+ if CONF.trust.enabled and 'trust_id' in metadata_ref:
+ trust_ref = self.trust_api.get_trust(
+ metadata_ref['trust_id'])
+
+ token_data = self.v2_token_data_helper.format_token(
+ token_ref, roles_ref, catalog_ref, trust_ref)
+
+ trust_id = token_data['access'].get('trust', {}).get('id')
+ if trust_id:
+ # token trust validation
+ self.trust_api.get_trust(trust_id)
+
+ return token_data
+ except exception.ValidationError as e:
+ LOG.exception(_LE('Failed to validate token'))
+ raise exception.TokenNotFound(e)
+
+ def validate_v3_token(self, token_ref):
+ # FIXME(gyee): performance or correctness? Should we return the
+ # cached token or reconstruct it? Obviously if we are going with
+ # the cached token, any role, project, or domain name changes
+ # will not be reflected. One may argue that with PKI tokens,
+ # we are essentially doing cached token validation anyway.
+ # Lets go with the cached token strategy. Since token
+ # management layer is now pluggable, one can always provide
+ # their own implementation to suit their needs.
+
+ trust_id = token_ref.get('trust_id')
+ if trust_id:
+ # token trust validation
+ self.trust_api.get_trust(trust_id)
+
+ token_data = token_ref.get('token_data')
+ if not token_data or 'token' not in token_data:
+ # token ref is created by V2 API
+ project_id = None
+ project_ref = token_ref.get('tenant')
+ if project_ref:
+ project_id = project_ref['id']
+
+ issued_at = token_ref['token_data']['access']['token']['issued_at']
+ audit = token_ref['token_data']['access']['token'].get('audit_ids')
+
+ token_data = self.v3_token_data_helper.get_token_data(
+ token_ref['user']['id'],
+ ['password', 'token'],
+ project_id=project_id,
+ bind=token_ref.get('bind'),
+ expires=token_ref['expires'],
+ issued_at=issued_at,
+ audit_info=audit)
+ return token_data
diff --git a/keystone-moon/keystone/token/providers/fernet/__init__.py b/keystone-moon/keystone/token/providers/fernet/__init__.py
new file mode 100644
index 00000000..953ef624
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/fernet/__init__.py
@@ -0,0 +1,13 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token.providers.fernet.core import * # noqa
diff --git a/keystone-moon/keystone/token/providers/fernet/core.py b/keystone-moon/keystone/token/providers/fernet/core.py
new file mode 100644
index 00000000..b1da263b
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/fernet/core.py
@@ -0,0 +1,267 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import dependency
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _
+from keystone.token import provider
+from keystone.token.providers import common
+from keystone.token.providers.fernet import token_formatters as tf
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('trust_api')
+class Provider(common.BaseProvider):
+ def __init__(self, *args, **kwargs):
+ super(Provider, self).__init__(*args, **kwargs)
+
+ self.token_formatter = tf.TokenFormatter()
+
+ def needs_persistence(self):
+ """Should the token be written to a backend."""
+ return False
+
+ def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
+ """Issue a V2 formatted token.
+
+ :param token_ref: reference describing the token
+ :param roles_ref: reference describing the roles for the token
+ :param catalog_ref: reference describing the token's catalog
+ :returns: tuple containing the ID of the token and the token data
+
+ """
+ # TODO(lbragstad): Currently, Fernet tokens don't support bind in the
+ # token format. Raise a 501 if we're dealing with bind.
+ if token_ref.get('bind'):
+ raise exception.NotImplemented()
+
+ user_id = token_ref['user']['id']
+ # Default to password since methods not provided by token_ref
+ method_names = ['password']
+ project_id = None
+ # Verify that tenant is not None in token_ref
+ if token_ref.get('tenant'):
+ project_id = token_ref['tenant']['id']
+
+ parent_audit_id = token_ref.get('parent_audit_id')
+ # If parent_audit_id is defined then a token authentication was made
+ if parent_audit_id:
+ method_names.append('token')
+
+ audit_ids = provider.audit_info(parent_audit_id)
+
+ # Get v3 token data and exclude building v3 specific catalog. This is
+ # due to the fact that the V2TokenDataHelper.format_token() method
+ # doesn't build any of the token_reference from other Keystone APIs.
+ # Instead, it builds it from what is persisted in the token reference.
+ # Here we are going to leverage the V3TokenDataHelper.get_token_data()
+ # method written for V3 because it goes through and populates the token
+ # reference dynamically. Once we have a V3 token reference, we can
+ # attempt to convert it to a V2 token response.
+ v3_token_data = self.v3_token_data_helper.get_token_data(
+ user_id,
+ method_names,
+ project_id=project_id,
+ token=token_ref,
+ include_catalog=False,
+ audit_info=audit_ids)
+
+ expires_at = v3_token_data['token']['expires_at']
+ token_id = self.token_formatter.create_token(user_id, expires_at,
+ audit_ids,
+ methods=method_names,
+ project_id=project_id)
+ # Convert v3 to v2 token data and build v2 catalog
+ token_data = self.v2_token_data_helper.v3_to_v2_token(token_id,
+ v3_token_data)
+
+ return token_id, token_data
+
+ def _build_federated_info(self, token_data):
+ """Extract everything needed for federated tokens.
+
+ This dictionary is passed to the FederatedPayload token formatter,
+ which unpacks the values and builds the Fernet token.
+
+ """
+ group_ids = token_data.get('user', {}).get(
+ federation.FEDERATION, {}).get('groups')
+ idp_id = token_data.get('user', {}).get(
+ federation.FEDERATION, {}).get('identity_provider', {}).get('id')
+ protocol_id = token_data.get('user', {}).get(
+ federation.FEDERATION, {}).get('protocol', {}).get('id')
+ if not group_ids:
+ group_ids = list()
+ federated_dict = dict(group_ids=group_ids, idp_id=idp_id,
+ protocol_id=protocol_id)
+ return federated_dict
+
+ def _rebuild_federated_info(self, federated_dict, user_id):
+ """Format federated information into the token reference.
+
+ The federated_dict is passed back from the FederatedPayload token
+ formatter. The responsibility of this method is to format the
+ information passed back from the token formatter into the token
+ reference before constructing the token data from the
+ V3TokenDataHelper.
+
+ """
+ g_ids = federated_dict['group_ids']
+ idp_id = federated_dict['idp_id']
+ protocol_id = federated_dict['protocol_id']
+ federated_info = dict(groups=g_ids,
+ identity_provider=dict(id=idp_id),
+ protocol=dict(id=protocol_id))
+ token_dict = {'user': {federation.FEDERATION: federated_info}}
+ token_dict['user']['id'] = user_id
+ token_dict['user']['name'] = user_id
+ return token_dict
+
+ def issue_v3_token(self, user_id, method_names, expires_at=None,
+ project_id=None, domain_id=None, auth_context=None,
+ trust=None, metadata_ref=None, include_catalog=True,
+ parent_audit_id=None):
+ """Issue a V3 formatted token.
+
+ Here is where we need to detect what is given to us, and what kind of
+ token the user is expecting. Depending on the outcome of that, we can
+ pass all the information to be packed to the proper token format
+ handler.
+
+ :param user_id: ID of the user
+ :param method_names: method of authentication
+ :param expires_at: token expiration time
+ :param project_id: ID of the project being scoped to
+ :param domain_id: ID of the domain being scoped to
+ :param auth_context: authentication context
+ :param trust: ID of the trust
+ :param metadata_ref: metadata reference
+ :param include_catalog: return the catalog in the response if True,
+ otherwise don't return the catalog
+        :param parent_audit_id: ID of the parent audit entity
+ :returns: tuple containing the id of the token and the token data
+
+ """
+ # TODO(lbragstad): Currently, Fernet tokens don't support bind in the
+ # token format. Raise a 501 if we're dealing with bind.
+ if auth_context.get('bind'):
+ raise exception.NotImplemented()
+
+ token_ref = None
+ # NOTE(lbragstad): This determines if we are dealing with a federated
+ # token or not. The groups for the user will be in the returned token
+ # reference.
+ federated_dict = None
+ if auth_context and self._is_mapped_token(auth_context):
+ token_ref = self._handle_mapped_tokens(
+ auth_context, project_id, domain_id)
+ federated_dict = self._build_federated_info(token_ref)
+
+ token_data = self.v3_token_data_helper.get_token_data(
+ user_id,
+ method_names,
+ auth_context.get('extras') if auth_context else None,
+ domain_id=domain_id,
+ project_id=project_id,
+ expires=expires_at,
+ trust=trust,
+ bind=auth_context.get('bind') if auth_context else None,
+ token=token_ref,
+ include_catalog=include_catalog,
+ audit_info=parent_audit_id)
+
+ token = self.token_formatter.create_token(
+ user_id,
+ token_data['token']['expires_at'],
+ token_data['token']['audit_ids'],
+ methods=method_names,
+ domain_id=domain_id,
+ project_id=project_id,
+ trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
+ federated_info=federated_dict)
+ return token, token_data
+
+ def validate_v2_token(self, token_ref):
+ """Validate a V2 formatted token.
+
+ :param token_ref: reference describing the token to validate
+ :returns: the token data
+ :raises keystone.exception.Unauthorized: if v3 token is used
+
+ """
+ (user_id, methods,
+ audit_ids, domain_id,
+ project_id, trust_id,
+ federated_info, created_at,
+ expires_at) = self.token_formatter.validate_token(token_ref)
+
+ if trust_id or domain_id or federated_info:
+ msg = _('This is not a v2.0 Fernet token. Use v3 for trust, '
+ 'domain, or federated tokens.')
+ raise exception.Unauthorized(msg)
+
+ v3_token_data = self.v3_token_data_helper.get_token_data(
+ user_id,
+ methods,
+ project_id=project_id,
+ expires=expires_at,
+ issued_at=created_at,
+ token=token_ref,
+ include_catalog=False,
+ audit_info=audit_ids)
+ return self.v2_token_data_helper.v3_to_v2_token(token_ref,
+ v3_token_data)
+
+ def validate_v3_token(self, token):
+ """Validate a V3 formatted token.
+
+ :param token: a string describing the token to validate
+ :returns: the token data
+ :raises keystone.exception.Unauthorized: if token format version isn't
+ supported
+
+ """
+ (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+ federated_info, created_at, expires_at) = (
+ self.token_formatter.validate_token(token))
+
+ token_dict = None
+ if federated_info:
+ token_dict = self._rebuild_federated_info(federated_info, user_id)
+ trust_ref = self.trust_api.get_trust(trust_id)
+
+ return self.v3_token_data_helper.get_token_data(
+ user_id,
+ method_names=methods,
+ domain_id=domain_id,
+ project_id=project_id,
+ issued_at=created_at,
+ expires=expires_at,
+ trust=trust_ref,
+ token=token_dict,
+ audit_info=audit_ids)
+
+ def _get_token_id(self, token_data):
+ """Generate the token_id based upon the data in token_data.
+
+ :param token_data: token information
+ :type token_data: dict
+ :raises keystone.exception.NotImplemented: when called
+ """
+ raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/token/providers/fernet/token_formatters.py b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
new file mode 100644
index 00000000..50960923
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
@@ -0,0 +1,545 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import datetime
+import struct
+import uuid
+
+from cryptography import fernet
+import msgpack
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+from six.moves import urllib
+
+from keystone.auth import plugins as auth_plugins
+from keystone import exception
+from keystone.i18n import _
+from keystone.token import provider
+from keystone.token.providers.fernet import utils
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+# Fernet byte indexes as computed by pypi/keyless_fernet and defined in
+# https://github.com/fernet/spec
+TIMESTAMP_START = 1
+TIMESTAMP_END = 9
+
+
+class TokenFormatter(object):
+ """Packs and unpacks payloads into tokens for transport."""
+
+ @property
+ def crypto(self):
+ """Return a cryptography instance.
+
+ You can extend this class with a custom crypto @property to provide
+ your own token encoding / decoding. For example, using a different
+ cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
+ security requirements.
+
+ This @property just needs to return an object that implements
+ ``encrypt(plaintext)`` and ``decrypt(ciphertext)``.
+
+ """
+ keys = utils.load_keys()
+
+ if not keys:
+ raise exception.KeysNotFound()
+
+ fernet_instances = [fernet.Fernet(key) for key in utils.load_keys()]
+ return fernet.MultiFernet(fernet_instances)
+
+ def pack(self, payload):
+ """Pack a payload for transport as a token."""
+ # base64 padding (if any) is not URL-safe
+ return urllib.parse.quote(self.crypto.encrypt(payload))
+
+ def unpack(self, token):
+ """Unpack a token, and validate the payload."""
+ token = urllib.parse.unquote(six.binary_type(token))
+
+ try:
+ return self.crypto.decrypt(token)
+ except fernet.InvalidToken as e:
+ raise exception.Unauthorized(six.text_type(e))
+
+ @classmethod
+ def creation_time(cls, fernet_token):
+ """Returns the creation time of a valid Fernet token."""
+ # tokens may be transmitted as Unicode, but they're just ASCII
+ # (pypi/cryptography will refuse to operate on Unicode input)
+ fernet_token = six.binary_type(fernet_token)
+
+ # the base64 padding on fernet tokens is made URL-safe
+ fernet_token = urllib.parse.unquote(fernet_token)
+
+ # fernet tokens are base64 encoded and the padding made URL-safe
+ token_bytes = base64.urlsafe_b64decode(fernet_token)
+
+ # slice into the byte array to get just the timestamp
+ timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END]
+
+ # convert those bytes to an integer
+ # (it's a 64-bit "unsigned long long int" in C)
+ timestamp_int = struct.unpack(">Q", timestamp_bytes)[0]
+
+ # and with an integer, it's trivial to produce a datetime object
+ created_at = datetime.datetime.utcfromtimestamp(timestamp_int)
+
+ return created_at
+
+ def create_token(self, user_id, expires_at, audit_ids, methods=None,
+ domain_id=None, project_id=None, trust_id=None,
+ federated_info=None):
+ """Given a set of payload attributes, generate a Fernet token."""
+ if trust_id:
+ version = TrustScopedPayload.version
+ payload = TrustScopedPayload.assemble(
+ user_id,
+ methods,
+ project_id,
+ expires_at,
+ audit_ids,
+ trust_id)
+ elif federated_info:
+ version = FederatedPayload.version
+ payload = FederatedPayload.assemble(
+ user_id,
+ methods,
+ expires_at,
+ audit_ids,
+ federated_info)
+ elif project_id:
+ version = ProjectScopedPayload.version
+ payload = ProjectScopedPayload.assemble(
+ user_id,
+ methods,
+ project_id,
+ expires_at,
+ audit_ids)
+ elif domain_id:
+ version = DomainScopedPayload.version
+ payload = DomainScopedPayload.assemble(
+ user_id,
+ methods,
+ domain_id,
+ expires_at,
+ audit_ids)
+ else:
+ version = UnscopedPayload.version
+ payload = UnscopedPayload.assemble(
+ user_id,
+ methods,
+ expires_at,
+ audit_ids)
+
+ versioned_payload = (version,) + payload
+ serialized_payload = msgpack.packb(versioned_payload)
+ token = self.pack(serialized_payload)
+
+ return token
+
+ def validate_token(self, token):
+ """Validates a Fernet token and returns the payload attributes."""
+ # Convert v2 unicode token to a string
+ if not isinstance(token, six.binary_type):
+ token = token.encode('ascii')
+
+ serialized_payload = self.unpack(token)
+ versioned_payload = msgpack.unpackb(serialized_payload)
+ version, payload = versioned_payload[0], versioned_payload[1:]
+
+ # depending on the formatter, these may or may not be defined
+ domain_id = None
+ project_id = None
+ trust_id = None
+ federated_info = None
+
+ if version == UnscopedPayload.version:
+ (user_id, methods, expires_at, audit_ids) = (
+ UnscopedPayload.disassemble(payload))
+ elif version == DomainScopedPayload.version:
+ (user_id, methods, domain_id, expires_at, audit_ids) = (
+ DomainScopedPayload.disassemble(payload))
+ elif version == ProjectScopedPayload.version:
+ (user_id, methods, project_id, expires_at, audit_ids) = (
+ ProjectScopedPayload.disassemble(payload))
+ elif version == TrustScopedPayload.version:
+ (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
+ TrustScopedPayload.disassemble(payload))
+ elif version == FederatedPayload.version:
+ (user_id, methods, expires_at, audit_ids, federated_info) = (
+ FederatedPayload.disassemble(payload))
+ else:
+ # If the token_format is not recognized, raise Unauthorized.
+ raise exception.Unauthorized(_(
+ 'This is not a recognized Fernet payload version: %s') %
+ version)
+
+ # rather than appearing in the payload, the creation time is encoded
+ # into the token format itself
+ created_at = TokenFormatter.creation_time(token)
+ created_at = timeutils.isotime(at=created_at, subsecond=True)
+ expires_at = timeutils.parse_isotime(expires_at)
+ expires_at = timeutils.isotime(at=expires_at, subsecond=True)
+
+ return (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+ federated_info, created_at, expires_at)
+
+
+class BasePayload(object):
+ # each payload variant should have a unique version
+ version = None
+
+ @classmethod
+ def assemble(cls, *args):
+ """Assemble the payload of a token.
+
+ :param args: whatever data should go into the payload
+ :returns: the payload of a token
+
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def disassemble(cls, payload):
+        """Disassemble a payload into the component data.
+
+ :param payload: this variant of payload
+ :returns: a tuple of the payloads component data
+
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def convert_uuid_hex_to_bytes(cls, uuid_string):
+ """Compress UUID formatted strings to bytes.
+
+ :param uuid_string: uuid string to compress to bytes
+ :returns: a byte representation of the uuid
+
+ """
+        # TODO(lbragstad): Wrap this in an exception. Not sure what the case
+        # would be where we couldn't handle what we've been given, but guard
+        # in case the integrity of the token has been compromised.
+ uuid_obj = uuid.UUID(uuid_string)
+ return uuid_obj.bytes
+
+ @classmethod
+ def convert_uuid_bytes_to_hex(cls, uuid_byte_string):
+ """Generate uuid.hex format based on byte string.
+
+ :param uuid_byte_string: uuid string to generate from
+ :returns: uuid hex formatted string
+
+ """
+        # TODO(lbragstad): Wrap this in an exception. Not sure what the case
+        # would be where we couldn't handle what we've been given, but guard
+        # in case the integrity of the token has been compromised.
+ uuid_obj = uuid.UUID(bytes=uuid_byte_string)
+ return uuid_obj.hex
+
+ @classmethod
+ def _convert_time_string_to_int(cls, time_string):
+ """Convert a time formatted string to a timestamp integer.
+
+ :param time_string: time formatted string
+ :returns: an integer timestamp
+
+ """
+ time_object = timeutils.parse_isotime(time_string)
+ return (timeutils.normalize_time(time_object) -
+ datetime.datetime.utcfromtimestamp(0)).total_seconds()
+
+ @classmethod
+ def _convert_int_to_time_string(cls, time_int):
+ """Convert a timestamp integer to a string.
+
+ :param time_int: integer representing timestamp
+        :returns: a time formatted string
+
+ """
+ time_object = datetime.datetime.utcfromtimestamp(int(time_int))
+ return timeutils.isotime(time_object)
+
+ @classmethod
+ def attempt_convert_uuid_hex_to_bytes(cls, value):
+ """Attempt to convert value to bytes or return value.
+
+ :param value: value to attempt to convert to bytes
+ :returns: uuid value in bytes or value
+
+ """
+ try:
+ return cls.convert_uuid_hex_to_bytes(value)
+ except ValueError:
+ # this might not be a UUID, depending on the situation (i.e.
+ # federation)
+ return value
+
+ @classmethod
+ def attempt_convert_uuid_bytes_to_hex(cls, value):
+ """Attempt to convert value to hex or return value.
+
+ :param value: value to attempt to convert to hex
+ :returns: uuid value in hex or value
+
+ """
+ try:
+ return cls.convert_uuid_bytes_to_hex(value)
+ except ValueError:
+ return value
+
+
+class UnscopedPayload(BasePayload):
+ version = 0
+
+ @classmethod
+ def assemble(cls, user_id, methods, expires_at, audit_ids):
+ """Assemble the payload of an unscoped token.
+
+ :param user_id: identifier of the user in the token request
+ :param methods: list of authentication methods used
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :returns: the payload of an unscoped token
+
+ """
+ b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ expires_at_int = cls._convert_time_string_to_int(expires_at)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
+ return (b_user_id, methods, expires_at_int, b_audit_ids)
+
+ @classmethod
+ def disassemble(cls, payload):
+ """Disassemble an unscoped payload into the component data.
+
+ :param payload: the payload of an unscoped token
+ :return: a tuple containing the user_id, auth methods, expires_at, and
+ audit_ids
+
+ """
+ user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ expires_at_str = cls._convert_int_to_time_string(payload[2])
+ audit_ids = list(map(provider.base64_encode, payload[3]))
+ return (user_id, methods, expires_at_str, audit_ids)
+
+
+class DomainScopedPayload(BasePayload):
+ version = 1
+
+ @classmethod
+ def assemble(cls, user_id, methods, domain_id, expires_at, audit_ids):
+ """Assemble the payload of a domain-scoped token.
+
+ :param user_id: ID of the user in the token request
+ :param methods: list of authentication methods used
+ :param domain_id: ID of the domain to scope to
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :returns: the payload of a domain-scoped token
+
+ """
+ b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ try:
+ b_domain_id = cls.convert_uuid_hex_to_bytes(domain_id)
+ except ValueError:
+ # the default domain ID is configurable, and probably isn't a UUID
+ if domain_id == CONF.identity.default_domain_id:
+ b_domain_id = domain_id
+ else:
+ raise
+ expires_at_int = cls._convert_time_string_to_int(expires_at)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
+ return (b_user_id, methods, b_domain_id, expires_at_int, b_audit_ids)
+
+ @classmethod
+ def disassemble(cls, payload):
+ """Disassemble a payload into the component data.
+
+ :param payload: the payload of a token
+ :return: a tuple containing the user_id, auth methods, domain_id,
+ expires_at_str, and audit_ids
+
+ """
+ user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ try:
+ domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
+ except ValueError:
+ # the default domain ID is configurable, and probably isn't a UUID
+ if payload[2] == CONF.identity.default_domain_id:
+ domain_id = payload[2]
+ else:
+ raise
+ expires_at_str = cls._convert_int_to_time_string(payload[3])
+ audit_ids = list(map(provider.base64_encode, payload[4]))
+
+ return (user_id, methods, domain_id, expires_at_str, audit_ids)
+
+
+class ProjectScopedPayload(BasePayload):
+ version = 2
+
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, expires_at, audit_ids):
+ """Assemble the payload of a project-scoped token.
+
+ :param user_id: ID of the user in the token request
+ :param methods: list of authentication methods used
+ :param project_id: ID of the project to scope to
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :returns: the payload of a project-scoped token
+
+ """
+ b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ b_project_id = cls.convert_uuid_hex_to_bytes(project_id)
+ expires_at_int = cls._convert_time_string_to_int(expires_at)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
+ return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids)
+
+ @classmethod
+ def disassemble(cls, payload):
+ """Disassemble a payload into the component data.
+
+ :param payload: the payload of a token
+ :return: a tuple containing the user_id, auth methods, project_id,
+ expires_at_str, and audit_ids
+
+ """
+ user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ project_id = cls.convert_uuid_bytes_to_hex(payload[2])
+ expires_at_str = cls._convert_int_to_time_string(payload[3])
+ audit_ids = list(map(provider.base64_encode, payload[4]))
+
+ return (user_id, methods, project_id, expires_at_str, audit_ids)
+
+
+class TrustScopedPayload(BasePayload):
+ version = 3
+
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, expires_at, audit_ids,
+ trust_id):
+ """Assemble the payload of a trust-scoped token.
+
+ :param user_id: ID of the user in the token request
+ :param methods: list of authentication methods used
+ :param project_id: ID of the project to scope to
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :param trust_id: ID of the trust in effect
+ :returns: the payload of a trust-scoped token
+
+ """
+ b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ b_project_id = cls.convert_uuid_hex_to_bytes(project_id)
+ b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id)
+ expires_at_int = cls._convert_time_string_to_int(expires_at)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
+
+ return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids,
+ b_trust_id)
+
+ @classmethod
+ def disassemble(cls, payload):
+ """Validate a trust-based payload.
+
+ :param token_string: a string representing the token
+ :returns: a tuple containing the user_id, auth methods, project_id,
+ expires_at_str, audit_ids, and trust_id
+
+ """
+ user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ project_id = cls.convert_uuid_bytes_to_hex(payload[2])
+ expires_at_str = cls._convert_int_to_time_string(payload[3])
+ audit_ids = list(map(provider.base64_encode, payload[4]))
+ trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
+
+ return (user_id, methods, project_id, expires_at_str, audit_ids,
+ trust_id)
+
+
+class FederatedPayload(BasePayload):
+ version = 4
+
+ @classmethod
+ def assemble(cls, user_id, methods, expires_at, audit_ids, federated_info):
+ """Assemble the payload of a federated token.
+
+ :param user_id: ID of the user in the token request
+ :param methods: list of authentication methods used
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :param federated_info: dictionary containing group IDs, the identity
+ provider ID, protocol ID, and federated domain
+ ID
+ :returns: the payload of a federated token
+
+ """
+ def pack_group_ids(group_dict):
+ return cls.convert_uuid_hex_to_bytes(group_dict['id'])
+
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ b_group_ids = map(pack_group_ids, federated_info['group_ids'])
+ b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
+ federated_info['idp_id'])
+ protocol_id = federated_info['protocol_id']
+ expires_at_int = cls._convert_time_string_to_int(expires_at)
+ b_audit_ids = map(provider.random_urlsafe_str_to_bytes, audit_ids)
+
+ return (b_user_id, methods, b_group_ids, b_idp_id, protocol_id,
+ expires_at_int, b_audit_ids)
+
+ @classmethod
+ def disassemble(cls, payload):
+        """Validate a federated payload.
+
+ :param token_string: a string representing the token
+ :return: a tuple containing the user_id, auth methods, audit_ids, and
+                 a dictionary containing federated information such as the
+ group IDs, the identity provider ID, the protocol ID, and the
+ federated domain ID
+
+ """
+ def unpack_group_ids(group_id_in_bytes):
+ group_id = cls.convert_uuid_bytes_to_hex(group_id_in_bytes)
+ return {'id': group_id}
+
+ user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ group_ids = map(unpack_group_ids, payload[2])
+ idp_id = cls.attempt_convert_uuid_bytes_to_hex(payload[3])
+ protocol_id = payload[4]
+ expires_at_str = cls._convert_int_to_time_string(payload[5])
+ audit_ids = map(provider.base64_encode, payload[6])
+ federated_info = dict(group_ids=group_ids, idp_id=idp_id,
+ protocol_id=protocol_id)
+ return (user_id, methods, expires_at_str, audit_ids, federated_info)
diff --git a/keystone-moon/keystone/token/providers/fernet/utils.py b/keystone-moon/keystone/token/providers/fernet/utils.py
new file mode 100644
index 00000000..56624ee5
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/fernet/utils.py
@@ -0,0 +1,243 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import stat
+
+from cryptography import fernet
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.i18n import _LE, _LW, _LI
+
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
def validate_key_repository():
    """Validate permissions on the key repository directory."""
    # NOTE(lbragstad): We shouldn't need to check if the directory was passed
    # in as None because we don't set allow_no_values to True.
    key_repo = CONF.fernet_tokens.key_repository

    # The current process needs read, write, and search access to the key
    # repository in order to manage keys.
    if not all(os.access(key_repo, mode)
               for mode in (os.R_OK, os.W_OK, os.X_OK)):
        LOG.error(
            _LE('Either [fernet_tokens] key_repository does not exist or '
                'Keystone does not have sufficient permission to access it: '
                '%s'), key_repo)
        return False

    # Warn (but do not fail) when "other" users can read or search the
    # repository, since that exposes the token signing keys.
    mode = os.stat(key_repo).st_mode
    if mode & (stat.S_IROTH | stat.S_IXOTH):
        LOG.warning(_LW(
            '[fernet_tokens] key_repository is world readable: %s'),
            key_repo)

    return True
+
+
+def _convert_to_integers(id_value):
+ """Cast user and group system identifiers to integers."""
+ # NOTE(lbragstad) os.chown() will raise a TypeError here if
+ # keystone_user_id and keystone_group_id are not integers. Let's
+ # cast them to integers if we can because it's possible to pass non-integer
+ # values into the fernet_setup utility.
+ try:
+ id_int = int(id_value)
+ except ValueError as e:
+ msg = ('Unable to convert Keystone user or group ID. Error: %s', e)
+ LOG.error(msg)
+ raise
+
+ return id_int
+
+
def create_key_directory(keystone_user_id=None, keystone_group_id=None):
    """If the configured key directory does not exist, try to create it.

    :param keystone_user_id: user ID to own the directory (optional)
    :param keystone_group_id: group ID to own the directory (optional)

    """
    if os.access(CONF.fernet_tokens.key_repository, os.F_OK):
        # The repository already exists; nothing to do.
        return

    LOG.info(_LI(
        '[fernet_tokens] key_repository does not appear to exist; '
        'attempting to create it'))

    try:
        # 0o700: owner-only access, keeping signing keys private.
        os.makedirs(CONF.fernet_tokens.key_repository, 0o700)
    except OSError:
        LOG.error(_LE(
            'Failed to create [fernet_tokens] key_repository: either it '
            'already exists or you don\'t have sufficient permissions to '
            'create it'))

    if keystone_user_id and keystone_group_id:
        os.chown(
            CONF.fernet_tokens.key_repository,
            keystone_user_id,
            keystone_group_id)
    elif keystone_user_id or keystone_group_id:
        # Ownership can only be changed when BOTH IDs are supplied.
        # Bug fix: use lazy logging arguments instead of eager
        # %-interpolation at call time.
        LOG.warning(_LW(
            'Unable to change the ownership of [fernet_tokens] '
            'key_repository without a keystone user ID and keystone group '
            'ID both being provided: %s'),
            CONF.fernet_tokens.key_repository)
+
+
def _create_new_key(keystone_user_id, keystone_group_id):
    """Securely create a new encryption key.

    Create a new key that is readable by the Keystone group and Keystone user.

    :param keystone_user_id: effective user ID to write the key as, or a
        falsy value to keep the current effective UID
    :param keystone_group_id: effective group ID to write the key as, or a
        falsy value to keep the current effective GID

    """
    key = fernet.Fernet.generate_key()

    # This ensures the key created is not world-readable
    old_umask = os.umask(0o177)
    if keystone_user_id and keystone_group_id:
        # Temporarily drop privileges so the key file ends up owned by the
        # Keystone user/group rather than whoever runs this utility.
        # NOTE(review): setegid must run before seteuid -- after dropping
        # the effective UID the process may no longer be permitted to
        # change its effective GID.
        old_egid = os.getegid()
        old_euid = os.geteuid()
        os.setegid(keystone_group_id)
        os.seteuid(keystone_user_id)
    elif keystone_user_id or keystone_group_id:
        # Both IDs are required to change ownership; warn and continue.
        LOG.warning(_LW(
            'Unable to change the ownership of the new key without a keystone '
            'user ID and keystone group ID both being provided: %s') %
            CONF.fernet_tokens.key_repository)
    # Determine the file name of the new key. New keys are always staged as
    # key "0"; rotate_keys() later promotes them to the primary slot.
    key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
    try:
        with open(key_file, 'w') as f:
            # NOTE(review): Fernet.generate_key() returns bytes; writing to
            # a text-mode file assumes Python 2 str semantics -- TODO
            # confirm before running under Python 3.
            f.write(key)
    finally:
        # After writing the key, set the umask back to its original value. Do
        # the same with group and user identifiers if a Keystone group or user
        # was supplied (restore in reverse order: euid first, then egid).
        os.umask(old_umask)
        if keystone_user_id and keystone_group_id:
            os.seteuid(old_euid)
            os.setegid(old_egid)

    LOG.info(_LI('Created a new key: %s'), key_file)
+
+
def initialize_key_repository(keystone_user_id=None, keystone_group_id=None):
    """Create a key repository and bootstrap it with a key.

    :param keystone_user_id: User ID of the Keystone user.
    :param keystone_group_id: Group ID of the Keystone user.

    """
    # If the staged key "0" already exists the repository has been
    # bootstrapped before, so there is nothing left to do.
    staged_key = os.path.join(CONF.fernet_tokens.key_repository, '0')
    if os.access(staged_key, os.F_OK):
        LOG.info(_LI('Key repository is already initialized; aborting.'))
        return

    # Stage an initial key, then rotate once so the repository ends up with
    # both a primary key and a staged secondary key.
    _create_new_key(keystone_user_id, keystone_group_id)
    rotate_keys(keystone_user_id, keystone_group_id)
+
+
def rotate_keys(keystone_user_id=None, keystone_group_id=None):
    """Create a new primary key and revoke excess active keys.

    :param keystone_user_id: User ID of the Keystone user.
    :param keystone_group_id: Group ID of the Keystone user.

    Key rotation utilizes the following behaviors:

    - The highest key number is used as the primary key (used for encryption).
    - All keys can be used for decryption.
    - New keys are always created as key "0," which serves as a placeholder
      before promoting it to be the primary key.

    This strategy allows you to safely perform rotation on one node in a
    cluster, before syncing the results of the rotation to all other nodes
    (during both key rotation and synchronization, all nodes must recognize all
    primary keys).

    """
    # read the list of key files, mapping integer key number -> file path
    key_files = dict()
    for filename in os.listdir(CONF.fernet_tokens.key_repository):
        path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
        if os.path.isfile(path):
            key_files[int(filename)] = path

    LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), {
        'count': len(key_files),
        'list': key_files.values()})

    # determine the number of the new primary key
    # NOTE(review): max() raises ValueError on an empty repository and the
    # rename below requires a staged key "0" to exist -- callers are expected
    # to have bootstrapped the repository (see initialize_key_repository).
    current_primary_key = max(key_files.keys())
    LOG.info(_LI('Current primary key is: %s'), current_primary_key)
    new_primary_key = current_primary_key + 1
    LOG.info(_LI('Next primary key will be: %s'), new_primary_key)

    # promote the next primary key to be the primary
    os.rename(
        os.path.join(CONF.fernet_tokens.key_repository, '0'),
        os.path.join(CONF.fernet_tokens.key_repository, str(new_primary_key)))
    key_files.pop(0)
    key_files[new_primary_key] = os.path.join(
        CONF.fernet_tokens.key_repository,
        str(new_primary_key))
    LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key)

    # add a new key to the rotation, which will be the *next* primary
    # (written as key "0"; intentionally NOT added to key_files below)
    _create_new_key(keystone_user_id, keystone_group_id)

    # check for bad configuration
    if CONF.fernet_tokens.max_active_keys < 1:
        LOG.warning(_LW(
            '[fernet_tokens] max_active_keys must be at least 1 to maintain a '
            'primary key.'))
        CONF.fernet_tokens.max_active_keys = 1

    # purge excess keys: keep the newest (max_active_keys - 1) entries of
    # key_files; the "+ 1" accounts for the freshly staged key "0", which is
    # not tracked in key_files, bringing the total kept on disk to
    # max_active_keys.
    keys = sorted(key_files.keys())
    excess_keys = (
        keys[:len(key_files) - CONF.fernet_tokens.max_active_keys + 1])
    LOG.info(_LI('Excess keys to purge: %s'), excess_keys)
    for i in excess_keys:
        os.remove(key_files[i])
+
+
def load_keys():
    """Load keys from disk into a list.

    The first key in the list is the primary key used for encryption. All
    other keys are active secondary keys that can be used for decrypting
    tokens.

    """
    if not validate_key_repository():
        return []

    # Map each key number to the key material read from its file.
    key_repo = CONF.fernet_tokens.key_repository
    keys = {}
    for name in os.listdir(key_repo):
        key_path = os.path.join(key_repo, str(name))
        if not os.path.isfile(key_path):
            continue
        with open(key_path, 'r') as key_file:
            keys[int(name)] = key_file.read()

    LOG.info(_LI(
        'Loaded %(count)s encryption keys from: %(dir)s'), {
            'count': len(keys),
            'dir': key_repo})

    # Highest key number first, so the primary key leads the list.
    return [keys[num] for num in sorted(keys, reverse=True)]
diff --git a/keystone-moon/keystone/token/providers/pki.py b/keystone-moon/keystone/token/providers/pki.py
new file mode 100644
index 00000000..61b42817
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/pki.py
@@ -0,0 +1,53 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone PKI Token Provider"""
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.token.providers import common
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
class Provider(common.BaseProvider):
    """Token provider that CMS-signs the token data to form the token ID."""

    def _get_token_id(self, token_data):
        """Serialize and CMS-sign *token_data*, returning the token ID.

        :param token_data: the token payload to serialize and sign
        :returns: the CMS-signed token ID as a native string
        :raises keystone.exception.UnexpectedError: if signing fails

        """
        try:
            # Serialize with the PKI-aware encoder, then sign with the
            # configured certificate/key pair. str() forces the unicode
            # produced by the keystoneclient cms code into a native string;
            # it can be removed once the client returns str().
            # TODO(ayoung): Make to a byte_str for Python3
            serialized = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
            return str(cms.cms_sign_token(serialized,
                                          CONF.signing.certfile,
                                          CONF.signing.keyfile))
        except environment.subprocess.CalledProcessError:
            LOG.exception(_LE('Unable to sign token'))
            raise exception.UnexpectedError(_(
                'Unable to sign token.'))

    def needs_persistence(self):
        """Should the token be written to a backend."""
        return True
diff --git a/keystone-moon/keystone/token/providers/pkiz.py b/keystone-moon/keystone/token/providers/pkiz.py
new file mode 100644
index 00000000..b6f2944d
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/pkiz.py
@@ -0,0 +1,51 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone Compressed PKI Token Provider"""
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.token.providers import common
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+ERROR_MESSAGE = _('Unable to sign token.')
+
+
class Provider(common.BaseProvider):
    """Token provider that pkiz-signs (compressed PKI) the token data."""

    def _get_token_id(self, token_data):
        """Serialize and pkiz-sign *token_data*, returning the token ID.

        :param token_data: the token payload to serialize and sign
        :returns: the compressed-PKI-signed token ID as a native string
        :raises keystone.exception.UnexpectedError: if signing fails

        """
        try:
            # Serialize with the PKI-aware encoder, then sign with the
            # configured certificate/key pair. str() forces the unicode
            # produced by the keystoneclient cms code into a native string;
            # it can be removed once the client returns str().
            # TODO(ayoung): Make to a byte_str for Python3
            serialized = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
            return str(cms.pkiz_sign(serialized,
                                     CONF.signing.certfile,
                                     CONF.signing.keyfile))
        except environment.subprocess.CalledProcessError:
            LOG.exception(ERROR_MESSAGE)
            raise exception.UnexpectedError(ERROR_MESSAGE)

    def needs_persistence(self):
        """Should the token be written to a backend."""
        return True
diff --git a/keystone-moon/keystone/token/providers/uuid.py b/keystone-moon/keystone/token/providers/uuid.py
new file mode 100644
index 00000000..15118d82
--- /dev/null
+++ b/keystone-moon/keystone/token/providers/uuid.py
@@ -0,0 +1,33 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone UUID Token Provider"""
+
+from __future__ import absolute_import
+
+import uuid
+
+from keystone.token.providers import common
+
+
class Provider(common.BaseProvider):
    """Token provider that issues random UUID token IDs.

    The redundant ``__init__`` that only delegated to ``super()`` has been
    removed; the inherited constructor behaves identically.
    """

    def _get_token_id(self, token_data):
        """Return a new random token ID.

        :param token_data: token payload (unused -- UUID token IDs carry no
            data themselves, which is why the token must be persisted)
        :returns: a 32-character hexadecimal UUID string

        """
        return uuid.uuid4().hex

    def needs_persistence(self):
        """Should the token be written to a backend."""
        # The ID is opaque, so the token data must live in a backend.
        return True
diff --git a/keystone-moon/keystone/token/routers.py b/keystone-moon/keystone/token/routers.py
new file mode 100644
index 00000000..bcd40ee4
--- /dev/null
+++ b/keystone-moon/keystone/token/routers.py
@@ -0,0 +1,59 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from keystone.common import wsgi
+from keystone.token import controllers
+
+
class Router(wsgi.ComposableRouter):
    """Wires up the legacy token and certificate routes."""

    def add_routes(self, mapper):
        """Connect token and certificate URLs to the Auth controller."""
        token_controller = controllers.Auth()

        # (path, controller action, HTTP method) triples, connected in order.
        # NOTE(morganfainberg): For policy enforcement reasons, the
        # ``validate_token_head`` method is still used for HEAD requests.
        # The controller method makes the same call as the validate_token
        # call and lets wsgi.render_response remove the body data.
        routes = [
            ('/tokens', 'authenticate', 'POST'),
            ('/tokens/revoked', 'revocation_list', 'GET'),
            ('/tokens/{token_id}', 'validate_token', 'GET'),
            ('/tokens/{token_id}', 'validate_token_head', 'HEAD'),
            ('/tokens/{token_id}', 'delete_token', 'DELETE'),
            ('/tokens/{token_id}/endpoints', 'endpoints', 'GET'),
            # Certificates used to verify auth tokens
            ('/certificates/ca', 'ca_cert', 'GET'),
            ('/certificates/signing', 'signing_cert', 'GET'),
        ]
        for path, action, method in routes:
            mapper.connect(path,
                           controller=token_controller,
                           action=action,
                           conditions=dict(method=[method]))