Diffstat (limited to 'keystone-moon/keystone/common')
-rw-r--r--  keystone-moon/keystone/common/authorization.py | 17
-rw-r--r--  keystone-moon/keystone/common/cache/_context_cache.py | 129
-rw-r--r--  keystone-moon/keystone/common/cache/backends/memcache_pool.py | 53
-rw-r--r--  keystone-moon/keystone/common/cache/backends/mongo.py | 554
-rw-r--r--  keystone-moon/keystone/common/cache/backends/noop.py | 7
-rw-r--r--  keystone-moon/keystone/common/cache/core.py | 352
-rw-r--r--  keystone-moon/keystone/common/config.py | 555
-rw-r--r--  keystone-moon/keystone/common/controller.py | 196
-rw-r--r--  keystone-moon/keystone/common/dependency.py | 2
-rw-r--r--  keystone-moon/keystone/common/driver_hints.py | 47
-rw-r--r--  keystone-moon/keystone/common/environment/__init__.py | 5
-rw-r--r--  keystone-moon/keystone/common/environment/eventlet_server.py | 30
-rw-r--r--  keystone-moon/keystone/common/extension.py | 1
-rw-r--r--  keystone-moon/keystone/common/json_home.py | 1
-rw-r--r--  keystone-moon/keystone/common/kvs/__init__.py | 1
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/inmemdb.py | 5
-rw-r--r--  keystone-moon/keystone/common/kvs/backends/memcached.py | 14
-rw-r--r--  keystone-moon/keystone/common/kvs/core.py | 66
-rw-r--r--  keystone-moon/keystone/common/ldap/core.py | 137
-rw-r--r--  keystone-moon/keystone/common/manager.py | 104
-rw-r--r--  keystone-moon/keystone/common/models.py | 13
-rw-r--r--  keystone-moon/keystone/common/openssl.py | 66
-rw-r--r--  keystone-moon/keystone/common/router.py | 14
-rw-r--r--  keystone-moon/keystone/common/sql/core.py | 110
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/README | 2
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/__init__.py | 17
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py | 21
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py | 21
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py | 21
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py | 21
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py | 43
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py | 41
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py | 27
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py | 90
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py | 27
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py | 25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py | 18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py | 41
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py | 35
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py | 24
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py | 39
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py | 46
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py | 40
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py | 25
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py) | 100
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py | 3
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py) | 4
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py) | 4
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py) | 4
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py) | 4
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py | 18
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py | 54
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py | 97
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py | 75
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py | 55
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py | 70
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py) | 17
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py | 43
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py | 60
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py | 76
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py | 42
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py | 66
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py | 46
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py | 125
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py | 43
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py | 62
-rw-r--r--  keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py | 50
-rw-r--r--  keystone-moon/keystone/common/sql/migration_helpers.py | 129
-rw-r--r--  keystone-moon/keystone/common/tokenless_auth.py | 11
-rw-r--r--  keystone-moon/keystone/common/utils.py | 92
-rw-r--r--  keystone-moon/keystone/common/validation/__init__.py | 13
-rw-r--r--  keystone-moon/keystone/common/validation/parameter_types.py | 9
-rw-r--r--  keystone-moon/keystone/common/validation/validators.py | 3
-rw-r--r--  keystone-moon/keystone/common/wsgi.py | 129
74 files changed, 2337 insertions, 2270 deletions
diff --git a/keystone-moon/keystone/common/authorization.py b/keystone-moon/keystone/common/authorization.py
index 2c578dfd..414b9525 100644
--- a/keystone-moon/keystone/common/authorization.py
+++ b/keystone-moon/keystone/common/authorization.py
@@ -31,8 +31,12 @@ It is a dictionary with the following attributes:
* ``token``: Token from the request
* ``user_id``: user ID of the principal
+* ``user_domain_id`` (optional): Domain ID of the principal if the principal
+ has a domain.
* ``project_id`` (optional): project ID of the scoped project if auth is
project-scoped
+* ``project_domain_id`` (optional): Domain ID of the scoped project if auth is
+ project-scoped.
* ``domain_id`` (optional): domain ID of the scoped domain if auth is
domain-scoped
* ``domain_name`` (optional): domain name of the scoped domain if auth is
@@ -64,9 +68,11 @@ def token_to_auth_context(token):
except KeyError:
LOG.warning(_LW('RBAC: Invalid user data in token'))
raise exception.Unauthorized()
+ auth_context['user_domain_id'] = token.user_domain_id
if token.project_scoped:
auth_context['project_id'] = token.project_id
+ auth_context['project_domain_id'] = token.project_domain_id
elif token.domain_scoped:
auth_context['domain_id'] = token.domain_id
auth_context['domain_name'] = token.domain_name
@@ -79,6 +85,8 @@ def token_to_auth_context(token):
auth_context['trustor_id'] = token.trustor_user_id
auth_context['trustee_id'] = token.trustee_user_id
else:
+ # NOTE(lbragstad): These variables will already be set to None but we
+ # add the else statement here for readability.
auth_context['trust_id'] = None
auth_context['trustor_id'] = None
auth_context['trustee_id'] = None
@@ -89,8 +97,13 @@ def token_to_auth_context(token):
if token.oauth_scoped:
auth_context['is_delegated_auth'] = True
- auth_context['consumer_id'] = token.oauth_consumer_id
- auth_context['access_token_id'] = token.oauth_access_token_id
+ auth_context['consumer_id'] = token.oauth_consumer_id
+ auth_context['access_token_id'] = token.oauth_access_token_id
+ else:
+ # NOTE(lbragstad): These variables will already be set to None but we
+ # add the else statement here for readability.
+ auth_context['consumer_id'] = None
+ auth_context['access_token_id'] = None
if token.is_federated_user:
auth_context['group_ids'] = token.federation_group_ids
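The hunks above extend the auth context with the principal's domain and the
scoped project's domain, and make the unset OAuth fields explicit. A minimal
sketch of the dictionary token_to_auth_context() now produces for a plain
project-scoped token; every ID below is a made-up example value, not captured
keystone output:

    # Hypothetical illustration (Python) of the resulting auth context.
    auth_context = {
        'token': None,                    # the token object from the request
        'user_id': 'f1a2b3',
        'user_domain_id': 'default',      # added by this change
        'project_id': 'c4d5e6',
        'project_domain_id': 'default',   # added by this change
        'is_delegated_auth': False,
        'trust_id': None,                 # explicit None for readability
        'trustor_id': None,
        'trustee_id': None,
        'consumer_id': None,              # explicit None, added by this change
        'access_token_id': None,
    }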
diff --git a/keystone-moon/keystone/common/cache/_context_cache.py b/keystone-moon/keystone/common/cache/_context_cache.py
new file mode 100644
index 00000000..3895ca1f
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/_context_cache.py
@@ -0,0 +1,129 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A dogpile.cache proxy that caches objects in the request local cache."""
+from dogpile.cache import api
+from dogpile.cache import proxy
+from oslo_context import context as oslo_context
+from oslo_serialization import msgpackutils
+
+from keystone.models import revoke_model
+
+
+class _RevokeModelHandler(object):
+ # NOTE(morganfainberg): There needs to be reserved "registry" entries set
+ # in oslo_serialization for application-specific handlers. We picked 127
+ # here since it's waaaaaay far out before oslo_serialization will use it.
+ identity = 127
+ handles = (revoke_model.RevokeTree,)
+
+ def __init__(self, registry):
+ self._registry = registry
+
+ def serialize(self, obj):
+ return msgpackutils.dumps(obj.revoke_map,
+ registry=self._registry)
+
+ def deserialize(self, data):
+ revoke_map = msgpackutils.loads(data, registry=self._registry)
+ revoke_tree = revoke_model.RevokeTree()
+ revoke_tree.revoke_map = revoke_map
+ return revoke_tree
+
+
+# Register our new handler.
+_registry = msgpackutils.default_registry
+_registry.frozen = False
+_registry.register(_RevokeModelHandler(registry=_registry))
+_registry.frozen = True
+
+
+class _ResponseCacheProxy(proxy.ProxyBackend):
+
+ __key_pfx = '_request_cache_%s'
+
+ def _get_request_context(self):
+ # Return the current context or a new/empty context.
+ return oslo_context.get_current() or oslo_context.RequestContext()
+
+ def _get_request_key(self, key):
+ return self.__key_pfx % key
+
+ def _set_local_cache(self, key, value, ctx=None):
+ # Set a serialized version of the returned value in local cache for
+ # subsequent calls to the memoized method.
+ if not ctx:
+ ctx = self._get_request_context()
+ serialize = {'payload': value.payload, 'metadata': value.metadata}
+ setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
+ ctx.update_store()
+
+ def _get_local_cache(self, key):
+ # Return the version from our local request cache if it exists.
+ ctx = self._get_request_context()
+ try:
+ value = getattr(ctx, self._get_request_key(key))
+ except AttributeError:
+ return api.NO_VALUE
+
+ value = msgpackutils.loads(value)
+ return api.CachedValue(payload=value['payload'],
+ metadata=value['metadata'])
+
+ def _delete_local_cache(self, key):
+ # On invalidate/delete remove the value from the local request cache
+ ctx = self._get_request_context()
+ try:
+ delattr(ctx, self._get_request_key(key))
+ ctx.update_store()
+ except AttributeError: # nosec
+ # NOTE(morganfainberg): We will simply pass here, this value has
+ # not been cached locally in the request.
+ pass
+
+ def get(self, key):
+ value = self._get_local_cache(key)
+ if value is api.NO_VALUE:
+ value = self.proxied.get(key)
+ if value is not api.NO_VALUE:
+ self._set_local_cache(key, value)
+ return value
+
+ def set(self, key, value):
+ self._set_local_cache(key, value)
+ self.proxied.set(key, value)
+
+ def delete(self, key):
+ self._delete_local_cache(key)
+ self.proxied.delete(key)
+
+ def get_multi(self, keys):
+ values = {}
+ for key in keys:
+ v = self._get_local_cache(key)
+ if v is not api.NO_VALUE:
+ values[key] = v
+ query_keys = set(keys).difference(set(values.keys()))
+ values.update(dict(
+ zip(query_keys, self.proxied.get_multi(query_keys))))
+ return [values[k] for k in keys]
+
+ def set_multi(self, mapping):
+ ctx = self._get_request_context()
+ for k, v in mapping.items():
+ self._set_local_cache(k, v, ctx)
+ self.proxied.set_multi(mapping)
+
+ def delete_multi(self, keys):
+ for k in keys:
+ self._delete_local_cache(k)
+ self.proxied.delete_multi(keys)
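The proxy above consults the request-local oslo.context store before falling
through to the real backend, so repeated memoized calls within one request
skip the backend entirely. A short usage sketch, assuming a plain
dogpile.cache memory backend; keystone itself wires the proxy in via
configure_cache() in keystone/common/cache/core.py:

    # Sketch: wrap any configured dogpile region with the request-local proxy.
    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.memory')
    region.wrap(_ResponseCacheProxy)
    # From here on, region.get()/set()/delete() first check the per-request
    # context cache and only then touch the memory backend.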
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
index f3990b12..bbe4785a 100644
--- a/keystone-moon/keystone/common/cache/backends/memcache_pool.py
+++ b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
@@ -13,49 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""dogpile.cache backend that uses Memcached connection pool"""
+"""This module is deprecated."""
-import functools
-import logging
+from oslo_cache.backends import memcache_pool
+from oslo_log import versionutils
-from dogpile.cache.backends import memcached as memcached_backend
-from keystone.common.cache import _memcache_pool
-
-
-LOG = logging.getLogger(__name__)
-
-
-# Helper to ease backend refactoring
-class ClientProxy(object):
- def __init__(self, client_pool):
- self.client_pool = client_pool
-
- def _run_method(self, __name, *args, **kwargs):
- with self.client_pool.acquire() as client:
- return getattr(client, __name)(*args, **kwargs)
-
- def __getattr__(self, name):
- return functools.partial(self._run_method, name)
-
-
-class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
- # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
- def __init__(self, arguments):
- super(PooledMemcachedBackend, self).__init__(arguments)
- self.client_pool = _memcache_pool.MemcacheClientPool(
- self.url,
- arguments={
- 'dead_retry': arguments.get('dead_retry', 5 * 60),
- 'socket_timeout': arguments.get('socket_timeout', 3),
- },
- maxsize=arguments.get('pool_maxsize', 10),
- unused_timeout=arguments.get('pool_unused_timeout', 60),
- conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
- )
-
- # Since all methods in backend just call one of methods of client, this
- # lets us avoid need to hack it too much
- @property
- def client(self):
- return ClientProxy(self.client_pool)
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.cache.memcache_pool backend',
+ in_favor_of='oslo_cache.memcache_pool backend',
+ remove_in=+1)
+class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
+ pass
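The module is reduced to a deprecation shim: the subclass carries no behavior
of its own and only keeps the old import path and dogpile backend name alive
while oslo.cache provides the implementation. The decorator pattern in
isolation, as a sketch with hypothetical class names:

    # Sketch: versionutils.deprecated emits a deprecation warning when the
    # decorated class is instantiated, while behavior comes from the parent.
    from oslo_log import versionutils

    class NewBackend(object):               # stand-in for the real backend
        pass

    @versionutils.deprecated(
        versionutils.deprecated.MITAKA,     # release that deprecated it
        what='old.Backend',                 # hypothetical names
        in_favor_of='new.Backend',
        remove_in=+1)                       # removed one cycle later
    class OldBackend(NewBackend):           # instantiation logs a warning
        pass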
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
index cb5ad833..861aefed 100644
--- a/keystone-moon/keystone/common/cache/backends/mongo.py
+++ b/keystone-moon/keystone/common/cache/backends/mongo.py
@@ -12,550 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import abc
-import datetime
+from oslo_cache.backends import mongo
+from oslo_log import versionutils
-from dogpile.cache import api
-from dogpile.cache import util as dp_util
-from oslo_log import log
-from oslo_utils import importutils
-from oslo_utils import timeutils
-import six
-from keystone import exception
-from keystone.i18n import _, _LW
-
-
-NO_VALUE = api.NO_VALUE
-LOG = log.getLogger(__name__)
-
-
-class MongoCacheBackend(api.CacheBackend):
- """A MongoDB based caching backend implementing dogpile backend APIs.
-
- Arguments accepted in the arguments dictionary:
-
- :param db_hosts: string (required), hostname or IP address of the
- MongoDB server instance. This can be a single MongoDB connection URI,
- or a list of MongoDB connection URIs.
-
- :param db_name: string (required), the name of the database to be used.
-
- :param cache_collection: string (required), the name of the collection in
- which to store cached data.
- *Note:* A different collection name can be provided if there is a need to
- create a separate container (i.e. collection) for cache data, so region
- configuration is done per collection.
-
- The following are optional parameters for MongoDB backend configuration:
-
- :param username: string, the name of the user to authenticate.
-
- :param password: string, the password of the user to authenticate.
-
- :param max_pool_size: integer, the maximum number of connections that the
- pool will open simultaneously. By default the pool size is 10.
-
- :param w: integer, write acknowledgement for MongoDB client
-
- If not provided, then no default is set on MongoDB and write
- acknowledgement behavior occurs as per the MongoDB default. This
- parameter name is the same as what is used in the MongoDB docs. This
- value is specified at the collection level, so it is applicable to
- `cache_collection` db write operations.
-
- If this is a replica set, write operations will block until they have
- been replicated to the specified number or tagged set of servers.
- Setting w=0 disables write acknowledgement and all other write concern
- options.
-
- :param read_preference: string, the read preference mode for the MongoDB
- client. Expected value is ``primary``, ``primaryPreferred``,
- ``secondary``, ``secondaryPreferred``, or ``nearest``. This
- read_preference is specified at the collection level, so it is
- applicable to `cache_collection` db read operations.
-
- :param use_replica: boolean, flag to indicate if a replica client is to
- be used. Default is `False`. A `replicaset_name` value is required if
- `True`.
-
- :param replicaset_name: string, name of replica set.
- Becomes required if `use_replica` is `True`
-
- :param son_manipulator: string, name of class with module name which
- implements MongoDB SONManipulator.
- Default manipulator used is :class:`.BaseTransform`.
-
- This manipulator is added per database. In multiple cache
- configurations, the manipulator name should be the same if the same
- database name ``db_name`` is used in those configurations.
-
- SONManipulator is used to manipulate custom data types as they are
- saved or retrieved from MongoDB. A custom implementation is only needed
- if the cached data is a custom class and needs transformations when
- saving or reading from the db. If the dogpile cached value contains
- built-in data types, then the BaseTransform class is sufficient as it
- already handles the dogpile CachedValue class transformation.
-
- :param mongo_ttl_seconds: integer, interval in seconds to indicate the
- maximum time-to-live value.
- If the value is greater than 0, then it is assumed that
- cache_collection needs to be of TTL type (has an index on the
- 'doc_date' field). By default, the value is -1 and TTL is disabled.
- Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
-
- .. NOTE::
-
- This parameter is different from Dogpile own
- expiration_time, which is the number of seconds after which Dogpile
- will consider the value to be expired. When Dogpile considers a
- value to be expired, it continues to use the value until generation
- of a new value is complete, when using CacheRegion.get_or_create().
- Therefore, if you are setting `mongo_ttl_seconds`, you will want to
- make sure it is greater than expiration_time by at least enough
- seconds for new values to be generated, else the value would not
- be available during a regeneration, forcing all threads to wait for
- a regeneration each time a value expires.
-
- :param ssl: boolean, if True, create the connection to the server
- using SSL. Default is `False`. Client SSL connection parameters depend
- on the server-side SSL setup. For further reference on SSL
- configuration: <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
-
- :param ssl_keyfile: string, the private keyfile used to identify the
- local connection against mongod. If included with the certfile then
- only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
-
- :param ssl_certfile: string, the certificate file used to identify the
- local connection against mongod. Used only when `ssl` is `True`.
-
- :param ssl_ca_certs: string, the ca_certs file contains a set of
- concatenated 'certification authority' certificates, which are used to
- validate certificates passed from the other end of the connection.
- Used only when `ssl` is `True`.
-
- :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
- a certificate is required from the other side of the connection, and
- whether it will be validated if provided. It must be one of the three
- values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
- (not required, but validated if provided), or
- ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
- parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
- must point to a file of CA certificates. Used only when `ssl`
- is `True`.
-
- The rest of the arguments are passed to mongo calls for read, write and
- remove, so related options can be specified to pass to these operations.
-
- Further details of the various supported arguments can be found at
- <http://api.mongodb.org/python/current/api/pymongo/>
-
- """
-
- def __init__(self, arguments):
- self.api = MongoApi(arguments)
-
- @dp_util.memoized_property
- def client(self):
- """Initializes MongoDB connection and collection defaults.
-
- This initialization is done only once and performed as part of the lazy
- inclusion of the MongoDB dependency, i.e. imports are added only if the
- related backend is used.
-
- :return: :class:`.MongoApi` instance
- """
- self.api.get_cache_collection()
- return self.api
-
- def get(self, key):
- value = self.client.get(key)
- if value is None:
- return NO_VALUE
- else:
- return value
-
- def get_multi(self, keys):
- values = self.client.get_multi(keys)
- return [
- NO_VALUE if key not in values
- else values[key] for key in keys
- ]
-
- def set(self, key, value):
- self.client.set(key, value)
-
- def set_multi(self, mapping):
- self.client.set_multi(mapping)
-
- def delete(self, key):
- self.client.delete(key)
-
- def delete_multi(self, keys):
- self.client.delete_multi(keys)
-
-
-class MongoApi(object):
- """Class handling MongoDB specific functionality.
-
- This class uses PyMongo APIs internally to create a database connection
- with the configured pool size, ensures a unique index on the key, does
- database authentication and ensures a TTL collection index if so
- configured. This class also serves as a handle to the cache collection
- for dogpile cache APIs.
-
- In a single deployment, multiple cache configurations can be defined.
- In that case, with multiple cache collections in use, the db client
- connection pool is shared when the cache collections are within the
- same database.
- """
-
- # class level attributes for re-use of db client connection and collection
- _DB = {} # dict of db_name: db connection reference
- _MONGO_COLLS = {} # dict of cache_collection : db collection reference
-
- def __init__(self, arguments):
- self._init_args(arguments)
- self._data_manipulator = None
-
- def _init_args(self, arguments):
- """Helper logic for collecting and parsing MongoDB specific arguments.
-
- The arguments passed in are separated out into connection-specific
- settings, and the rest of the arguments are passed to
- create/update/delete db operations.
- """
- self.conn_kwargs = {} # connection specific arguments
-
- self.hosts = arguments.pop('db_hosts', None)
- if self.hosts is None:
- msg = _('db_hosts value is required')
- raise exception.ValidationError(message=msg)
-
- self.db_name = arguments.pop('db_name', None)
- if self.db_name is None:
- msg = _('database db_name is required')
- raise exception.ValidationError(message=msg)
-
- self.cache_collection = arguments.pop('cache_collection', None)
- if self.cache_collection is None:
- msg = _('cache_collection name is required')
- raise exception.ValidationError(message=msg)
-
- self.username = arguments.pop('username', None)
- self.password = arguments.pop('password', None)
- self.max_pool_size = arguments.pop('max_pool_size', 10)
-
- self.w = arguments.pop('w', -1)
- try:
- self.w = int(self.w)
- except ValueError:
- msg = _('integer value expected for w (write concern attribute)')
- raise exception.ValidationError(message=msg)
-
- self.read_preference = arguments.pop('read_preference', None)
-
- self.use_replica = arguments.pop('use_replica', False)
- if self.use_replica:
- if arguments.get('replicaset_name') is None:
- msg = _('replicaset_name required when use_replica is True')
- raise exception.ValidationError(message=msg)
- self.replicaset_name = arguments.get('replicaset_name')
-
- self.son_manipulator = arguments.pop('son_manipulator', None)
-
- # set if the mongo collection needs to be of TTL type.
- # This needs to be the max ttl for any cache entry.
- # By default, -1 means don't use a TTL collection.
- # With ttl set, it creates the related index and has a doc_date field
- # with the needed expiration interval
- self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
- try:
- self.ttl_seconds = int(self.ttl_seconds)
- except ValueError:
- msg = _('integer value expected for mongo_ttl_seconds')
- raise exception.ValidationError(message=msg)
-
- self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
- if self.conn_kwargs['ssl']:
- ssl_keyfile = arguments.pop('ssl_keyfile', None)
- ssl_certfile = arguments.pop('ssl_certfile', None)
- ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
- ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
- if ssl_keyfile:
- self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
- if ssl_certfile:
- self.conn_kwargs['ssl_certfile'] = ssl_certfile
- if ssl_ca_certs:
- self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
- if ssl_cert_reqs:
- self.conn_kwargs['ssl_cert_reqs'] = (
- self._ssl_cert_req_type(ssl_cert_reqs))
-
- # rest of arguments are passed to mongo crud calls
- self.meth_kwargs = arguments
-
- def _ssl_cert_req_type(self, req_type):
- try:
- import ssl
- except ImportError:
- raise exception.ValidationError(_('no ssl support available'))
- req_type = req_type.upper()
- try:
- return {
- 'NONE': ssl.CERT_NONE,
- 'OPTIONAL': ssl.CERT_OPTIONAL,
- 'REQUIRED': ssl.CERT_REQUIRED
- }[req_type]
- except KeyError:
- msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
- '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
- raise exception.ValidationError(message=msg)
-
- def _get_db(self):
- # defer imports until backend is used
- global pymongo
- import pymongo
- if self.use_replica:
- connection = pymongo.MongoReplicaSetClient(
- host=self.hosts, replicaSet=self.replicaset_name,
- max_pool_size=self.max_pool_size, **self.conn_kwargs)
- else: # used for standalone node or mongos in sharded setup
- connection = pymongo.MongoClient(
- host=self.hosts, max_pool_size=self.max_pool_size,
- **self.conn_kwargs)
-
- database = getattr(connection, self.db_name)
-
- self._assign_data_mainpulator()
- database.add_son_manipulator(self._data_manipulator)
- if self.username and self.password:
- database.authenticate(self.username, self.password)
- return database
-
- def _assign_data_mainpulator(self):
- if self._data_manipulator is None:
- if self.son_manipulator:
- self._data_manipulator = importutils.import_object(
- self.son_manipulator)
- else:
- self._data_manipulator = BaseTransform()
-
- def _get_doc_date(self):
- if self.ttl_seconds > 0:
- expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
- doc_date = timeutils.utcnow() + expire_delta
- else:
- doc_date = timeutils.utcnow()
- return doc_date
-
- def get_cache_collection(self):
- if self.cache_collection not in self._MONGO_COLLS:
- global pymongo
- import pymongo
- # re-use db client connection if already defined as part of
- # earlier dogpile cache configuration
- if self.db_name not in self._DB:
- self._DB[self.db_name] = self._get_db()
- coll = getattr(self._DB[self.db_name], self.cache_collection)
-
- self._assign_data_mainpulator()
- if self.read_preference:
- # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name
- f = getattr(pymongo.read_preferences,
- 'read_pref_mode_from_name', None)
- if not f:
- f = pymongo.read_preferences.mongos_enum
- self.read_preference = f(self.read_preference)
- coll.read_preference = self.read_preference
- if self.w > -1:
- coll.write_concern['w'] = self.w
- if self.ttl_seconds > 0:
- kwargs = {'expireAfterSeconds': self.ttl_seconds}
- coll.ensure_index('doc_date', cache_for=5, **kwargs)
- else:
- self._validate_ttl_index(coll, self.cache_collection,
- self.ttl_seconds)
- self._MONGO_COLLS[self.cache_collection] = coll
-
- return self._MONGO_COLLS[self.cache_collection]
-
- def _get_cache_entry(self, key, value, meta, doc_date):
- """MongoDB cache data representation.
-
- The cache key is stored as the ``_id`` field, as MongoDB by default
- creates a unique index on this field, so there is no need to create a
- separate field and index for storing the cache key. Cache data has an
- additional ``doc_date`` field for MongoDB TTL collection support.
- """
- return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
-
- def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
- """Warns if an existing TTL index conflicts with the new configuration.
-
- This logs a warning when the existing collection has a TTL index
- defined and the new cache configuration tries to disable the index with
- ``mongo_ttl_seconds < 0``. In that case, the existing index needs to be
- addressed first to make the new configuration effective.
- Refer to the MongoDB documentation on TTL indexes for further details.
- """
- indexes = collection.index_information()
- for indx_name, index_data in indexes.items():
- if all(k in index_data for k in ('key', 'expireAfterSeconds')):
- existing_value = index_data['expireAfterSeconds']
- fld_present = 'doc_date' in index_data['key'][0]
- if fld_present and existing_value > -1 and ttl_seconds < 1:
- msg = _LW('TTL index already exists on db collection '
- '<%(c_name)s>, remove index <%(indx_name)s> '
- 'first to make updated mongo_ttl_seconds value '
- 'to be effective')
- LOG.warn(msg, {'c_name': coll_name,
- 'indx_name': indx_name})
-
- def get(self, key):
- critieria = {'_id': key}
- result = self.get_cache_collection().find_one(spec_or_id=critieria,
- **self.meth_kwargs)
- if result:
- return result['value']
- else:
- return None
-
- def get_multi(self, keys):
- db_results = self._get_results_as_dict(keys)
- return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
-
- def _get_results_as_dict(self, keys):
- critieria = {'_id': {'$in': keys}}
- db_results = self.get_cache_collection().find(spec=critieria,
- **self.meth_kwargs)
- return {doc['_id']: doc for doc in db_results}
-
- def set(self, key, value):
- doc_date = self._get_doc_date()
- ref = self._get_cache_entry(key, value.payload, value.metadata,
- doc_date)
- spec = {'_id': key}
- # find and modify does not have manipulator support
- # so need to do conversion as part of input document
- ref = self._data_manipulator.transform_incoming(ref, self)
- self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
- **self.meth_kwargs)
-
- def set_multi(self, mapping):
- """Insert multiple documents specified as key, value pairs.
-
- In this case, multiple documents can be added via insert provided they
- do not exist. Updates of multiple existing documents are done one by
- one.
- """
- doc_date = self._get_doc_date()
- insert_refs = []
- update_refs = []
- existing_docs = self._get_results_as_dict(list(mapping.keys()))
- for key, value in mapping.items():
- ref = self._get_cache_entry(key, value.payload, value.metadata,
- doc_date)
- if key in existing_docs:
- ref['_id'] = existing_docs[key]['_id']
- update_refs.append(ref)
- else:
- insert_refs.append(ref)
- if insert_refs:
- self.get_cache_collection().insert(insert_refs, manipulate=True,
- **self.meth_kwargs)
- for upd_doc in update_refs:
- self.get_cache_collection().save(upd_doc, manipulate=True,
- **self.meth_kwargs)
-
- def delete(self, key):
- critieria = {'_id': key}
- self.get_cache_collection().remove(spec_or_id=critieria,
- **self.meth_kwargs)
-
- def delete_multi(self, keys):
- critieria = {'_id': {'$in': keys}}
- self.get_cache_collection().remove(spec_or_id=critieria,
- **self.meth_kwargs)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AbstractManipulator(object):
- """Abstract class with methods which need to be implemented for custom
- manipulation.
-
- This is used as a base class for :class:`.BaseTransform` instead of
- adding an import dependency on the pymongo-specific class
- `pymongo.son_manipulator.SONManipulator` and using that as the base
- class. This is done to avoid a pymongo dependency if the MongoDB
- backend is not used.
- """
- @abc.abstractmethod
- def transform_incoming(self, son, collection):
- """Used while saving data to MongoDB.
-
- :param son: the SON object to be inserted into the database
- :param collection: the collection the object is being inserted into
-
- :returns: transformed SON object
-
- """
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def transform_outgoing(self, son, collection):
- """Used while reading data from MongoDB.
-
- :param son: the SON object being retrieved from the database
- :param collection: the collection this object was stored in
-
- :returns: transformed SON object
- """
- raise exception.NotImplemented() # pragma: no cover
-
- def will_copy(self):
- """Will this SON manipulator make a copy of the incoming document?
-
- Derived classes that do need to make a copy should override this
- method, returning `True` instead of `False`.
-
- :returns: boolean
- """
- return False
-
-
-class BaseTransform(AbstractManipulator):
- """Base transformation class to store and read dogpile cached data
- from MongoDB.
-
- This is needed as dogpile internally stores data as a custom class
- i.e. dogpile.cache.api.CachedValue
-
- Note: A custom manipulator always needs to override the
- ``transform_incoming`` and ``transform_outgoing`` methods. The MongoDB
- manipulator logic specifically checks that the overridden method in the
- instance and its super are different.
- """
-
- def transform_incoming(self, son, collection):
- """Used while saving data to MongoDB."""
- for (key, value) in list(son.items()):
- if isinstance(value, api.CachedValue):
- son[key] = value.payload # key is 'value' field here
- son['meta'] = value.metadata
- elif isinstance(value, dict): # Make sure we recurse into sub-docs
- son[key] = self.transform_incoming(value, collection)
- return son
-
- def transform_outgoing(self, son, collection):
- """Used while reading data from MongoDB."""
- metadata = None
- # make sure it's a top-level dictionary with all expected field names
- # present
- if isinstance(son, dict) and all(k in son for k in
- ('_id', 'value', 'meta', 'doc_date')):
- payload = son.pop('value', None)
- metadata = son.pop('meta', None)
- for (key, value) in list(son.items()):
- if isinstance(value, dict):
- son[key] = self.transform_outgoing(value, collection)
- if metadata is not None:
- son['value'] = api.CachedValue(payload, metadata)
- return son
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.cache.mongo backend',
+ in_favor_of='oslo_cache.mongo backend',
+ remove_in=+1)
+class MongoCacheBackend(mongo.MongoCacheBackend):
+ pass
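As with the memcache_pool backend, only a deprecated alias remains here; the
full implementation (and the argument set documented in the removed
docstring) now lives in oslo.cache. A configuration sketch, assuming the
dogpile backend name registered by keystone and made-up connection values:

    # Sketch: configuring a region against the mongo backend with the three
    # required arguments from the docstring above (db_hosts, db_name,
    # cache_collection); mongo_ttl_seconds is one of the optional knobs.
    from dogpile.cache import make_region

    region = make_region().configure(
        'keystone.cache.mongo',           # deprecated alias, still registered
        arguments={
            'db_hosts': 'localhost:27017',
            'db_name': 'ks_cache',
            'cache_collection': 'cache',
            'mongo_ttl_seconds': 300,     # TTL index on the doc_date field
        },
    )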
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
index 38329c94..eda06ec8 100644
--- a/keystone-moon/keystone/common/cache/backends/noop.py
+++ b/keystone-moon/keystone/common/cache/backends/noop.py
@@ -13,11 +13,17 @@
# under the License.
from dogpile.cache import api
+from oslo_log import versionutils
NO_VALUE = api.NO_VALUE
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.common.cache.noop backend',
+ in_favor_of="dogpile.cache's Null backend",
+ remove_in=+1)
class NoopCacheBackend(api.CacheBackend):
"""A no op backend as a default caching backend.
@@ -27,6 +33,7 @@ class NoopCacheBackend(api.CacheBackend):
mechanism to clean up its internal dict and therefore could cause runaway
memory utilization.
"""
+
def __init__(self, *args):
return
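The deprecation points at dogpile.cache's own null backend, which provides
the same "cache nothing" semantics without keystone carrying the code. A
one-line sketch of the replacement:

    # Sketch: dogpile's built-in null backend as the drop-in replacement.
    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.null')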
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
index 306587b3..6bb0af51 100644
--- a/keystone-moon/keystone/common/cache/core.py
+++ b/keystone-moon/keystone/common/cache/core.py
@@ -13,23 +13,41 @@
# under the License.
"""Keystone Caching Layer Implementation."""
-
import dogpile.cache
-from dogpile.cache import proxy
-from dogpile.cache import util
+from dogpile.cache import api
+from oslo_cache import core as cache
from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import importutils
-from keystone import exception
-from keystone.i18n import _, _LE
+from keystone.common.cache import _context_cache
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
+CACHE_REGION = cache.create_region()
+
+
+def configure_cache(region=None):
+ if region is None:
+ region = CACHE_REGION
+ # NOTE(morganfainberg): running cache.configure_cache_region()
+ # sets region.is_configured, this must be captured before
+ # cache.configure_cache_region is called.
+ configured = region.is_configured
+ cache.configure_cache_region(CONF, region)
+ # Only wrap the region if it was not configured. This should be pushed
+ # to oslo_cache lib somehow.
+ if not configured:
+ region.wrap(_context_cache._ResponseCacheProxy)
+
+
+def get_memoization_decorator(group, expiration_group=None, region=None):
+ if region is None:
+ region = CACHE_REGION
+ return cache.get_memoization_decorator(CONF, region, group,
+ expiration_group=expiration_group)
-make_region = dogpile.cache.make_region
+# NOTE(stevemar): When memcache_pool, mongo and noop backends are removed
+# we no longer need to register the backends here.
dogpile.cache.register_backend(
'keystone.common.cache.noop',
'keystone.common.cache.backends.noop',
@@ -46,263 +64,61 @@ dogpile.cache.register_backend(
'PooledMemcachedBackend')
-class DebugProxy(proxy.ProxyBackend):
- """Extra Logging ProxyBackend."""
- # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
- # a clean description of the information. Without use of repr, it might
- # be possible to run into encode/decode error(s). For logging/debugging
- # purposes encode/decode is irrelevant and we should be looking at the
- # data exactly as it stands.
-
- def get(self, key):
- value = self.proxied.get(key)
- LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
- {'key': key, 'value': value})
- return value
-
- def get_multi(self, keys):
- values = self.proxied.get_multi(keys)
- LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
- {'keys': keys, 'values': values})
- return values
-
- def set(self, key, value):
- LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
- {'key': key, 'value': value})
- return self.proxied.set(key, value)
-
- def set_multi(self, keys):
- LOG.debug('CACHE_SET_MULTI: "%r"', keys)
- self.proxied.set_multi(keys)
-
- def delete(self, key):
- self.proxied.delete(key)
- LOG.debug('CACHE_DELETE: "%r"', key)
-
- def delete_multi(self, keys):
- LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
- self.proxied.delete_multi(keys)
-
-
-def build_cache_config():
- """Build the cache region dictionary configuration.
-
- :returns: dict
+# TODO(morganfainberg): Move this logic up into oslo.cache directly
+# so we can handle region-wide invalidations or alternatively propose
+# a fix to dogpile.cache to make region-wide invalidates possible to
+# work across distributed processes.
+class _RegionInvalidator(object):
+
+ def __init__(self, region, region_name):
+ self.region = region
+ self.region_name = region_name
+ region_key = '_RegionExpiration.%(type)s.%(region_name)s'
+ self.soft_region_key = region_key % {'type': 'soft',
+ 'region_name': self.region_name}
+ self.hard_region_key = region_key % {'type': 'hard',
+ 'region_name': self.region_name}
+
+ @property
+ def hard_invalidated(self):
+ invalidated = self.region.backend.get(self.hard_region_key)
+ if invalidated is not api.NO_VALUE:
+ return invalidated.payload
+ return None
+
+ @hard_invalidated.setter
+ def hard_invalidated(self, value):
+ self.region.set(self.hard_region_key, value)
+
+ @hard_invalidated.deleter
+ def hard_invalidated(self):
+ self.region.delete(self.hard_region_key)
+
+ @property
+ def soft_invalidated(self):
+ invalidated = self.region.backend.get(self.soft_region_key)
+ if invalidated is not api.NO_VALUE:
+ return invalidated.payload
+ return None
+
+ @soft_invalidated.setter
+ def soft_invalidated(self, value):
+ self.region.set(self.soft_region_key, value)
+
+ @soft_invalidated.deleter
+ def soft_invalidated(self):
+ self.region.delete(self.soft_region_key)
+
+
+def apply_invalidation_patch(region, region_name):
+ """Patch the region interfaces to ensure we share the expiration time.
+
+ This method is used to patch region.invalidate, region._hard_invalidated,
+ and region._soft_invalidated.
"""
- prefix = CONF.cache.config_prefix
- conf_dict = {}
- conf_dict['%s.backend' % prefix] = CONF.cache.backend
- conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
- for argument in CONF.cache.backend_argument:
- try:
- (argname, argvalue) = argument.split(':', 1)
- except ValueError:
- msg = _LE('Unable to build cache config-key. Expected format '
- '"<argname>:<value>". Skipping unknown format: %s')
- LOG.error(msg, argument)
- continue
-
- arg_key = '.'.join([prefix, 'arguments', argname])
- conf_dict[arg_key] = argvalue
-
- LOG.debug('Keystone Cache Config: %s', conf_dict)
- # NOTE(yorik-sar): these arguments will be used for memcache-related
- # backends. Use setdefault for url to support old-style setting through
- # backend_argument=url:127.0.0.1:11211
- conf_dict.setdefault('%s.arguments.url' % prefix,
- CONF.cache.memcache_servers)
- for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
- 'pool_unused_timeout', 'pool_connection_get_timeout'):
- value = getattr(CONF.cache, 'memcache_' + arg)
- conf_dict['%s.arguments.%s' % (prefix, arg)] = value
-
- return conf_dict
-
-
-def configure_cache_region(region):
- """Configure a cache region.
-
- :param region: optional CacheRegion object, if not provided a new region
- will be instantiated
- :raises: exception.ValidationError
- :returns: dogpile.cache.CacheRegion
- """
- if not isinstance(region, dogpile.cache.CacheRegion):
- raise exception.ValidationError(
- _('region not type dogpile.cache.CacheRegion'))
-
- if not region.is_configured:
- # NOTE(morganfainberg): this is how you tell if a region is configured.
- # There is a request logged with dogpile.cache upstream to make this
- # easier / less ugly.
-
- config_dict = build_cache_config()
- region.configure_from_config(config_dict,
- '%s.' % CONF.cache.config_prefix)
-
- if CONF.cache.debug_cache_backend:
- region.wrap(DebugProxy)
-
- # NOTE(morganfainberg): if the backend requests the use of a
- # key_mangler, we should respect that key_mangler function. If a
- # key_mangler is not defined by the backend, use the sha1_mangle_key
- # mangler provided by dogpile.cache. This ensures we always use a fixed
- # size cache-key.
- if region.key_mangler is None:
- region.key_mangler = util.sha1_mangle_key
-
- for class_path in CONF.cache.proxies:
- # NOTE(morganfainberg): if we have any proxy wrappers, we should
- # ensure they are added to the cache region's backend. Since
- # configure_from_config doesn't handle the wrap argument, we need
- # to manually add the Proxies. For information on how the
- # ProxyBackends work, see the dogpile.cache documents on
- # "changing-backend-behavior"
- cls = importutils.import_class(class_path)
- LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
- region.wrap(cls)
-
- return region
-
-
-def get_should_cache_fn(section):
- """Build a function that returns a config section's caching status.
-
- For any given driver in keystone that has caching capabilities, a boolean
- config option for that driver's section (e.g. ``token``) should exist and
- default to ``True``. This function will use that value to tell the caching
- decorator if caching for that driver is enabled. To properly use this
- with the decorator, pass this function the configuration section and assign
- the result to a variable. Pass the new variable to the caching decorator
- as the named argument ``should_cache_fn``. e.g.::
-
- from keystone.common import cache
-
- SHOULD_CACHE = cache.get_should_cache_fn('token')
-
- @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
- def function(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :returns: function reference
- """
- def should_cache(value):
- if not CONF.cache.enabled:
- return False
- conf_group = getattr(CONF, section)
- return getattr(conf_group, 'caching', True)
- return should_cache
-
-
-def get_expiration_time_fn(section):
- """Build a function that returns a config section's expiration time status.
-
- For any given driver in keystone that has caching capabilities, an int
- config option called ``cache_time`` for that driver's section
- (e.g. ``token``) should exist and typically default to ``None``. This
- function will use that value to tell the caching decorator of the TTL
- override for caching the resulting objects. If the value of the config
- option is ``None`` the default value provided in the
- ``[cache] expiration_time`` option will be used by the decorator. The
- default may be set to something other than ``None`` in cases where the
- caching TTL should not be tied to the global default(s) (e.g.
- revocation_list changes very infrequently and can be cached for >1h by
- default).
-
- To properly use this with the decorator, pass this function the
- configuration section and assign the result to a variable. Pass the new
- variable to the caching decorator as the named argument
- ``expiration_time``. e.g.::
-
- from keystone.common import cache
-
- EXPIRATION_TIME = cache.get_expiration_time_fn('token')
-
- @cache.on_arguments(expiration_time=EXPIRATION_TIME)
- def function(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :rtype: function reference
- """
- def get_expiration_time():
- conf_group = getattr(CONF, section)
- return getattr(conf_group, 'cache_time', None)
- return get_expiration_time
-
-
-def key_generate_to_str(s):
- # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
- # to stringify and handle the Unicode error explicitly as needed.
- try:
- return str(s)
- except UnicodeEncodeError:
- return s.encode('utf-8')
-
-
-def function_key_generator(namespace, fn, to_str=key_generate_to_str):
- # NOTE(morganfainberg): This wraps dogpile.cache's default
- # function_key_generator to change the default to_str mechanism.
- return util.function_key_generator(namespace, fn, to_str=to_str)
-
-
-REGION = dogpile.cache.make_region(
- function_key_generator=function_key_generator)
-on_arguments = REGION.cache_on_arguments
-
-
-def get_memoization_decorator(section, expiration_section=None):
- """Build a function based on the `on_arguments` decorator for the section.
-
- For any given driver in Keystone that has caching capabilities, a
- pair of functions is required to properly determine the status of the
- caching capabilities (a toggle to indicate caching is enabled and any
- override of the default TTL for cached data). This function will return
- an object that has the memoization decorator ``on_arguments``
- pre-configured for the driver.
-
- Example usage::
-
- from keystone.common import cache
-
- MEMOIZE = cache.get_memoization_decorator(section='token')
-
- @MEMOIZE
- def function(arg1, arg2):
- ...
-
-
- ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
- section='token', expiration_section='revoke')
-
- @ALTERNATE_MEMOIZE
- def function2(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :param expiration_section: name of the configuration section to examine
- for the expiration option. This will fall back
- to using ``section`` if the value is unspecified
- or ``None``
- :type expiration_section: string
- :rtype: function reference
- """
- if expiration_section is None:
- expiration_section = section
- should_cache = get_should_cache_fn(section)
- expiration_time = get_expiration_time_fn(expiration_section)
-
- memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
- expiration_time=expiration_time)
-
- # Make sure the actual "should_cache" and "expiration_time" methods are
- # available. This is potentially interesting/useful to pre-seed cache
- # values.
- memoize.should_cache = should_cache
- memoize.get_expiration_time = expiration_time
-
- return memoize
+ # Patch the region object. This logic needs to be moved up into dogpile
+ # itself. Patching the internal interfaces, unfortunately, is the only
+ # way to handle this at the moment.
+ invalidator = _RegionInvalidator(region=region, region_name=region_name)
+ setattr(region, '_hard_invalidated', invalidator.hard_invalidated)
+ setattr(region, '_soft_invalidated', invalidator.soft_invalidated)
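With the oslo.cache delegation in place, driver code configures the shared
region once and builds memoization decorators per config group. A usage
sketch mirroring the examples from the removed docstrings; 'token' is an
example group name:

    # Sketch: the new driver-facing API exposed by this module.
    from keystone.common import cache

    cache.configure_cache()          # configure (and wrap) the global region
    MEMOIZE = cache.get_memoization_decorator(group='token')

    @MEMOIZE
    def get_token(token_id):
        ...                          # expensive lookup, cached per the
                                     # [cache] and [token] caching options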
diff --git a/keystone-moon/keystone/common/config.py b/keystone-moon/keystone/common/config.py
index b42b29d6..56f419b6 100644
--- a/keystone-moon/keystone/common/config.py
+++ b/keystone-moon/keystone/common/config.py
@@ -12,23 +12,48 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+import os
+
+from oslo_cache import core as cache
from oslo_config import cfg
+from oslo_log import log
import oslo_messaging
+from oslo_middleware import cors
import passlib.utils
+from keystone import exception
+
_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1']
_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
_SSO_CALLBACK = '/etc/keystone/sso_callback_template.html'
+_DEPRECATE_PKI_MSG = ('PKI token support has been deprecated in the M '
+ 'release and will be removed in the O release. Fernet '
+ 'or UUID tokens are recommended.')
+
+_DEPRECATE_INHERIT_MSG = ('The option to enable the OS-INHERIT extension has '
+ 'been deprecated in the M release and will be '
+ 'removed in the O release. The OS-INHERIT extension '
+ 'will be enabled by default.')
+
+_DEPRECATE_EP_MSG = ('The option to enable the OS-ENDPOINT-POLICY extension '
+ 'has been deprecated in the M release and will be '
+ 'removed in the O release. The OS-ENDPOINT-POLICY '
+ 'extension will be enabled by default.')
+
FILE_OPTIONS = {
None: [
- cfg.StrOpt('admin_token', secret=True, default='ADMIN',
+ cfg.StrOpt('admin_token', secret=True, default=None,
help='A "shared secret" that can be used to bootstrap '
'Keystone. This "token" does not represent a user, '
- 'and carries no explicit authorization. To disable '
+ 'and carries no explicit authorization. If set '
+ 'to `None`, the value is ignored and the '
+ '`admin_token` log in mechanism is effectively '
+ 'disabled. To completely disable `admin_token` '
'in production (highly recommended), remove '
'AdminTokenAuthMiddleware from your paste '
'application pipelines (for example, in '
@@ -54,9 +79,10 @@ FILE_OPTIONS = {
'(e.g. /prefix/v3) or the endpoint should be found '
'on a different server.'),
cfg.IntOpt('max_project_tree_depth', default=5,
- help='Maximum depth of the project hierarchy. WARNING: '
- 'setting it to a large value may adversely impact '
- 'performance.'),
+ help='Maximum depth of the project hierarchy, excluding '
+ 'the project acting as a domain at the top of the '
+ 'hierarchy. WARNING: setting it to a large value may '
+ 'adversely impact performance.'),
cfg.IntOpt('max_param_size', default=64,
help='Limit the sizes of user & project ID/names.'),
# we allow tokens to be a bit larger to accommodate PKI
@@ -96,7 +122,10 @@ FILE_OPTIONS = {
'domain_id. Allowing such movement is not '
'recommended if the scope of a domain admin is being '
'restricted by use of an appropriate policy file '
- '(see policy.v3cloudsample as an example).'),
+ '(see policy.v3cloudsample as an example). This '
+ 'ability is deprecated and will be removed in a '
+ 'future release.',
+ deprecated_for_removal=True),
cfg.BoolOpt('strict_password_check', default=False,
help='If set to true, strict password length checking is '
'performed for password manipulation. If a password '
@@ -104,11 +133,16 @@ FILE_OPTIONS = {
'with an HTTP 403 Forbidden error. If set to false, '
'passwords are automatically truncated to the '
'maximum length.'),
- cfg.StrOpt('secure_proxy_ssl_header',
+ cfg.StrOpt('secure_proxy_ssl_header', default='HTTP_X_FORWARDED_PROTO',
help='The HTTP header used to determine the scheme for the '
'original request, even if it was removed by an SSL '
- 'terminating proxy. Typical value is '
- '"HTTP_X_FORWARDED_PROTO".'),
+ 'terminating proxy.'),
+ cfg.BoolOpt('insecure_debug', default=False,
+ help='If set to true the server will return information '
+ 'in the response that may allow an unauthenticated '
+ 'or authenticated user to get more information than '
+ 'normal, such as why authentication failed. This may '
+ 'be useful for debugging but is insecure.'),
],
'identity': [
cfg.StrOpt('default_domain_id', default='default',
@@ -197,11 +231,17 @@ FILE_OPTIONS = {
'already have assignments for users and '
'groups from the default LDAP domain, and it is '
'acceptable for Keystone to provide the different '
- 'IDs to clients than it did previously. Typically '
+ 'IDs to clients than it did previously. Typically '
'this means that the only time you can set this '
'value to False is when configuring a fresh '
'installation.'),
],
+ 'shadow_users': [
+ cfg.StrOpt('driver',
+ default='sql',
+ help='Entrypoint for the shadow users backend driver '
+ 'in the keystone.identity.shadow_users namespace.'),
+ ],
'trust': [
cfg.BoolOpt('enabled', default=True,
help='Delegation and impersonation features can be '
@@ -215,10 +255,14 @@ FILE_OPTIONS = {
help='Entrypoint for the trust backend driver in the '
'keystone.trust namespace.')],
'os_inherit': [
- cfg.BoolOpt('enabled', default=False,
+ cfg.BoolOpt('enabled', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_INHERIT_MSG,
help='role-assignment inheritance to projects from '
'owning domain or from projects higher in the '
- 'hierarchy can be optionally enabled.'),
+ 'hierarchy can be optionally disabled. In the '
+ 'future, this option will be removed and the '
+ 'hierarchy will be always enabled.'),
],
'fernet_tokens': [
cfg.StrOpt('key_repository',
@@ -279,12 +323,17 @@ FILE_OPTIONS = {
'allow_rescoped_scoped_token to false prevents a user '
'from exchanging a scoped token for any other token.'),
cfg.StrOpt('hash_algorithm', default='md5',
- help="The hash algorithm to use for PKI tokens. This can "
- "be set to any algorithm that hashlib supports. "
- "WARNING: Before changing this value, the auth_token "
- "middleware must be configured with the "
- "hash_algorithms, otherwise token revocation will "
- "not be processed correctly."),
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
+ help='The hash algorithm to use for PKI tokens. This can '
+ 'be set to any algorithm that hashlib supports. '
+ 'WARNING: Before changing this value, the auth_token '
+ 'middleware must be configured with the '
+ 'hash_algorithms, otherwise token revocation will '
+ 'not be processed correctly.'),
+ cfg.BoolOpt('infer_roles', default=True,
+ help='Add roles to token that are not explicitly added, '
+ 'but that are linked implicitly to other roles.'),
],
'revoke': [
cfg.StrOpt('driver',
@@ -306,82 +355,6 @@ FILE_OPTIONS = {
deprecated_opts=[cfg.DeprecatedOpt(
'revocation_cache_time', group='token')]),
],
- 'cache': [
- cfg.StrOpt('config_prefix', default='cache.keystone',
- help='Prefix for building the configuration dictionary '
- 'for the cache region. This should not need to be '
- 'changed unless there is another dogpile.cache '
- 'region with the same configuration name.'),
- cfg.IntOpt('expiration_time', default=600,
- help='Default TTL, in seconds, for any cached item in '
- 'the dogpile.cache region. This applies to any '
- 'cached method that doesn\'t have an explicit '
- 'cache expiration time defined for it.'),
- # NOTE(morganfainberg): the dogpile.cache.memory acceptable in devstack
- # and other such single-process/thread deployments. Running
- # dogpile.cache.memory in any other configuration has the same pitfalls
- # as the KVS token backend. It is recommended that either Redis or
- # Memcached are used as the dogpile backend for real workloads. To
- # prevent issues with the memory cache ending up in "production"
- # unintentionally, we register a no-op as the keystone default caching
- # backend.
- cfg.StrOpt('backend', default='keystone.common.cache.noop',
- help='Dogpile.cache backend module. It is recommended '
- 'that Memcache with pooling '
- '(keystone.cache.memcache_pool) or Redis '
- '(dogpile.cache.redis) be used in production '
- 'deployments. Small workloads (single process) '
- 'like devstack can use the dogpile.cache.memory '
- 'backend.'),
- cfg.MultiStrOpt('backend_argument', default=[], secret=True,
- help='Arguments supplied to the backend module. '
- 'Specify this option once per argument to be '
- 'passed to the dogpile.cache backend. Example '
- 'format: "<argname>:<value>".'),
- cfg.ListOpt('proxies', default=[],
- help='Proxy classes to import that will affect the way '
- 'the dogpile.cache backend functions. See the '
- 'dogpile.cache documentation on '
- 'changing-backend-behavior.'),
- cfg.BoolOpt('enabled', default=False,
- help='Global toggle for all caching using the '
- 'should_cache_fn mechanism.'),
- cfg.BoolOpt('debug_cache_backend', default=False,
- help='Extra debugging from the cache backend (cache '
- 'keys, get/set/delete/etc calls). This is only '
- 'really useful if you need to see the specific '
- 'cache-backend get/set/delete calls with the '
- 'keys/values. Typically this should be left set '
- 'to false.'),
- cfg.ListOpt('memcache_servers', default=['localhost:11211'],
- help='Memcache servers in the format of "host:port".'
- ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
- ' backends only).'),
- cfg.IntOpt('memcache_dead_retry',
- default=5 * 60,
- help='Number of seconds memcached server is considered dead'
- ' before it is tried again. (dogpile.cache.memcache and'
- ' keystone.cache.memcache_pool backends only).'),
- cfg.IntOpt('memcache_socket_timeout',
- default=3,
- help='Timeout in seconds for every call to a server.'
- ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
- ' backends only).'),
- cfg.IntOpt('memcache_pool_maxsize',
- default=10,
- help='Max total number of open connections to every'
- ' memcached server. (keystone.cache.memcache_pool backend'
- ' only).'),
- cfg.IntOpt('memcache_pool_unused_timeout',
- default=60,
- help='Number of seconds a connection to memcached is held'
- ' unused in the pool before it is closed.'
- ' (keystone.cache.memcache_pool backend only).'),
- cfg.IntOpt('memcache_pool_connection_get_timeout',
- default=10,
- help='Number of seconds that an operation will wait to get '
- 'a memcache client connection.'),
- ],
'ssl': [
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
@@ -400,26 +373,40 @@ FILE_OPTIONS = {
'signing': [
cfg.StrOpt('certfile',
default=_CERTFILE,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Path of the certfile for token signing. For '
'non-production environments, you may be interested '
'in using `keystone-manage pki_setup` to generate '
'self-signed certificates.'),
cfg.StrOpt('keyfile',
default=_KEYFILE,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Path of the keyfile for token signing.'),
cfg.StrOpt('ca_certs',
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
default='/etc/keystone/ssl/certs/ca.pem',
help='Path of the CA for token signing.'),
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Path of the CA key for token signing.'),
cfg.IntOpt('key_size', default=2048, min=1024,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Key size (in bits) for token signing cert '
'(auto generated certificate).'),
cfg.IntOpt('valid_days', default=3650,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Days the token signing cert is valid for '
'(auto generated certificate).'),
cfg.StrOpt('cert_subject',
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
default=('/C=US/ST=Unset/L=Unset/O=Unset/'
'CN=www.example.com'),
help='Certificate subject (auto generated certificate) for '
@@ -428,16 +415,21 @@ FILE_OPTIONS = {
'assignment': [
cfg.StrOpt('driver',
help='Entrypoint for the assignment backend driver in the '
- 'keystone.assignment namespace. Supplied drivers are '
- 'ldap and sql. If an assignment driver is not '
+ 'keystone.assignment namespace. Only an SQL driver is '
+ 'supplied. If an assignment driver is not '
'specified, the identity driver will choose the '
- 'assignment driver.'),
+ 'assignment driver (driver selection based on '
+ '`[identity]/driver` option is deprecated and will be '
+ 'removed in the "O" release).'),
+ cfg.ListOpt('prohibited_implied_role', default=['admin'],
+ help='A list of role names which are prohibited from '
+ 'being an implied role.'),
],
'resource': [
cfg.StrOpt('driver',
help='Entrypoint for the resource backend driver in the '
- 'keystone.resource namespace. Supplied drivers are '
- 'ldap and sql. If a resource driver is not specified, '
+ 'keystone.resource namespace. Only an SQL driver is '
+ 'supplied. If a resource driver is not specified, '
'the assignment driver will choose the resource '
'driver.'),
cfg.BoolOpt('caching', default=True,
@@ -455,6 +447,30 @@ FILE_OPTIONS = {
group='assignment')],
help='Maximum number of entities that will be returned '
'in a resource collection.'),
+ cfg.StrOpt('admin_project_domain_name',
+ help='Name of the domain that owns the '
+ '`admin_project_name`. Defaults to None.'),
+ cfg.StrOpt('admin_project_name',
+ help='Special project for performing administrative '
+ 'operations on remote services. Tokens scoped to '
+ 'this project will contain the key/value '
+ '`is_admin_project=true`. Defaults to None.'),
+ cfg.StrOpt('project_name_url_safe',
+ choices=['off', 'new', 'strict'], default='off',
+ help='Whether the names of projects are restricted from '
+ 'containing url reserved characters. If set to new, '
+ 'attempts to create or update a project with a url '
+ 'unsafe name will return an error. In addition, if '
+ 'set to strict, attempts to scope a token using '
+ 'an unsafe project name will return an error.'),
+ cfg.StrOpt('domain_name_url_safe',
+ choices=['off', 'new', 'strict'], default='off',
+ help='Whether the names of domains are restricted from '
+ 'containing url reserved characters. If set to new, '
+ 'attempts to create or update a domain with a url '
+ 'unsafe name will return an error. In addition, if '
+ 'set to strict, attempts to scope a token using a '
+ 'domain name which is unsafe will return an error.'),
],
'domain_config': [
cfg.StrOpt('driver',
@@ -496,7 +512,7 @@ FILE_OPTIONS = {
'oauth1': [
cfg.StrOpt('driver',
default='sql',
- help='Entrypoint for hte OAuth backend driver in the '
+ help='Entrypoint for the OAuth backend driver in the '
'keystone.oauth1 namespace.'),
cfg.IntOpt('request_token_duration', default=28800,
help='Duration (in seconds) for the OAuth Request Token.'),
@@ -558,6 +574,8 @@ FILE_OPTIONS = {
'endpoint_policy': [
cfg.BoolOpt('enabled',
default=True,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_EP_MSG,
help='Enable endpoint_policy functionality.'),
cfg.StrOpt('driver',
default='sql',
@@ -566,7 +584,10 @@ FILE_OPTIONS = {
],
'ldap': [
cfg.StrOpt('url', default='ldap://localhost',
- help='URL for connecting to the LDAP server.'),
+ help='URL(s) for connecting to the LDAP server. Multiple '
+ 'LDAP URLs may be specified as a comma separated '
+ 'string. The first URL to successfully bind is used '
+ 'for the connection.'),
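+ # An illustrative value (the hosts here are hypothetical):
+ #     url = ldap://ldap1.example.org,ldap://ldap2.example.org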
cfg.StrOpt('user',
help='User BindDN to query the LDAP server.'),
cfg.StrOpt('password', secret=True,
@@ -618,6 +639,8 @@ FILE_OPTIONS = {
'WARNING: must not be a multivalued attribute.'),
cfg.StrOpt('user_name_attribute', default='sn',
help='LDAP attribute mapped to user name.'),
+ cfg.StrOpt('user_description_attribute', default='description',
+ help='LDAP attribute mapped to user description.'),
cfg.StrOpt('user_mail_attribute', default='mail',
help='LDAP attribute mapped to user email.'),
cfg.StrOpt('user_pass_attribute', default='userPassword',
@@ -655,10 +678,25 @@ FILE_OPTIONS = {
help='LDAP attribute mapped to default_project_id for '
'users.'),
cfg.BoolOpt('user_allow_create', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow user creation in LDAP backend.'),
cfg.BoolOpt('user_allow_update', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow user updates in LDAP backend.'),
cfg.BoolOpt('user_allow_delete', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow user deletion in LDAP backend.'),
cfg.BoolOpt('user_enabled_emulation', default=False,
help='If true, Keystone uses an alternative method to '
@@ -679,146 +717,6 @@ FILE_OPTIONS = {
'mapping format is <ldap_attr>:<user_attr>, where '
'ldap_attr is the attribute in the LDAP entry and '
'user_attr is the Identity API attribute.'),
-
- cfg.StrOpt('project_tree_dn',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_tree_dn', group='ldap')],
- deprecated_for_removal=True,
- help='Search base for projects. '
- 'Defaults to the suffix value.'),
- cfg.StrOpt('project_filter',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_filter', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP search filter for projects.'),
- cfg.StrOpt('project_objectclass', default='groupOfNames',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_objectclass', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP objectclass for projects.'),
- cfg.StrOpt('project_id_attribute', default='cn',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_id_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project id.'),
- cfg.StrOpt('project_member_attribute', default='member',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_member_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project membership for '
- 'user.'),
- cfg.StrOpt('project_name_attribute', default='ou',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_name_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project name.'),
- cfg.StrOpt('project_desc_attribute', default='description',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_desc_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project description.'),
- cfg.StrOpt('project_enabled_attribute', default='enabled',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_enabled_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project enabled.'),
- cfg.StrOpt('project_domain_id_attribute',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_domain_id_attribute', group='ldap')],
- deprecated_for_removal=True,
- default='businessCategory',
- help='LDAP attribute mapped to project domain_id.'),
- cfg.ListOpt('project_attribute_ignore', default=[],
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_attribute_ignore', group='ldap')],
- deprecated_for_removal=True,
- help='List of attributes stripped off the project on '
- 'update.'),
- cfg.BoolOpt('project_allow_create', default=True,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_allow_create', group='ldap')],
- deprecated_for_removal=True,
- help='Allow project creation in LDAP backend.'),
- cfg.BoolOpt('project_allow_update', default=True,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_allow_update', group='ldap')],
- deprecated_for_removal=True,
- help='Allow project update in LDAP backend.'),
- cfg.BoolOpt('project_allow_delete', default=True,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_allow_delete', group='ldap')],
- deprecated_for_removal=True,
- help='Allow project deletion in LDAP backend.'),
- cfg.BoolOpt('project_enabled_emulation', default=False,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_enabled_emulation', group='ldap')],
- deprecated_for_removal=True,
- help='If true, Keystone uses an alternative method to '
- 'determine if a project is enabled or not by '
- 'checking if they are a member of the '
- '"project_enabled_emulation_dn" group.'),
- cfg.StrOpt('project_enabled_emulation_dn',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_enabled_emulation_dn', group='ldap')],
- deprecated_for_removal=True,
- help='DN of the group entry to hold enabled projects when '
- 'using enabled emulation.'),
- cfg.BoolOpt('project_enabled_emulation_use_group_config',
- default=False,
- help='Use the "group_member_attribute" and '
- '"group_objectclass" settings to determine '
- 'membership in the emulated enabled group.'),
- cfg.ListOpt('project_additional_attribute_mapping',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_additional_attribute_mapping', group='ldap')],
- deprecated_for_removal=True,
- default=[],
- help='Additional attribute mappings for projects. '
- 'Attribute mapping format is '
- '<ldap_attr>:<user_attr>, where ldap_attr is the '
- 'attribute in the LDAP entry and user_attr is the '
- 'Identity API attribute.'),
-
- cfg.StrOpt('role_tree_dn',
- deprecated_for_removal=True,
- help='Search base for roles. '
- 'Defaults to the suffix value.'),
- cfg.StrOpt('role_filter',
- deprecated_for_removal=True,
- help='LDAP search filter for roles.'),
- cfg.StrOpt('role_objectclass', default='organizationalRole',
- deprecated_for_removal=True,
- help='LDAP objectclass for roles.'),
- cfg.StrOpt('role_id_attribute', default='cn',
- deprecated_for_removal=True,
- help='LDAP attribute mapped to role id.'),
- cfg.StrOpt('role_name_attribute', default='ou',
- deprecated_for_removal=True,
- help='LDAP attribute mapped to role name.'),
- cfg.StrOpt('role_member_attribute', default='roleOccupant',
- deprecated_for_removal=True,
- help='LDAP attribute mapped to role membership.'),
- cfg.ListOpt('role_attribute_ignore', default=[],
- deprecated_for_removal=True,
- help='List of attributes stripped off the role on '
- 'update.'),
- cfg.BoolOpt('role_allow_create', default=True,
- deprecated_for_removal=True,
- help='Allow role creation in LDAP backend.'),
- cfg.BoolOpt('role_allow_update', default=True,
- deprecated_for_removal=True,
- help='Allow role update in LDAP backend.'),
- cfg.BoolOpt('role_allow_delete', default=True,
- deprecated_for_removal=True,
- help='Allow role deletion in LDAP backend.'),
- cfg.ListOpt('role_additional_attribute_mapping',
- deprecated_for_removal=True,
- default=[],
- help='Additional attribute mappings for roles. Attribute '
- 'mapping format is <ldap_attr>:<user_attr>, where '
- 'ldap_attr is the attribute in the LDAP entry and '
- 'user_attr is the Identity API attribute.'),
-
cfg.StrOpt('group_tree_dn',
help='Search base for groups. '
'Defaults to the suffix value.'),
@@ -838,10 +736,25 @@ FILE_OPTIONS = {
help='List of attributes stripped off the group on '
'update.'),
cfg.BoolOpt('group_allow_create', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow group creation in LDAP backend.'),
cfg.BoolOpt('group_allow_update', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow group update in LDAP backend.'),
cfg.BoolOpt('group_allow_delete', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow group deletion in LDAP backend.'),
cfg.ListOpt('group_additional_attribute_mapping',
default=[],
@@ -862,7 +775,7 @@ FILE_OPTIONS = {
choices=['demand', 'never', 'allow'],
help='Specifies what checks to perform on client '
'certificates in an incoming TLS session.'),
- cfg.BoolOpt('use_pool', default=False,
+ cfg.BoolOpt('use_pool', default=True,
help='Enable LDAP connection pooling.'),
cfg.IntOpt('pool_size', default=10,
help='Connection pool size.'),
@@ -876,7 +789,7 @@ FILE_OPTIONS = {
'indefinite wait for response.'),
cfg.IntOpt('pool_connection_lifetime', default=600,
help='Connection lifetime in seconds.'),
- cfg.BoolOpt('use_auth_pool', default=False,
+ cfg.BoolOpt('use_auth_pool', default=True,
help='Enable LDAP connection pooling for end user '
'authentication. If use_pool is disabled, then this '
'setting is meaningless and is not used at all.'),
@@ -884,11 +797,17 @@ FILE_OPTIONS = {
help='End user auth connection pool size.'),
cfg.IntOpt('auth_pool_connection_lifetime', default=60,
help='End user auth connection lifetime in seconds.'),
+ cfg.BoolOpt('group_members_are_ids', default=False,
+ help='If the members of the group objectclass are user '
+ 'IDs rather than DNs, set this to true. This is the '
+ 'case when using posixGroup as the group '
+ 'objectclass with OpenDirectory.'),
],
'auth': [
cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
help='Allowed authentication methods.'),
- cfg.StrOpt('password',
+ cfg.StrOpt('password', # nosec : This is the name of the plugin, not
+ # a password that needs to be protected.
help='Entrypoint for the password auth plugin module in '
'the keystone.auth.password namespace.'),
cfg.StrOpt('token',
@@ -1090,7 +1009,8 @@ FILE_OPTIONS = {
'eventlet application. Defaults to number of CPUs '
'(minimum of 2).'),
cfg.StrOpt('public_bind_host',
- default='0.0.0.0',
+ default='0.0.0.0', # nosec : Bind to all interfaces by
+ # default for backwards compatibility.
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT'),
cfg.DeprecatedOpt('public_bind_host',
@@ -1098,14 +1018,15 @@ FILE_OPTIONS = {
deprecated_for_removal=True,
help='The IP address of the network interface for the '
'public service to listen on.'),
- cfg.IntOpt('public_port', default=5000, min=1, max=65535,
- deprecated_name='public_port',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The port number which the public service listens '
- 'on.'),
+ cfg.PortOpt('public_port', default=5000,
+ deprecated_name='public_port',
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ help='The port number which the public service listens '
+ 'on.'),
cfg.StrOpt('admin_bind_host',
- default='0.0.0.0',
+ default='0.0.0.0', # nosec : Bind to all interfaces by
+ # default for backwards compatibility.
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT'),
cfg.DeprecatedOpt('admin_bind_host',
@@ -1113,21 +1034,21 @@ FILE_OPTIONS = {
deprecated_for_removal=True,
help='The IP address of the network interface for the '
'admin service to listen on.'),
- cfg.IntOpt('admin_port', default=35357, min=1, max=65535,
- deprecated_name='admin_port',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The port number which the admin service listens '
- 'on.'),
+ cfg.PortOpt('admin_port', default=35357,
+ deprecated_name='admin_port',
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ help='The port number which the admin service listens '
+ 'on.'),
cfg.BoolOpt('wsgi_keep_alive', default=True,
- help="If set to false, disables keepalives on the server; "
- "all connections will be closed after serving one "
- "request."),
+ help='If set to false, disables keepalives on the server; '
+ 'all connections will be closed after serving one '
+ 'request.'),
cfg.IntOpt('client_socket_timeout', default=900,
- help="Timeout for socket operations on a client "
- "connection. If an incoming connection is idle for "
- "this number of seconds it will be closed. A value "
- "of '0' means wait forever."),
+ help='Timeout for socket operations on a client '
+ 'connection. If an incoming connection is idle for '
+ 'this number of seconds it will be closed. A value '
+ 'of "0" means wait forever.'),
cfg.BoolOpt('tcp_keepalive', default=False,
deprecated_name='tcp_keepalive',
deprecated_group='DEFAULT',
@@ -1143,7 +1064,7 @@ FILE_OPTIONS = {
deprecated_for_removal=True,
help='Sets the value of TCP_KEEPIDLE in seconds for each '
'server socket. Only applies if tcp_keepalive is '
- 'true.'),
+ 'true. Ignored if system does not support it.'),
],
'eventlet_server_ssl': [
cfg.BoolOpt('enable', default=False, deprecated_name='enable',
@@ -1152,7 +1073,7 @@ FILE_OPTIONS = {
help='Toggle for SSL support on the Keystone '
'eventlet servers.'),
cfg.StrOpt('certfile',
- default="/etc/keystone/ssl/certs/keystone.pem",
+ default='/etc/keystone/ssl/certs/keystone.pem',
deprecated_name='certfile', deprecated_group='ssl',
deprecated_for_removal=True,
help='Path of the certfile for SSL. For non-production '
@@ -1173,7 +1094,7 @@ FILE_OPTIONS = {
deprecated_name='cert_required', deprecated_group='ssl',
deprecated_for_removal=True,
help='Require client certificate.'),
- ]
+ ],
}
@@ -1195,6 +1116,67 @@ def setup_authentication(conf=None):
_register_auth_plugin_opt(conf, option)
+def set_default_for_default_log_levels():
+ """Set the default for the default_log_levels option for keystone.
+
+ Keystone uses some logging packages that other OpenStack services do not
+ use. This sets the default_log_levels defaults for those packages.
+
+ This function needs to be called before CONF().
+
+ """
+ extra_log_level_defaults = [
+ 'dogpile=INFO',
+ 'routes=INFO',
+ ]
+
+ log.register_options(CONF)
+ log.set_defaults(default_log_levels=log.get_default_log_levels() +
+ extra_log_level_defaults)
+
+
+def setup_logging():
+ """Sets up logging for the keystone package."""
+ log.setup(CONF, 'keystone')
+ logging.captureWarnings(True)
+
+
+def find_paste_config():
+ """Find Keystone's paste.deploy configuration file.
+
+ Keystone's paste.deploy configuration file is specified in the
+ ``[paste_deploy]`` section of the main Keystone configuration file,
+ ``keystone.conf``.
+
+ For example::
+
+ [paste_deploy]
+ config_file = keystone-paste.ini
+
+ :returns: The selected configuration filename
+ :raises: exception.ConfigFileNotFound
+
+ """
+ if CONF.paste_deploy.config_file:
+ paste_config = CONF.paste_deploy.config_file
+ paste_config_value = paste_config
+ if not os.path.isabs(paste_config):
+ paste_config = CONF.find_file(paste_config)
+ elif CONF.config_file:
+ paste_config = CONF.config_file[0]
+ paste_config_value = paste_config
+ else:
+ # this provides backwards compatibility for keystone.conf files that
+ # still have the entire paste configuration included, rather than just
+ # a [paste_deploy] configuration section referring to an external file
+ paste_config = CONF.find_file('keystone.conf')
+ paste_config_value = 'keystone.conf'
+ if not paste_config or not os.path.exists(paste_config):
+ raise exception.ConfigFileNotFound(config_file=paste_config_value)
+ return paste_config
+
+
def configure(conf=None):
if conf is None:
conf = CONF
@@ -1206,8 +1188,8 @@ def configure(conf=None):
cfg.StrOpt('pydev-debug-host',
help='Host to connect to for remote debugger.'))
conf.register_cli_opt(
- cfg.IntOpt('pydev-debug-port', min=1, max=65535,
- help='Port to connect to for remote debugger.'))
+ cfg.PortOpt('pydev-debug-port',
+ help='Port to connect to for remote debugger.'))
for section in FILE_OPTIONS:
for option in FILE_OPTIONS[section]:
@@ -1218,6 +1200,8 @@ def configure(conf=None):
# register any non-default auth methods here (used by extensions, etc)
setup_authentication(conf)
+ # add oslo.cache related config options
+ cache.configure(conf)
def list_opts():
@@ -1242,3 +1226,34 @@ def list_opts():
:returns: a list of (group_name, opts) tuples
"""
return list(FILE_OPTIONS.items())
+
+
+def set_middleware_defaults():
+ """Update default configuration options for oslo.middleware."""
+ # CORS Defaults
+ # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
+ cfg.set_defaults(cors.CORS_OPTS,
+ allow_headers=['X-Auth-Token',
+ 'X-Openstack-Request-Id',
+ 'X-Subject-Token',
+ 'X-Project-Id',
+ 'X-Project-Name',
+ 'X-Project-Domain-Id',
+ 'X-Project-Domain-Name',
+ 'X-Domain-Id',
+ 'X-Domain-Name'],
+ expose_headers=['X-Auth-Token',
+ 'X-Openstack-Request-Id',
+ 'X-Subject-Token'],
+ allow_methods=['GET',
+ 'PUT',
+ 'POST',
+ 'DELETE',
+ 'PATCH']
+ )
+
+
+def set_config_defaults():
+ """Override all configuration default values for keystone."""
+ set_default_for_default_log_levels()
+ set_middleware_defaults()
diff --git a/keystone-moon/keystone/common/controller.py b/keystone-moon/keystone/common/controller.py
index 56bc211a..8672525f 100644
--- a/keystone-moon/keystone/common/controller.py
+++ b/keystone-moon/keystone/common/controller.py
@@ -36,21 +36,39 @@ CONF = cfg.CONF
def v2_deprecated(f):
- """No-op decorator in preparation for deprecating Identity API v2.
-
- This is a placeholder for the pending deprecation of v2. The implementation
- of this decorator can be replaced with::
-
- from oslo_log import versionutils
-
-
- v2_deprecated = versionutils.deprecated(
- what='v2 API',
- as_of=versionutils.deprecated.JUNO,
- in_favor_of='v3 API')
-
- """
- return f
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ deprecated = versionutils.deprecated(
+ what=f.__name__ + ' of the v2 API',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of='a similar function in the v3 API',
+ remove_in=+4)
+ return deprecated(f)
+ return wrapper()
+
+
+def v2_ec2_deprecated(f):
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ deprecated = versionutils.deprecated(
+ what=f.__name__ + ' of the v2 EC2 APIs',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of=('a similar function in the v3 Credential APIs'),
+ remove_in=0)
+ return deprecated(f)
+ return wrapper()
+
+
+def v2_auth_deprecated(f):
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ deprecated = versionutils.deprecated(
+ what=f.__name__ + ' of the v2 Authentication APIs',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of=('a similar function in the v3 Authentication APIs'),
+ remove_in=0)
+ return deprecated(f)
+ return wrapper()
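+
+
+ # Illustrative use of the deprecation decorators above (the method name is
+ # hypothetical): decorating a v2 controller method emits a deprecation
+ # warning when the method is invoked.
+ #
+ #     @v2_deprecated
+ #     def get_user_by_name(self, context, name):
+ #         ...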
def _build_policy_check_credentials(self, action, context, kwargs):
@@ -165,24 +183,32 @@ def protected(callback=None):
return wrapper
-def filterprotected(*filters):
- """Wraps filtered API calls with role based access controls (RBAC)."""
+def filterprotected(*filters, **callback):
+ """Wraps API list calls with role based access controls (RBAC).
+ This handles both the protection of the API parameters as well as any
+ filters supplied.
+
+ More complex API list calls (for example that need to examine the contents
+ of an entity referenced by one of the filters) should pass in a callback
+ function, which will subsequently be called to check protection for these
+ multiple entities. This callback function should gather the appropriate
+ entities needed and then call check_protection() in the V3Controller class.
+
+ """
def _filterprotected(f):
@functools.wraps(f)
def wrapper(self, context, **kwargs):
if not context['is_admin']:
- action = 'identity:%s' % f.__name__
- creds = _build_policy_check_credentials(self, action,
- context, kwargs)
- # Now, build the target dict for policy check. We include:
+ # The target dict for the policy check will include:
#
# - Any query filter parameters
# - Data from the main url (which will be in the kwargs
- # parameter) and would typically include the prime key
- # of a get/update/delete call
+ # parameter), which most of our APIs do not use but which
+ # could, in theory, be present.
#
- # First any query filter parameters
+
+ # First build the dict of filter parameters
target = dict()
if filters:
for item in filters:
@@ -193,15 +219,29 @@ def filterprotected(*filters):
', '.join(['%s=%s' % (item, target[item])
for item in target])))
- # Now any formal url parameters
- for key in kwargs:
- target[key] = kwargs[key]
-
- self.policy_api.enforce(creds,
- action,
- utils.flatten_dict(target))
-
- LOG.debug('RBAC: Authorization granted')
+ if 'callback' in callback and callback['callback'] is not None:
+ # A callback has been specified to load additional target
+ # data, so pass it the formal url params as well as the
+ # list of filters, so it can augment these and then call
+ # the check_protection() method.
+ prep_info = {'f_name': f.__name__,
+ 'input_attr': kwargs,
+ 'filter_attr': target}
+ callback['callback'](self, context, prep_info, **kwargs)
+ else:
+ # No callback, so we are going to check the protection here
+ action = 'identity:%s' % f.__name__
+ creds = _build_policy_check_credentials(self, action,
+ context, kwargs)
+ # Add in any formal url parameters
+ for key in kwargs:
+ target[key] = kwargs[key]
+
+ self.policy_api.enforce(creds,
+ action,
+ utils.flatten_dict(target))
+
+ LOG.debug('RBAC: Authorization granted')
else:
LOG.warning(_LW('RBAC: Bypassing authorization'))
return f(self, context, filters, **kwargs)
@@ -211,6 +251,7 @@ def filterprotected(*filters):
class V2Controller(wsgi.Application):
"""Base controller class for Identity API v2."""
+
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id since v2 calls are not domain-aware.
@@ -224,27 +265,13 @@ class V2Controller(wsgi.Application):
@staticmethod
def filter_domain_id(ref):
"""Remove domain_id since v2 calls are not domain-aware."""
- if 'domain_id' in ref:
- if ref['domain_id'] != CONF.identity.default_domain_id:
- raise exception.Unauthorized(
- _('Non-default domain is not supported'))
- del ref['domain_id']
+ ref.pop('domain_id', None)
return ref
@staticmethod
def filter_domain(ref):
- """Remove domain since v2 calls are not domain-aware.
-
- V3 Fernet tokens builds the users with a domain in the token data.
- This method will ensure that users create in v3 belong to the default
- domain.
-
- """
- if 'domain' in ref:
- if ref['domain'].get('id') != CONF.identity.default_domain_id:
- raise exception.Unauthorized(
- _('Non-default domain is not supported'))
- del ref['domain']
+ """Remove domain since v2 calls are not domain-aware."""
+ ref.pop('domain', None)
return ref
@staticmethod
@@ -287,20 +314,13 @@ class V2Controller(wsgi.Application):
def v3_to_v2_user(ref):
"""Convert a user_ref from v3 to v2 compatible.
- - v2.0 users are not domain aware, and should have domain_id validated
- to be the default domain, and then removed.
-
- - v2.0 users expect the use of tenantId instead of default_project_id.
-
- - v2.0 users have a username attribute.
-
- This method should only be applied to user_refs being returned from the
- v2.0 controller(s).
+ * v2.0 users are not domain aware, and should have domain_id removed
+ * v2.0 users expect the use of tenantId instead of default_project_id
+ * v2.0 users have a username attribute
If ref is a list type, we will iterate through each element and do the
conversion.
"""
-
def _format_default_project_id(ref):
"""Convert default_project_id to tenantId for v2 calls."""
default_project_id = ref.pop('default_project_id', None)
@@ -342,7 +362,6 @@ class V2Controller(wsgi.Application):
If ref is a list type, we will iterate through each element and do the
conversion.
"""
-
def _filter_project_properties(ref):
"""Run through the various filter methods."""
V2Controller.filter_domain_id(ref)
@@ -404,8 +423,6 @@ class V3Controller(wsgi.Application):
Class parameters:
- * `_mutable_parameters` - set of parameters that can be changed by users.
- Usually used by cls.check_immutable_params()
* `_public_parameters` - set of parameters that are exposed to the user.
Usually used by cls.filter_params()
@@ -450,7 +467,6 @@ class V3Controller(wsgi.Application):
True, including the absence of a value
"""
-
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
@@ -545,7 +561,6 @@ class V3Controller(wsgi.Application):
@classmethod
def filter_by_attributes(cls, refs, hints):
"""Filters a list of references by filter values."""
-
def _attr_match(ref_attr, val_attr):
"""Matches attributes allowing for booleans as strings.
@@ -565,7 +580,7 @@ class V3Controller(wsgi.Application):
:param filter: the filter in question
:param ref: the dict to check
- :returns True if there is a match
+ :returns: True if there is a match
"""
comparator = filter['comparator']
@@ -713,6 +728,8 @@ class V3Controller(wsgi.Application):
if token_ref.domain_scoped:
return token_ref.domain_id
+ elif token_ref.project_scoped:
+ return token_ref.project_domain_id
else:
LOG.warning(
_LW('No domain information specified as part of list request'))
@@ -726,7 +743,16 @@ class V3Controller(wsgi.Application):
being used.
"""
- token_ref = utils.get_token_ref(context)
+ try:
+ token_ref = utils.get_token_ref(context)
+ except exception.Unauthorized:
+ if context.get('is_admin'):
+ raise exception.ValidationError(
+ _('You have tried to create a resource using the admin '
+ 'token. As this token is not within a domain, you must '
+ 'explicitly include a domain for this resource to '
+ 'belong to.'))
+ raise
if token_ref.domain_scoped:
return token_ref.domain_id
@@ -751,7 +777,7 @@ class V3Controller(wsgi.Application):
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id if not specified in a v3 call."""
- if 'domain_id' not in ref:
+ if not ref.get('domain_id'):
ref['domain_id'] = self._get_domain_id_from_token(context)
return ref
@@ -768,7 +794,7 @@ class V3Controller(wsgi.Application):
additional entities or attributes (passed in target_attr), so that
they can be referenced by policy rules.
- """
+ """
if 'is_admin' in context and context['is_admin']:
LOG.warning(_LW('RBAC: Bypassing authorization'))
else:
@@ -785,43 +811,19 @@ class V3Controller(wsgi.Application):
if target_attr:
policy_dict = {'target': target_attr}
policy_dict.update(prep_info['input_attr'])
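+ # Any filters gathered by a filterprotected() wrapper are
+ # folded into the policy target as well.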
+ if 'filter_attr' in prep_info:
+ policy_dict.update(prep_info['filter_attr'])
self.policy_api.enforce(creds,
action,
utils.flatten_dict(policy_dict))
LOG.debug('RBAC: Authorization granted')
@classmethod
- def check_immutable_params(cls, ref):
- """Raise exception when disallowed parameter is in ref.
-
- Check whether the ref dictionary representing a request has only
- mutable parameters included. If not, raise an exception. This method
- checks only root-level keys from a ref dictionary.
-
- :param ref: a dictionary representing deserialized request to be
- stored
- :raises: :class:`keystone.exception.ImmutableAttributeError`
-
- """
- ref_keys = set(ref.keys())
- blocked_keys = ref_keys.difference(cls._mutable_parameters)
-
- if not blocked_keys:
- # No immutable parameters changed
- return
-
- exception_args = {'target': cls.__name__,
- 'attributes': ', '.join(blocked_keys)}
- raise exception.ImmutableAttributeError(**exception_args)
-
- @classmethod
def filter_params(cls, ref):
"""Remove unspecified parameters from the dictionary.
- This function removes unspecified parameters from the dictionary. See
- check_immutable_parameters for corresponding function that raises
- exceptions. This method checks only root-level keys from a ref
- dictionary.
+ This method checks only root-level keys from a ref dictionary.
:param ref: a dictionary representing deserialized response to be
serialized
diff --git a/keystone-moon/keystone/common/dependency.py b/keystone-moon/keystone/common/dependency.py
index e19f705f..d52a1ec5 100644
--- a/keystone-moon/keystone/common/dependency.py
+++ b/keystone-moon/keystone/common/dependency.py
@@ -60,6 +60,7 @@ class UnresolvableDependencyException(Exception):
See ``resolve_future_dependencies()`` for more details.
"""
+
def __init__(self, name, targets):
msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
'name': name, 'targets': targets}
@@ -225,6 +226,5 @@ def reset():
This is useful for unit testing to ensure that tests don't use providers
from previous tests.
"""
-
_REGISTRY.clear()
_future_dependencies.clear()
diff --git a/keystone-moon/keystone/common/driver_hints.py b/keystone-moon/keystone/common/driver_hints.py
index ff0a774c..e7c2f2ef 100644
--- a/keystone-moon/keystone/common/driver_hints.py
+++ b/keystone-moon/keystone/common/driver_hints.py
@@ -13,6 +13,50 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
+from keystone import exception
+from keystone.i18n import _
+
+
+def truncated(f):
+ """Ensure list truncation is detected in Driver list entity methods.
+
+ This is designed to wrap Driver list_{entity} methods in order to
+ calculate if the resultant list has been truncated. If a limit dict is
+ found in the hints list, the limit is incremented by one so that the
+ wrapped function is asked for one more entity than the limit. Once the
+ list has been generated, if the original limit has been exceeded, the
+ list is truncated back to that limit and the 'truncated' boolean is set
+ to True in the hints limit dict.
+
+ """
+ @functools.wraps(f)
+ def wrapper(self, hints, *args, **kwargs):
+ if not hasattr(hints, 'limit'):
+ raise exception.UnexpectedError(
+ _('Cannot truncate a driver call without a hints list as '
+ 'the first parameter after self'))
+
+ if hints.limit is None:
+ return f(self, hints, *args, **kwargs)
+
+ # A limit is set, so ask for one more entry than we need
+ list_limit = hints.limit['limit']
+ hints.set_limit(list_limit + 1)
+ ref_list = f(self, hints, *args, **kwargs)
+
+ # If we got more than the original limit then trim back the list and
+ # mark it truncated. In both cases, make sure we set the limit back
+ # to its original value.
+ if len(ref_list) > list_limit:
+ hints.set_limit(list_limit, truncated=True)
+ return ref_list[:list_limit]
+ else:
+ hints.set_limit(list_limit)
+ return ref_list
+ return wrapper
+
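+ # Illustrative usage (the driver method below is hypothetical):
+ #
+ #     @truncated
+ #     def list_users(self, hints):
+ #         return self._get_all_matching(hints)
+ #
+ # After the call, hints.limit['truncated'] is True when the backend held
+ # more entries than the requested limit.
+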
class Hints(object):
"""Encapsulate driver hints for listing entities.
@@ -39,12 +83,13 @@ class Hints(object):
* ``name``: the name of the attribute being matched
* ``value``: the value against which it is being matched
* ``comparator``: the operation, which can be one of ``equals``,
- ``startswith`` or ``endswith``
+ ``contains``, ``startswith`` or ``endswith``
* ``case_sensitive``: whether any comparison should take account of
case
* ``type``: will always be 'filter'
"""
+
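+ # Illustrative usage (the filter values are hypothetical):
+ #
+ #     hints = Hints()
+ #     hints.add_filter('name', 'alice')
+ #     hints.set_limit(100)
+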
def __init__(self):
self.limit = None
self.filters = list()
diff --git a/keystone-moon/keystone/common/environment/__init__.py b/keystone-moon/keystone/common/environment/__init__.py
index 3edf6b0b..6748f115 100644
--- a/keystone-moon/keystone/common/environment/__init__.py
+++ b/keystone-moon/keystone/common/environment/__init__.py
@@ -21,7 +21,7 @@ from oslo_log import log
LOG = log.getLogger(__name__)
-__all__ = ['Server', 'httplib', 'subprocess']
+__all__ = ('Server', 'httplib', 'subprocess')
_configured = False
@@ -95,7 +95,8 @@ def use_stdlib():
global httplib, subprocess
import six.moves.http_client as _httplib
- import subprocess as _subprocess
+ import subprocess as _subprocess # nosec : This is used in .federation.idp
+ # and .common.openssl. See there.
httplib = _httplib
subprocess = _subprocess
diff --git a/keystone-moon/keystone/common/environment/eventlet_server.py b/keystone-moon/keystone/common/environment/eventlet_server.py
index 398952e1..430ca3e4 100644
--- a/keystone-moon/keystone/common/environment/eventlet_server.py
+++ b/keystone-moon/keystone/common/environment/eventlet_server.py
@@ -27,7 +27,6 @@ import eventlet.wsgi
import greenlet
from oslo_config import cfg
from oslo_log import log
-from oslo_log import loggers
from oslo_service import service
from keystone.i18n import _LE, _LI
@@ -46,15 +45,16 @@ LOG = log.getLogger(__name__)
POOL_SIZE = 1
-class EventletFilteringLogger(loggers.WritableLogger):
+class EventletFilteringLogger(object):
# NOTE(morganfainberg): This logger is designed to filter out specific
# Tracebacks to limit the amount of data that eventlet can log. In the
# case of broken sockets (EPIPE and ECONNRESET), we are seeing a huge
# volume of data being written to the logs due to ~14 lines+ per traceback.
# The tracebacks in these cases are, at best, useful for limited debugging
# cases.
- def __init__(self, *args, **kwargs):
- super(EventletFilteringLogger, self).__init__(*args, **kwargs)
+ def __init__(self, logger, level=log.INFO):
+ self.logger = logger
+ self.level = level
self.regex = re.compile(r'errno (%d|%d)' %
(errno.EPIPE, errno.ECONNRESET), re.IGNORECASE)
@@ -73,7 +73,8 @@ class Server(service.ServiceBase):
def __init__(self, application, host=None, port=None, keepalive=False,
keepidle=None):
self.application = application
- self.host = host or '0.0.0.0'
+ self.host = host or '0.0.0.0' # nosec : Bind to all interfaces by
+ # default for backwards compatibility.
self.port = port or 0
# Pool for a green thread in which wsgi server will be running
self.pool = eventlet.GreenPool(POOL_SIZE)
@@ -92,7 +93,6 @@ class Server(service.ServiceBase):
Raises Exception if this has already been called.
"""
-
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix.
@@ -120,7 +120,6 @@ class Server(service.ServiceBase):
def start(self, key=None, backlog=128):
"""Run a WSGI server with the given application."""
-
if self.socket is None:
self.listen(key=key, backlog=backlog)
@@ -145,8 +144,13 @@ class Server(service.ServiceBase):
dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if self.keepidle is not None:
- dup_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
- self.keepidle)
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ dup_socket.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_KEEPIDLE,
+ self.keepidle)
+ else:
+ LOG.warning("System does not support TCP_KEEPIDLE but "
+ "tcp_keepidle has been set. Ignoring.")
self.greenthread = self.pool.spawn(self._run,
self.application,
@@ -168,9 +172,11 @@ class Server(service.ServiceBase):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
- except KeyboardInterrupt:
+ except KeyboardInterrupt: # nosec
+ # If CTRL-C, just break out of the loop.
pass
- except greenlet.GreenletExit:
+ except greenlet.GreenletExit: # nosec
+ # If exiting, break out of the loop.
pass
def reset(self):
@@ -198,7 +204,7 @@ class Server(service.ServiceBase):
socket, application, log=EventletFilteringLogger(logger),
debug=False, keepalive=CONF.eventlet_server.wsgi_keep_alive,
socket_timeout=socket_timeout)
- except greenlet.GreenletExit:
+ except greenlet.GreenletExit: # nosec
# Wait until all servers have completed running
pass
except Exception:
diff --git a/keystone-moon/keystone/common/extension.py b/keystone-moon/keystone/common/extension.py
index b2ea80bc..be5de631 100644
--- a/keystone-moon/keystone/common/extension.py
+++ b/keystone-moon/keystone/common/extension.py
@@ -41,5 +41,4 @@ def register_admin_extension(url_prefix, extension_data):
def register_public_extension(url_prefix, extension_data):
"""Same as register_admin_extension but for public extensions."""
-
PUBLIC_EXTENSIONS[url_prefix] = extension_data
diff --git a/keystone-moon/keystone/common/json_home.py b/keystone-moon/keystone/common/json_home.py
index c048a356..6876f8af 100644
--- a/keystone-moon/keystone/common/json_home.py
+++ b/keystone-moon/keystone/common/json_home.py
@@ -79,7 +79,6 @@ class Status(object):
def translate_urls(json_home, new_prefix):
"""Given a JSON Home document, sticks new_prefix on each of the urls."""
-
for dummy_rel, resource in json_home['resources'].items():
if 'href' in resource:
resource['href'] = new_prefix + resource['href']
diff --git a/keystone-moon/keystone/common/kvs/__init__.py b/keystone-moon/keystone/common/kvs/__init__.py
index 9a406a85..354bbd8a 100644
--- a/keystone-moon/keystone/common/kvs/__init__.py
+++ b/keystone-moon/keystone/common/kvs/__init__.py
@@ -15,7 +15,6 @@
from dogpile.cache import region
from keystone.common.kvs.core import * # noqa
-from keystone.common.kvs.legacy import Base, DictKvs, INMEMDB # noqa
# NOTE(morganfainberg): Provided backends are registered here in the __init__
diff --git a/keystone-moon/keystone/common/kvs/backends/inmemdb.py b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
index 68072ef4..379b54bf 100644
--- a/keystone-moon/keystone/common/kvs/backends/inmemdb.py
+++ b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
@@ -12,9 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Keystone In-Memory Dogpile.cache backend implementation.
-"""
+"""Keystone In-Memory Dogpile.cache backend implementation."""
import copy
@@ -40,6 +38,7 @@ class MemoryBackend(api.CacheBackend):
'keystone.common.kvs.Memory'
)
"""
+
def __init__(self, arguments):
self._db = {}
diff --git a/keystone-moon/keystone/common/kvs/backends/memcached.py b/keystone-moon/keystone/common/kvs/backends/memcached.py
index f54c1a01..a65cf877 100644
--- a/keystone-moon/keystone/common/kvs/backends/memcached.py
+++ b/keystone-moon/keystone/common/kvs/backends/memcached.py
@@ -12,26 +12,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Keystone Memcached dogpile.cache backend implementation.
-"""
+"""Keystone Memcached dogpile.cache backend implementation."""
import random as _random
import time
from dogpile.cache import api
from dogpile.cache.backends import memcached
+from oslo_cache.backends import memcache_pool
from oslo_config import cfg
-from oslo_log import log
from six.moves import range
-from keystone.common.cache.backends import memcache_pool
from keystone import exception
from keystone.i18n import _
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
NO_VALUE = api.NO_VALUE
random = _random.SystemRandom()
@@ -49,6 +45,7 @@ class MemcachedLock(object):
http://amix.dk/blog/post/19386
"""
+
def __init__(self, client_fn, key, lock_timeout, max_lock_attempts):
self.client_fn = client_fn
self.key = "_lock" + key
@@ -63,7 +60,9 @@ class MemcachedLock(object):
elif not wait:
return False
else:
- sleep_time = random.random()
+ sleep_time = random.random() # nosec : random is not used for
+ # crypto or security, it's just the time to delay between
+ # retries.
time.sleep(sleep_time)
raise exception.UnexpectedError(
_('Maximum lock attempts on %s occurred.') % self.key)
@@ -81,6 +80,7 @@ class MemcachedBackend(object):
time `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are
valid).
"""
+
def __init__(self, arguments):
self._key_mangler = None
self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set()))
diff --git a/keystone-moon/keystone/common/kvs/core.py b/keystone-moon/keystone/common/kvs/core.py
index 6ce7b318..064825f8 100644
--- a/keystone-moon/keystone/common/kvs/core.py
+++ b/keystone-moon/keystone/common/kvs/core.py
@@ -25,6 +25,7 @@ from dogpile.core import nameregistry
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
+from oslo_utils import reflection
from keystone import exception
from keystone.i18n import _
@@ -32,8 +33,8 @@ from keystone.i18n import _LI
from keystone.i18n import _LW
-__all__ = ['KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
- 'get_key_value_store']
+__all__ = ('KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
+ 'get_key_value_store')
BACKENDS_REGISTERED = False
@@ -66,6 +67,23 @@ def _register_backends():
BACKENDS_REGISTERED = True
+def sha1_mangle_key(key):
+ """Wrapper for dogpile's sha1_mangle_key.
+
+ Taken from oslo_cache.core._sha1_mangle_key
+
+ dogpile's sha1_mangle_key function expects an encoded string, so text
+ keys are encoded to UTF-8 here before the key is passed through.
+ """
+ try:
+ key = key.encode('utf-8', errors='xmlcharrefreplace')
+ except (UnicodeError, AttributeError): # nosec
+ # NOTE(stevemar): if encoding fails just continue anyway.
+ pass
+ return dogpile_util.sha1_mangle_key(key)
+
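+ # For example, sha1_mangle_key(u'region.key') and sha1_mangle_key(
+ # b'region.key') should both yield the same 40-character hex digest,
+ # keeping cache keys within memcached's key-length limit.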
+
class LockTimeout(exception.UnexpectedError):
debug_message_format = _('Lock Timeout occurred for key, %(target)s')
@@ -76,6 +94,7 @@ class KeyValueStore(object):
This manager also supports the concept of locking a given key resource to
allow for a guaranteed atomic transaction to the backend.
"""
+
def __init__(self, kvs_region):
self.locking = True
self._lock_timeout = 0
@@ -95,7 +114,6 @@ class KeyValueStore(object):
this instantiation
:param region_config_args: key-word args passed to the dogpile.cache
backend for configuration
- :return:
"""
if self.is_configured:
# NOTE(morganfainberg): It is a bad idea to reconfigure a backend,
@@ -130,12 +148,16 @@ class KeyValueStore(object):
if issubclass(pxy, proxy.ProxyBackend):
proxies.append(pxy)
else:
+ pxy_cls_name = reflection.get_class_name(
+ pxy, fully_qualified=False)
LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
- pxy.__name__)
+ pxy_cls_name)
for proxy_cls in reversed(proxies):
+ proxy_cls_name = reflection.get_class_name(
+ proxy_cls, fully_qualified=False)
LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
- {'proxy': proxy_cls.__name__,
+ {'proxy': proxy_cls_name,
'name': self._region.name})
self._region.wrap(proxy_cls)
@@ -196,14 +218,14 @@ class KeyValueStore(object):
raise exception.ValidationError(
_('`key_mangler` option must be a function reference'))
else:
- LOG.info(_LI('Using default dogpile sha1_mangle_key as KVS '
- 'region %s key_mangler'), self._region.name)
- # NOTE(morganfainberg): Sane 'default' keymangler is the
- # dogpile sha1_mangle_key function. This ensures that unless
- # explicitly changed, we mangle keys. This helps to limit
- # unintended cases of exceeding cache-key in backends such
- # as memcache.
- self._region.key_mangler = dogpile_util.sha1_mangle_key
+ msg = _LI('Using default keystone.common.kvs.sha1_mangle_key '
+ 'as KVS region %s key_mangler')
+ LOG.info(msg, self._region.name)
+ # NOTE(morganfainberg): Use 'default' keymangler to ensure
+ # that unless explicitly changed, we mangle keys. This helps
+ # to limit unintended cases of exceeding cache-key in backends
+ # such as memcache.
+ self._region.key_mangler = sha1_mangle_key
self._set_keymangler_on_backend(self._region.key_mangler)
else:
LOG.info(_LI('KVS region %s key_mangler disabled.'),
@@ -251,6 +273,7 @@ class KeyValueStore(object):
class _LockWrapper(object):
"""weakref-capable threading.Lock wrapper."""
+
def __init__(self, lock_timeout):
self.lock = threading.Lock()
self.lock_timeout = lock_timeout
@@ -339,8 +362,9 @@ class KeyValueStore(object):
@contextlib.contextmanager
def _action_with_lock(self, key, lock=None):
- """Wrapper context manager to validate and handle the lock and lock
- timeout if passed in.
+ """Wrapper context manager.
+
+ Validates and handles the lock and lock timeout if passed in.
"""
if not isinstance(lock, KeyValueStoreLock):
# NOTE(morganfainberg): Locking only matters if a lock is passed in
@@ -362,11 +386,13 @@ class KeyValueStore(object):
class KeyValueStoreLock(object):
- """Basic KeyValueStoreLock context manager that hooks into the
- dogpile.cache backend mutex allowing for distributed locking on resources.
+ """Basic KeyValueStoreLock context manager.
- This is only a write lock, and will not prevent reads from occurring.
+ Hooks into the dogpile.cache backend mutex allowing for distributed locking
+ on resources. This is only a write lock, and will not prevent reads from
+ occurring.
"""
+
def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
self.mutex = mutex
self.key = key
@@ -407,7 +433,9 @@ class KeyValueStoreLock(object):
def get_key_value_store(name, kvs_region=None):
- """Instantiate a new :class:`.KeyValueStore` or return a previous
+ """Retrieve key value store.
+
+ Instantiate a new :class:`.KeyValueStore` or return a previous
instantiation that has the same name.
"""
global KEY_VALUE_STORE_REGISTRY
diff --git a/keystone-moon/keystone/common/ldap/core.py b/keystone-moon/keystone/common/ldap/core.py
index 6386ae2a..d94aa04c 100644
--- a/keystone-moon/keystone/common/ldap/core.py
+++ b/keystone-moon/keystone/common/ldap/core.py
@@ -20,12 +20,15 @@ import re
import sys
import weakref
+import ldap.controls
import ldap.filter
import ldappool
from oslo_log import log
+from oslo_utils import reflection
import six
from six.moves import map, zip
+from keystone.common import driver_hints
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
@@ -62,15 +65,17 @@ def utf8_encode(value):
:param value: A basestring
:returns: UTF-8 encoded version of value
- :raises: TypeError if value is not basestring
+ :raises TypeError: If value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
+ value_cls_name = reflection.get_class_name(
+ value, fully_qualified=False)
raise TypeError("value must be basestring, "
- "not %s" % value.__class__.__name__)
+ "not %s" % value_cls_name)
_utf8_decoder = codecs.getdecoder('utf-8')
@@ -84,7 +89,7 @@ def utf8_decode(value):
:param value: value to be returned as unicode
:returns: value as unicode
- :raises: UnicodeDecodeError for invalid UTF-8 encoding
+ :raises UnicodeDecodeError: for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
@@ -110,14 +115,15 @@ def py2ldap(val):
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
-
try:
return LDAP_VALUES[val]
- except KeyError:
+ except KeyError: # nosec
+ # It wasn't a boolean value, will try as an int instead.
pass
try:
return int(val)
- except ValueError:
+ except ValueError: # nosec
+ # It wasn't an int either, will try as utf8 instead.
pass
return utf8_decode(val)
@@ -239,7 +245,6 @@ def is_ava_value_equal(attribute_type, val1, val2):
that function apply here.
"""
-
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
@@ -259,7 +264,6 @@ def is_rdn_equal(rdn1, rdn2):
limitations of that function apply here.
"""
-
if len(rdn1) != len(rdn2):
return False
@@ -292,7 +296,6 @@ def is_dn_equal(dn1, dn2):
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
-
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
@@ -314,7 +317,6 @@ def dn_startswith(descendant_dn, dn):
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
-
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
@@ -419,6 +421,7 @@ class LDAPHandler(object):
derived classes.
"""
+
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@@ -625,6 +628,7 @@ def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
class MsgId(list):
"""Wrapper class to hold connection and msgid."""
+
pass
@@ -665,6 +669,7 @@ class PooledLDAPHandler(LDAPHandler):
the methods in this class.
"""
+
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
@@ -815,7 +820,6 @@ class PooledLDAPHandler(LDAPHandler):
which requested msgId and used it in result3 exits.
"""
-
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@@ -957,7 +961,7 @@ class KeystoneLDAPHandler(LDAPHandler):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
- 'attrs=%s attrsonly=%s'
+ 'attrs=%s attrsonly=%s '
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
@@ -1041,7 +1045,11 @@ class KeystoneLDAPHandler(LDAPHandler):
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
- py_result = convert_ldap_result(ldap_result)
+ # ldap_result returned from result3 is a tuple of
+ # (rtype, rdata, rmsgid, serverctrls). Only rdata is
+ # needed here.
+ rtype, rdata, rmsgid, serverctrls = ldap_result
+ py_result = convert_ldap_result(rdata)
return py_result
def modify_s(self, dn, modlist):
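
For reference, python-ldap's result3() returns a four-tuple (rtype, rdata, rmsgid, serverctrls), and only rdata carries the entries, which is why the hunk above unpacks before converting. A stubbed illustration, no live server required (the DN and attribute values are made up):

    # Stubbed shape of a result3() return value.
    ldap_result = (
        101,                                               # rtype
        [('cn=foo,dc=example,dc=com', {'cn': [b'foo']})],  # rdata
        1,                                                 # rmsgid
        [],                                                # serverctrls
    )
    rtype, rdata, rmsgid, serverctrls = ldap_result
    for dn, attrs in rdata:                # only rdata holds the entries
        print(dn, attrs['cn'])
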
@@ -1221,7 +1229,7 @@ class BaseLdap(object):
try:
ldap_attr, attr_map = item.split(':')
except Exception:
- LOG.warn(_LW(
+ LOG.warning(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
@@ -1337,7 +1345,7 @@ class BaseLdap(object):
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
- LOG.warn(message)
+ LOG.warning(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
@@ -1354,7 +1362,8 @@ class BaseLdap(object):
continue
v = lower_res[map_attr.lower()]
- except KeyError:
+ except KeyError: # nosec
+ # Didn't find the attr, so don't add it.
pass
else:
try:
@@ -1383,7 +1392,8 @@ class BaseLdap(object):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
- except exception.NotFound:
+ except exception.NotFound: # nosec
+ # Didn't find it, so it's unique, good.
pass
else:
raise exception.Conflict(type=self.options_name,
@@ -1393,7 +1403,8 @@ class BaseLdap(object):
if values.get('id') is not None:
try:
self.get(values['id'])
- except exception.NotFound:
+ except exception.NotFound: # nosec
+ # Didn't find it, so it's unique, good.
pass
else:
raise exception.Conflict(type=self.options_name,
@@ -1452,16 +1463,39 @@ class BaseLdap(object):
except IndexError:
return None
- def _ldap_get_all(self, ldap_filter=None):
+ def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
+ with self.get_connection() as conn:
+ try:
+ control = ldap.controls.libldap.SimplePagedResultsControl(
+ criticality=True,
+ size=sizelimit,
+ cookie='')
+ msgid = conn.search_ext(base, scope, filterstr, attrlist,
+ serverctrls=[control])
+ rdata = conn.result3(msgid)
+ return rdata
+ except ldap.NO_SUCH_OBJECT:
+ return []
+
+ @driver_hints.truncated
+ def _ldap_get_all(self, hints, ldap_filter=None):
query = u'(&%s(objectClass=%s)(%s=*))' % (
ldap_filter or self.ldap_filter or '',
self.object_class,
self.id_attr)
+ sizelimit = 0
+ attrs = list(set(([self.id_attr] +
+ list(self.attribute_mapping.values()) +
+ list(self.extra_attr_mapping.keys()))))
+ if hints.limit:
+ sizelimit = hints.limit['limit']
+ return self._ldap_get_limited(self.tree_dn,
+ self.LDAP_SCOPE,
+ query,
+ attrs,
+ sizelimit)
with self.get_connection() as conn:
try:
- attrs = list(set(([self.id_attr] +
- list(self.attribute_mapping.values()) +
- list(self.extra_attr_mapping.keys()))))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
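
The new _ldap_get_limited path pushes the limit to the server as an RFC 2696 simple paged results control instead of slicing client-side. A hedged sketch of that control in isolation (the host, base DN, and filter are hypothetical, and a reachable LDAP server is assumed):

    import ldap
    from ldap.controls.libldap import SimplePagedResultsControl

    conn = ldap.initialize('ldap://ldap.example.com')   # hypothetical host
    control = SimplePagedResultsControl(criticality=True, size=100,
                                        cookie='')
    msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                            '(objectClass=person)', ['cn'],
                            serverctrls=[control])
    # rdata holds at most `size` entries for this page
    rtype, rdata, rmsgid, serverctrls = conn.result3(msgid)
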
@@ -1501,9 +1535,10 @@ class BaseLdap(object):
except IndexError:
raise self._not_found(name)
- def get_all(self, ldap_filter=None):
+ def get_all(self, ldap_filter=None, hints=None):
+ hints = hints or driver_hints.Hints()
return [self._ldap_res_to_model(x)
- for x in self._ldap_get_all(ldap_filter)]
+ for x in self._ldap_get_all(hints, ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
@@ -1565,7 +1600,7 @@ class BaseLdap(object):
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
- def deleteTree(self, object_id):
+ def delete_tree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
@@ -1609,8 +1644,8 @@ class BaseLdap(object):
:param member_list_dn: DN of group to which the
member will be added.
- :raises: exception.Conflict: If the user was already a member.
- self.NotFound: If the group entry didn't exist.
+ :raises keystone.exception.Conflict: If the user was already a member.
+ :raises self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
@@ -1632,8 +1667,8 @@ class BaseLdap(object):
:param member_list_dn: DN of group from which the
member will be removed.
- :raises: self.NotFound: If the group entry didn't exist.
- ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
+ :raises self.NotFound: If the group entry didn't exist.
+ :raises ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
@@ -1666,11 +1701,12 @@ class BaseLdap(object):
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
- LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
- " delete nonexistent entries %(entries)s%(dots)s"),
- {'search_base': search_base,
- 'entries': not_deleted_nodes[:3],
- 'dots': '...' if len(not_deleted_nodes) > 3 else ''})
+ LOG.warning(_LW("When deleting entries for %(search_base)s, "
+ "could not delete nonexistent entries "
+ "%(entries)s%(dots)s"),
+ {'search_base': search_base,
+ 'entries': not_deleted_nodes[:3],
+ 'dots': '...' if len(not_deleted_nodes) > 3 else ''})
def filter_query(self, hints, query=None):
"""Applies filtering to a query.
@@ -1823,7 +1859,8 @@ class EnabledEmuMixIn(BaseLdap):
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
- query = '(%s=%s)' % (self.member_attribute, dn)
+ query = '(%s=%s)' % (self.member_attribute,
+ ldap.filter.escape_filter_chars(dn))
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
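
Escaping the DN before splicing it into the filter matters because DNs may legally contain filter metacharacters; left raw, they rewrite the query. A short sketch of the difference:

    import ldap.filter

    dn = 'cn=evil)(objectClass=*'     # hostile value, for illustration
    unsafe = '(member=%s)' % dn
    safe = '(member=%s)' % ldap.filter.escape_filter_chars(dn)
    print(unsafe)   # (member=cn=evil)(objectClass=*)  -- injected clause
    print(safe)     # (member=cn=evil\29\28objectClass=\2a)
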
@@ -1857,7 +1894,8 @@ class EnabledEmuMixIn(BaseLdap):
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
- except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
+ except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE): # nosec
+ # It's already gone, good.
pass
def create(self, values):
@@ -1880,11 +1918,12 @@ class EnabledEmuMixIn(BaseLdap):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
- def get_all(self, ldap_filter=None):
+ def get_all(self, ldap_filter=None, hints=None):
+ hints = hints or driver_hints.Hints()
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here so results can be filtered by DN
tenant_list = [self._ldap_res_to_model(x)
- for x in self._ldap_get_all(ldap_filter)
+ for x in self._ldap_get_all(hints, ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for tenant_ref in tenant_list:
@@ -1892,7 +1931,7 @@ class EnabledEmuMixIn(BaseLdap):
tenant_ref['id'], conn)
return tenant_list
else:
- return super(EnabledEmuMixIn, self).get_all(ldap_filter)
+ return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
@@ -1914,23 +1953,3 @@ class EnabledEmuMixIn(BaseLdap):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
-
-
-class ProjectLdapStructureMixin(object):
- """Project LDAP Structure shared between LDAP backends.
-
- This is shared between the resource and assignment LDAP backends.
-
- """
- DEFAULT_OU = 'ou=Groups'
- DEFAULT_STRUCTURAL_CLASSES = []
- DEFAULT_OBJECTCLASS = 'groupOfNames'
- DEFAULT_ID_ATTR = 'cn'
- NotFound = exception.ProjectNotFound
- notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
- options_name = 'project'
- attribute_options_names = {'name': 'name',
- 'description': 'desc',
- 'enabled': 'enabled',
- 'domain_id': 'domain_id'}
- immutable_attrs = ['name']
diff --git a/keystone-moon/keystone/common/manager.py b/keystone-moon/keystone/common/manager.py
index f98a1763..4ce9f2a6 100644
--- a/keystone-moon/keystone/common/manager.py
+++ b/keystone-moon/keystone/common/manager.py
@@ -13,12 +13,19 @@
# under the License.
import functools
+import inspect
+import time
+import types
from oslo_log import log
from oslo_log import versionutils
from oslo_utils import importutils
+from oslo_utils import reflection
+import six
import stevedore
+from keystone.i18n import _
+
LOG = log.getLogger(__name__)
@@ -70,17 +77,93 @@ def load_driver(namespace, driver_name, *args):
LOG.debug('Failed to load %r using stevedore: %s', driver_name, e)
# Ignore failure and continue on.
- @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
- in_favor_of='entrypoints',
- what='direct import of driver')
- def _load_using_import(driver_name, *args):
- return importutils.import_object(driver_name, *args)
+ driver = importutils.import_object(driver_name, *args)
+
+ msg = (_(
+ 'Direct import of driver %(name)r is deprecated as of Liberty in '
+ 'favor of its entrypoint from %(namespace)r and may be removed in '
+ 'N.') %
+ {'name': driver_name, 'namespace': namespace})
+ versionutils.report_deprecated_feature(LOG, msg)
+
+ return driver
- # For backwards-compatibility, an unregistered class reference can
- # still be used.
- return _load_using_import(driver_name, *args)
+class _TraceMeta(type):
+ """A metaclass that, in trace mode, will log entry and exit of methods.
+ This metaclass automatically wraps all methods on the class when
+ instantiated with a decorator that will log entry/exit from a method
+ when keystone is run in Trace log level.
+ """
+
+ @staticmethod
+ def wrapper(__f, __classname):
+ __argspec = inspect.getargspec(__f)
+ __fn_info = '%(module)s.%(classname)s.%(funcname)s' % {
+ 'module': inspect.getmodule(__f).__name__,
+ 'classname': __classname,
+ 'funcname': __f.__name__
+ }
+ # NOTE(morganfainberg): Omit "cls" and "self" when printing trace logs;
+ # the index can be calculated at wrap time rather than at runtime.
+ if __argspec.args and __argspec.args[0] in ('self', 'cls'):
+ __arg_idx = 1
+ else:
+ __arg_idx = 0
+
+ @functools.wraps(__f)
+ def wrapped(*args, **kwargs):
+ __exc = None
+ __t = time.time()
+ __do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE
+ __ret_val = None
+ try:
+ if __do_trace:
+ LOG.trace('CALL => %s', __fn_info)
+ __ret_val = __f(*args, **kwargs)
+ except Exception as e: # nosec
+ __exc = e
+ raise
+ finally:
+ if __do_trace:
+ __subst = {
+ 'run_time': (time.time() - __t),
+ 'passed_args': ', '.join([
+ ', '.join([repr(a)
+ for a in args[__arg_idx:]]),
+ ', '.join(['%(k)s=%(v)r' % {'k': k, 'v': v}
+ for k, v in kwargs.items()]),
+ ]),
+ 'function': __fn_info,
+ 'exception': __exc,
+ 'ret_val': __ret_val,
+ }
+ if __exc is not None:
+ __msg = ('[%(run_time)ss] %(function)s '
+ '(%(passed_args)s) => raised '
+ '%(exception)r')
+ else:
+ # TODO(morganfainberg): find a way to indicate if this
+ # was a cache hit or cache miss.
+ __msg = ('[%(run_time)ss] %(function)s'
+ '(%(passed_args)s) => %(ret_val)r')
+ LOG.trace(__msg, __subst)
+ return __ret_val
+ return wrapped
+
+ def __new__(meta, classname, bases, class_dict):
+ final_cls_dict = {}
+ for attr_name, attr in class_dict.items():
+ # NOTE(morganfainberg): only wrap public instances and methods.
+ if (isinstance(attr, types.FunctionType) and
+ not attr_name.startswith('_')):
+ attr = _TraceMeta.wrapper(attr, classname)
+ final_cls_dict[attr_name] = attr
+ return type.__new__(meta, classname, bases, final_cls_dict)
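
A pared-down sketch of the metaclass pattern above, with print() standing in for TRACE-level logging and the timing and argument formatting omitted:

    import functools
    import types

    import six

    class TraceMetaSketch(type):
        @staticmethod
        def _wrap(func, classname):
            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                print('CALL => %s.%s' % (classname, func.__name__))
                try:
                    return func(*args, **kwargs)
                finally:
                    print('EXIT <= %s.%s' % (classname, func.__name__))
            return wrapped

        def __new__(meta, classname, bases, class_dict):
            final = {}
            for name, attr in class_dict.items():
                # only wrap public plain functions, as above
                if (isinstance(attr, types.FunctionType) and
                        not name.startswith('_')):
                    attr = meta._wrap(attr, classname)
                final[name] = attr
            return type.__new__(meta, classname, bases, final)

    @six.add_metaclass(TraceMetaSketch)
    class Demo(object):
        def ping(self):
            return 'pong'

    Demo().ping()   # prints CALL => Demo.ping, then EXIT <= Demo.ping
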
+
+
+@six.add_metaclass(_TraceMeta)
class Manager(object):
"""Base class for intermediary request layer.
@@ -121,16 +204,15 @@ def create_legacy_driver(driver_class):
Driver = create_legacy_driver(CatalogDriverV8)
"""
-
module_name = driver_class.__module__
- class_name = driver_class.__name__
+ class_name = reflection.get_class_name(driver_class)
class Driver(driver_class):
@versionutils.deprecated(
as_of=versionutils.deprecated.LIBERTY,
what='%s.Driver' % module_name,
- in_favor_of='%s.%s' % (module_name, class_name),
+ in_favor_of=class_name,
remove_in=+2)
def __init__(self, *args, **kwargs):
super(Driver, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/common/models.py b/keystone-moon/keystone/common/models.py
index 0bb37319..de996522 100644
--- a/keystone-moon/keystone/common/models.py
+++ b/keystone-moon/keystone/common/models.py
@@ -21,6 +21,7 @@ Unless marked otherwise, all fields are strings.
class Model(dict):
"""Base model class."""
+
def __hash__(self):
return self['id'].__hash__()
@@ -151,6 +152,18 @@ class Role(Model):
optional_keys = tuple()
+class ImpliedRole(Model):
+ """ImpliedRole object.
+
+ Required keys:
+ prior_role_id
+ implied_role_id
+ """
+
+ required_keys = ('prior_role_id', 'implied_role_id')
+ optional_keys = tuple()
+
+
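
Since Model subclasses dict, the new ImpliedRole is built like any mapping. A compact rendition with the relevant classes restated (the role IDs are made up):

    class Model(dict):
        """Minimal restatement of the base model class."""
        required_keys = tuple()
        optional_keys = tuple()

    class ImpliedRole(Model):
        required_keys = ('prior_role_id', 'implied_role_id')

    implied = ImpliedRole(prior_role_id='admin-role-id',     # made up
                          implied_role_id='member-role-id')  # made up
    assert implied['prior_role_id'] == 'admin-role-id'
    assert set(ImpliedRole.required_keys) == set(implied)
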
class Trust(Model):
"""Trust object.
diff --git a/keystone-moon/keystone/common/openssl.py b/keystone-moon/keystone/common/openssl.py
index be56b9cc..0bea6d8e 100644
--- a/keystone-moon/keystone/common/openssl.py
+++ b/keystone-moon/keystone/common/openssl.py
@@ -63,42 +63,35 @@ class BaseCertificateConfigure(object):
'cert_subject': conf_obj.cert_subject}
try:
- # OpenSSL 1.0 and newer support default_md = default, olders do not
- openssl_ver = environment.subprocess.Popen(
- ['openssl', 'version'],
- stdout=environment.subprocess.PIPE).stdout.read()
- if "OpenSSL 0." in openssl_ver:
+ # OpenSSL 1.0 and newer support default_md = default,
+ # older versions do not
+ openssl_ver = environment.subprocess.check_output( # the arguments
+ # are hardcoded and just check the openssl version
+ ['openssl', 'version'])
+ if b'OpenSSL 0.' in openssl_ver:
self.ssl_dictionary['default_md'] = 'sha1'
- except OSError:
- LOG.warn(_LW('Failed to invoke ``openssl version``, '
- 'assuming is v1.0 or newer'))
+ except environment.subprocess.CalledProcessError:
+ LOG.warning(_LW('Failed to invoke ``openssl version``, '
+ 'assuming it is v1.0 or newer'))
self.ssl_dictionary.update(kwargs)
def exec_command(self, command):
- to_exec = []
- for cmd_part in command:
- to_exec.append(cmd_part % self.ssl_dictionary)
+ to_exec = [part % self.ssl_dictionary for part in command]
LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
- # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
- # output can be captured.
- # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
- # So use Popen instead.
- process = environment.subprocess.Popen(
- to_exec,
- stdout=environment.subprocess.PIPE,
- stderr=environment.subprocess.STDOUT)
- output = process.communicate()[0]
- retcode = process.poll()
- if retcode:
- LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s'
+ try:
+ # NOTE(shaleh): use check_output instead of the simpler
+ # `check_call()` in order to log any output from an error.
+ environment.subprocess.check_output( # the arguments being passed
+ # in are defined in this file and trusted to build CAs, keys
+ # and certs
+ to_exec,
+ stderr=environment.subprocess.STDOUT)
+ except environment.subprocess.CalledProcessError as e:
+ LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
'- %(output)s'),
{'to_exec': to_exec,
- 'retcode': retcode,
- 'output': output})
- e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
- # NOTE(Jeffrey4l): Python 2.6 compatibility:
- # CalledProcessError did not have output keyword argument
- e.output = output
+ 'retcode': e.returncode,
+ 'output': e.output})
raise e
def clean_up_existing_files(self):
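
The rework above drops the Popen/poll dance because CalledProcessError already carries returncode and output. A standalone sketch of the same pattern with the stdlib subprocess module (environment.subprocess resolves to a subprocess-compatible module; this assumes openssl is on PATH):

    import logging
    import subprocess

    LOG = logging.getLogger(__name__)

    def run_or_log(cmd):
        try:
            # stderr folded into the captured output so failures are
            # fully logged, mirroring the change above
            return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error('Command %s exited with %s - %s',
                      cmd, e.returncode, e.output)
            raise

    print(run_or_log(['openssl', 'version']))
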
@@ -134,9 +127,8 @@ class BaseCertificateConfigure(object):
user=self.use_keystone_user,
group=self.use_keystone_group, log=LOG)
if not file_exists(self.ssl_config_file_name):
- ssl_config_file = open(self.ssl_config_file_name, 'w')
- ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
- ssl_config_file.close()
+ with open(self.ssl_config_file_name, 'w') as ssl_config_file:
+ ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
utils.set_permissions(self.ssl_config_file_name,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
@@ -144,9 +136,8 @@ class BaseCertificateConfigure(object):
index_file_name = os.path.join(self.conf_dir, 'index.txt')
if not file_exists(index_file_name):
- index_file = open(index_file_name, 'w')
- index_file.write('')
- index_file.close()
+ with open(index_file_name, 'w') as index_file:
+ index_file.write('')
utils.set_permissions(index_file_name,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
@@ -154,9 +145,8 @@ class BaseCertificateConfigure(object):
serial_file_name = os.path.join(self.conf_dir, 'serial')
if not file_exists(serial_file_name):
- index_file = open(serial_file_name, 'w')
- index_file.write('01')
- index_file.close()
+ with open(serial_file_name, 'w') as index_file:
+ index_file.write('01')
utils.set_permissions(serial_file_name,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
diff --git a/keystone-moon/keystone/common/router.py b/keystone-moon/keystone/common/router.py
index ce4e834d..74e03ad2 100644
--- a/keystone-moon/keystone/common/router.py
+++ b/keystone-moon/keystone/common/router.py
@@ -19,12 +19,14 @@ from keystone.common import wsgi
class Router(wsgi.ComposableRouter):
def __init__(self, controller, collection_key, key,
resource_descriptions=None,
- is_entity_implemented=True):
+ is_entity_implemented=True,
+ method_template=None):
self.controller = controller
self.key = key
self.collection_key = collection_key
self._resource_descriptions = resource_descriptions
self._is_entity_implemented = is_entity_implemented
+ self.method_template = method_template or '%s'
def add_routes(self, mapper):
collection_path = '/%(collection_key)s' % {
@@ -36,27 +38,27 @@ class Router(wsgi.ComposableRouter):
mapper.connect(
collection_path,
controller=self.controller,
- action='create_%s' % self.key,
+ action=self.method_template % 'create_%s' % self.key,
conditions=dict(method=['POST']))
mapper.connect(
collection_path,
controller=self.controller,
- action='list_%s' % self.collection_key,
+ action=self.method_template % 'list_%s' % self.collection_key,
conditions=dict(method=['GET']))
mapper.connect(
entity_path,
controller=self.controller,
- action='get_%s' % self.key,
+ action=self.method_template % 'get_%s' % self.key,
conditions=dict(method=['GET']))
mapper.connect(
entity_path,
controller=self.controller,
- action='update_%s' % self.key,
+ action=self.method_template % 'update_%s' % self.key,
conditions=dict(method=['PATCH']))
mapper.connect(
entity_path,
controller=self.controller,
- action='delete_%s' % self.key,
+ action=self.method_template % 'delete_%s' % self.key,
conditions=dict(method=['DELETE']))
# Add the collection resource and entity resource to the resource
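
The chained interpolation in these actions leans on % being left-associative: the template is applied to the action pattern first, and the key is substituted second. For example, with a hypothetical template:

    method_template = '%s_wrapper'   # hypothetical template
    key = 'user'
    action = method_template % 'create_%s' % key
    # evaluates as ('%s_wrapper' % 'create_%s') % 'user'
    assert action == 'create_user_wrapper'
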
diff --git a/keystone-moon/keystone/common/sql/core.py b/keystone-moon/keystone/common/sql/core.py
index ebd61bb7..cb026356 100644
--- a/keystone-moon/keystone/common/sql/core.py
+++ b/keystone-moon/keystone/common/sql/core.py
@@ -18,14 +18,13 @@ Before using this module, call initialize(). This has to be done before
CONF() because it sets up configuration options.
"""
-import contextlib
import functools
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options as db_options
+from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import models
-from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_serialization import jsonutils
import six
@@ -34,6 +33,7 @@ from sqlalchemy.ext import declarative
from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute
from sqlalchemy import types as sql_types
+from keystone.common import driver_hints
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
@@ -68,7 +68,6 @@ flag_modified = flag_modified
def initialize():
"""Initialize the module."""
-
db_options.set_defaults(
CONF,
connection="sqlite:///keystone.db")
@@ -166,77 +165,47 @@ class ModelDictMixin(object):
return {name: getattr(self, name) for name in names}
-_engine_facade = None
+_main_context_manager = None
-def _get_engine_facade():
- global _engine_facade
+def _get_main_context_manager():
+ global _main_context_manager
- if not _engine_facade:
- _engine_facade = db_session.EngineFacade.from_config(CONF)
+ if not _main_context_manager:
+ _main_context_manager = enginefacade.transaction_context()
- return _engine_facade
+ return _main_context_manager
def cleanup():
- global _engine_facade
+ global _main_context_manager
- _engine_facade = None
+ _main_context_manager = None
-def get_engine():
- return _get_engine_facade().get_engine()
+_CONTEXT = None
-def get_session(expire_on_commit=False):
- return _get_engine_facade().get_session(expire_on_commit=expire_on_commit)
+def _get_context():
+ global _CONTEXT
+ if _CONTEXT is None:
+ # NOTE(dims): Delay the `threading.local` import to allow for
+ # eventlet/gevent monkeypatching to happen
+ import threading
+ _CONTEXT = threading.local()
+ return _CONTEXT
-@contextlib.contextmanager
-def transaction(expire_on_commit=False):
- """Return a SQLAlchemy session in a scoped transaction."""
- session = get_session(expire_on_commit=expire_on_commit)
- with session.begin():
- yield session
+def session_for_read():
+ return _get_main_context_manager().reader.using(_get_context())
-def truncated(f):
- """Ensure list truncation is detected in Driver list entity methods.
+def session_for_write():
+ return _get_main_context_manager().writer.using(_get_context())
- This is designed to wrap and sql Driver list_{entity} methods in order to
- calculate if the resultant list has been truncated. Provided a limit dict
- is found in the hints list, we increment the limit by one so as to ask the
- wrapped function for one more entity than the limit, and then once the list
- has been generated, we check to see if the original limit has been
- exceeded, in which case we truncate back to that limit and set the
- 'truncated' boolean to 'true' in the hints limit dict.
- """
- @functools.wraps(f)
- def wrapper(self, hints, *args, **kwargs):
- if not hasattr(hints, 'limit'):
- raise exception.UnexpectedError(
- _('Cannot truncate a driver call without hints list as '
- 'first parameter after self '))
-
- if hints.limit is None:
- return f(self, hints, *args, **kwargs)
-
- # A limit is set, so ask for one more entry than we need
- list_limit = hints.limit['limit']
- hints.set_limit(list_limit + 1)
- ref_list = f(self, hints, *args, **kwargs)
-
- # If we got more than the original limit then trim back the list and
- # mark it truncated. In both cases, make sure we set the limit back
- # to its original value.
- if len(ref_list) > list_limit:
- hints.set_limit(list_limit, truncated=True)
- return ref_list[:list_limit]
- else:
- hints.set_limit(list_limit)
- return ref_list
- return wrapper
+def truncated(f):
+ return driver_hints.truncated(f)
class _WontMatch(Exception):
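
session_for_read()/session_for_write() hand back enginefacade's reader/writer context managers keyed to a thread-local object, replacing explicit get_session() calls. A self-contained sketch of the same pattern against in-memory sqlite (the table and rows are illustrative):

    import threading

    from oslo_db.sqlalchemy import enginefacade
    from sqlalchemy import text

    context_manager = enginefacade.transaction_context()
    context_manager.configure(connection='sqlite://')
    _context = threading.local()    # mirrors _get_context() above

    with context_manager.writer.using(_context) as session:
        session.execute(text('CREATE TABLE t (id INTEGER)'))
        session.execute(text('INSERT INTO t VALUES (1)'))
        # commit happens automatically when the writer block exits

    with context_manager.reader.using(_context) as session:
        rows = session.execute(text('SELECT id FROM t')).fetchall()
        assert rows == [(1,)]
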
@@ -325,42 +294,41 @@ def _filter(model, query, hints):
satisfied_filters.append(filter_)
return query.filter(query_term)
- def exact_filter(model, filter_, cumulative_filter_dict):
+ def exact_filter(model, query, filter_, satisfied_filters):
"""Applies an exact filter to a query.
:param model: the table model in question
+ :param query: query to apply filters to
:param dict filter_: describes this filter
- :param dict cumulative_filter_dict: describes the set of exact filters
- built up so far
-
+ :param list satisfied_filters: filter_ will be added if it is
+ satisfied.
+ :returns: query updated to add any exact filters we could
+ satisfy
"""
key = filter_['name']
col = getattr(model, key)
if isinstance(col.property.columns[0].type, sql.types.Boolean):
- cumulative_filter_dict[key] = (
- utils.attr_as_boolean(filter_['value']))
+ filter_val = utils.attr_as_boolean(filter_['value'])
else:
_WontMatch.check(filter_['value'], col)
- cumulative_filter_dict[key] = filter_['value']
+ filter_val = filter_['value']
+
+ satisfied_filters.append(filter_)
+ return query.filter(col == filter_val)
try:
- filter_dict = {}
satisfied_filters = []
for filter_ in hints.filters:
if filter_['name'] not in model.attributes:
continue
if filter_['comparator'] == 'equals':
- exact_filter(model, filter_, filter_dict)
- satisfied_filters.append(filter_)
+ query = exact_filter(model, query, filter_,
+ satisfied_filters)
else:
query = inexact_filter(model, query, filter_,
satisfied_filters)
- # Apply any exact filters we built up
- if filter_dict:
- query = query.filter_by(**filter_dict)
-
# Remove satisfied filters, then the caller will know remaining filters
for filter_ in satisfied_filters:
hints.filters.remove(filter_)
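
Under the new flow, each satisfied equals filter extends the query immediately and is then removed from the hints, so the caller sees only what the driver could not satisfy. A compact, self-contained rendition (the model, data, and bare filter list are illustrative):

    import sqlalchemy as sa
    from sqlalchemy.ext import declarative
    from sqlalchemy.orm import sessionmaker

    Base = declarative.declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = sa.Column(sa.Integer, primary_key=True)
        name = sa.Column(sa.String(64))

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(User(id=1, name='fred'))
    session.commit()

    # 'hints' reduced to a bare list of filter dicts for the sketch
    filters = [{'name': 'name', 'value': 'fred', 'comparator': 'equals'}]
    satisfied = []
    query = session.query(User)
    for f in list(filters):
        if f['comparator'] == 'equals':
            query = query.filter(getattr(User, f['name']) == f['value'])
            satisfied.append(f)
    for f in satisfied:
        filters.remove(f)   # caller now sees only unsatisfied filters
    assert [u.name for u in query] == ['fred'] and filters == []
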
@@ -377,7 +345,7 @@ def _limit(query, hints):
:param query: query to apply filters to
:param hints: contains the list of filters and limit details.
- :returns updated query
+ :returns: updated query
"""
# NOTE(henry-nash): If we were to implement pagination, then we
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/README b/keystone-moon/keystone/common/sql/migrate_repo/README
index 6218f8ca..4ea8dd4f 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/README
+++ b/keystone-moon/keystone/common/sql/migrate_repo/README
@@ -1,4 +1,4 @@
This is a database migration repository.
More information at
-http://code.google.com/p/sqlalchemy-migrate/
+https://git.openstack.org/cgit/openstack/sqlalchemy-migrate
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
index f73dfc12..e69de29b 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
@@ -1,17 +0,0 @@
-# Copyright 2014 Mirantis.inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-DB_INIT_VERSION = 43
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
deleted file mode 100644
index c4b41580..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2014 Mirantis.inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-
-def upgrade(migrate_engine):
-
- if migrate_engine.name == 'mysql':
- meta = sa.MetaData(bind=migrate_engine)
- endpoint = sa.Table('endpoint', meta, autoload=True)
-
- # NOTE(i159): MySQL requires indexes on referencing columns, and those
- # indexes create automatically. That those indexes will have different
- # names, depending on version of MySQL used. We shoud make this naming
- # consistent, by reverting index name to a consistent condition.
- if any(i for i in endpoint.indexes if
- list(i.columns.keys()) == ['service_id']
- and i.name != 'service_id'):
- # NOTE(i159): by this action will be made re-creation of an index
- # with the new name. This can be considered as renaming under the
- # MySQL rules.
- sa.Index('service_id', endpoint.c.service_id).create()
-
- user_group_membership = sa.Table('user_group_membership',
- meta, autoload=True)
-
- if any(i for i in user_group_membership.indexes if
- list(i.columns.keys()) == ['group_id']
- and i.name != 'group_id'):
- sa.Index('group_id', user_group_membership.c.group_id).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
deleted file mode 100644
index 59720f6e..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.identity.mapping_backends import mapping
-
-
-MAPPING_TABLE = 'id_mapping'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- mapping_table = sql.Table(
- MAPPING_TABLE,
- meta,
- sql.Column('public_id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- sql.Column('local_id', sql.String(64), nullable=False),
- sql.Column('entity_type', sql.Enum(
- mapping.EntityType.USER,
- mapping.EntityType.GROUP,
- name='entity_type'),
- nullable=False),
- sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- mapping_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
deleted file mode 100644
index 86302a8f..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-_REGION_TABLE_NAME = 'region'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
- url_column = sql.Column('url', sql.String(255), nullable=True)
- region_table.create_column(url_column)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
deleted file mode 100644
index c2be48f4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Migrated the endpoint 'region' column to 'region_id.
-
-In addition to the rename, the new column is made a foreign key to the
-respective 'region' in the region table, ensuring that we auto-create
-any regions that are missing. Further, since the old region column
-was 255 chars, and the id column in the region table is 64 chars, the size
-of the id column in the region table is increased to match.
-
-To Upgrade:
-
-
-Region Table
-
-Increase the size of the if column in the region table
-
-Endpoint Table
-
-a. Add the endpoint region_id column, that is a foreign key to the region table
-b. For each endpoint
- i. Ensure there is matching region in region table, and if not, create it
- ii. Assign the id to the region_id column
-c. Remove the column region
-
-"""
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-
-def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
- endpoints = list(endpoint_table.select().execute())
-
- for endpoint in endpoints:
- if endpoint.region is None:
- continue
-
- region = list(region_table.select(
- whereclause=region_table.c.id == endpoint.region).execute())
- if len(region) == 1:
- region_id = region[0].id
- else:
- region_id = endpoint.region
- region = {'id': region_id,
- 'description': '',
- 'extra': '{}'}
- session = sessionmaker(bind=migrate_engine)()
- region_table.insert(region).execute()
- session.commit()
-
- new_values = {'region_id': region_id}
- f = endpoint_table.c.id == endpoint.id
- update = endpoint_table.update().where(f).values(new_values)
- migrate_engine.execute(update)
-
- migrate.ForeignKeyConstraint(
- columns=[endpoint_table.c.region_id],
- refcolumns=[region_table.c.id],
- name='fk_endpoint_region_id').create()
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table('region', meta, autoload=True)
- region_table.c.id.alter(type=sql.String(length=255))
- region_table.c.parent_region_id.alter(type=sql.String(length=255))
- endpoint_table = sql.Table('endpoint', meta, autoload=True)
- region_id_column = sql.Column('region_id',
- sql.String(length=255), nullable=True)
- region_id_column.create(endpoint_table)
-
- _migrate_to_region_id(migrate_engine, region_table, endpoint_table)
-
- endpoint_table.c.region.drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
deleted file mode 100644
index caf4d66f..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-ASSIGNMENT_TABLE = 'assignment'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
- idx = sql.Index('ix_actor_id', assignment.c.actor_id)
- idx.create(migrate_engine)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
deleted file mode 100644
index a7f327ea..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Add indexes to `user_id` and `trust_id` columns for the `token` table."""
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- token = sql.Table('token', meta, autoload=True)
-
- sql.Index('ix_token_user_id', token.c.user_id).create()
- sql.Index('ix_token_trust_id', token.c.trust_id).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
deleted file mode 100644
index 8bb40490..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
deleted file mode 100644
index ca9b3ce2..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-_PROJECT_TABLE_NAME = 'project'
-_PARENT_ID_COLUMN_NAME = 'parent_id'
-
-
-def list_constraints(project_table):
- constraints = [{'table': project_table,
- 'fk_column': _PARENT_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id}]
-
- return constraints
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- parent_id = sql.Column(_PARENT_ID_COLUMN_NAME, sql.String(64),
- nullable=True)
- project_table.create_column(parent_id)
-
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.add_constraints(list_constraints(project_table))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
deleted file mode 100644
index f7a69bb6..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-from keystone.common.sql import migration_helpers
-
-
-def list_constraints(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
- assignment_table = sqlalchemy.Table('assignment', meta, autoload=True)
- role_table = sqlalchemy.Table('role', meta, autoload=True)
-
- constraints = [{'table': assignment_table,
- 'fk_column': 'role_id',
- 'ref_column': role_table.c.id}]
- return constraints
-
-
-def upgrade(migrate_engine):
- # SQLite does not support constraints, and querying the constraints
- # raises an exception
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.remove_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
deleted file mode 100644
index e45133ab..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-_REGION_TABLE_NAME = 'region'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
- region_table.drop_column('url')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
deleted file mode 100644
index 637f2151..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-from keystone.common.sql import migration_helpers
-
-
-def list_constraints(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
- user_table = sqlalchemy.Table('user', meta, autoload=True)
- group_table = sqlalchemy.Table('group', meta, autoload=True)
- domain_table = sqlalchemy.Table('domain', meta, autoload=True)
-
- constraints = [{'table': user_table,
- 'fk_column': 'domain_id',
- 'ref_column': domain_table.c.id},
- {'table': group_table,
- 'fk_column': 'domain_id',
- 'ref_column': domain_table.c.id}]
- return constraints
-
-
-def upgrade(migrate_engine):
- # SQLite does not support constraints, and querying the constraints
- # raises an exception
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.remove_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
deleted file mode 100644
index 63a86c11..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-WHITELIST_TABLE = 'whitelisted_config'
-SENSITIVE_TABLE = 'sensitive_config'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- whitelist_table = sql.Table(
- WHITELIST_TABLE,
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- whitelist_table.create(migrate_engine, checkfirst=True)
-
- sensitive_table = sql.Table(
- SENSITIVE_TABLE,
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- sensitive_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
deleted file mode 100644
index fe0cee88..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- service_table = sql.Table('service', meta, autoload=True)
- services = list(service_table.select().execute())
-
- for service in services:
- if service.extra is not None:
- extra_dict = jsonutils.loads(service.extra)
- else:
- extra_dict = {}
-
- # Skip records where service is not null
- if extra_dict.get('name') is not None:
- continue
- # Default the name to empty string
- extra_dict['name'] = ''
- new_values = {
- 'extra': jsonutils.dumps(extra_dict),
- }
- f = service_table.c.id == service.id
- update = service_table.update().where(f).values(new_values)
- migrate_engine.execute(update)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py
deleted file mode 100644
index b9df1a55..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- # NOTE(viktors): Migration 062 removed FK from `assignment` table, but
- # MySQL silently creates indexes on FK constraints, so we should remove
- # this index manually.
- if migrate_engine.name == 'mysql':
- meta = sqlalchemy.MetaData(bind=migrate_engine)
- table = sqlalchemy.Table('assignment', meta, autoload=True)
- for index in table.indexes:
- if [c.name for c in index.columns] == ['role_id']:
- index.drop(migrate_engine)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py
index 6f326ecf..a6dbed67 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py
@@ -12,18 +12,15 @@
import migrate
-from oslo_config import cfg
from oslo_log import log
import sqlalchemy as sql
-from sqlalchemy import orm
from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql as ks_sql
-from keystone.common.sql import migration_helpers
+from keystone.identity.mapping_backends import mapping as mapping_backend
LOG = log.getLogger(__name__)
-CONF = cfg.CONF
def upgrade(migrate_engine):
@@ -64,12 +61,12 @@ def upgrade(migrate_engine):
sql.Column('id', sql.String(length=64), primary_key=True),
sql.Column('legacy_endpoint_id', sql.String(length=64)),
sql.Column('interface', sql.String(length=8), nullable=False),
- sql.Column('region', sql.String(length=255)),
sql.Column('service_id', sql.String(length=64), nullable=False),
sql.Column('url', sql.Text, nullable=False),
sql.Column('extra', ks_sql.JsonBlob.impl),
sql.Column('enabled', sql.Boolean, nullable=False, default=True,
server_default='1'),
+ sql.Column('region_id', sql.String(length=255), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8')
@@ -100,6 +97,7 @@ def upgrade(migrate_engine):
sql.Column('description', sql.Text),
sql.Column('enabled', sql.Boolean),
sql.Column('domain_id', sql.String(length=64), nullable=False),
+ sql.Column('parent_id', sql.String(64), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8')
@@ -177,9 +175,9 @@ def upgrade(migrate_engine):
region = sql.Table(
'region',
meta,
- sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('id', sql.String(255), primary_key=True),
sql.Column('description', sql.String(255), nullable=False),
- sql.Column('parent_region_id', sql.String(64), nullable=True),
+ sql.Column('parent_region_id', sql.String(255), nullable=True),
sql.Column('extra', sql.Text()),
mysql_engine='InnoDB',
mysql_charset='utf8')
@@ -202,11 +200,45 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
+ mapping = sql.Table(
+ 'id_mapping',
+ meta,
+ sql.Column('public_id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('local_id', sql.String(64), nullable=False),
+ sql.Column('entity_type', sql.Enum(
+ mapping_backend.EntityType.USER,
+ mapping_backend.EntityType.GROUP,
+ name='entity_type'),
+ nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ domain_config_whitelist = sql.Table(
+ 'whitelisted_config',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ domain_config_sensitive = sql.Table(
+ 'sensitive_config',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
# create all tables
- tables = [credential, domain, endpoint, group,
- policy, project, role, service,
- token, trust, trust_role, user,
- user_group_membership, region, assignment]
+ tables = [credential, domain, endpoint, group, policy, project, role,
+ service, token, trust, trust_role, user, user_group_membership,
+ region, assignment, mapping, domain_config_whitelist,
+ domain_config_sensitive]
for table in tables:
try:
@@ -229,11 +261,22 @@ def upgrade(migrate_engine):
name='ixu_project_name_domain_id').create()
migrate.UniqueConstraint(domain.c.name,
name='ixu_domain_name').create()
+ migrate.UniqueConstraint(mapping.c.domain_id,
+ mapping.c.local_id,
+ mapping.c.entity_type,
+ name='domain_id').create()
# Indexes
sql.Index('ix_token_expires', token.c.expires).create()
sql.Index('ix_token_expires_valid', token.c.expires,
token.c.valid).create()
+ sql.Index('ix_actor_id', assignment.c.actor_id).create()
+ sql.Index('ix_token_user_id', token.c.user_id).create()
+ sql.Index('ix_token_trust_id', token.c.trust_id).create()
+    # NOTE(stevemar): The two indexes below were named 'service_id' and
+    # 'group_id' in 050_fk_consistent_indexes.py and need to be preserved.

+ sql.Index('service_id', endpoint.c.service_id).create()
+ sql.Index('group_id', user_group_membership.c.group_id).create()
fkeys = [
{'columns': [endpoint.c.service_id],
@@ -247,33 +290,28 @@ def upgrade(migrate_engine):
'references':[user.c.id],
'name': 'fk_user_group_membership_user_id'},
- {'columns': [user.c.domain_id],
- 'references': [domain.c.id],
- 'name': 'fk_user_domain_id'},
-
- {'columns': [group.c.domain_id],
- 'references': [domain.c.id],
- 'name': 'fk_group_domain_id'},
-
{'columns': [project.c.domain_id],
'references': [domain.c.id],
'name': 'fk_project_domain_id'},
- {'columns': [assignment.c.role_id],
- 'references': [role.c.id]}
+ {'columns': [endpoint.c.region_id],
+ 'references': [region.c.id],
+ 'name': 'fk_endpoint_region_id'},
+
+ {'columns': [project.c.parent_id],
+ 'references': [project.c.id],
+ 'name': 'project_parent_id_fkey'},
]
+ if migrate_engine.name == 'sqlite':
+        # NOTE(stevemar): We need to keep this FK constraint due to 073, but
+        # only for sqlite; once we collapse 073 we can remove this constraint
+ fkeys.append(
+ {'columns': [assignment.c.role_id],
+ 'references': [role.c.id],
+ 'name': 'fk_assignment_role_id'})
+
for fkey in fkeys:
migrate.ForeignKeyConstraint(columns=fkey['columns'],
refcolumns=fkey['references'],
name=fkey.get('name')).create()
-
- # Create the default domain.
- session = orm.sessionmaker(bind=migrate_engine)()
- domain.insert(migration_helpers.get_default_domain()).execute()
- session.commit()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('Downgrade to pre-Icehouse release db schema is '
- 'unsupported.')
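For context, the `fkeys` loop shown above is the standard sqlalchemy-migrate idiom for adding named foreign keys after the tables exist. A minimal sketch, assuming the tables are reflected from an existing schema:

```python
import migrate
import sqlalchemy as sql


def add_named_fkeys(migrate_engine):
    meta = sql.MetaData(bind=migrate_engine)
    endpoint = sql.Table('endpoint', meta, autoload=True)
    region = sql.Table('region', meta, autoload=True)

    fkeys = [
        {'columns': [endpoint.c.region_id],
         'references': [region.c.id],
         'name': 'fk_endpoint_region_id'},
    ]
    for fkey in fkeys:
        # An explicit name keeps a later drop deterministic; when name is
        # None, the backend invents one.
        migrate.ForeignKeyConstraint(columns=fkey['columns'],
                                     refcolumns=fkey['references'],
                                     name=fkey.get('name')).create()
```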
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
index ffa210c4..205f809e 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
@@ -18,7 +18,7 @@ from keystone.assignment.backends import sql as assignment_sql
def upgrade(migrate_engine):
- """Inserts inherited column to assignment table PK contraints.
+ """Inserts inherited column to assignment table PK constraints.
For non-SQLite databases, it changes the constraint in the existing table.
@@ -26,7 +26,6 @@ def upgrade(migrate_engine):
assignment table with the new PK constraint and migrates the existing data.
"""
-
ASSIGNMENT_TABLE_NAME = 'assignment'
metadata = sql.MetaData()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py
new file mode 100644
index 00000000..9f6e8415
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
new file mode 100644
index 00000000..a0c307d0
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
@@ -0,0 +1,54 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='endpoint_policy',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to endpoint_policy extension migration 1. Only
+ # update if it has not been run.
+ if extension_version >= 1:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ endpoint_policy_table = sql.Table(
+ 'policy_association',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('policy_id', sql.String(64),
+ nullable=False),
+ sql.Column('endpoint_id', sql.String(64),
+ nullable=True),
+ sql.Column('service_id', sql.String(64),
+ nullable=True),
+ sql.Column('region_id', sql.String(64),
+ nullable=True),
+ sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ endpoint_policy_table.create(migrate_engine, checkfirst=True)
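The guard at the top of this migration is what lets a previously installed extension repo collapse into the common repo without re-running DDL. A condensed sketch of the idiom, with the table body elided and the helper name ours:

```python
from keystone.common.sql import migration_helpers


def upgrade_guarded(migrate_engine, extension, already_applied_version):
    # If the old extension repo was never version-controlled, get_db_version
    # raises; treat that as "nothing applied yet".
    try:
        extension_version = migration_helpers.get_db_version(
            extension=extension, engine=migrate_engine)
    except Exception:
        extension_version = 0

    # Skip the DDL when the standalone extension migration already ran.
    if extension_version >= already_applied_version:
        return

    # ... create tables here, always with checkfirst=True so a partially
    # migrated database does not fail on "table already exists".
```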
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
new file mode 100644
index 00000000..7e426373
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
@@ -0,0 +1,97 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+CONF = cfg.CONF
+_RELAY_STATE_PREFIX = 'relay_state_prefix'
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='federation',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to federation extension migration 8. Only
+ # update if it has not been run.
+ if extension_version >= 8:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ idp_table = sql.Table(
+ 'identity_provider',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ idp_table.create(migrate_engine, checkfirst=True)
+
+ federation_protocol_table = sql.Table(
+ 'federation_protocol',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('idp_id', sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+ primary_key=True),
+ sql.Column('mapping_id', sql.String(64), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ federation_protocol_table.create(migrate_engine, checkfirst=True)
+
+ mapping_table = sql.Table(
+ 'mapping',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('rules', sql.Text(), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ mapping_table.create(migrate_engine, checkfirst=True)
+
+ relay_state_prefix_default = CONF.saml.relay_state_prefix
+ sp_table = sql.Table(
+ 'service_provider',
+ meta,
+ sql.Column('auth_url', sql.String(256), nullable=False),
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ sql.Column('sp_url', sql.String(256), nullable=False),
+ sql.Column(_RELAY_STATE_PREFIX, sql.String(256), nullable=False,
+ server_default=relay_state_prefix_default),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ sp_table.create(migrate_engine, checkfirst=True)
+
+ idp_table = sql.Table('identity_provider', meta, autoload=True)
+ remote_id_table = sql.Table(
+ 'idp_remote_ids',
+ meta,
+ sql.Column('idp_id', sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE')),
+ sql.Column('remote_id', sql.String(255), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ remote_id_table.create(migrate_engine, checkfirst=True)
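Note that `relay_state_prefix` takes its server default from `CONF.saml.relay_state_prefix` at migration time, so the configured value is frozen into the schema. A self-contained sketch of the technique, with a locally registered stand-in option and a hypothetical table name:

```python
from oslo_config import cfg
import sqlalchemy as sql

CONF = cfg.CONF
# Hypothetical option, standing in for keystone's saml.relay_state_prefix.
CONF.register_opts([cfg.StrOpt('relay_state_prefix', default='ss:mem:')],
                   group='saml')


def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # The option is read once, here; its value is baked into the column's
    # server-side default, so later config changes do not alter the schema.
    default = CONF.saml.relay_state_prefix
    sp = sql.Table(
        'service_provider_example', meta,  # hypothetical table name
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('relay_state_prefix', sql.String(256), nullable=False,
                   server_default=default),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    sp.create(migrate_engine, checkfirst=True)
```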
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
new file mode 100644
index 00000000..5a859b4b
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
@@ -0,0 +1,75 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='oauth1',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to oauth extension migration 5. Only
+ # update if it has not been run.
+ if extension_version >= 5:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ consumer_table = sql.Table(
+ 'consumer',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('description', sql.String(64), nullable=True),
+ sql.Column('secret', sql.String(64), nullable=False),
+ sql.Column('extra', sql.Text(), nullable=False))
+ consumer_table.create(migrate_engine, checkfirst=True)
+
+ request_token_table = sql.Table(
+ 'request_token',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('request_secret', sql.String(64), nullable=False),
+ sql.Column('verifier', sql.String(64), nullable=True),
+ sql.Column('authorizing_user_id', sql.String(64), nullable=True),
+ sql.Column('requested_project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=True),
+ sql.Column('consumer_id', sql.String(64),
+ sql.ForeignKey('consumer.id'),
+ nullable=False, index=True),
+ sql.Column('expires_at', sql.String(64), nullable=True))
+ request_token_table.create(migrate_engine, checkfirst=True)
+
+ access_token_table = sql.Table(
+ 'access_token',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('access_secret', sql.String(64), nullable=False),
+ sql.Column('authorizing_user_id', sql.String(64),
+ nullable=False, index=True),
+ sql.Column('project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=False),
+ sql.Column('consumer_id', sql.String(64),
+ sql.ForeignKey('consumer.id'),
+ nullable=False, index=True),
+ sql.Column('expires_at', sql.String(64), nullable=True))
+ access_token_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
new file mode 100644
index 00000000..1a28a53c
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
@@ -0,0 +1,55 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='revoke',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to revoke extension migration 2. Only
+ # update if it has not been run.
+ if extension_version >= 2:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ service_table = sql.Table(
+ 'revocation_event',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64)),
+ sql.Column('project_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.Column('role_id', sql.String(64)),
+ sql.Column('trust_id', sql.String(64)),
+ sql.Column('consumer_id', sql.String(64)),
+ sql.Column('access_token_id', sql.String(64)),
+ sql.Column('issued_before', sql.DateTime(), nullable=False),
+ sql.Column('expires_at', sql.DateTime()),
+ sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
+ sql.Column('audit_id', sql.String(32), nullable=True),
+ sql.Column('audit_chain_id', sql.String(32), nullable=True))
+
+ service_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
new file mode 100644
index 00000000..5790bd98
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
@@ -0,0 +1,70 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='endpoint_filter',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to endpoint_filter extension migration 2. Only
+ # update if it has not been run.
+ if extension_version >= 2:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ EP_GROUP_ID = 'endpoint_group_id'
+ PROJECT_ID = 'project_id'
+
+ endpoint_filtering_table = sql.Table(
+ 'project_endpoint',
+ meta,
+ sql.Column(
+ 'endpoint_id',
+ sql.String(64),
+ primary_key=True,
+ nullable=False),
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ primary_key=True,
+ nullable=False))
+ endpoint_filtering_table.create(migrate_engine, checkfirst=True)
+
+ endpoint_group_table = sql.Table(
+ 'endpoint_group',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.Column('description', sql.Text, nullable=True),
+ sql.Column('filters', sql.Text(), nullable=False))
+ endpoint_group_table.create(migrate_engine, checkfirst=True)
+
+ project_endpoint_group_table = sql.Table(
+ 'project_endpoint_group',
+ meta,
+ sql.Column(EP_GROUP_ID, sql.String(64),
+ sql.ForeignKey('endpoint_group.id'), nullable=False),
+ sql.Column(PROJECT_ID, sql.String(64), nullable=False),
+ sql.PrimaryKeyConstraint(EP_GROUP_ID, PROJECT_ID))
+ project_endpoint_group_table.create(migrate_engine, checkfirst=True)
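The `project_endpoint_group` table above uses an explicit `PrimaryKeyConstraint` so the two FK columns together form the key. A minimal sketch of the same construction with hypothetical table and column names:

```python
import sqlalchemy as sql


def create_composite_pk_table(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine
    table = sql.Table(
        'example_membership', meta,  # hypothetical name
        sql.Column('group_id', sql.String(64), nullable=False),
        sql.Column('member_id', sql.String(64), nullable=False),
        # Both columns jointly form the primary key, so the same pair can
        # only appear once.
        sql.PrimaryKeyConstraint('group_id', 'member_id'))
    table.create(migrate_engine, checkfirst=True)
```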
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
index 2a98fb90..2b115ea4 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
@@ -1,3 +1,6 @@
+# Copyright 2015 Intel Corporation
+# All Rights Reserved
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -10,12 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+from migrate import UniqueConstraint
+from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
- pass
+ meta = MetaData(bind=migrate_engine)
+ trusts = Table('trust', meta, autoload=True)
+
+ UniqueConstraint('trustor_user_id', 'trustee_user_id', 'project_id',
+ 'impersonation', 'expires_at', table=trusts,
+ name='duplicate_trust_constraint').create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
new file mode 100644
index 00000000..7713ce8f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import migrate
+import sqlalchemy as sql
+
+
+ROLE_TABLE = 'role'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ implied_role = sql.Table(
+ 'implied_role', meta,
+ sql.Column('prior_role_id', sql.String(length=64), primary_key=True),
+ sql.Column(
+ 'implied_role_id', sql.String(length=64), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ implied_role.create()
+ role = sql.Table(ROLE_TABLE, meta, autoload=True)
+ fkeys = [
+ {'columns': [implied_role.c.prior_role_id],
+ 'references': [role.c.id]},
+ {'columns': [implied_role.c.implied_role_id],
+ 'references': [role.c.id]},
+ ]
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name')).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
new file mode 100644
index 00000000..8b792dfa
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+
+
+_ROLE_NAME_NEW_CONSTRAINT = 'ixu_role_name_domain_id'
+_ROLE_TABLE_NAME = 'role'
+_ROLE_NAME_COLUMN_NAME = 'name'
+_DOMAIN_ID_COLUMN_NAME = 'domain_id'
+_NULL_DOMAIN_ID = '<<null>>'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
+ domain_id = sql.Column(_DOMAIN_ID_COLUMN_NAME, sql.String(64),
+ nullable=False, server_default=_NULL_DOMAIN_ID)
+
+ # NOTE(morganfainberg): the `role_name` unique constraint is not
+    # guaranteed to be a fixed name, such as `ixu_role_name`, so we need to
+ # search for the correct constraint that only affects role_table.c.name
+ # and drop that constraint.
+ to_drop = None
+ if migrate_engine.name == 'mysql':
+ for c in role_table.indexes:
+ if (c.unique and len(c.columns) == 1 and
+ _ROLE_NAME_COLUMN_NAME in c.columns):
+ to_drop = c
+ break
+ else:
+ for c in role_table.constraints:
+ if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
+ to_drop = c
+ break
+
+ if to_drop is not None:
+ migrate.UniqueConstraint(role_table.c.name,
+ name=to_drop.name).drop()
+
+ # perform changes after constraint is dropped.
+ if 'domain_id' not in role_table.columns:
+ # Only create the column if it doesn't already exist.
+ role_table.create_column(domain_id)
+
+ migrate.UniqueConstraint(role_table.c.name,
+ role_table.c.domain_id,
+ name=_ROLE_NAME_NEW_CONSTRAINT).create()
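Because MySQL reflects a unique constraint back as a unique index while other backends expose it via `table.constraints`, the search above has to branch on the engine name. The same lookup condensed into a helper (a sketch; the helper name is ours):

```python
import sqlalchemy as sql


def find_unique_on_column(migrate_engine, table_name, column_name):
    meta = sql.MetaData(bind=migrate_engine)
    table = sql.Table(table_name, meta, autoload=True)
    if migrate_engine.name == 'mysql':
        # MySQL reports unique constraints as unique indexes.
        candidates = [i for i in table.indexes if i.unique]
    else:
        candidates = table.constraints
    for c in candidates:
        # Only a single-column constraint covering exactly this column.
        if len(c.columns) == 1 and column_name in c.columns:
            return c
    return None
```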
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
new file mode 100644
index 00000000..477c719a
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
@@ -0,0 +1,76 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+_PROJECT_TABLE_NAME = 'project'
+_DOMAIN_TABLE_NAME = 'domain'
+NULL_DOMAIN_ID = '<<keystone.domain.root>>'
+
+
+def upgrade(migrate_engine):
+
+ def _generate_root_domain_project():
+ # Generate a project that will act as a root for all domains, in order
+        # for us to be able to use a FK constraint on domain_id. Projects
+ # acting as a domain will not reference this as their parent_id, just
+ # as domain_id.
+ #
+ # This special project is filtered out by the driver, so is never
+ # visible to the manager or API.
+
+ project_ref = {
+ 'id': NULL_DOMAIN_ID,
+ 'name': NULL_DOMAIN_ID,
+ 'enabled': False,
+ 'description': '',
+ 'domain_id': NULL_DOMAIN_ID,
+ 'is_domain': True,
+ 'parent_id': None,
+ 'extra': '{}'
+ }
+ return project_ref
+
+ def _generate_root_domain():
+        # Generate a similar root for the domain table; this is an interim
+        # step so as to allow continuation of the current project domain_id FK.
+ #
+ # This special domain is filtered out by the driver, so is never
+ # visible to the manager or API.
+
+ domain_ref = {
+ 'id': NULL_DOMAIN_ID,
+ 'name': NULL_DOMAIN_ID,
+ 'enabled': False,
+ 'extra': '{}'
+ }
+ return domain_ref
+
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+ session = sql.orm.sessionmaker(bind=migrate_engine)()
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+ domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
+
+ root_domain = _generate_root_domain()
+ new_entry = domain_table.insert().values(**root_domain)
+ session.execute(new_entry)
+ session.commit()
+
+ root_domain_project = _generate_root_domain_project()
+ new_entry = project_table.insert().values(**root_domain_project)
+ session.execute(new_entry)
+ session.commit()
+
+ session.close()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
new file mode 100644
index 00000000..800ba47e
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ user = sql.Table('user', meta, autoload=True)
+
+ local_user = sql.Table(
+ 'local_user',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('user_id', sql.String(64),
+ sql.ForeignKey(user.c.id, ondelete='CASCADE'),
+ nullable=False, unique=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.UniqueConstraint('domain_id', 'name'))
+ local_user.create(migrate_engine, checkfirst=True)
+
+ password = sql.Table(
+ 'password',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('local_user_id', sql.Integer,
+ sql.ForeignKey(local_user.c.id, ondelete='CASCADE'),
+ nullable=False),
+ sql.Column('password', sql.String(128), nullable=False))
+ password.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
new file mode 100644
index 00000000..1f41fd89
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
@@ -0,0 +1,66 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+from sqlalchemy import func
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ user_table = sql.Table('user', meta, autoload=True)
+ local_user_table = sql.Table('local_user', meta, autoload=True)
+ password_table = sql.Table('password', meta, autoload=True)
+
+ # migrate data to local_user table
+ local_user_values = []
+ for row in user_table.select().execute():
+        # Skip any row that already exists in `local_user`; this can
+        # happen if we run into a partially migrated table due to
+        # bug #1549705.
+ filter_by = local_user_table.c.user_id == row['id']
+ user_count = sql.select([func.count()]).select_from(
+ local_user_table).where(filter_by).execute().fetchone()[0]
+ if user_count == 0:
+ local_user_values.append({'user_id': row['id'],
+ 'domain_id': row['domain_id'],
+ 'name': row['name']})
+ if local_user_values:
+ local_user_table.insert().values(local_user_values).execute()
+
+ # migrate data to password table
+ sel = (
+ sql.select([user_table, local_user_table], use_labels=True)
+ .select_from(user_table.join(local_user_table, user_table.c.id ==
+ local_user_table.c.user_id))
+ )
+ user_rows = sel.execute()
+ password_values = []
+ for row in user_rows:
+ if row['user_password']:
+ password_values.append({'local_user_id': row['local_user_id'],
+ 'password': row['user_password']})
+ if password_values:
+ password_table.insert().values(password_values).execute()
+
+ # remove domain_id and name unique constraint
+ if migrate_engine.name != 'sqlite':
+ migrate.UniqueConstraint(user_table.c.domain_id,
+ user_table.c.name,
+ name='ixu_user_name_domain_id').drop()
+
+ # drop user columns
+ user_table.c.domain_id.drop()
+ user_table.c.name.drop()
+ user_table.c.password.drop()
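The data move above follows a common shape: read with a plain `select()`, filter in Python, collect dicts, then issue one bulk `insert().values(...)` instead of one INSERT per row. A minimal sketch of that shape, with a hypothetical column mapping:

```python
import sqlalchemy as sql


def copy_column(migrate_engine, src_name, dst_name):
    meta = sql.MetaData(bind=migrate_engine)
    src = sql.Table(src_name, meta, autoload=True)
    dst = sql.Table(dst_name, meta, autoload=True)

    rows = []
    for row in src.select().execute():
        # Hypothetical mapping: rename 'id' to 'user_id' on the way over.
        rows.append({'user_id': row['id'], 'name': row['name']})
    # A single multi-row INSERT keeps per-row overhead down.
    if rows:
        dst.insert().values(rows).execute()
```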
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
new file mode 100644
index 00000000..5e841899
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import migrate
+import sqlalchemy as sql
+
+
+ROLE_TABLE = 'role'
+IMPLIED_ROLE_TABLE = 'implied_role'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ role = sql.Table(ROLE_TABLE, meta, autoload=True)
+ implied_role = sql.Table(IMPLIED_ROLE_TABLE, meta, autoload=True)
+
+ fkeys = [
+ {'columns': [implied_role.c.prior_role_id],
+ 'references': [role.c.id]},
+ {'columns': [implied_role.c.implied_role_id],
+ 'references': [role.c.id]},
+ ]
+
+    # NOTE(stevemar): We need to divide these into two separate loops;
+    # otherwise they may clobber each other and only end up with one
+    # foreign key.
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name')).drop()
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name'),
+ ondelete="CASCADE").create()
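As the NOTE says, the drop pass must finish before the create pass starts. Reduced to its skeleton (a sketch; `fkeys` holds dicts shaped like the ones above):

```python
import migrate


def recreate_fkeys_cascaded(fkeys):
    # First pass: remove every existing constraint before touching any
    # replacements.
    for fkey in fkeys:
        migrate.ForeignKeyConstraint(columns=fkey['columns'],
                                     refcolumns=fkey['references'],
                                     name=fkey.get('name')).drop()
    # Second pass: recreate them all with ON DELETE CASCADE.
    for fkey in fkeys:
        migrate.ForeignKeyConstraint(columns=fkey['columns'],
                                     refcolumns=fkey['references'],
                                     name=fkey.get('name'),
                                     ondelete='CASCADE').create()
```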
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
new file mode 100644
index 00000000..f6bba7d9
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
@@ -0,0 +1,125 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+_PROJECT_TABLE_NAME = 'project'
+_DOMAIN_TABLE_NAME = 'domain'
+_PARENT_ID_COLUMN_NAME = 'parent_id'
+_DOMAIN_ID_COLUMN_NAME = 'domain_id'
+
+# Above the driver level, the domain_id of a project acting as a domain is
+# None. However, in order to enable sql integrity constraints to still operate
+# on this column, we create a special "root of all domains" row, with an ID of
+# NULL_DOMAIN_ID, which all projects acting as a domain reference in their
+# domain_id attribute. This special row, as well as NULL_DOMAIN_ID, are never
+# exposed outside of the sql driver layer.
+NULL_DOMAIN_ID = '<<keystone.domain.root>>'
+
+
+def list_existing_project_constraints(project_table, domain_table):
+ constraints = [{'table': project_table,
+ 'fk_column': _PARENT_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id},
+ {'table': project_table,
+ 'fk_column': _DOMAIN_ID_COLUMN_NAME,
+ 'ref_column': domain_table.c.id}]
+
+ return constraints
+
+
+def list_new_project_constraints(project_table):
+ constraints = [{'table': project_table,
+ 'fk_column': _PARENT_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id},
+ {'table': project_table,
+ 'fk_column': _DOMAIN_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id}]
+
+ return constraints
+
+
+def upgrade(migrate_engine):
+
+ def _project_from_domain(domain):
+ # Creates a project dict with is_domain=True from the provided
+ # domain.
+
+ description = None
+ extra = {}
+ if domain.extra is not None:
+ # 'description' property is an extra attribute in domains but a
+ # first class attribute in projects
+ extra = json.loads(domain.extra)
+ description = extra.pop('description', None)
+
+ return {
+ 'id': domain.id,
+ 'name': domain.name,
+ 'enabled': domain.enabled,
+ 'description': description,
+ 'domain_id': NULL_DOMAIN_ID,
+ 'is_domain': True,
+ 'parent_id': None,
+ 'extra': json.dumps(extra)
+ }
+
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+ session = sql.orm.sessionmaker(bind=migrate_engine)()
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+ domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
+
+    # NOTE(htruta): Remove the parent_id constraint during the migration,
+    # because for every root project inside a domain we will set its
+    # domain_id as its parent_id. We re-enable the constraint at the
+    # end of this method. We also remove the domain_id constraint, which
+    # will be recreated as a FK to the project id at the end.
+ migration_helpers.remove_constraints(
+ list_existing_project_constraints(project_table, domain_table))
+
+ # For each domain, create a project acting as a domain. We ignore the
+ # "root of all domains" row, since we already have one of these in the
+ # project table.
+ domains = list(domain_table.select().execute())
+ for domain in domains:
+ if domain.id == NULL_DOMAIN_ID:
+ continue
+ is_domain_project = _project_from_domain(domain)
+ new_entry = project_table.insert().values(**is_domain_project)
+ session.execute(new_entry)
+ session.commit()
+
+    # For each project that has no parent (i.e. a top-level project), update
+    # its parent_id to point at the project acting as its domain. We ignore
+ # the "root of all domains" row, since its parent_id must always be None.
+ projects = list(project_table.select().execute())
+ for project in projects:
+ if (project.parent_id is not None or project.is_domain or
+ project.id == NULL_DOMAIN_ID):
+ continue
+ values = {'parent_id': project.domain_id}
+ update = project_table.update().where(
+ project_table.c.id == project.id).values(values)
+ session.execute(update)
+ session.commit()
+
+ migration_helpers.add_constraints(
+ list_new_project_constraints(project_table))
+
+ session.close()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
new file mode 100644
index 00000000..6fd3f051
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ user_table = sql.Table('user', meta, autoload=True)
+ idp_table = sql.Table('identity_provider', meta, autoload=True)
+ protocol_table = sql.Table('federation_protocol', meta, autoload=True)
+
+ federated_table = sql.Table(
+ 'federated_user',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('user_id', sql.String(64),
+ sql.ForeignKey(user_table.c.id, ondelete='CASCADE'),
+ nullable=False),
+ sql.Column('idp_id', sql.String(64),
+ sql.ForeignKey(idp_table.c.id, ondelete='CASCADE'),
+ nullable=False),
+ sql.Column('protocol_id', sql.String(64), nullable=False),
+ sql.Column('unique_id', sql.String(255), nullable=False),
+ sql.Column('display_name', sql.String(255), nullable=True),
+ sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'))
+ federated_table.create(migrate_engine, checkfirst=True)
+
+ migrate.ForeignKeyConstraint(
+ columns=[federated_table.c.protocol_id, federated_table.c.idp_id],
+ refcolumns=[protocol_table.c.id, protocol_table.c.idp_id]).create()
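Since `federation_protocol` has a composite primary key (`id`, `idp_id`), the foreign key from `federated_user` must list both columns pairwise, in matching order. The same constraint isolated into a sketch:

```python
import migrate
import sqlalchemy as sql


def add_composite_fkey(migrate_engine):
    meta = sql.MetaData(bind=migrate_engine)
    child = sql.Table('federated_user', meta, autoload=True)
    parent = sql.Table('federation_protocol', meta, autoload=True)
    # Column order must line up pairwise with the referenced composite key.
    migrate.ForeignKeyConstraint(
        columns=[child.c.protocol_id, child.c.idp_id],
        refcolumns=[parent.c.id, parent.c.idp_id]).create()
```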
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
new file mode 100644
index 00000000..7a75f7b1
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+    # You can specify primary keys when creating tables; however, adding
+    # an auto-increment integer primary key to an existing table is not
+    # supported across all engines. Thus, the approach is to:
+    # (1) create a new revocation_event table with an integer pkey,
+    # (2) migrate data from the old table to the new table,
+    # (3) delete the old revocation_event table, and
+    # (4) rename the new table to revocation_event.
+ revocation_table = sql.Table('revocation_event', meta, autoload=True)
+
+ revocation_table_new = sql.Table(
+ 'revocation_event_new',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True),
+ sql.Column('domain_id', sql.String(64)),
+ sql.Column('project_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.Column('role_id', sql.String(64)),
+ sql.Column('trust_id', sql.String(64)),
+ sql.Column('consumer_id', sql.String(64)),
+ sql.Column('access_token_id', sql.String(64)),
+ sql.Column('issued_before', sql.DateTime(), nullable=False),
+ sql.Column('expires_at', sql.DateTime()),
+ sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
+ sql.Column('audit_id', sql.String(32), nullable=True),
+ sql.Column('audit_chain_id', sql.String(32), nullable=True))
+ revocation_table_new.create(migrate_engine, checkfirst=True)
+
+ revocation_table_new.insert().from_select(['domain_id',
+ 'project_id',
+ 'user_id',
+ 'role_id',
+ 'trust_id',
+ 'consumer_id',
+ 'access_token_id',
+ 'issued_before',
+ 'expires_at',
+ 'revoked_at',
+ 'audit_id',
+ 'audit_chain_id'],
+ revocation_table.select())
+
+ revocation_table.drop()
+ revocation_table_new.rename('revocation_event')
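`insert().from_select()` compiles to a single server-side `INSERT ... SELECT`, so step (2) never pulls rows through Python, and omitting the new `id` column lets auto-increment fill it. A generic sketch of the idiom (helper name and column handling are illustrative):

```python
import sqlalchemy as sql


def copy_rows_server_side(migrate_engine, src_name, dst_name, columns):
    meta = sql.MetaData(bind=migrate_engine)
    src = sql.Table(src_name, meta, autoload=True)
    dst = sql.Table(dst_name, meta, autoload=True)
    # INSERT INTO dst (columns...) SELECT columns... FROM src, executed
    # entirely in the database; columns left out of the list (e.g. a new
    # autoincrement id) are filled by the backend.
    sel = sql.select([src.c[name] for name in columns])
    dst.insert().from_select(columns, sel).execute()
```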
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
new file mode 100644
index 00000000..0156de21
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+
+_ROLE_TABLE_NAME = 'role'
+_ROLE_NAME_COLUMN_NAME = 'name'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
+
+ # NOTE(morganfainberg): the `role_name` unique constraint is not
+ # guaranteed to be named 'ixu_role_name', so we need to search for the
+ # correct constraint that only affects role_table.c.name and drop
+ # that constraint.
+ #
+ # This is an idempotent change that reflects the fix to migration
+ # 88 if the role_name unique constraint was not named consistently and
+ # someone manually fixed the migrations / db without dropping the
+ # old constraint.
+ to_drop = None
+ if migrate_engine.name == 'mysql':
+ for c in role_table.indexes:
+ if (c.unique and len(c.columns) == 1 and
+ _ROLE_NAME_COLUMN_NAME in c.columns):
+ to_drop = c
+ break
+ else:
+ for c in role_table.constraints:
+ if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
+ to_drop = c
+ break
+
+ if to_drop is not None:
+ migrate.UniqueConstraint(role_table.c.name,
+ name=to_drop.name).drop()
diff --git a/keystone-moon/keystone/common/sql/migration_helpers.py b/keystone-moon/keystone/common/sql/migration_helpers.py
index aaa59f70..40c1fbb5 100644
--- a/keystone-moon/keystone/common/sql/migration_helpers.py
+++ b/keystone-moon/keystone/common/sql/migration_helpers.py
@@ -21,37 +21,25 @@ import migrate
from migrate import exceptions
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
-from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import sqlalchemy
from keystone.common import sql
-from keystone.common.sql import migrate_repo
from keystone import contrib
from keystone import exception
from keystone.i18n import _
CONF = cfg.CONF
-DEFAULT_EXTENSIONS = ['endpoint_filter',
- 'endpoint_policy',
- 'federation',
- 'oauth1',
- 'revoke',
- ]
-
-
-def get_default_domain():
- # Return the reference used for the default domain structure during
- # sql migrations.
- return {
- 'id': CONF.identity.default_domain_id,
- 'name': 'Default',
- 'enabled': True,
- 'extra': jsonutils.dumps({'description': 'Owns users and tenants '
- '(i.e. projects) available '
- 'on Identity API v2.'})}
+DEFAULT_EXTENSIONS = []
+
+MIGRATED_EXTENSIONS = ['endpoint_policy',
+ 'federation',
+ 'oauth1',
+ 'revoke',
+ 'endpoint_filter'
+ ]
# Different RDBMSs use different schemes for naming the Foreign Key
@@ -117,9 +105,8 @@ def rename_tables_with_constraints(renames, constraints, engine):
`renames` is a dict, mapping {'to_table_name': from_table, ...}
"""
-
if engine.name != 'sqlite':
- # Sqlite doesn't support constraints, so nothing to remove.
+ # SQLite doesn't support constraints, so nothing to remove.
remove_constraints(constraints)
for to_table_name in renames:
@@ -141,11 +128,34 @@ def find_migrate_repo(package=None, repo_name='migrate_repo'):
def _sync_common_repo(version):
abs_path = find_migrate_repo()
- init_version = migrate_repo.DB_INIT_VERSION
- engine = sql.get_engine()
- _assert_not_schema_downgrade(version=version)
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
+ init_version = get_init_version()
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
+ _assert_not_schema_downgrade(version=version)
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version, sanity_check=False)
+
+
+def get_init_version(abs_path=None):
+    """Get the initial version of a migrate repository.
+
+    :param abs_path: Absolute path to migrate repository.
+    :return: initial version number, or None if the repository starts
+             at version 0 (i.e. no offset is needed).
+ """
+ if abs_path is None:
+ abs_path = find_migrate_repo()
+
+ repo = migrate.versioning.repository.Repository(abs_path)
+
+ # Sadly, Repository has a `latest` but not an `oldest`.
+ # The value is a VerNum object which needs to be converted into an int.
+ oldest = int(min(repo.versions.versions))
+
+ if oldest < 1:
+ return None
+
+ # The initial version is one less
+ return oldest - 1
def _assert_not_schema_downgrade(extension=None, version=None):
@@ -153,40 +163,46 @@ def _assert_not_schema_downgrade(extension=None, version=None):
try:
current_ver = int(six.text_type(get_db_version(extension)))
if int(version) < current_ver:
- raise migration.exception.DbMigrationError()
- except exceptions.DatabaseNotControlledError:
+ raise migration.exception.DbMigrationError(
+ _("Unable to downgrade schema"))
+ except exceptions.DatabaseNotControlledError: # nosec
# NOTE(morganfainberg): The database is not controlled, this action
# cannot be a downgrade.
pass
def _sync_extension_repo(extension, version):
- init_version = 0
- engine = sql.get_engine()
+ if extension in MIGRATED_EXTENSIONS:
+ raise exception.MigrationMovedFailure(extension=extension)
+
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
- try:
- package_name = '.'.join((contrib.__name__, extension))
- package = importutils.import_module(package_name)
- except ImportError:
- raise ImportError(_("%s extension does not exist.")
- % package_name)
- try:
- abs_path = find_migrate_repo(package)
try:
- migration.db_version_control(sql.get_engine(), abs_path)
- # Register the repo with the version control API
- # If it already knows about the repo, it will throw
- # an exception that we can safely ignore
- except exceptions.DatabaseAlreadyControlledError:
- pass
- except exception.MigrationNotProvided as e:
- print(e)
- sys.exit(1)
+ package_name = '.'.join((contrib.__name__, extension))
+ package = importutils.import_module(package_name)
+ except ImportError:
+ raise ImportError(_("%s extension does not exist.")
+ % package_name)
+ try:
+ abs_path = find_migrate_repo(package)
+ try:
+ migration.db_version_control(engine, abs_path)
+ # Register the repo with the version control API
+ # If it already knows about the repo, it will throw
+ # an exception that we can safely ignore
+ except exceptions.DatabaseAlreadyControlledError: # nosec
+ pass
+ except exception.MigrationNotProvided as e:
+ print(e)
+ sys.exit(1)
+
+ _assert_not_schema_downgrade(extension=extension, version=version)
- _assert_not_schema_downgrade(extension=extension, version=version)
+ init_version = get_init_version(abs_path=abs_path)
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version, sanity_check=False)
def sync_database_to_version(extension=None, version=None):
@@ -203,8 +219,10 @@ def sync_database_to_version(extension=None, version=None):
def get_db_version(extension=None):
if not extension:
- return migration.db_version(sql.get_engine(), find_migrate_repo(),
- migrate_repo.DB_INIT_VERSION)
+ with sql.session_for_write() as session:
+ return migration.db_version(session.get_bind(),
+ find_migrate_repo(),
+ get_init_version())
try:
package_name = '.'.join((contrib.__name__, extension))
@@ -213,8 +231,9 @@ def get_db_version(extension=None):
raise ImportError(_("%s extension does not exist.")
% package_name)
- return migration.db_version(
- sql.get_engine(), find_migrate_repo(package), 0)
+ with sql.session_for_write() as session:
+ return migration.db_version(
+ session.get_bind(), find_migrate_repo(package), 0)
def print_db_version(extension=None):
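With the extension repos folded into the common repo, the hard-coded `migrate_repo.DB_INIT_VERSION` is gone and the starting point is derived from the oldest script on disk. A usage sketch; the concrete number assumes a repo whose oldest script is 067_kilo.py:

```python
from keystone.common.sql import migration_helpers

# If the lowest-numbered script in the repo is 067_kilo.py, the repo must
# be placed under version control at 66 so that script is applied first.
init_version = migration_helpers.get_init_version()
print(init_version)  # -> 66, under the assumption above
```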
diff --git a/keystone-moon/keystone/common/tokenless_auth.py b/keystone-moon/keystone/common/tokenless_auth.py
index 7388b83c..fd9c1592 100644
--- a/keystone-moon/keystone/common/tokenless_auth.py
+++ b/keystone-moon/keystone/common/tokenless_auth.py
@@ -20,9 +20,9 @@ from oslo_log import log
from keystone.auth import controllers
from keystone.common import dependency
-from keystone.contrib.federation import constants as federation_constants
-from keystone.contrib.federation import utils
from keystone import exception
+from keystone.federation import constants as federation_constants
+from keystone.federation import utils
from keystone.i18n import _
@@ -45,7 +45,6 @@ class TokenlessAuthHelper(object):
SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O
:type env: dict
"""
-
self.env = env
def _build_scope_info(self):
@@ -86,13 +85,13 @@ class TokenlessAuthHelper(object):
def get_scope(self):
auth = {}
- # NOTE(chioleong): auth methods here are insignificant because
+ # NOTE(chioleong): Auth methods here are insignificant because
# we only care about using auth.controllers.AuthInfo
# to validate the scope information. Therefore,
# we don't provide any identity.
auth['scope'] = self._build_scope_info()
- # NOTE(chioleong): we'll let AuthInfo validate the scope for us
+ # NOTE(chioleong): We'll let AuthInfo validate the scope for us
auth_info = controllers.AuthInfo.create({}, auth, scope_only=True)
return auth_info.get_scope()
@@ -189,5 +188,5 @@ class TokenlessAuthHelper(object):
raise exception.TokenlessAuthConfigError(
issuer_attribute=CONF.tokenless_auth.issuer_attribute)
- hashed_idp = hashlib.sha256(idp)
+ hashed_idp = hashlib.sha256(idp.encode('utf-8'))
return hashed_idp.hexdigest()
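The last hunk is a Python 3 correctness fix: `hashlib.sha256()` accepts only bytes, so the issuer value must be encoded before hashing. A minimal sketch of the fixed behaviour (the issuer string is made up):

```python
import hashlib

idp = 'https://idp.example.org/issuer'  # hypothetical issuer value

# Python 2 accepted hashlib.sha256(idp) for str; Python 3 raises
# "TypeError: Unicode-objects must be encoded before hashing".
hashed_idp = hashlib.sha256(idp.encode('utf-8'))
print(hashed_idp.hexdigest())
```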
diff --git a/keystone-moon/keystone/common/utils.py b/keystone-moon/keystone/common/utils.py
index 48336af7..5438ad43 100644
--- a/keystone-moon/keystone/common/utils.py
+++ b/keystone-moon/keystone/common/utils.py
@@ -22,10 +22,12 @@ import grp
import hashlib
import os
import pwd
+import uuid
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
+from oslo_utils import reflection
from oslo_utils import strutils
from oslo_utils import timeutils
import passlib.hash
@@ -42,6 +44,26 @@ CONF = cfg.CONF
LOG = log.getLogger(__name__)
+# NOTE(stevemar): This UUID must stay the same, forever, across
+# all of keystone to preserve its value as a URN namespace, which is
+# used for ID transformation.
+RESOURCE_ID_NAMESPACE = uuid.UUID('4332ecab-770b-4288-a680-b9aca3b1b153')
+
+
+def resource_uuid(value):
+ """Converts input to valid UUID hex digits."""
+ try:
+ uuid.UUID(value)
+ return value
+ except ValueError:
+ if len(value) <= 64:
+ if six.PY2 and isinstance(value, six.text_type):
+ value = value.encode('utf-8')
+ return uuid.uuid5(RESOURCE_ID_NAMESPACE, value).hex
+ raise ValueError(_('Length of transformable resource id > 64, '
+ 'which is the maximum allowed length'))
+
+
def flatten_dict(d, parent_key=''):
"""Flatten a nested dictionary
@@ -81,6 +103,7 @@ def read_cached_file(filename, cache_info, reload_func=None):
class SmarterEncoder(jsonutils.json.JSONEncoder):
"""Help for JSON encoding dict-like objects."""
+
def default(self, obj):
if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
return dict(obj.iteritems())
@@ -89,6 +112,7 @@ class SmarterEncoder(jsonutils.json.JSONEncoder):
class PKIEncoder(SmarterEncoder):
"""Special encoder to make token JSON a bit shorter."""
+
item_separator = ','
key_separator = ':'
@@ -113,6 +137,8 @@ def verify_length_and_trunc_password(password):
def hash_access_key(access):
hash_ = hashlib.sha256()
+ if not isinstance(access, six.binary_type):
+ access = access.encode('utf-8')
hash_.update(access)
return hash_.hexdigest()
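With the guard, hash_access_key() accepts text or bytes and yields the same digest for both; a usage check mirroring the hunk above (the access key value is hypothetical):

    import hashlib

    import six

    def hash_access_key(access):
        hash_ = hashlib.sha256()
        if not isinstance(access, six.binary_type):
            access = access.encode('utf-8')
        hash_.update(access)
        return hash_.hexdigest()

    assert hash_access_key(u'ec2-access-key') == hash_access_key(b'ec2-access-key')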
@@ -206,7 +232,7 @@ def auth_str_equal(provided, known):
:params provided: the first string
:params known: the second string
- :return: True if the strings are equal.
+ :returns: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
@@ -271,10 +297,9 @@ def get_unix_user(user=None):
:param object user: string, int or None specifying the user to
lookup.
- :return: tuple of (uid, name)
+ :returns: tuple of (uid, name)
"""
-
if isinstance(user, six.string_types):
try:
user_info = pwd.getpwnam(user)
@@ -295,8 +320,10 @@ def get_unix_user(user=None):
elif user is None:
user_info = pwd.getpwuid(os.geteuid())
else:
+ user_cls_name = reflection.get_class_name(user,
+ fully_qualified=False)
raise TypeError('user must be string, int or None; not %s (%r)' %
- (user.__class__.__name__, user))
+ (user_cls_name, user))
return user_info.pw_uid, user_info.pw_name
@@ -328,10 +355,9 @@ def get_unix_group(group=None):
:param object group: string, int or None specifying the group to
lookup.
- :return: tuple of (gid, name)
+ :returns: tuple of (gid, name)
"""
-
if isinstance(group, six.string_types):
try:
group_info = grp.getgrnam(group)
@@ -354,8 +380,10 @@ def get_unix_group(group=None):
elif group is None:
group_info = grp.getgrgid(os.getegid())
else:
+ group_cls_name = reflection.get_class_name(group,
+ fully_qualified=False)
raise TypeError('group must be string, int or None; not %s (%r)' %
- (group.__class__.__name__, group))
+ (group_cls_name, group))
return group_info.gr_gid, group_info.gr_name
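oslo.utils' reflection.get_class_name() is what now builds the type name for these error messages; it works on instances as well as classes:

    from oslo_utils import reflection

    print(reflection.get_class_name(3.14, fully_qualified=False))        # float
    print(reflection.get_class_name(ValueError, fully_qualified=False))  # ValueError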
@@ -380,7 +408,6 @@ def set_permissions(path, mode=None, user=None, group=None, log=None):
if None no logging is performed.
"""
-
if user is None:
user_uid, user_name = None, None
else:
@@ -447,7 +474,6 @@ def make_dirs(path, mode=None, user=None, group=None, log=None):
if None no logging is performed.
"""
-
if log:
if mode is None:
mode_string = str(mode)
@@ -483,7 +509,6 @@ _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
-
# Python provides a similar instance method for datetime.datetime objects
# called isoformat(). The format of the strings generated by isoformat()
# have a couple of problems:
@@ -515,7 +540,7 @@ def get_token_ref(context):
"""Retrieves KeystoneToken object from the auth context and returns it.
:param dict context: The request context.
- :raises: exception.Unauthorized if auth context cannot be found.
+ :raises keystone.exception.Unauthorized: If auth context cannot be found.
:returns: The KeystoneToken object.
"""
try:
@@ -526,3 +551,48 @@ def get_token_ref(context):
except KeyError:
LOG.warning(_LW("Couldn't find the auth context."))
raise exception.Unauthorized()
+
+
+URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;="
+
+
+def is_not_url_safe(name):
+ """Check if a string contains any url reserved characters."""
+ return len(list_url_unsafe_chars(name)) > 0
+
+
+def list_url_unsafe_chars(name):
+ """Return a list of the reserved characters."""
+ reserved_chars = ''
+ for i in name:
+ if i in URL_RESERVED_CHARS:
+ reserved_chars += i
+ return reserved_chars
+
+
+def lower_case_hostname(url):
+ """Change the URL's hostname to lowercase"""
+ # NOTE(gyee): according to
+ # https://www.w3.org/TR/WD-html40-970708/htmlweb.html, the netloc portion
+ # of the URL is case-insensitive
+ parsed = moves.urllib.parse.urlparse(url)
+ # Note: _replace method for named tuples is public and defined in docs
+ replaced = parsed._replace(netloc=parsed.netloc.lower())
+ return moves.urllib.parse.urlunparse(replaced)
+
+
+def remove_standard_port(url):
+ # remove the default ports specified in RFC 2616 and RFC 2818
+ o = moves.urllib.parse.urlparse(url)
+ separator = ':'
+ (host, separator, port) = o.netloc.partition(':')
+ if o.scheme.lower() == 'http' and port == '80':
+ # NOTE(gyee): _replace() is not a private method. It has an
+ # underscore prefix to prevent conflicts with field names.
+ # See https://docs.python.org/2/library/collections.html#
+ # collections.namedtuple
+ o = o._replace(netloc=host)
+ if o.scheme.lower() == 'https' and port == '443':
+ o = o._replace(netloc=host)
+
+ return moves.urllib.parse.urlunparse(o)
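Chained together, the new helpers produce a canonical base URL for comparison; a standalone sketch of their combined effect, using six.moves as the module does (the URL is illustrative):

    from six import moves

    url = 'HTTPS://KeyStone.Example.ORG:443/v3'
    parsed = moves.urllib.parse.urlparse(url)
    # lower_case_hostname(): the netloc is case-insensitive
    parsed = parsed._replace(netloc=parsed.netloc.lower())
    # remove_standard_port(): drop the https default port per RFC 2818
    host, _, port = parsed.netloc.partition(':')
    if parsed.scheme.lower() == 'https' and port == '443':
        parsed = parsed._replace(netloc=host)
    print(moves.urllib.parse.urlunparse(parsed))
    # https://keystone.example.org/v3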
diff --git a/keystone-moon/keystone/common/validation/__init__.py b/keystone-moon/keystone/common/validation/__init__.py
index 1e5cc6a5..9d812f40 100644
--- a/keystone-moon/keystone/common/validation/__init__.py
+++ b/keystone-moon/keystone/common/validation/__init__.py
@@ -28,8 +28,7 @@ def validated(request_body_schema, resource_to_validate):
:param request_body_schema: a schema to validate the resource reference
:param resource_to_validate: the reference to validate
:raises keystone.exception.ValidationError: if `resource_to_validate` is
- not passed by or passed with an empty value (see wrapper method
- below).
+ None (see wrapper method below).
:raises TypeError: at decoration time when the expected resource to
validate isn't found in the decorated method's
signature
@@ -49,15 +48,15 @@ def validated(request_body_schema, resource_to_validate):
@functools.wraps(func)
def wrapper(*args, **kwargs):
- if kwargs.get(resource_to_validate):
+ if (resource_to_validate in kwargs and
+ kwargs[resource_to_validate] is not None):
schema_validator.validate(kwargs[resource_to_validate])
else:
try:
resource = args[arg_index]
- # If resource to be validated is empty, no need to do
- # validation since the message given by jsonschema doesn't
- # help in this case.
- if resource:
+ # If the resource to be validated is not None but empty,
+ # jsonschema can still validate it.
+ if resource is not None:
schema_validator.validate(resource)
else:
raise exception.ValidationError(
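The new condition distinguishes an absent resource from an empty one: an empty dict is no longer short-circuited into a ValidationError but handed to jsonschema, whose required-property errors are more useful. A sketch of the check with hypothetical kwargs:

    kwargs = {'user': {}}
    resource_to_validate = 'user'
    # old check: kwargs.get('user') is falsy for {}, so validation was skipped
    # new check: {} is not None, so jsonschema gets to report what is missing
    should_validate = (resource_to_validate in kwargs and
                       kwargs[resource_to_validate] is not None)
    assert should_validate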
diff --git a/keystone-moon/keystone/common/validation/parameter_types.py b/keystone-moon/keystone/common/validation/parameter_types.py
index 1bc81383..c0753827 100644
--- a/keystone-moon/keystone/common/validation/parameter_types.py
+++ b/keystone-moon/keystone/common/validation/parameter_types.py
@@ -43,6 +43,13 @@ id_string = {
'pattern': '^[a-zA-Z0-9-]+$'
}
+mapping_id_string = {
+ 'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 64,
+ 'pattern': '^[a-zA-Z0-9-_]+$'
+}
+
description = {
'type': 'string'
}
@@ -54,7 +61,7 @@ url = {
# NOTE(edmondsw): we could do more to validate per various RFCs, but
# decision was made to err on the side of leniency. The following is based
# on rfc1738 section 2.1
- 'pattern': '[a-zA-Z0-9+.-]+:.+'
+ 'pattern': '^[a-zA-Z0-9+.-]+:.+'
}
email = {
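The added '^' matters because jsonschema applies 'pattern' with search semantics, so an unanchored expression can match in the middle of an otherwise invalid value:

    import re

    old_pattern = '[a-zA-Z0-9+.-]+:.+'
    new_pattern = '^[a-zA-Z0-9+.-]+:.+'
    value = ' http://example.com'  # leading junk before the scheme
    assert re.search(old_pattern, value)      # slipped through before
    assert not re.search(new_pattern, value)  # rejected now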
diff --git a/keystone-moon/keystone/common/validation/validators.py b/keystone-moon/keystone/common/validation/validators.py
index a4574176..c6d52e9a 100644
--- a/keystone-moon/keystone/common/validation/validators.py
+++ b/keystone-moon/keystone/common/validation/validators.py
@@ -20,7 +20,6 @@ from keystone.i18n import _
class SchemaValidator(object):
"""Resource reference validator class."""
- validator = None
validator_org = jsonschema.Draft4Validator
def __init__(self, schema):
@@ -43,7 +42,7 @@ class SchemaValidator(object):
except jsonschema.ValidationError as ex:
# NOTE: For whole OpenStack message consistency, this error
# message has been written in a format consistent with WSME.
- if len(ex.path) > 0:
+ if ex.path:
# NOTE(lbragstad): Here we could think about using iter_errors
# as a method of providing invalid parameters back to the
# user.
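ex.path is a deque of path segments leading to the failing element; it is empty, and therefore falsy, for a root-level error, which is what the simplified check relies on:

    import jsonschema

    validator = jsonschema.Draft4Validator({'type': 'object'})
    try:
        validator.validate([])  # wrong type at the document root
    except jsonschema.ValidationError as ex:
        assert len(ex.path) == 0 and not ex.path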
diff --git a/keystone-moon/keystone/common/wsgi.py b/keystone-moon/keystone/common/wsgi.py
index 8b99c87d..04528a0c 100644
--- a/keystone-moon/keystone/common/wsgi.py
+++ b/keystone-moon/keystone/common/wsgi.py
@@ -20,6 +20,7 @@
import copy
import itertools
+import re
import wsgiref.util
from oslo_config import cfg
@@ -71,9 +72,6 @@ def validate_token_bind(context, token_ref):
# permissive and strict modes don't require there to be a bind
permissive = bind_mode in ('permissive', 'strict')
- # get the named mode if bind_mode is not one of the known
- name = None if permissive or bind_mode == 'required' else bind_mode
-
if not bind:
if permissive:
# no bind provided and none required
@@ -82,6 +80,9 @@ def validate_token_bind(context, token_ref):
LOG.info(_LI("No bind information present in token"))
raise exception.Unauthorized()
+ # get the named mode if bind_mode is not one of the known
+ name = None if permissive or bind_mode == 'required' else bind_mode
+
if name and name not in bind:
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
@@ -112,10 +113,11 @@ def validate_token_bind(context, token_ref):
def best_match_language(req):
- """Determines the best available locale from the Accept-Language
- HTTP header passed in the request.
- """
+ """Determines the best available locale.
+ This returns the best available locale based on the Accept-Language HTTP
+ header passed in the request.
+ """
if not req.accept_language:
return None
return req.accept_language.best_match(
@@ -208,8 +210,7 @@ class Application(BaseApplication):
context['headers'] = dict(req.headers.items())
context['path'] = req.environ['PATH_INFO']
- scheme = (None if not CONF.secure_proxy_ssl_header
- else req.environ.get(CONF.secure_proxy_ssl_header))
+ scheme = req.environ.get(CONF.secure_proxy_ssl_header)
if scheme:
# NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
# before the proxy removed it ('https' usually). So if
@@ -305,7 +306,6 @@ class Application(BaseApplication):
does not have the admin role
"""
-
if not context['is_admin']:
user_token_ref = utils.get_token_ref(context)
@@ -329,9 +329,7 @@ class Application(BaseApplication):
self.policy_api.enforce(creds, 'admin_required', {})
def _attribute_is_empty(self, ref, attribute):
- """Returns true if the attribute in the given ref (which is a
- dict) is empty or None.
- """
+ """Determine if the attribute in ref is empty or None."""
return ref.get(attribute) is None or ref.get(attribute) == ''
def _require_attribute(self, ref, attribute):
@@ -378,13 +376,19 @@ class Application(BaseApplication):
itertools.chain(CONF.items(), CONF.eventlet_server.items()))
url = url % substitutions
+ elif 'environment' in context:
+ url = wsgiref.util.application_uri(context['environment'])
+ # remove version from the URL as it may be part of SCRIPT_NAME but
+ # it should not be part of the base URL
+ url = re.sub(r'/v(3|(2\.0))/*$', '', url)
+
+ # now remove the standard port
+ url = utils.remove_standard_port(url)
else:
- # NOTE(jamielennox): if url is not set via the config file we
- # should set it relative to the url that the user used to get here
- # so as not to mess with version discovery. This is not perfect.
- # host_url omits the path prefix, but there isn't another good
- # solution that will work for all urls.
- url = context['host_url']
+ # if we don't have enough information to come up with a base URL,
+ # then fall back to localhost. This should never happen in a
+ # production environment.
+ url = 'http://localhost:%d' % CONF.eventlet_server.public_port
return url.rstrip('/')
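Putting the new branch together: wsgiref derives the application URI from the WSGI environment, the regex strips a trailing version segment, and remove_standard_port() (added in the utils.py hunk above) normalizes the port. A sketch with an illustrative environment:

    import re
    import wsgiref.util

    environ = {
        'wsgi.url_scheme': 'https',
        'HTTP_HOST': 'keystone.example.org:443',
        'SCRIPT_NAME': '/identity/v3',
    }
    url = wsgiref.util.application_uri(environ)
    # https://keystone.example.org:443/identity/v3
    url = re.sub(r'/v(3|(2\.0))/*$', '', url)
    # https://keystone.example.org:443/identity
    # utils.remove_standard_port(url) then yields
    # https://keystone.example.org/identity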
@@ -400,32 +404,10 @@ class Middleware(Application):
"""
@classmethod
- def factory(cls, global_config, **local_config):
- """Used for paste app factories in paste.deploy config files.
-
- Any local configuration (that is, values under the [filter:APPNAME]
- section of the paste config) will be passed into the `__init__` method
- as kwargs.
-
- A hypothetical configuration would look like:
-
- [filter:analytics]
- redis_host = 127.0.0.1
- paste.filter_factory = keystone.analytics:Analytics.factory
-
- which would result in a call to the `Analytics` class as
-
- import keystone.analytics
- keystone.analytics.Analytics(app, redis_host='127.0.0.1')
-
- You could of course re-implement the `factory` method in subclasses,
- but using the kwarg passing it shouldn't be necessary.
-
- """
+ def factory(cls, global_config):
+ """Used for paste app factories in paste.deploy config files."""
def _factory(app):
- conf = global_config.copy()
- conf.update(local_config)
- return cls(app, **local_config)
+ return cls(app)
return _factory
def __init__(self, application):
@@ -601,6 +583,7 @@ class ExtensionRouter(Router):
Expects to be subclassed.
"""
+
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
@@ -737,8 +720,8 @@ class V3ExtensionRouter(ExtensionRouter, RoutersBase):
response_data = jsonutils.loads(response.body)
self._update_version_response(response_data)
- response.body = jsonutils.dumps(response_data,
- cls=utils.SmarterEncoder)
+ response.body = jsonutils.dump_as_bytes(response_data,
+ cls=utils.SmarterEncoder)
return response
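oslo.serialization's dump_as_bytes() returns bytes directly, which is what webob's Response.body expects on python 3 (jsonutils.dumps() returns text there):

    from oslo_serialization import jsonutils

    body = jsonutils.dump_as_bytes({'version': {'id': 'v3.6'}})
    assert isinstance(body, bytes)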
@@ -751,7 +734,7 @@ def render_response(body=None, status=None, headers=None, method=None):
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
- body = ''
+ body = b''
status = status or (204, 'No Content')
else:
content_types = [v for h, v in headers if h == 'Content-Type']
@@ -761,11 +744,41 @@ def render_response(body=None, status=None, headers=None, method=None):
content_type = None
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
- body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
+ body = jsonutils.dump_as_bytes(body, cls=utils.SmarterEncoder)
if content_type is None:
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
+ # NOTE(davechen): `mod_wsgi` follows the standards from pep-3333 and
+ # requires each response header value to be a binary string (str) on
+ # python 2 and a unicode-based string (str) on python 3, or else
+ # keystone will not work under apache with `mod_wsgi`.
+ # keystone needs to check the data type of each header and convert the
+ # type if needed.
+ # see bug:
+ # https://bugs.launchpad.net/keystone/+bug/1528981
+ # see pep-3333:
+ # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
+ # see source from mod_wsgi:
+ # https://github.com/GrahamDumpleton/mod_wsgi (methods:
+ # wsgi_convert_headers_to_bytes(...), wsgi_convert_string_to_bytes(...)
+ # and wsgi_validate_header_value(...)).
+ def _convert_to_str(headers):
+ str_headers = []
+ for header in headers:
+ str_header = []
+ for value in header:
+ if not isinstance(value, str):
+ str_header.append(str(value))
+ else:
+ str_header.append(value)
+ # convert the list to an immutable tuple to build the headers.
+ # the header's key/value pairs are guaranteed to be of str type.
+ str_headers.append(tuple(str_header))
+ return str_headers
+
+ headers = _convert_to_str(headers)
+
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
@@ -789,7 +802,6 @@ def render_response(body=None, status=None, headers=None, method=None):
def render_exception(error, context=None, request=None, user_locale=None):
"""Forms a WSGI response based on the current error."""
-
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if message is error_message:
@@ -806,18 +818,15 @@ def render_exception(error, context=None, request=None, user_locale=None):
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
- url = CONF.public_endpoint
- if not url:
- if request:
- context = {'host_url': request.host_url}
- if context:
- url = Application.base_url(context, 'public')
- else:
- url = 'http://localhost:%d' % CONF.eventlet_server.public_port
- else:
- substitutions = dict(
- itertools.chain(CONF.items(), CONF.eventlet_server.items()))
- url = url % substitutions
+ # NOTE(gyee): we only care about the request environment in the
+ # context. Also, it's OK to pass the environment as it is read-only in
+ # Application.base_url()
+ local_context = {}
+ if request:
+ local_context = {'environment': request.environ}
+ elif context and 'environment' in context:
+ local_context = {'environment': context['environment']}
+ url = Application.base_url(local_context, 'public')
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
return render_response(status=(error.code, error.title),